/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/continuationGCSupport.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/slidingForwarding.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

ShenandoahFullGC::~ShenandoahFullGC() {
  delete _preserved_marks;
}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always successful
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    heap->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    heap->notify_gc_no_progress();
  }

  // Regardless of whether progress was made, record that we completed a "successful" full GC.
  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      ShenandoahConcurrentGC::cancel();
      heap->set_concurrent_mark_in_progress(false);
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of code performs region moves, where region status is undefined
    // until all phases run together.
    ShenandoahHeapLocker lock(heap->lock());

    SlidingForwarding::begin();

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);

    phase5_epilog();

    SlidingForwarding::end();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->parallel_heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

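  // Full GC is a last-ditch collection, so there is no point in keeping softly
  // reachable objects alive. The "clear all" soft reference policy set below
  // makes the STW mark treat every discovered SoftReference as purgeable.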
  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(nullptr),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish() {
    assert(_to_region != nullptr, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != nullptr, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != nullptr, "must not be null");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into the current region: record the new location, if the object moves:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      SlidingForwarding::forward_to(p, cast_to_oop(_compact_point));
    }
    _compact_point += obj_size;
  }
};
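
// Note on the closure above: an object is forwarded only if it actually moves,
// i.e. _compact_point differs from its current address. Since SlidingForwarding
// records the forwardee in the object's mark word, any non-trivial mark is saved
// through PreservedMarks before being overwritten, and restored in phase5_epilog().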

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // Can move the region, and this is not a humongous region. Humongous
    // regions are special cased here, because their moves are handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) override;
private:
  template<typename ClosureType>
  void prepare_for_compaction(ClosureType& cl,
                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                              ShenandoahHeapRegionSetIterator& it,
                              ShenandoahHeapRegion* from_region);
};

void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
  ShenandoahParallelWorkerSession worker_session(worker_id);
  ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
  ShenandoahHeapRegionSetIterator it(slice);
  ShenandoahHeapRegion* from_region = it.next();
  // No work?
  if (from_region == nullptr) {
    return;
  }

  // Sliding compaction. Walk all regions in the slice, and compact them.
  // Remember empty regions and reuse them as needed.
  ResourceMark rm;

  GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

  ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
  prepare_for_compaction(cl, empty_regions, it, from_region);
}

template<typename ClosureType>
void ShenandoahPrepareForCompactionTask::prepare_for_compaction(ClosureType& cl,
                                                                GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                                ShenandoahHeapRegionSetIterator& it,
                                                                ShenandoahHeapRegion* from_region) {
  while (from_region != nullptr) {
    assert(is_candidate_region(from_region), "Sanity");
    cl.set_from_region(from_region);
    if (from_region->has_live()) {
      _heap->marked_object_iterate(from_region, &cl);
    }

    // Compacted the region to somewhere else? From-region is empty then.
    if (!cl.is_compact_same_region()) {
      empty_regions.append(from_region);
    }
    from_region = it.next();
  }
  cl.finish();

  // Mark all remaining regions as empty
  for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
    ShenandoahHeapRegion* r = empty_regions.at(pos);
    r->set_new_top(r->bottom());
  }
}

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous starts there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.
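  //
  // Illustrative walk-through (region indices are made up): suppose regions
  // 96..99 are empty, so the window is [96; 100). A movable two-region
  // humongous object found lower in the heap is forwarded to regions 98..99,
  // and the window shrinks to [96; 98). If a pinned humongous region is then
  // found at index 95, the window restarts there and re-grows over whatever
  // empty regions lie below it.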

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        SlidingForwarding::forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates empty regions are free
    // to the rest of Full GC code.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               "Region " SIZE_FORMAT " should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             "Region " SIZE_FORMAT " should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no workers need live
  // data, we need to only take what each worker needs.
  //
  // Second, since we slide everything to the left in each slice, the most busy regions
  // would be the ones on the left. Which means we want to have all workers have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //

  // Compute how much live data is there. This would approximate the size of dense prefix
  // we target to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up being, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of the dense prefix.
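  //
  // For instance (illustrative numbers): with 3 workers, 100 candidate regions,
  // and total_live equal to 30 regions' worth of words, live_per_worker is
  // 10 regions' worth, so each worker gets a 10-region prefix subset, and the
  // remaining ~70 regions form the tail that is handed out round-robin below.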
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != nullptr) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

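// Phase 2 computes where everything will go: trash the immediately collectible
// regions, normalize the remaining region states, compute forwarding addresses
// for regular objects in parallel over the worker slices distributed above,
// then place humongous objects serially.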
void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We are about to figure out which regions can be compacted; make sure
  // the pinning status has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (SlidingForwarding::is_forwarded(obj)) {
        oop forw = SlidingForwarding::forwardee(obj);
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_method(Method* m) {}
  void do_nmethod(nmethod* nm) {}
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint            const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (SlidingForwarding::is_forwarded(p)) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(SlidingForwarding::forwardee(p));
      assert(compact_from != compact_to, "Forwarded object should move");
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);

      ContinuationGCSupport::relativize_stack_chunk(new_obj);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != nullptr) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};
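
// Note on the compaction closure above: the copied object still carries the
// forwarding-encoded mark word, so init_mark() resets it to the default value.
// Objects whose original marks were non-trivial had them saved through
// PreservedMarks during phase 2; phase5_epilog() restores those after all moves.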

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
      if (ZapUnusedHeapArea) {
        SpaceMangler::mangle_region(MemRegion(r->top(), r->end()));
      }
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (SlidingForwarding::is_not_forwarded(old_obj)) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(SlidingForwarding::forwardee(old_obj));
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
      ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}
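
// Note on the trailing-region math above: masking with region_size_words_mask()
// computes words_size % region_size_words(). As an illustrative example, an
// object spanning 2.5 regions' worth of words needs 3 regions; the first two
// are filled to end(), and the last gets its top set to bottom() plus half a
// region's worth of words.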

// This is slightly different from ShenandoahHeap::reset_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over
// those is using a valid marking bitmap and a valid TAMS pointer. This class only
// resets marking bitmaps for un-pinned regions, and later we only reset TAMS for
// unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }
}

void ShenandoahFullGC::phase5_epilog() {
  GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
    heap->clear_cancelled_gc();
  }

  _preserved_marks->restore(heap->workers());
  _preserved_marks->reclaim();
}