/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/continuationGCSupport.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

ShenandoahFullGC::~ShenandoahFullGC() {
  delete _preserved_marks;
}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always success
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
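  // Roughly speaking, the "gross" timing bracket below covers the whole VM operation as seen
  // from the caller, including the time it takes to reach the safepoint; the work inside the
  // pause itself is accounted separately under ShenandoahPhaseTimings::full_gc in entry_full().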
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    heap->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    heap->notify_gc_no_progress();
  }

  // Regardless of whether progress was made, we record that we completed a "successful" full GC.
  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      ShenandoahConcurrentGC::cancel();
      heap->set_concurrent_mark_in_progress(false);
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");
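
    // Note: phase 1 below re-marks the heap from scratch with a STW mark, which is why the
    // partially-built concurrent marking state above can simply be discarded rather than finished.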

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of code performs region moves, where region status is undefined
    // until all phases run together.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);

    phase5_epilog();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->parallel_heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

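// In rough terms, this closure implements the "calculate new addresses" half of sliding
// compaction: for every live object in the worker's slice it picks the next free address in
// the current to-region and installs it as a forwarding pointer in the object's mark word.
// The original mark word is saved via PreservedMarks only when it carries information that
// would otherwise be lost when the moved copy gets init_mark()'ed in phase 4.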
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(nullptr),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish() {
    assert(_to_region != nullptr, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != nullptr, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != nullptr, "must not be null");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region: record the new location, unless the object stays in place:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      p->forward_to(cast_to_oop(_compact_point));
    }
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // Can move the region, and this is not a humongous region. Humongous
    // moves are special cased here, because their moves are handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) override;
private:
  template<typename ClosureType>
  void prepare_for_compaction(ClosureType& cl,
                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                              ShenandoahHeapRegionSetIterator& it,
                              ShenandoahHeapRegion* from_region);
};

void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
  ShenandoahParallelWorkerSession worker_session(worker_id);
  ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
  ShenandoahHeapRegionSetIterator it(slice);
  ShenandoahHeapRegion* from_region = it.next();
  // No work?
  if (from_region == nullptr) {
    return;
  }

  // Sliding compaction. Walk all regions in the slice, and compact them.
  // Remember empty regions and reuse them as needed.
  ResourceMark rm;

  GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

  ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
  prepare_for_compaction(cl, empty_regions, it, from_region);
}

template<typename ClosureType>
void ShenandoahPrepareForCompactionTask::prepare_for_compaction(ClosureType& cl,
                                                                GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                                ShenandoahHeapRegionSetIterator& it,
                                                                ShenandoahHeapRegion* from_region) {
  while (from_region != nullptr) {
    assert(is_candidate_region(from_region), "Sanity");
    cl.set_from_region(from_region);
    if (from_region->has_live()) {
      _heap->marked_object_iterate(from_region, &cl);
    }

    // Compacted the region to somewhere else? From-region is empty then.
    if (!cl.is_compact_same_region()) {
      empty_regions.append(from_region);
    }
    from_region = it.next();
  }
  cl.finish();

  // Mark all remaining regions as empty
  for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
    ShenandoahHeapRegion* r = empty_regions.at(pos);
    r->set_new_top(r->bottom());
  }
}

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such region is
  // detected, then sliding restarts towards that non-movable region.
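  //
  // For illustration only, take a hypothetical 8-region heap
  // (H = movable humongous start, h = its continuation, D = a region that still holds data,
  //  . = empty after regular compaction):
  //
  //   index:   0 1 2 3 4 5 6 7
  //   regions: H h . . D . . .
  //
  // Scanning right-to-left grows the window over regions 7..5, region 4 resets it, and the
  // two-region object at 0..1 then slides into regions 2..3, right next to region 4; had it
  // not fit into the window, it would have stayed in place.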

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates empty regions are free
    // to the rest of Full GC code.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               "Region " SIZE_FORMAT " should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             "Region " SIZE_FORMAT " should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no workers need live data,
  // we need to only take what the worker needs.
  //
  // Second, since we slide everything to the left in each slice, the most busy regions
  // would be the ones on the left. Which means we want to have all workers have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //

  // Compute how much live data is there. This would approximate the size of dense prefix
  // we target to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up being, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of dense prefix.
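  //
  // As a rough, made-up example: with 4 workers, 1000 candidate regions, and total_live
  // equivalent to 600 fully-live regions, live_per_worker covers 150 regions' worth of words,
  // so each worker is handed a 150-region prefix slice up front, and the remaining regions are
  // dealt out round-robin as the tail below.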
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != nullptr) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // About to figure out which regions can be compacted, make sure pinning status
  // had been updated in GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_method(Method* m) {}
  void do_nmethod(nmethod* nm) {}
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
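  // Note: besides heap objects, the root-adjustment pass below also fixes up the per-worker
  // PreservedMarks, whose saved (oop, mark) pairs still refer to pre-compaction addresses;
  // adjust_during_full_gc() is expected to forward those oops much like any other root.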
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
      assert(compact_from != compact_to, "Forwarded object should move");
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);

      ContinuationGCSupport::relativize_stack_chunk(new_obj);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != nullptr) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
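    // In short: pinned regions keep both their marking bitmap and their TAMS, so any dead
    // "holes" they contain can still be walked safely later.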
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular regions
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
      if (ZapUnusedHeapArea) {
        SpaceMangler::mangle_region(MemRegion(r->top(), r->end()));
      }
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
      ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those is using
// a valid marking bitmap and valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }
}

void ShenandoahFullGC::phase5_epilog() {
  GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions in proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
    heap->clear_cancelled_gc();
  }

  _preserved_marks->restore(heap->workers());
  _preserved_marks->reclaim();
}