/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/slidingForwarding.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always success
  return true;
}
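// Entry protocol for the Full GC pause: vmop_entry_full() submits a
// VM_ShenandoahFullGC operation, the VM thread then runs entry_full() at a
// safepoint, which sets up the worker scope and calls op_full(); op_full()
// snapshots heap metrics around the actual collection work in do_it().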
void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    ShenandoahHeap::heap()->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    ShenandoahHeap::heap()->notify_gc_no_progress();
  }
}
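// The collection proper. The prologue recovers from whatever GC state was in
// effect when the Full GC was requested (in-flight evacuation, update-refs,
// or concurrent mark), then the work runs as four STW phases: mark the heap,
// calculate target addresses, adjust pointers, and compact objects.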
void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      ShenandoahConcurrentGC::cancel();
      heap->set_concurrent_mark_in_progress(false);
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of prologue:
    _preserved_marks->init(heap->workers()->active_workers());
    heap->forwarding()->clear();

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases run together.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    _preserved_marks->restore(heap->workers());
    _preserved_marks->reclaim();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
  }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}
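// Plans the sliding compaction for regular objects: walks the marked objects
// of a from-region in address order and assigns each one the next compact
// point in the current to-region, spilling over into remembered empty regions
// when the to-region fills up. Marks that carry information are saved via
// PreservedMarks before the forwarding is installed.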
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks*    const _preserved_marks;
  SlidingForwarding* const _forwarding;
  ShenandoahHeap*    const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _forwarding(ShenandoahHeap::heap()->forwarding()),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    _forwarding->forward_to(p, cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};
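// Parallel planning task: each worker walks the regions of its own slice,
// runs the closure above over their live objects, and remembers regions that
// were compacted away from, so they can be reused as compaction targets.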
class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet*        const _preserved_marks;
  ShenandoahHeap*           const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // Can move the region, and this is not a humongous region. Humongous
    // regions are special-cased: their moves are handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegionSetIterator it(slice);
    ShenandoahHeapRegion* from_region = it.next();
    // No work?
    if (from_region == NULL) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;

    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);

    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");

      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  SlidingForwarding* forwarding = heap->forwarding();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        forwarding->forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}
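// Brings every region into a state the compaction code can slide over: trash
// regions are recycled, collection-set regions are demoted back to regular,
// and empty uncommitted regions are committed. Also records the current
// occupancy in new_top, which tells the rest of the Full GC which regions
// are free.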
class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates to the rest of the
    // Full GC code which regions are free.
    r->set_new_top(r->top());
  }
};
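// Reclaims immediately collectible garbage before new addresses are computed:
// fully dead humongous objects and regular regions without live data are
// trashed, so they do not participate in compaction.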
class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               "Region " SIZE_FORMAT " should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             "Region " SIZE_FORMAT " should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no worker still needs
  // live data, we only take what each worker needs.
  //
  // Second, since we slide everything to the left in each slice, the most busy regions
  // would be the ones on the left, which means we want all workers to have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //

  // Compute how much live data is there. This would approximate the size of dense prefix
  // we target to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up being, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != NULL) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We are about to figure out which regions can be compacted; make sure
  // pinning status has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}
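// Phase 3 oop closure: loads each reference and, if the referent has been
// forwarded, stores the new location back into the slot. Derives from
// MetadataVisitingOopIterateClosure so that references held in metadata are
// visited as well.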
class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  const SlidingForwarding* const _forwarding;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = _forwarding->forwardee(obj);
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _forwarding(_heap->forwarding()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    const SlidingForwarding* const forwarding = ShenandoahHeap::heap()->forwarding();
    _preserved_marks->get(worker_id)->adjust_during_full_gc(forwarding);
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}
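// Phase 4 object closure: physically copies each forwarded object to its new
// location and re-initializes the mark word of the copy. Non-trivial marks
// are restored from PreservedMarks in the epilogue of do_it().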
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  const SlidingForwarding* const _forwarding;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _forwarding(_heap->forwarding()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(_forwarding->forwardee(p));
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};
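// Post-compaction region fixup: resets TAMS for unpinned regions, retypes
// regions to match their new occupancy, recycles trash, and accumulates the
// total live size that becomes the new heap "used" value.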
class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};
void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  const SlidingForwarding* const forwarding = heap->forwarding();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(forwarding->forwardee(old_obj));
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   words_size);

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}
// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those is using
// a valid marking bitmap and valid TAMS pointer. This class only resets marking
// bitmaps for unpinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions in proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc();
}