/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/slidingForwarding.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workgroup.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

ShenandoahFullGC::~ShenandoahFullGC() {
  delete _preserved_marks;
}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always succeeds
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
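  // Note: full_gc_gross covers the entire VM operation, including the time to
  // reach the safepoint; the net in-pause work is timed under full_gc in entry_full().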
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    ShenandoahHeap::heap()->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    ShenandoahHeap::heap()->notify_gc_no_progress();
  }
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      ShenandoahConcurrentGC::cancel();
      heap->set_concurrent_mark_in_progress(false);
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->ref_processor();
    rp->abandon_partial_discovery();
    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of the prologue:
    BiasedLocking::preserve_marks();
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop the
  // has-forwarded-objects flag. Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases have completed.
    ShenandoahHeapLocker lock(heap->lock());

    SlidingForwarding::begin();

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    _preserved_marks->restore(heap->workers());
    BiasedLocking::restore_marks();
    _preserved_marks->reclaim();
    SlidingForwarding::end();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
  }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

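// Sliding compaction, sketched: live objects in a from-region are visited in
// address order, and each object of size N words is assigned the current
// compaction point as its new location, after which the compaction point
// advances by N words. For example, live objects of 16 and 8 words compacting
// into an empty to-region land at bottom() and bottom() + 16, respectively.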
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

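  // Record the new location for one live object: save the original mark first
  // (SlidingForwarding encodes the forwarding information in mark word bits,
  // hence the preservation), then store the forwarding. Preserved marks are
  // restored in the Full GC epilogue.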
  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    SlidingForwarding::forward_to(p, cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // Can move the region, and this is not a humongous region. Humongous
    // moves are special-cased and handled separately, outside this task.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegionSetIterator it(slice);
    ShenandoahHeapRegion* from_region = it.next();
    // No work?
    if (from_region == NULL) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;

    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);

    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");

      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous starts there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.
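  //
  // Illustration (indices are made up): with a window of [to_begin = 50; to_end = 60)
  // and a movable 3-region humongous object starting at region 40, the object is
  // slid to regions 57..59 (start = to_end - num_regions = 57), and the window
  // shrinks to [50; 57).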

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        SlidingForwarding::forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert(r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates empty regions are free
    // to the rest of Full GC code.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               "Region " SIZE_FORMAT " should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit a continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             "Region " SIZE_FORMAT " should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no worker needs more
  // live data, we need to only take what each worker needs.
  //
  // Second, since we slide everything to the left in each slice, the most busy regions
  // would be the ones on the left. Which means we want all workers to have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //
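  // Worked example (numbers are illustrative only): with 4 workers, 1024 candidate
  // regions and 50% average liveness, total_live is ~512 regions' worth of words,
  // live_per_worker is ~128 regions' worth, so every worker gets a 128-region dense
  // prefix, and the remaining ~512 regions are dealt out round-robin until each
  // worker's live budget is reached.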

  // Compute how much live data there is. This would approximate the size of the
  // dense prefix we target to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of the dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

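  // Debug-only cross-check: every candidate region must have been assigned to
  // exactly one worker slice, and nothing but candidates must be distributed.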
#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != NULL) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // About to figure out which regions can be compacted, make sure pinning status
  // has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

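// Phase 3 rewrites every reference in place: for each oop slot, if the referent
// was forwarded during phase 2, the slot is updated to the new address. Pointers
// are adjusted before any object is actually moved in phase 4, which is what
// makes the subsequent sliding copy safe.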
class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (SlidingForwarding::is_forwarded(obj)) {
        oop forw = SlidingForwarding::forwardee(obj);
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    AbstractGangTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    AbstractGangTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
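    // Compiled frames may hold derived pointers (interior pointers at a known
    // offset from a base oop) that cannot be updated through the regular oop
    // closure. The DerivedPointerTable records them during the root walk and
    // re-derives them from the adjusted bases in update_pointers().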
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

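// Slides one live object to its forwarded location. Within a worker slice,
// objects only move towards the beginning of the slice and are visited in
// ascending address order, so a copy never overwrites live data that has not
// been copied yet; Copy::aligned_conjoint_words additionally tolerates
// overlapping source and destination.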
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
    if (SlidingForwarding::is_forwarded(p)) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(SlidingForwarding::forwardee(p));
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (SlidingForwarding::is_not_forwarded(old_obj)) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(SlidingForwarding::forwardee(old_obj));
      size_t new_end = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   words_size);

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
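          // Illustration (assuming 1 MiB regions, i.e. 128K heap words on 64-bit):
          // a 300K-word humongous object needs 3 regions; the remainder is
          // 300K & (128K - 1) == 44K words, so the last region's top ends up at
          // bottom() + 44K, while the preceding regions are filled to end().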
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over
// those is using a valid marking bitmap and a valid TAMS pointer. This class only
// resets marking bitmaps for un-pinned regions, and later we only reset TAMS for
// unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    AbstractGangTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc();
}