/*
 * Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "code/codeCache.hpp"
#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shenandoah/preservedMarks.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahForwarding.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "gc_implementation/shenandoah/shenandoahVerifier.hpp"
#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/workgroup.hpp"

ShenandoahMarkCompact::ShenandoahMarkCompact() :
  _gc_timer(NULL),
  _preserved_marks(new PreservedMarksSet(true)) {}

void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) {
  _gc_timer = gc_timer;
}

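// Full GC entry point. Runs at a safepoint on the VM thread, and drives the
// four mark-compact phases below: mark the heap, calculate target addresses,
// adjust pointers, and compact objects.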
void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->concurrent_mark()->cancel();
      heap->stop_concurrent_marking();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      heap->concurrent_mark()->update_roots(ShenandoahPhaseTimings::full_gc_update_roots);
    }

    // d. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ReferenceProcessor* rp = heap->ref_processor();
    rp->disable_discovery();
    rp->abandon_partial_discovery();
    rp->verify_no_references_recorded();

    // f. Set the has-forwarded-objects bit back, in case some steps above dropped it.
    heap->set_has_forwarded_objects(has_forwarded_objects);

    // g. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of the prologue:
    BiasedLocking::preserve_marks();
    _preserved_marks->init(heap->workers()->active_workers());
  }

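  // Make the heap parsable before the move phases walk it linearly; this
  // retires TLABs, so heap walks only ever see valid objects.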
  heap->make_parsable(true);

  CodeCache::gc_prologue();

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop the flag.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents the read barrier from kicking in while adjusting pointers in phase 3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases run together.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    SharedRestorePreservedMarksTaskExecutor exec(heap->workers());
    _preserved_marks->restore(&exec);
    BiasedLocking::restore_marks();
    _preserved_marks->reclaim();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices, mtGC);

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
    heap->resize_all_tlabs();
  }
}

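// Prepares a region for full-GC marking: captures top-at-mark-start at the
// current top and resets the live data counter, so marking starts from a
// clean slate.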
class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
  }
};

void ShenandoahMarkCompact::phase1_mark_heap() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  GCTraceTime time("Phase 1: Mark live objects", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  ShenandoahConcurrentMark* cm = heap->concurrent_mark();

  heap->set_process_references(heap->heuristics()->can_process_references());
  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ReferenceProcessor* rp = heap->ref_processor();
  // Enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_no_refs*/, true);
  rp->setup_policy(true); // forcefully purge all soft references
  rp->set_active_mt_degree(heap->workers()->active_workers());

  cm->mark_roots(ShenandoahPhaseTimings::full_gc_scan_roots);
  cm->finish_mark_from_roots(/* full_gc = */ true);

  heap->mark_complete_marking_context();
}

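// Computes the new address for every live object in a worker slice by sliding
// objects towards the heap start within the current to-region. The forwarding
// pointer is installed in the object's mark word; marks that carry meaningful
// data are saved aside in PreservedMarks first.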
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks*          const _preserved_marks;
  ShenandoahHeap*          const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion*          _to_region;
  ShenandoahHeapRegion*          _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    p->forward_to(oop(_compact_point));
    _compact_point += obj_size;
  }
};

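// Parallel task: each worker computes compaction addresses for its own slice
// of regions, so phase 2 runs without cross-worker synchronization.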
class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  PreservedMarksSet*        const _preserved_marks;
  ShenandoahHeap*           const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction Task"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // The region can be moved, and it is not humongous. Humongous region
    // moves are special-cased, because they are handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegionSetIterator it(slice);
    ShenandoahHeapRegion* from_region = it.next();

    // No work?
    if (from_region == NULL) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;

    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);

    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");

      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahMarkCompact::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know which regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // a humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

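// Puts every region into a state the move phases can handle: trash is
// recycled, collection set regions are demoted back to regular, and empty
// uncommitted regions are committed, since data may be slid through them.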
class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert(r->is_committed(), err_msg("only committed regions in heap now, see region " SIZE_FORMAT, r->index()));

    // Record current region occupancy: this communicates to the rest of the
    // Full GC code that empty regions are free.
    r->set_new_top(r->top());
  }
};

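// Trashes "immediate garbage" before compaction: regions (including humongous
// ones) that contain no live objects are reclaimed outright and need not be
// slid at all.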
class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               err_msg("Region " SIZE_FORMAT " is not marked, should not have live", r->index()));
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               err_msg("Region " SIZE_FORMAT " should have live", r->index()));
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit a continuation, the non-live humongous start should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             err_msg("Region " SIZE_FORMAT " should have live", r->index()));
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahMarkCompact::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no worker needs more
  // live data, we only take what each worker needs.
  //
  // Second, since we slide everything to the left in each slice, the busiest regions
  // would be the ones on the left. This means we want all workers to have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin fashion
  // between workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //

  // Compute how much live data there is. This approximates the size of the dense prefix
  // we aim to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion* r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up being, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion* r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of the dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion* r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion* r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live, mtGC);

#ifdef ASSERT
  BitMap map(n_regions, true /* in_resource_area */);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != NULL) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), err_msg("Sanity: " SIZE_FORMAT, idx));
      assert(!map.at(idx), err_msg("No region distributed twice: " SIZE_FORMAT, idx));
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, err_msg("All candidates are distributed: " SIZE_FORMAT, rid));
  }
#endif
}

void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  GCTraceTime time("Phase 2: Compute new object addresses", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  // We are about to figure out which regions can be compacted; make sure pinning status
  // has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

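// Phase 3 closure: rewrites a single reference to point at the forwardee that
// phase 2 recorded in the mark word. References to objects that did not move
// carry no forwarding and are left intact.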
class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        oopDesc::encode_store_heap_oop(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

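// Applies the pointer-adjusting closure to all reference fields of one marked object.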
class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

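// Parallel task: workers claim regions from the shared iterator and adjust
// the references inside all live objects.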
class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeap*          const _heap;
  ShenandoahRegionIterator       _regions;

public:
  ShenandoahAdjustPointersTask() :
    AbstractGangTask("Shenandoah Adjust Pointers Task"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

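// Parallel task: adjusts references held in GC roots, and fixes up the object
// pointers recorded in the preserved marks sets.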
class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;

public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahMarkCompact::phase3_update_references() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  GCTraceTime time("Phase 3: Adjust pointers", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
    COMPILER2_PRESENT(DerivedPointerTable::clear());
    ShenandoahRootAdjuster rp(ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

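// Phase 4 closure: copies a forwarded object into its target location and
// re-initializes the mark word of the copy; objects without a forwarding
// pointer stay in place.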
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahCompactObjectsClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = (HeapWord*) p;
      HeapWord* compact_to = (HeapWord*) p->forwardee();
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = oop(compact_to);
      new_obj->init_mark();
    }
  }
};

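// Parallel task: each worker moves the live objects in its own slice, then
// sets region tops to the new compaction points.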
class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects Task"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl;
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

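// Final region pass: resets TAMS for unpinned regions, fixes up region states
// after the moves, recycles trash, and accumulates the post-GC used size.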
class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Turn empty regions that have been allocated into regular regions
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahMarkCompact::compact_humongous_objects() {
  // Compact humongous regions, based on the forwarding pointers of their start objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), err_msg("Region " SIZE_FORMAT " should be movable", r->index()));

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   ShenandoahHeapRegion::region_size_words() * num_regions);

      oop new_obj = oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those
// is using a valid marking bitmap and a valid TAMS pointer. This class only resets marking
// bitmaps for unpinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  GCTraceTime time("Phase 4: Move objects", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc();
}