/*
 * Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "code/codeCache.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/workgroup.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/growableArray.hpp"

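// Full GC driver. Marks are preserved in a PreservedMarksSet allocated in
// C heap (the 'true' constructor argument), because compaction overwrites
// object mark words with forwarding pointers, and the interesting marks
// (locked, hashed) must be restored after the move.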
ShenandoahMarkCompact::ShenandoahMarkCompact() :
  _gc_timer(NULL),
  _preserved_marks(new PreservedMarksSet(true)) {}

void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) {
  _gc_timer = gc_timer;
}

void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // a3. Cancel concurrent traversal GC, if in progress
    if (heap->is_concurrent_traversal_in_progress()) {
      heap->traversal_gc()->reset();
      heap->set_concurrent_traversal_in_progress(false);
    }

    // b. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->concurrent_mark()->cancel();
      heap->stop_concurrent_marking();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // d. Abandon reference discovery and clear all discovered references.
    ReferenceProcessor* rp = heap->ref_processor();
    rp->disable_discovery();
    rp->abandon_partial_discovery();
    rp->verify_no_references_recorded();

    // e. Set the has-forwarded-objects bit back, in case some steps above dropped it.
    heap->set_has_forwarded_objects(has_forwarded_objects);

    // The rest of the prologue:
    BiasedLocking::preserve_marks();
    _preserved_marks->init(heap->workers()->active_workers());
  }

  heap->make_parsable(true);

  CodeCache::gc_prologue();

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop the flag.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves via fwdptr from kicking in while adjusting pointers in phase 3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Set up workers for the rest
  OrderAccess::fence();

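  // Each worker gets its own disjoint slice of regions, so workers can slide
  // objects within their slices independently, without synchronizing on the
  // destination regions.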
  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases have run to completion.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    SharedRestorePreservedMarksTaskExecutor exec(heap->workers());
    _preserved_marks->restore(&exec);
    BiasedLocking::restore_marks();
    _preserved_marks->reclaim();

    CodeCache::gc_epilogue();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
    heap->post_full_gc_dump(_gc_timer);
  }
}

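// Prepares a region for full marking: capture top-at-mark-start at the current
// top (so no object is implicitly treated as live), and drop the liveness data
// left over from the previous, possibly cancelled, cycle.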
class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
    r->set_concurrent_iteration_safe_limit(r->top());
  }
};

void ShenandoahMarkCompact::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  ShenandoahConcurrentMark* cm = heap->concurrent_mark();

  heap->set_process_references(heap->heuristics()->can_process_references());
  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_no_refs*/);
  rp->setup_policy(true); // forcefully purge all soft references
  rp->set_active_mt_degree(heap->workers()->active_workers());

  cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
  cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
  cm->finish_mark_from_roots(/* full_gc = */ true);

  heap->mark_complete_marking_context();
}

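// Computes forwarding addresses for all live objects in a worker's slice,
// packing them densely into the slice's regions. Objects are not moved yet:
// the new location is recorded in each object's mark word (forwarding), and
// the actual copy happens in phase 4, after all references are adjusted.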
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks*          const _preserved_marks;
  ShenandoahHeap*          const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion*          _to_region;
  ShenandoahHeapRegion*          _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

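  // Decide where this live object goes: at the current compaction point if it
  // fits into the current to-region, otherwise at the bottom of the next empty
  // region (or of the from-region itself, when no empty region is left). The
  // original mark word is preserved if it carries information (lock, hash),
  // because forward_to() overwrites it with the forwarding pointer.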
  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark_raw());
    p->forward_to(oop(_compact_point));
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  PreservedMarksSet*        const _preserved_marks;
  ShenandoahHeap*           const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;
  ShenandoahRegionIterator        _heap_regions;

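  // Claim the next from-region from the global iterator and record it in this
  // worker's slice. Humongous regions and regions that must not move (e.g.
  // pinned ones) are skipped here; humongous moves are handled separately,
  // in a serial pass.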
  ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* slice) {
    ShenandoahHeapRegion* from_region = _heap_regions.next();

    while (from_region != NULL && (!from_region->is_move_allowed() || from_region->is_humongous())) {
      from_region = _heap_regions.next();
    }

    if (from_region != NULL) {
      assert(slice != NULL, "sanity");
      assert(!from_region->is_humongous(), "this path cannot handle humongous regions");
      assert(from_region->is_move_allowed(), "only regions that can be moved in mark-compact");
      slice->add_region(from_region);
    }

    return from_region;
  }

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction Task"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegion* from_region = next_from_region(slice);
    // No work?
    if (from_region == NULL) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;
    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
    while (from_region != NULL) {
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = next_from_region(slice);
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahMarkCompact::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, when we know which regions in the heap suffix
  // are available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous starts there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.
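  //
  // For example, scanning 10 regions (indices 0..9) where region 7 is a pinned
  // humongous start: the window starts as [10; 10), grows over empty regions
  // 9 and 8 to [8; 10), then region 7 fails to move and resets the window to
  // [7; 7), from which the scan continues downwards.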

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->region_number();
      continue;
    }

    if (r->is_humongous_start() && r->is_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->region_number()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark_raw());
        old_obj->forward_to(oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->region_number();
    to_end = r->region_number();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->region_number());

    // Record current region occupancy: this communicates to the rest of the
    // Full GC code that empty regions are free.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               "Region " SIZE_FORMAT " is not marked, should not have live", r->region_number());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               "Region " SIZE_FORMAT " should have live", r->region_number());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit a continuation, the non-live humongous start should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             "Region " SIZE_FORMAT " should have live", r->region_number());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
    ShenandoahPrepareForCompactionTask prepare_task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&prepare_task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

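// Adjusts a single reference: if the object it points to has been assigned a
// new location in phase 2 (its mark word carries a forwarding pointer), the
// reference is rewritten to point at that new location.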
class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeap*          const _heap;
  ShenandoahRegionIterator       _regions;

public:
  ShenandoahAdjustPointersTask() :
    AbstractGangTask("Shenandoah Adjust Pointers Task"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahMarkCompact::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
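    // Compiled frames may contain derived pointers (addresses into the middle
    // of an object, computed from its base). Clearing the table here lets them
    // be recorded while roots are adjusted, and recomputed from the relocated
    // bases afterwards.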
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

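// Copies an object to the location recorded in its mark word during phase 2,
// then reinitializes the mark word, which also clears the forwarding state.
// Copy::aligned_conjoint_words tolerates overlapping source and destination,
// which happens when an object slides only a short distance down its region.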
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint            const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = (HeapWord*) p;
      HeapWord* compact_to = (HeapWord*) p->forwardee();
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = oop(compact_to);
      new_obj->init_mark_raw();
    }
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects Task"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Turn empty regions that have been allocated into regular regions
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata_to_shared();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahMarkCompact::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->region_number();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert (r->is_move_allowed(), "should be movable");

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   ShenandoahHeapRegion::region_size_words()*num_regions);

      oop new_obj = oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark_raw();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata_to_shared();
        }
      }
    }
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those
// is using a valid marking bitmap and a valid TAMS pointer. This class only resets marking
// bitmaps for unpinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc();
}