1 /*
   2  * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shared/referenceProcessor.hpp"
  27 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  28 #include "gc/shared/workgroup.hpp"
  29 #include "gc/shared/weakProcessor.hpp"
  30 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  31 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  32 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  33 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  34 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  35 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  36 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  37 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  38 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  39 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  40 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  41 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  42 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  43 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  44 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  45 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
  46 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  47 #include "gc/shenandoah/shenandoahUtils.hpp"
  48 #include "gc/shenandoah/shenandoahVerifier.hpp"
  49 
  50 #include "memory/iterator.hpp"
  51 #include "memory/metaspace.hpp"
  52 #include "memory/resourceArea.hpp"
  53 
  54 /**
  55  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  56  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  57  * is incremental-update-based.
  58  *
  59  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  60  * several reasons:
  61  * - We will not reclaim them in this cycle anyway, because they are not in the
  62  *   cset
  63  * - It makes up for the bulk of work during final-pause
  64  * - It also shortens the concurrent cycle because we don't need to
  65  *   pointlessly traverse through newly allocated objects.
  66  * - As a nice side-effect, it solves the I-U termination problem (mutators
  67  *   cannot outrun the GC by allocating like crazy)
  68  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  70  *   achieves the same, but without extra barriers. I think the effect of
  71  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  72  *   particular, we will not see the head of a completely new long linked list
  73  *   in final-pause and end up traversing huge chunks of the heap there.
  74  * - We don't need to see/update the fields of new objects either, because they
  75  *   are either still null, or anything that's been stored into them has been
  76  *   evacuated+enqueued before (and will thus be treated later).
  77  *
  78  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  80  *
  81  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  83  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  84  *   them for cset. This means that we need to protect such regions from
  85  *   getting accidentally thrashed at the end of traversal cycle. This is why I
  86  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  87  *   code.
  88  * - We *need* to traverse through evacuated objects. Those objects are
  89  *   pre-existing, and any references in them point to interesting objects that
  90  *   we need to see. We also want to count them as live, because we just
  91  *   determined that they are alive :-) I achieve this by upping TAMS
  92  *   concurrently for every gclab/gc-shared alloc before publishing the
  93  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  95  */
  96 class ShenandoahTraversalSATBBufferClosure : public ShenandoahSATBBufferClosure {
  97 private:
  98   ShenandoahObjToScanQueue* _queue;
  99   ShenandoahTraversalGC* _traversal_gc;
 100   ShenandoahHeap* const _heap;
 101 
 102 public:
 103   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 104     _queue(q),
 105     _heap(ShenandoahHeap::heap())
 106  { }
 107 
 108   void do_buffer(void** buffer, size_t size) {
 109     for (size_t i = 0; i < size; ++i) {
 110       oop* p = (oop*) &buffer[i];
 111       oop obj = RawAccess<>::oop_load(p);
 112       shenandoah_assert_not_forwarded(p, obj);
 113       if (_heap->marking_context()->mark(obj)) {
 114         _queue->push(ShenandoahMarkTask(obj));
 115       }
 116     }
 117   }
 118 };
 119 
 120 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 121 private:
 122   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 123 
 124 public:
 125   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 126     _satb_cl(satb_cl) {}
 127 
 128   void do_thread(Thread* thread) {
 129     if (thread->is_Java_thread()) {
 130       JavaThread* jt = (JavaThread*)thread;
 131       ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
 132     } else if (thread->is_VM_thread()) {
 133       ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 134     }
 135   }
 136 };
 137 
// Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
// and remark them later during final-traversal.
class ShenandoahMarkCLDClosure : public CLDClosure {
private:
  OopClosure* _cl;
public:
  ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
  void do_cld(ClassLoaderData* cld) {
    // NOTE(review): the two bool arguments appear to claim the CLD and clear
    // its modified-oops flag -- confirm against ClassLoaderData::oops_do.
    cld->oops_do(_cl, true, true);
  }
};
 149 
// Like CLDToOopClosure, but only process modified CLDs
class ShenandoahRemarkCLDClosure : public CLDClosure {
private:
  OopClosure* _cl;
public:
  ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
  void do_cld(ClassLoaderData* cld) {
    // Only re-visit CLDs whose oops were modified since the initial scan;
    // the modified-oops flag was cleared by ShenandoahMarkCLDClosure during
    // init traversal.
    if (cld->has_modified_oops()) {
      cld->oops_do(_cl, true, true);
    }
  }
};
 162 
 163 class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
 164 private:
 165   ShenandoahCSetRootScanner* _rp;
 166   ShenandoahHeap* _heap;
 167   ShenandoahCsetCodeRootsIterator* _cset_coderoots;
 168   ShenandoahStringDedupRoots       _dedup_roots;
 169 
 170 public:
 171   ShenandoahInitTraversalCollectionTask(ShenandoahCSetRootScanner* rp) :
 172     AbstractGangTask("Shenandoah Init Traversal Collection"),
 173     _rp(rp),
 174     _heap(ShenandoahHeap::heap()) {}
 175 
 176   void work(uint worker_id) {
 177     ShenandoahParallelWorkerSession worker_session(worker_id);
 178 
 179     ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
 180     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 181 
 182     bool process_refs = _heap->process_references();
 183     bool unload_classes = _heap->unload_classes();
 184     ReferenceProcessor* rp = NULL;
 185     if (process_refs) {
 186       rp = _heap->ref_processor();
 187     }
 188 
 189     // Step 1: Process ordinary GC roots.
 190     {
 191       ShenandoahTraversalRootsClosure roots_cl(q, rp);
 192       ShenandoahMarkCLDClosure cld_cl(&roots_cl);
 193       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
 194       if (unload_classes) {
 195         _rp->roots_do(worker_id, &roots_cl, NULL, &code_cl);
 196       } else {
 197         _rp->roots_do(worker_id, &roots_cl, &cld_cl, &code_cl);
 198       }
 199     }
 200   }
 201 };
 202 
 203 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 204 private:
 205   ShenandoahTaskTerminator* _terminator;
 206   ShenandoahHeap* _heap;
 207 public:
 208   ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
 209     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 210     _terminator(terminator),
 211     _heap(ShenandoahHeap::heap()) {}
 212 
 213   void work(uint worker_id) {
 214     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 215     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 216     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 217 
 218     // Drain all outstanding work in queues.
 219     traversal_gc->main_loop(worker_id, _terminator, true);
 220   }
 221 };
 222 
// Final-traversal pause task: drains remaining SATB buffers, rescans GC roots,
// and finishes all outstanding queue work.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahAllRootScanner* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      ShenandoahSATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process GC roots.
    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
    // and the references to the oops are updated during init pause. We only need to rescan
    // on stack code roots, in case of class unloading is enabled. Otherwise, code roots are
    // scanned during init traversal or degenerated GC will update them at the end.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalRootsClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Class unloading: strong roots only; remark only CLDs that were
        // modified during the concurrent phase, and rescan on-stack nmethods.
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, &code_cl, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    } else {
      // Degenerated GC: same shape as above, but with the degen (non-atomic)
      // traversal closure, and no code-root rescanning.
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 2: Finally drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};
 298 
 299 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 300   _heap(heap),
 301   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 302   _traversal_set(ShenandoahHeapRegionSet()) {
 303 
 304   // Traversal does not support concurrent code root scanning
 305   FLAG_SET_DEFAULT(ShenandoahConcurrentScanCodeRoots, false);
 306 
 307   uint num_queues = heap->max_workers();
 308   for (uint i = 0; i < num_queues; ++i) {
 309     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 310     task_queue->initialize();
 311     _task_queues->register_queue(i, task_queue);
 312   }
 313 }
 314 
// Empty destructor; the task queues allocated in the constructor are not
// freed here.
ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}
 317 
 318 void ShenandoahTraversalGC::prepare_regions() {
 319   size_t num_regions = _heap->num_regions();
 320   ShenandoahMarkingContext* const ctx = _heap->marking_context();
 321   for (size_t i = 0; i < num_regions; i++) {
 322     ShenandoahHeapRegion* region = _heap->get_region(i);
 323     if (_heap->is_bitmap_slice_committed(region)) {
 324       if (_traversal_set.is_in(i)) {
 325         ctx->capture_top_at_mark_start(region);
 326         region->clear_live_data();
 327         assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
 328       } else {
 329         // Everything outside the traversal set is always considered live.
 330         ctx->reset_top_at_mark_start(region);
 331       }
 332     } else {
 333       // FreeSet may contain uncommitted empty regions, once they are recommitted,
 334       // their TAMS may have old values, so reset them here.
 335       ctx->reset_top_at_mark_start(region);
 336     }
 337   }
 338 }
 339 
// Prepares the heap for a traversal cycle: retires/resizes TLABs, chooses the
// collection set, sets up TAMS for traversal regions, and rebuilds the free set.
void ShenandoahTraversalGC::prepare() {
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_accumulate_stats);
    _heap->accumulate_statistics_tlabs();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
    _heap->make_parsable(true);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
    _heap->resize_tlabs();
  }

  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
  assert(!_heap->marking_context()->is_complete(), "should not be complete");

  // About to choose the collection set, make sure we know which regions are pinned.
  {
    ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_prepare_sync_pinned);
    _heap->sync_pinned_region_status();
  }

  ShenandoahCollectionSet* collection_set = _heap->collection_set();
  {
    // Heap lock is held while mutating the collection set and free set.
    ShenandoahHeapLocker lock(_heap->lock());

    collection_set->clear();
    assert(collection_set->count() == 0, "collection set not clear");

    // Find collection set
    _heap->heuristics()->choose_collection_set(collection_set);
    prepare_regions();

    // Rebuild free set
    _heap->free_set()->rebuild();
  }

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s, " SIZE_FORMAT "%s CSet, " SIZE_FORMAT " CSet regions",
                     byte_size_in_proper_unit(collection_set->garbage()),   proper_unit_for_byte_size(collection_set->garbage()),
                     byte_size_in_proper_unit(collection_set->live_data()), proper_unit_for_byte_size(collection_set->live_data()),
                     collection_set->count());
}
 385 
// Init-traversal pause (STW): verifies the heap, prepares the collection set
// and TAMS, enables reference discovery, and seeds the traversal queues from
// collection-set roots.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    prepare();
  }

  // From here on, the traversal cycle is active and objects may get forwarded.
  _heap->set_concurrent_traversal_in_progress(true);
  _heap->set_has_forwarded_objects(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahCSetRootScanner rp(nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);
      ShenandoahInitTraversalCollectionTask traversal_task(&rp);
      _heap->workers()->run_task(&traversal_task);
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}
 438 
// Per-worker marking loop entry. Selects the statically-specialized closure
// for the current mode (class unloading x string dedup x degenerated) and
// runs main_loop_work() with it; the template dispatch keeps virtual calls
// out of the hot marking path.
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      // Degenerated variants of the same four combinations.
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  // Flush the per-worker liveness cache back to the heap.
  _heap->flush_liveness_cache(w);
}
 491 
// Worker loop body: first drains any queues left over from previous phases,
// then enters the normal mark loop that interleaves queue popping, work
// stealing and SATB buffer draining, until the terminator agrees all work is
// done or the GC gets cancelled.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  // Number of tasks to process between cancellation/yield checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  ShenandoahSATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  // Seed for randomized victim selection when stealing work.
  int seed = 17;

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

    // Drain completed SATB buffers into our queue before doing mark work.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer our own queue; fall back to stealing from other workers.
      if (q->pop(task) ||
          queues->steal(worker_id, &seed, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminatorTerminator tt(_heap);

      if (terminator->offer_termination(&tt)) return;
    }
  }
}
 557 
 558 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
 559   if (_heap->cancelled_gc()) {
 560     return true;
 561   }
 562   return false;
 563 }
 564 
 565 void ShenandoahTraversalGC::concurrent_traversal_collection() {
 566   ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
 567   if (!_heap->cancelled_gc()) {
 568     uint nworkers = _heap->workers()->active_workers();
 569     task_queues()->reserve(nworkers);
 570 
 571     ShenandoahTaskTerminator terminator(nworkers, task_queues());
 572     ShenandoahConcurrentTraversalCollectionTask task(&terminator);
 573     _heap->workers()->run_task(&task);
 574   }
 575 
 576   if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
 577     preclean_weak_refs();
 578   }
 579 }
 580 
// Final-traversal pause: finishes marking from roots and queues, processes
// weak references, fixes up roots, reclaims fully-dead regions, and wraps up
// the cycle.
void ShenandoahTraversalGC::final_traversal_collection() {
  if (!_heap->cancelled_gc()) {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahAllRootScanner rp(nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->set_concurrent_traversal_in_progress(false);
    _heap->mark_complete_marking_context();

    // A rare case, TLAB/GCLAB is initialized from an empty region without
    // any live data, the region can be trashed and may be uncommitted in later code,
    // that results the TLAB/GCLAB not usable. Retire them here.
    _heap->make_parsable(true);

    fixup_roots();
    _heap->parallel_cleaning(false);

    _heap->set_has_forwarded_objects(false);

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Need to see that pinned region status is updated: newly pinned regions must not
    // be trashed. New unpinned regions should be trashed.
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_sync_pinned);
      _heap->sync_pinned_region_status();
    }

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // "not_allocated": nothing was allocated in this region since TAMS
        // was set, so nothing is implicitly live there (see file header).
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        // A region can be trashed only when it was in the traversal set, has
        // no marked live data, and saw no new allocations during the cycle.
        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          // Also trash all continuation regions of this humongous object.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }
#ifdef ASSERT
    else {
      verify_roots_after_gc();
    }
#endif

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}
 691 
 692 class ShenandoahVerifyAfterGC : public OopClosure {
 693 private:
 694   template <class T>
 695   void do_oop_work(T* p) {
 696     T o = RawAccess<>::oop_load(p);
 697     if (!CompressedOops::is_null(o)) {
 698       oop obj = CompressedOops::decode_not_null(o);
 699       shenandoah_assert_correct(p, obj);
 700       shenandoah_assert_not_in_cset_except(p, obj, ShenandoahHeap::heap()->cancelled_gc());
 701       shenandoah_assert_not_forwarded(p, obj);
 702     }
 703   }
 704 
 705 public:
 706   void do_oop(narrowOop* p) { do_oop_work(p); }
 707   void do_oop(oop* p)       { do_oop_work(p); }
 708 };
 709 
 710 void ShenandoahTraversalGC::verify_roots_after_gc() {
 711   ShenandoahRootVerifier verifier;
 712   ShenandoahVerifyAfterGC cl;
 713   verifier.oops_do(&cl);
 714 }
 715 
 716 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 717 private:
 718   template <class T>
 719   inline void do_oop_work(T* p) {
 720     T o = RawAccess<>::oop_load(p);
 721     if (!CompressedOops::is_null(o)) {
 722       oop obj = CompressedOops::decode_not_null(o);
 723       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 724       if (obj != forw) {
 725         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 726       }
 727     }
 728   }
 729 
 730 public:
 731   inline void do_oop(oop* p) { do_oop_work(p); }
 732   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 733 };
 734 
 735 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 736 private:
 737   ShenandoahRootUpdater* _rp;
 738 
 739 public:
 740   ShenandoahTraversalFixRootsTask(ShenandoahRootUpdater* rp) :
 741     AbstractGangTask("Shenandoah traversal fix roots"),
 742     _rp(rp) {
 743     assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be");
 744   }
 745 
 746   void work(uint worker_id) {
 747     ShenandoahParallelWorkerSession worker_session(worker_id);
 748     ShenandoahTraversalFixRootsClosure cl;
 749     ShenandoahForwardedIsAliveClosure is_alive;
 750     _rp->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahTraversalFixRootsClosure>(worker_id, &is_alive, &cl);
 751   }
 752 };
 753 
// Updates all GC roots (including code cache) to point at the forwardees of
// evacuated objects.
void ShenandoahTraversalGC::fixup_roots() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootUpdater rp(_heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots, true /* update code cache */);
  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
  _heap->workers()->run_task(&update_roots_task);
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}
 765 
// Drops any remaining tasks from all traversal queues.
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}
 769 
// Accessor for the traversal task queue set.
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
 773 
 774 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 775 private:
 776   ShenandoahHeap* const _heap;
 777 public:
 778   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 779   virtual bool should_return() { return _heap->cancelled_gc(); }
 780 };
 781 
 782 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 783 public:
 784   void do_void() {
 785     ShenandoahHeap* sh = ShenandoahHeap::heap();
 786     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 787     assert(sh->process_references(), "why else would we be here?");
 788     ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
 789     shenandoah_assert_rp_isalive_installed();
 790     traversal_gc->main_loop((uint) 0, &terminator, true);
 791   }
 792 };
 793 
 794 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
 795 private:
 796   ShenandoahObjToScanQueue* _queue;
 797   Thread* _thread;
 798   ShenandoahTraversalGC* _traversal_gc;
 799   ShenandoahMarkingContext* const _mark_context;
 800 
 801   template <class T>
 802   inline void do_oop_work(T* p) {
 803     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* atomic update */>(p, _thread, _queue, _mark_context);
 804   }
 805 
 806 public:
 807   ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 808     _queue(q), _thread(Thread::current()),
 809     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 810     _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 811 
 812   void do_oop(narrowOop* p) { do_oop_work(p); }
 813   void do_oop(oop* p)       { do_oop_work(p); }
 814 };
 815 
 816 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 817 private:
 818   ShenandoahObjToScanQueue* _queue;
 819   Thread* _thread;
 820   ShenandoahTraversalGC* _traversal_gc;
 821   ShenandoahMarkingContext* const _mark_context;
 822 
 823   template <class T>
 824   inline void do_oop_work(T* p) {
 825     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* atomic update */>(p, _thread, _queue, _mark_context);
 826   }
 827 
 828 public:
 829   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 830           _queue(q), _thread(Thread::current()),
 831           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 832           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 833 
 834   void do_oop(narrowOop* p) { do_oop_work(p); }
 835   void do_oop(oop* p)       { do_oop_work(p); }
 836 };
 837 
 838 class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
 839 private:
 840   ShenandoahObjToScanQueue* _queue;
 841   Thread* _thread;
 842   ShenandoahTraversalGC* _traversal_gc;
 843   ShenandoahMarkingContext* const _mark_context;
 844 
 845   template <class T>
 846   inline void do_oop_work(T* p) {
 847     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* atomic update */>(p, _thread, _queue, _mark_context);
 848   }
 849 
 850 public:
 851   ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 852           _queue(q), _thread(Thread::current()),
 853           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 854           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 855 
 856   void do_oop(narrowOop* p) { do_oop_work(p); }
 857   void do_oop(oop* p)       { do_oop_work(p); }
 858 };
 859 
 860 class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
 861 private:
 862   ShenandoahObjToScanQueue* _queue;
 863   Thread* _thread;
 864   ShenandoahTraversalGC* _traversal_gc;
 865   ShenandoahMarkingContext* const _mark_context;
 866 
 867   template <class T>
 868   inline void do_oop_work(T* p) {
 869     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* atomic update */>(p, _thread, _queue, _mark_context);
 870   }
 871 
 872 public:
 873   ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 874           _queue(q), _thread(Thread::current()),
 875           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 876           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 877 
 878   void do_oop(narrowOop* p) { do_oop_work(p); }
 879   void do_oop(oop* p)       { do_oop_work(p); }
 880 };
 881 
 882 class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
 883 private:
 884   ReferenceProcessor* _rp;
 885 
 886 public:
 887   ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
 888           AbstractGangTask("Precleaning task"),
 889           _rp(rp) {}
 890 
 891   void work(uint worker_id) {
 892     assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
 893     ShenandoahParallelWorkerSession worker_session(worker_id);
 894     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 895 
 896     ShenandoahHeap* sh = ShenandoahHeap::heap();
 897 
 898     ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);
 899 
 900     ShenandoahForwardedIsAliveClosure is_alive;
 901     ShenandoahTraversalCancelledGCYieldClosure yield;
 902     ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
 903     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
 904     ResourceMark rm;
 905     _rp->preclean_discovered_references(&is_alive, &keep_alive,
 906                                         &complete_gc, &yield,
 907                                         NULL);
 908   }
 909 };
 910 
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning runs single-threaded; temporarily force MT discovery off.
  // (RAII mutator restores the previous setting on scope exit.)
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Install the forwarding-aware is-alive closure into RP for the duration
  // of this scope; asserted not to be installed already.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahTraversalPrecleanTask task(rp);
  workers->run_task(&task);

  // Queues should drain unless the GC was cancelled mid-preclean.
  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}
 951 
 952 // Weak Reference Closures
 953 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 954   uint _worker_id;
 955   ShenandoahTaskTerminator* _terminator;
 956   bool _reset_terminator;
 957 
 958 public:
 959   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 960     _worker_id(worker_id),
 961     _terminator(t),
 962     _reset_terminator(reset_terminator) {
 963   }
 964 
 965   void do_void() {
 966     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 967 
 968     ShenandoahHeap* sh = ShenandoahHeap::heap();
 969     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 970     assert(sh->process_references(), "why else would we be here?");
 971     shenandoah_assert_rp_isalive_installed();
 972 
 973     traversal_gc->main_loop(_worker_id, _terminator, false);
 974 
 975     if (_reset_terminator) {
 976       _terminator->reset_for_reuse();
 977     }
 978   }
 979 };
 980 
 981 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
 982   uint _worker_id;
 983   ShenandoahTaskTerminator* _terminator;
 984   bool _reset_terminator;
 985 
 986 public:
 987   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 988           _worker_id(worker_id),
 989           _terminator(t),
 990           _reset_terminator(reset_terminator) {
 991   }
 992 
 993   void do_void() {
 994     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 995 
 996     ShenandoahHeap* sh = ShenandoahHeap::heap();
 997     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 998     assert(sh->process_references(), "why else would we be here?");
 999     shenandoah_assert_rp_isalive_installed();
1000 
1001     traversal_gc->main_loop(_worker_id, _terminator, false);
1002 
1003     if (_reset_terminator) {
1004       _terminator->reset_for_reuse();
1005     }
1006   }
1007 };
1008 
1009 void ShenandoahTraversalGC::weak_refs_work() {
1010   assert(_heap->process_references(), "sanity");
1011 
1012   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
1013 
1014   ShenandoahGCPhase phase(phase_root);
1015 
1016   ReferenceProcessor* rp = _heap->ref_processor();
1017 
1018   // NOTE: We cannot shortcut on has_discovered_references() here, because
1019   // we will miss marking JNI Weak refs then, see implementation in
1020   // ReferenceProcessor::process_discovered_references.
1021   weak_refs_work_doit();
1022 
1023   rp->verify_no_references_recorded();
1024   assert(!rp->discovery_enabled(), "Post condition");
1025 
1026 }
1027 
1028 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
1029 private:
1030   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
1031   ShenandoahTaskTerminator* _terminator;
1032 
1033 public:
1034   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
1035                                       ShenandoahTaskTerminator* t) :
1036     AbstractGangTask("Process reference objects in parallel"),
1037     _proc_task(proc_task),
1038     _terminator(t) {
1039   }
1040 
1041   void work(uint worker_id) {
1042     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1043     ShenandoahHeap* heap = ShenandoahHeap::heap();
1044     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1045 
1046     ShenandoahForwardedIsAliveClosure is_alive;
1047     if (!heap->is_degenerated_gc_in_progress()) {
1048       ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1049       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1050     } else {
1051       ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1052       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1053     }
1054   }
1055 };
1056 
1057 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1058 private:
1059   WorkGang* _workers;
1060 
1061 public:
1062   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}
1063 
1064   // Executes a task using worker threads.
1065   void execute(ProcessTask& task, uint ergo_workers) {
1066     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1067 
1068     ShenandoahHeap* heap = ShenandoahHeap::heap();
1069     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1070     ShenandoahPushWorkerQueuesScope scope(_workers,
1071                                           traversal_gc->task_queues(),
1072                                           ergo_workers,
1073                                           /* do_check = */ false);
1074     uint nworkers = _workers->active_workers();
1075     traversal_gc->task_queues()->reserve(nworkers);
1076     ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1077     ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1078     _workers->run_task(&proc_task_proxy);
1079   }
1080 };
1081 
// Performs the actual weak-reference processing: installs the forwarding-aware
// is-alive closure, sets up serial and parallel closures, and drives
// ReferenceProcessor::process_discovered_references with the mode-appropriate
// keep-alive closure.
void ShenandoahTraversalGC::weak_refs_work_doit() {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;

  // Install is-alive closure into RP for the duration of this scope
  // (RAII mutator restores the previous closure on exit).
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  // Honor the soft-ref clearing policy decided for this cycle, and size the
  // MT reference processing to the current worker count.
  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
  ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);

  // Executor used by RP when it decides to go multi-threaded.
  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
  if (!_heap->is_degenerated_gc_in_progress()) {
    // Concurrent mode: keep-alive must update references atomically.
    ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  } else {
    // Degenerated (STW) mode: plain updates suffice.
    ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  }

  pt.print_all_references();
  // Queues should drain unless the GC was cancelled mid-processing.
  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
}