/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/workgroup.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahTimingTracker.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"

#include "memory/iterator.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"

/**
 * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp; however, this is not an SATB algorithm.
 * We're using the buffer as a generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
 * is incremental-update-based.
 *
 * NOTE on interaction with TAMS: we want to avoid traversing new objects for
 * several reasons:
 * - We will not reclaim them in this cycle anyway, because they are not in the
 *   cset
 * - Traversing them would make up the bulk of the work during final-pause
 * - It also shortens the concurrent cycle because we don't need to
 *   pointlessly traverse through newly allocated objects.
 * - As a nice side-effect, it solves the I-U termination problem (mutators
 *   cannot outrun the GC by allocating like crazy)
 * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects as live implicitly
 *   achieves the same, but without extra barriers. I think the effect of
 *   shortened final-pause (mentioned above) is the main advantage of MWF. In
 *   particular, we will not see the head of a completely new long linked list
 *   in final-pause and end up traversing huge chunks of the heap there.
 * - We don't need to see/update the fields of new objects either, because they
 *   are either still null, or anything that's been stored into them has been
 *   evacuated+enqueued before (and will thus be treated later).
 *
 * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
 *
 * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
 *   them as live. Otherwise the next cycle wouldn't pick them up and consider
 *   them for the cset. This means that we need to protect such regions from
 *   getting accidentally trashed at the end of the traversal cycle. This is why I
 *   keep track of alloc-regions and check is_alloc_region() in the trashing
 *   code.
 * - We *need* to traverse through evacuated objects. Those objects are
 *   pre-existing, and any references in them point to interesting objects that
 *   we need to see. We also want to count them as live, because we just
 *   determined that they are alive :-) I achieve this by upping TAMS
 *   concurrently for every gclab/gc-shared alloc before publishing the
 *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
 */
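//
// The note above can be summarized with a small sketch. This is illustrative only and is
// not part of the traversal implementation; the helper name is hypothetical, and the
// snippet is kept commented out so it has no effect on the build. It assumes the usual
// ShenandoahMarkingContext queries (top_at_mark_start(), is_marked()) used elsewhere in
// this file.
//
//   static bool is_traversal_live(ShenandoahMarkingContext* ctx,
//                                 ShenandoahHeapRegion* r, oop obj) {
//     // Anything allocated at or past TAMS is treated as implicitly marked/live.
//     if (cast_from_oop<HeapWord*>(obj) >= ctx->top_at_mark_start(r)) {
//       return true;
//     }
//     // Older objects must carry an explicit mark to be considered live.
//     return ctx->is_marked(obj);
//   }
//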
class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahHeap* const _heap;

public:
  ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap())
 { }

  void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      oop* p = (oop*) &buffer[i];
      oop obj = RawAccess<>::oop_load(p);
      shenandoah_assert_not_forwarded(p, obj);
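      // marking_context()->mark() returns true only for the thread that sets the mark
      // bit first, so each newly marked object is pushed onto the queue exactly once.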
      if (_heap->marking_context()->mark(obj)) {
        _queue->push(ShenandoahMarkTask(obj));
      }
    }
  }
};

class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
private:
  ShenandoahTraversalSATBBufferClosure* _satb_cl;

public:
  ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl) {}

  void do_thread(Thread* thread) {
    ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
  }
};

// Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
// and remark them later during final-traversal.
class ShenandoahMarkCLDClosure : public CLDClosure {
private:
  OopClosure* _cl;
public:
  ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
  void do_cld(ClassLoaderData* cld) {
    cld->oops_do(_cl, ClassLoaderData::_claim_strong, true);
  }
};

// Like CLDToOopClosure, but only processes modified CLDs
class ShenandoahRemarkCLDClosure : public CLDClosure {
private:
  OopClosure* _cl;
public:
  ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
  void do_cld(ClassLoaderData* cld) {
    if (cld->has_modified_oops()) {
      cld->oops_do(_cl, ClassLoaderData::_claim_strong, true);
    }
  }
};

class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahCSetRootScanner* _rp;
  ShenandoahHeap* _heap;
  ShenandoahCsetCodeRootsIterator* _cset_coderoots;
  ShenandoahStringDedupRoots       _dedup_roots;

public:
  ShenandoahInitTraversalCollectionTask(ShenandoahCSetRootScanner* rp) :
    AbstractGangTask("Shenandoah Init Traversal Collection"),
    _rp(rp),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 1: Process ordinary GC roots.
    {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahMarkCLDClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      if (unload_classes) {
        _rp->roots_do(worker_id, &roots_cl, NULL, &code_cl);
      } else {
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, &code_cl);
      }

      AlwaysTrueClosure is_alive;
      _dedup_roots.oops_do(&is_alive, &roots_cl, worker_id);
    }
  }
};

class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    // Drain all outstanding work in queues.
    traversal_gc->main_loop(worker_id, _terminator, true);
  }
};

class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahAllRootScanner* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of the remaining per-thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining completed SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // The remaining per-thread SATB buffers are processed below.
    }

    // Step 1: Process GC roots.
    // Oops in code roots are marked, evacuated, and enqueued for further traversal, and the
    // references to them are updated during the init pause. New nmethods are handled in a
    // similar way during the nmethod-register process. Therefore, we do not need to rescan
    // code roots here.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    } else {
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 2: Finally, drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};

ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
  _heap(heap),
  _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
  _traversal_set(ShenandoahHeapRegionSet()) {

  // Traversal does not support concurrent code root scanning
  FLAG_SET_DEFAULT(ShenandoahConcurrentScanCodeRoots, false);

  uint num_queues = heap->max_workers();
  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
}

ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}

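// Sets up per-region marking state for the traversal cycle: regions in the traversal set
// get a fresh TAMS and cleared liveness data, while all other regions keep being treated
// as fully live.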
void ShenandoahTraversalGC::prepare_regions() {
  size_t num_regions = _heap->num_regions();
  ShenandoahMarkingContext* const ctx = _heap->marking_context();
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = _heap->get_region(i);
    if (_heap->is_bitmap_slice_committed(region)) {
      if (_traversal_set.is_in(i)) {
        ctx->capture_top_at_mark_start(region);
        region->clear_live_data();
        assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
      } else {
        // Everything outside the traversal set is always considered live.
        ctx->reset_top_at_mark_start(region);
      }
    } else {
      // The FreeSet may contain uncommitted empty regions; once they are recommitted,
      // their TAMS may hold stale values, so reset them here.
      ctx->reset_top_at_mark_start(region);
    }
  }
}

void ShenandoahTraversalGC::prepare() {
  _heap->collection_set()->clear();
  assert(_heap->collection_set()->count() == 0, "collection set not clear");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
    _heap->make_parsable(true);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
    _heap->resize_tlabs();
  }

  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
  assert(!_heap->marking_context()->is_complete(), "should not be complete");

  ShenandoahFreeSet* free_set = _heap->free_set();
  ShenandoahCollectionSet* collection_set = _heap->collection_set();

  // Find collection set
  _heap->heuristics()->choose_collection_set(collection_set);
  prepare_regions();

  // Rebuild free set
  free_set->rebuild();

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
                     collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
}

void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahCSetRootScanner rp(nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);
      ShenandoahInitTraversalCollectionTask traversal_task(&rp);
      _heap->workers()->run_task(&traversal_task);
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}

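// Per-worker traversal loop: picks the traversal closure matching the current mode
// (degenerated vs. concurrent, class unloading, string dedup) and drains the task
// queues until termination or cancellation.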
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

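    // Work in strides, so that cancellation is re-checked regularly while draining
    // the leftover queues.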
    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

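    // Drain SATB buffers that mutators have completed concurrently; per the note at the
    // top of this file, they carry the new values stored by concurrent oop stores.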
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(task) ||
          queues->steal(worker_id, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(_heap);

      if (terminator->offer_termination(&tt)) return;
    }
  }
}

bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
  if (_heap->cancelled_gc()) {
    return true;
  }
  return false;
}

void ShenandoahTraversalGC::concurrent_traversal_collection() {
  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentTraversalCollectionTask task(&terminator);
    _heap->workers()->run_task(&task);
  }

  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    preclean_weak_refs();
  }
}

void ShenandoahTraversalGC::final_traversal_collection() {
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahAllRootScanner rp(nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc()) {
    fixup_roots();
    if (_heap->unload_classes()) {
      _heap->unload_classes_and_cleanup_tables(false);
    }
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->mark_complete_marking_context();

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
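        // A region whose top still equals TAMS saw no allocations during this cycle; only
        // such regions without live data are safe to trash immediately.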
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom() + ShenandoahForwarding::word_size();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

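// Updates root slots that still point to the old copies of evacuated objects so that
// they reference the forwarded copies instead.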
class ShenandoahTraversalFixRootsClosure : public OopClosure {
private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (!oopDesc::equals_raw(obj, forw)) {
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  inline void do_oop(oop* p) { do_oop_work(p); }
  inline void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
private:
  ShenandoahRootUpdater* _rp;

public:
  ShenandoahTraversalFixRootsTask(ShenandoahRootUpdater* rp) :
    AbstractGangTask("Shenandoah traversal fix roots"),
    _rp(rp) {
    assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be");
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahTraversalFixRootsClosure cl;
    ShenandoahForwardedIsAliveClosure is_alive;
    _rp->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahTraversalFixRootsClosure>(worker_id, &is_alive, &cl);
  }
};

void ShenandoahTraversalGC::fixup_roots() {
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootUpdater rp(_heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots, true /* update code cache */);
  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
  _heap->workers()->run_task(&update_roots_task);
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}

ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}

class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
    shenandoah_assert_rp_isalive_installed();
    traversal_gc->main_loop((uint) 0, &terminator, true);
  }
};

class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
          _queue(q), _thread(Thread::current()),
          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
          _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahEvacOOMScope evac_scope;
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
          _queue(q), _thread(Thread::current()),
          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
          _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahEvacOOMScope evac_scope;
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
          _queue(q), _thread(Thread::current()),
          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
          _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahEvacOOMScope oom_evac_scope;

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);

    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahTraversalCancelledGCYieldClosure yield;
    ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
    ResourceMark rm;
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL);
  }
};

void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This filters out the references whose referents
  // are alive. Note that the ReferenceProcessor already filters these out on
  // reference discovery, and that is where the bulk of the work is done. This
  // phase processes the leftovers that missed the initial filtering, i.e. references
  // whose referents were marked alive after the reference was discovered by the RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahTraversalPrecleanTask task(rp);
  workers->run_task(&task);

  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}

// Weak Reference Closures
class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    shenandoah_assert_rp_isalive_installed();

    traversal_gc->main_loop(_worker_id, _terminator, false);

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
          _worker_id(worker_id),
          _terminator(t),
          _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    shenandoah_assert_rp_isalive_installed();

    ShenandoahEvacOOMScope evac_scope;
    traversal_gc->main_loop(_worker_id, _terminator, false);

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

void ShenandoahTraversalGC::weak_refs_work() {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because we
  // would then miss marking JNI weak refs; see the implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit();

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");

}

class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                                      ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);

    ShenandoahForwardedIsAliveClosure is_alive;
    if (!heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
    ShenandoahPushWorkerQueuesScope scope(_workers,
                                          traversal_gc->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    traversal_gc->task_queues()->reserve(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
    ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

void ShenandoahTraversalGC::weak_refs_work_doit() {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed for the
  // single-threaded path in the reference processor. They share queue 0 for tracking
  // work, which simplifies the implementation. Since the RP may decide to call
  // complete_gc several times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
  ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);

  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
  if (!_heap->is_degenerated_gc_in_progress()) {
    ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  } else {
    ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  }

  pt.print_all_references();
  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
}