/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"

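// Closure for init-mark root scan: marks each root reference and pushes the
// object to the worker-local queue for later draining. UPDATE_REFS selects
// whether forwarding pointers are resolved while scanning.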
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
public:
  ShenandoahInitMarkRootsTask(ShenandoahAllRootScanner* rp) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %u", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there may be embedded classes/oops in the generated code,
    //      which we will never visit during mark. Without code cache invalidation, as in (a),
    //      we risk executing that code cache blob, and crashing.
    if (heap->unload_classes()) {
      _rp->strong_roots_do(worker_id, oops);
    } else {
      _rp->roots_do(worker_id, oops);
    }
  }
};

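// Task for the root-update pauses: walks all roots unconditionally
// (AlwaysTrueClosure) and updates references to point to the new copies
// of evacuated objects.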
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootUpdater*  _root_updater;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootUpdater* root_updater) :
    AbstractGangTask("Shenandoah update roots task"),
    _root_updater(root_updater) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    AlwaysTrueClosure always_true;
    _root_updater->roots_do<AlwaysTrueClosure, ShenandoahUpdateRefsClosure>(worker_id, &always_true, &cl);
  }
};

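// The concurrent marking task. Each worker joins the suspendible thread set
// (so marking can yield to safepoints), scans code roots if it claims them,
// and then runs the cancellable marking loop.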
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Concurrent Marking"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

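// Visits threads during final mark: drains the thread-local SATB buffers,
// and, when the optional closures are passed in, walks the thread stacks
// and/or the nmethods the thread has on its stack.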
class ShenandoahSATBAndRemarkCodeRootsThreadsClosure : public ThreadClosure {
private:
  ShenandoahConcMarkSATBBufferClosure* _satb_cl;
  OopClosure*                    const _cl;
  MarkingCodeBlobClosure*              _code_cl;
  int _thread_parity;

public:
  ShenandoahSATBAndRemarkCodeRootsThreadsClosure(ShenandoahConcMarkSATBBufferClosure* satb_cl, OopClosure* cl, MarkingCodeBlobClosure* code_cl) :
    _satb_cl(satb_cl), _cl(cl), _code_cl(code_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
        if (_cl != NULL) {
          ResourceMark rm;
          jt->oops_do(_cl, _code_cl);
        } else if (_code_cl != NULL) {
          // In theory it should not be necessary to explicitly walk the nmethods to find roots
          // for concurrent marking; however, oops reachable from nmethods have very complex
          // lifecycles:
          // * Alive if on the stack of an executing method
          // * Weakly reachable otherwise
          // Some objects reachable from nmethods, such as the class loader (or klass_holder)
          // of the receiver, should be live by the SATB invariant, but other oops recorded in
          // nmethods may behave differently.
          jt->nmethods_do(_code_cl);
        }
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

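// The final (stop-the-world) marking task: drains the remaining SATB buffers,
// re-scans code roots if a degenerated cycle bypassed the concurrent phase,
// and finishes the marking loop without cancellation.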
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ShenandoahParallelWorkerSession worker_session(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // First drain remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it doesn't really matter. Adds about 1ms to
    // full-gc.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahConcMarkSATBBufferClosure cl(q);
      ShenandoahSATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      bool do_nmethods = heap->unload_classes();
      if (heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure resolve_mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&resolve_mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahStoreValEnqueueBarrier ? &resolve_mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
      } else {
        ShenandoahMarkRefsClosure mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahStoreValEnqueueBarrier ? &mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
      }
    }

    if (heap->is_degenerated_gc_in_progress()) {
      // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
      // let's check here.
      _cm->concurrent_scan_code_roots(worker_id, rp);
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

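// Scans the root set at the init-mark pause and seeds the task queues with
// the initial marking work. RESOLVE mode is chosen when the heap still
// contains forwarded objects; otherwise plain NONE scanning skips the
// forwarding-pointer checks.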
void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahAllRootScanner root_proc(nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // Can save time not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  }

  clear_claim_codecache();
}

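// Fixes up all root references at a pause. Regular update-refs pauses skip
// the code cache (update_code_cache = false); full and degenerated GC pauses
// update it along with everything else.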
void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  bool update_code_cache = true; // initialize to safer value
  switch (root_phase) {
    case ShenandoahPhaseTimings::update_roots:
    case ShenandoahPhaseTimings::final_update_refs_roots:
      update_code_cache = false;
      break;
    case ShenandoahPhaseTimings::full_gc_update_roots:
    case ShenandoahPhaseTimings::full_gc_adjust_roots:
    case ShenandoahPhaseTimings::degen_gc_update_roots:
      update_code_cache = true;
      break;
    default:
      ShouldNotReachHere();
  }

  ShenandoahGCPhase phase(root_phase);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = _heap->workers()->active_workers();

  ShenandoahRootUpdater root_updater(nworkers, root_phase, update_code_cache);
  ShenandoahUpdateRootsTask update_roots(&root_updater);
  _heap->workers()->run_task(&update_roots);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

class ShenandoahUpdateThreadRootsTask : public AbstractGangTask {
private:
  ShenandoahThreadRoots           _thread_roots;
  ShenandoahPhaseTimings::Phase   _phase;
  ShenandoahGCWorkerPhase         _worker_phase;
public:
  ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Update Thread Roots"),
    _thread_roots(phase, is_par),
    _phase(phase),
    _worker_phase(phase) {}

  void work(uint worker_id) {
    ShenandoahUpdateRefsClosure cl;
    _thread_roots.oops_do(&cl, NULL, worker_id);
  }
};

void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahGCPhase phase(root_phase);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  WorkGang* workers = _heap->workers();
  bool is_par = workers->active_workers() > 1;

  ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
  workers->run_task(&task);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }

  ShenandoahBarrierSet::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);
}

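// Scans code cache roots concurrently. Only the worker that wins
// claim_codecache() does the scan, and only when classes are not being
// unloaded: with class unloading, code roots are covered by the strong-root
// scan at init-mark instead (see the rationale in ShenandoahInitMarkRootsTask).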
void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So, we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

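// Drives concurrent marking: enables reference discovery if needed, installs
// the is-alive closure for the reference processor, and runs the marking task
// on all active workers until the queues drain or the GC is cancelled.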
void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
}

void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // It does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by the initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    StrongRootsScope scope(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }
}

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

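// Keep-alive closures for reference processing: they mark through the referent
// exactly like regular marking does. The Update variant additionally updates
// the slot if it still points to a forwarded object.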
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we will miss marking JNI Weak refs then, see implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed for
  // the single-threaded path in RP. They share queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());

  {
    ShenandoahGCPhase phase(phase_process);

    if (_heap->has_forwarded_objects()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      const ReferenceProcessorStats& stats =
        rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
      _heap->tracer()->report_gc_reference_stats(stats);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      const ReferenceProcessorStats& stats =
        rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
      _heap->tracer()->report_gc_reference_stats(stats);
    }

    pt.print_all_references();

    assert(task_queues()->is_empty(), "Should be empty");
  }
}

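// Support closures for concurrent precleaning: the yield closure lets the
// reference processor bail out when GC is cancelled, and the complete-gc
// closure drains whatever marking work precleaning has generated,
// single-threaded on queue 0.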
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    assert(!sh->has_forwarded_objects(), "No forwarded objects expected here");

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    ShenandoahIsAliveClosure is_alive;
    ShenandoahCMKeepAliveClosure keep_alive(q);
    ResourceMark rm;
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL);
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out on reference
  // discovery, and the bulk of the work is done there. This phase processes the
  // leftovers that missed the initial filtering, i.e. when the referent was marked
  // alive after the reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %u", worker_id);
  return _task_queues->queue(worker_id);
}

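// Selects the marking closure matching the current mode (class unloading,
// forwarded objects, string dedup) and runs the marking loop with it.
// The per-worker liveness cache is flushed back when the loop exits.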
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  ShenandoahLiveData* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) {
  int seed = 17;
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  q = get_queue(worker_id);

  ShenandoahConcMarkSATBBufferClosure drain_satb(q);
  ShenandoahSATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, &seed, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

bool ShenandoahConcurrentMark::claim_codecache() {
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  _claimed_codecache.unset();
}