/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc_implementation/shared/parallelCleaning.hpp"
#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahClosures.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
#include "gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahTimingTracker.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"
#include "gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"

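// Marks objects reachable from the root set during init mark. The UPDATE_REFS
// template parameter selects at compile time how forwarded objects encountered
// through roots are treated: ignored, resolved, or updated in place (see
// ShenandoahConcurrentMark::mark_through_ref).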
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahStrDedupQueue*  _dedup_queue;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context, _dedup_queue);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _dedup_queue(dq),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _dedup_queue(NULL),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _dedup_queue(dq),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

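// Gang task that scans the root set at the init-mark safepoint and seeds the
// per-worker mark queues with the objects discovered there.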
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, err_msg("Queue has not been reserved for worker id: %d", worker_id));

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);
    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q, NULL);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there could be embedded classes/oops in the generated code,
    //      which we would otherwise never visit during marking. Without the code cache
    //      invalidation of (a), we risk executing such a code cache blob, and crashing.
    //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves init
    //      mark pause time.

    CLDToOopClosure clds_cl(oops);
    MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);

    ResourceMark m;
    if (heap->unload_classes()) {
      _rp->process_strong_roots(oops, &clds_cl, NULL, &blobs_cl, NULL, worker_id);
    } else {
      if (ShenandoahConcurrentScanCodeRoots) {
        CodeBlobClosure* code_blobs = NULL;
#ifdef ASSERT
        ShenandoahAssertToSpaceClosure assert_to_space_oops;
        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
        // If concurrent code cache evacuation is disabled, the code cache should have only
        // to-space ptrs. Otherwise, it should have to-space ptrs only if marking does not
        // update refs.
        if (!heap->has_forwarded_objects()) {
          code_blobs = &assert_to_space;
        }
#endif
        _rp->process_all_roots(oops, &clds_cl, code_blobs, NULL, worker_id);
      } else {
        _rp->process_all_roots(oops, &clds_cl, &blobs_cl, NULL, worker_id);
      }
    }
  }
};

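// Gang task that rewrites root references to point to the to-space copies of
// their targets, optionally fixing up code cache blobs as well.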
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  const bool _update_code_cache;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
    AbstractGangTask("Shenandoah update roots task"),
    _rp(rp),
    _update_code_cache(update_code_cache) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    CLDToOopClosure cldCl(&cl);

    CodeBlobClosure* code_blobs;
    CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
#ifdef ASSERT
    ShenandoahAssertToSpaceClosure assert_to_space_oops;
    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
#endif
    if (_update_code_cache) {
      code_blobs = &update_blobs;
    } else {
      code_blobs =
        DEBUG_ONLY(&assert_to_space)
        NOT_DEBUG(NULL);
    }
    _rp->process_all_roots(&cl, &cldCl, code_blobs, NULL, worker_id);
  }
};

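// Gang task for the concurrent marking phase proper: each worker drains its own
// queue, steals from others, and periodically drains SATB buffers, until the
// terminator declares global completion (or the GC gets cancelled).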
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

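// Applies a SATB buffer closure to every Java thread and to the shared
// VM-thread queue. Claiming via the strong-roots parity ensures each thread is
// processed exactly once, even when several workers walk the thread list.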
class ShenandoahSATBThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

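// Gang task for the final-mark pause: drains the leftover SATB buffers first,
// then finishes marking from whatever remains in the task queues.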
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    // First drain the remaining SATB buffers.
    // Note that this is not strictly necessary for mark-compact. But since the
    // task requires a StrongRootsScope, we need to claim the threads anyway,
    // and performance-wise it does not really matter: it adds about 1ms to a
    // full GC.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahStrDedupQueue* dq = NULL;
      if (ShenandoahStringDedup::is_enabled()) {
        dq = ShenandoahStringDedup::queue(worker_id);
      }
      ShenandoahSATBBufferClosure cl(q, dq);
      SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      ShenandoahSATBThreadsClosure tc(&cl);
      Threads::threads_do(&tc);
    }

    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // A degenerated cycle may bypass the concurrent cycle, in which case code
    // roots might not have been scanned yet; check here.
    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Cannot use more workers than there are task queues");

  ShenandoahRootProcessor root_proc(_heap, nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (_heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // We can save time by not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  bool update_code_cache = true; // initialize to the safer value
  switch (root_phase) {
    case ShenandoahPhaseTimings::update_roots:
    case ShenandoahPhaseTimings::final_update_refs_roots:
      update_code_cache = false;
      break;
    case ShenandoahPhaseTimings::full_gc_roots:
    case ShenandoahPhaseTimings::degen_gc_update_roots:
      update_code_cache = true;
      break;
    default:
      ShouldNotReachHere();
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  uint nworkers = heap->workers()->active_workers();

  ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_proc, update_code_cache);
  heap->workers()->run_task(&update_roots);

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }

  JavaThread::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);
}

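// Scans code cache roots concurrently, outside the init-mark pause. Exactly one
// worker wins the claim and performs the scan; see the rationale in
// ShenandoahInitMarkRootsTask::do_work above.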
void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So, we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // Enable ("weak") refs discovery.
    rp->enable_discovery(true /*verify_no_refs*/, true);
    rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
  if (!_heap->cancelled_gc()) {
    TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  }

  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}

void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // This works differently for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    ShenandoahTerminationTracker termination_tracker(full_gc ?
                                                     ShenandoahPhaseTimings::full_gc_mark_termination :
                                                     ShenandoahPhaseTimings::termination);

    SharedHeap::StrongRootsScope scope(_heap, true);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  } else {
    cleanup_jni_refs();
  }

  // And finally finish class unloading
  if (_heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(full_gc);
  } else if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::parallel_cleanup();
  }
  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());

  // Resize Metaspace
  MetaspaceGC::compute_new_size();
}

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

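// Keep-alive closures for reference processing: they mark the referents that
// the ReferenceProcessor decides to keep alive. The NONE variant is for stable
// heaps; the SIMPLE variant also updates forwarded references in place.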
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

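// Adapts a ReferenceProcessor ProcessTask to an AbstractGangTask, so that
// discovered references can be processed by the Shenandoah worker gang.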
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;

public:
  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enqueue_task(enqueue_task) {
  }

  void work(uint worker_id) {
    _enqueue_task.work(worker_id);
  }
};

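// Executor handed to the ReferenceProcessor, so that reference processing and
// enqueueing run on the Shenandoah worker gang. Typical use (sketch; see
// weak_refs_work_doit below):
//   ShenandoahRefProcTaskExecutor executor(workers);
//   rp->process_discovered_references(&is_alive, &keep_alive,
//                                     &complete_gc, &executor, ...);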
class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    // Shortcut execution if the task is empty.
    // This should be replaced with the generic ReferenceProcessor shortcut,
    // see JDK-8181214, JDK-8043575, JDK-6938732.
    if (task.is_empty()) {
      return;
    }

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);

    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }

  void execute(EnqueueTask& task) {
    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
    _workers->run_task(&enqueue_task_proxy);
  }
};

void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

 656 
 657 void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
 658   ReferenceProcessor* rp = _heap->ref_processor();
 659 
 660   ShenandoahPhaseTimings::Phase phase_process =
 661           full_gc ?
 662           ShenandoahPhaseTimings::full_gc_weakrefs_process :
 663           ShenandoahPhaseTimings::weakrefs_process;
 664 
 665   ShenandoahPhaseTimings::Phase phase_enqueue =
 666           full_gc ?
 667           ShenandoahPhaseTimings::full_gc_weakrefs_enqueue :
 668           ShenandoahPhaseTimings::weakrefs_enqueue;
 669 
 670   ShenandoahPhaseTimings::Phase phase_process_termination =
 671           full_gc ?
 672           ShenandoahPhaseTimings::full_gc_weakrefs_termination :
 673           ShenandoahPhaseTimings::weakrefs_termination;
 674 
 675   shenandoah_assert_rp_isalive_not_installed();
 676   ShenandoahIsAliveSelector is_alive;
 677   ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
 678 
 679   WorkGang* workers = _heap->workers();
 680   uint nworkers = workers->active_workers();
 681 
 682   rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs());
 683   rp->set_active_mt_degree(nworkers);
 684 
 685   assert(task_queues()->is_empty(), "Should be empty");
 686 
 687   // complete_gc and keep_alive closures instantiated here are only needed for
 688   // single-threaded path in RP. They share the queue 0 for tracking work, which
 689   // simplifies implementation. Since RP may decide to call complete_gc several
 690   // times, we need to be able to reuse the terminator.
 691   uint serial_worker_id = 0;
 692   ShenandoahTaskTerminator terminator(1, task_queues());
 693   ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
 694 
 695   ShenandoahRefProcTaskExecutor executor(workers);
 696 
 697   {
 698     ShenandoahGCPhase phase(phase_process);
 699     ShenandoahTerminationTracker phase_term(phase_process_termination);
 700 
 701     if (_heap->has_forwarded_objects()) {
 702       ShenandoahForwardedIsAliveClosure is_alive;
 703       ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
 704       rp->process_discovered_references(&is_alive, &keep_alive,
 705                                         &complete_gc, &executor,
 706                                         NULL, _heap->shenandoah_policy()->tracer()->gc_id());
 707     } else {
 708       ShenandoahIsAliveClosure is_alive;
 709       ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
 710       rp->process_discovered_references(&is_alive, &keep_alive,
 711                                         &complete_gc, &executor,
 712                                         NULL, _heap->shenandoah_policy()->tracer()->gc_id());
 713     }
 714 
 715     assert(task_queues()->is_empty(), "Should be empty");
 716   }
 717 
 718   {
 719     ShenandoahGCPhase phase(phase_enqueue);
 720     rp->enqueue_discovered_references(&executor);
 721   }
 722 }
 723 
 724 // No-op closure. Weak JNI refs are cleaned by iterating them.
 725 // Nothing else to do here.
 726 class ShenandoahCleanupWeakRootsClosure : public OopClosure {
 727   virtual void do_oop(oop* o) {}
 728   virtual void do_oop(narrowOop* o) {}
 729 };
 730 
 731 void ShenandoahConcurrentMark::cleanup_jni_refs() {
 732   ShenandoahIsAliveSelector is_alive;
 733   ShenandoahCleanupWeakRootsClosure cl;
 734   JNIHandles::weak_oops_do(is_alive.is_alive_closure(), &cl);
 735 }
 736 
 737 class ShenandoahCancelledGCYieldClosure : public YieldClosure {
 738 private:
 739   ShenandoahHeap* const _heap;
 740 public:
 741   ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 742   virtual bool should_return() { return _heap->cancelled_gc(); }
 743 };
 744 
 745 class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
 746 public:
 747   void do_void() {
 748     ShenandoahHeap* sh = ShenandoahHeap::heap();
 749     ShenandoahConcurrentMark* scm = sh->concurrent_mark();
 750     assert(sh->process_references(), "why else would we be here?");
 751     ShenandoahTaskTerminator terminator(1, scm->task_queues());
 752 
 753     ReferenceProcessor* rp = sh->ref_processor();
 754     shenandoah_assert_rp_isalive_installed();
 755 
 756     scm->mark_loop(0, &terminator, rp,
 757                    false, // not cancellable
 758                    false); // do not do strdedup
 759   }
 760 };
 761 
 762 class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
 763 private:
 764   ShenandoahObjToScanQueue* _queue;
 765   ShenandoahHeap* _heap;
 766   ShenandoahMarkingContext* const _mark_context;
 767 
 768   template <class T>
 769   inline void do_oop_nv(T* p) {
 770     ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, NO_DEDUP>(p, _heap, _queue, _mark_context);
 771   }
 772 
 773 public:
 774   ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 775     _queue(q),
 776     _heap(ShenandoahHeap::heap()),
 777     _mark_context(_heap->marking_context()) {}
 778 
 779   void do_oop(narrowOop* p) { do_oop_nv(p); }
 780   void do_oop(oop* p)       { do_oop_nv(p); }
 781 };
 782 
class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    if (sh->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL, sh->shenandoah_policy()->tracer()->gc_id());
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL, sh->shenandoah_policy()->tracer()->gc_id());
    }
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This filters out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out during
  // reference discovery, and that is where the bulk of the work is done. This
  // phase processes the leftovers that missed the initial filtering, i.e. when
  // the referent was marked alive after the reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  ReferenceProcessor* rp = _heap->ref_processor();

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  JavaThread::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, err_msg("No reserved queue for worker id: %d", worker_id));
  return _task_queues->queue(worker_id);
}

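// Instantiates the marking closure that matches the current cycle mode (class
// unloading on/off, forwarded objects present or not, string dedup on/off) and
// runs the templated mark loop with it. Callers go through the mark_loop()
// front-end, e.g.:
//   mark_loop(worker_id, terminator, rp,
//             true,  // cancellable
//             ShenandoahStringDedup::is_enabled());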
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator* t, ReferenceProcessor* rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

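// The marking loop itself: first drain any outstanding queues claimed from a
// previous phase, then alternate between draining completed SATB buffers,
// popping the local queue, and stealing from other queues, in strides of
// ShenandoahMarkLoopStride, until termination is offered and accepted.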
template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator) {
  int seed = 17;
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues");

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->cancelled_gc()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  q = get_queue(worker_id);

  ShenandoahStrDedupQueue* dq = NULL;
  if (ShenandoahStringDedup::is_enabled()) {
    dq = ShenandoahStringDedup::queue(worker_id);
  }

  ShenandoahSATBBufferClosure drain_satb(q, dq);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->cancelled_gc()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, &seed, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in the current stride, try to terminate.
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  _claimed_codecache.unset();
}