/*
 * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"

ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) {
  _length = heaps->length();
  _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC);
  for (int h = 0; h < _length; h++) {
    _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h));
  }
}

ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() {
  FREE_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _iters);
}

void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) {
  for (int c = 0; c < _length; c++) {
    _iters[c].parallel_blobs_do(f);
  }
}

ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) :
        _heap(heap), _claimed_idx(0), _finished(false) {
}

void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");

  /*
   * Parallel code heap walk.
   *
   * All threads scan all code heaps, but only one thread executes the
   * closure for a given blob. This is achieved by recording "claimed"
   * blocks of blobs: a thread that claims a block processes every blob
   * in it, while other threads fast-forward past the block without
   * processing.
   *
   * Threads that arrive late return immediately once the iterator is finished.
   */
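  /*
   * For example, with the stride of 256 chosen below: a thread reaching
   * blob index 512 at a block boundary attempts
   * Atomic::cmpxchg(512 + 256, &_claimed_idx, 512). On success it owns
   * blobs 512..767; on failure another thread has already claimed them,
   * and this thread skips ahead without running the closure.
   */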

  if (_finished) {
    return;
  }

  int stride = 256; // educated guess
  int stride_mask = stride - 1;
  assert(is_power_of_2(stride), "sanity");

  int count = 0;
  bool process_block = true;

  for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) {
    int current = count++;
    if ((current & stride_mask) == 0) {
      process_block = (current >= _claimed_idx) &&
                      (Atomic::cmpxchg(current + stride, &_claimed_idx, current) == current);
    }
    if (process_block) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif
      }
    }
  }

  _finished = true;
}

ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table;
int ShenandoahCodeRoots::_disarmed_value = 1;

void ShenandoahCodeRoots::initialize() {
  _nmethod_table = new ShenandoahNMethodTable();
}

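// ShenandoahCodeRootsStyle selects the code root scanning strategy:
// styles 0 and 1 walk the code cache directly (sequentially or in
// parallel) and keep no side table, so nmethod registration is a no-op;
// style 2 maintains a ShenandoahNMethodTable on the side.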
void ShenandoahCodeRoots::register_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->register_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->unregister_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::flush_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->flush_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::prepare_concurrent_unloading() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  _disarmed_value++;
  // 0 is reserved for new nmethods
  if (_disarmed_value == 0) {
    _disarmed_value = 1;
  }

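  // Publish the new disarmed value to every Java thread: nmethod entry
  // barriers compare the value stored in the nmethod against this
  // thread-local copy, and take the slow path when they differ.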
  JavaThreadIteratorWithHandle jtiwh;
  for (JavaThread *thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
    ShenandoahThreadLocalData::set_disarmed_value(thr, _disarmed_value);
  }
}

class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
private:
  bool            _unloading_occurred;
  volatile bool   _failed;
  ShenandoahHeap* _heap;

  void set_failed() {
    Atomic::store(true, &_failed);
  }

  void unlink(nmethod* nm) {
    // Unlinking of the dependencies must happen before the
    // handshake separating unlink and purge.
    nm->flush_dependencies(false /* delete_immediately */);

    // We don't need to take the lock when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per-nmethod lock.
    nm->unlink_from_method(false /* acquire_lock */);

    if (nm->is_osr_method()) {
      // Invalidate the osr nmethod only once
      nm->invalidate_osr_method();
    }
  }
public:
  ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false),
      _heap(ShenandoahHeap::heap()) {}

  virtual void do_nmethod(nmethod* nm) {
    if (failed()) {
      return;
    }

    ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm);
    assert(!nm_data->is_unregistered(), "Should not see unregistered entry");

    if (!nm->is_alive()) {
      return;
    }

    ShenandoahReentrantLocker locker(nm_data->lock());

    if (nm->is_unloading()) {
      unlink(nm);
      return;
    }

    // Heal oops and disarm
    ShenandoahNMethod::heal_nmethod(nm);
    ShenandoahNMethod::disarm_nmethod(nm);

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};

class ShenandoahUnlinkTask : public AbstractGangTask {
private:
  ShenandoahNMethodUnlinkClosure      _cl;
  ICRefillVerifier*                   _verifier;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
    AbstractGangTask("ShenandoahNMethodUnlinkTask"),
    _cl(unloading_occurred),
    _verifier(verifier),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahUnlinkTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ICRefillVerifierMark mark(_verifier);
    ShenandoahEvacOOMScope scope;
    _iterator.nmethods_do(&_cl);
  }

  bool success() const {
    return !_cl.failed();
  }
};

void ShenandoahCodeRoots::unlink(WorkGang* workers, bool unloading_occurred) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  for (;;) {
    ICRefillVerifier verifier;

    {
      ShenandoahUnlinkTask task(unloading_occurred, &verifier);
      workers->run_task(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}

class ShenandoahNMethodPurgeClosure : public NMethodClosure {
public:
  virtual void do_nmethod(nmethod* nm) {
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};

class ShenandoahNMethodPurgeTask : public AbstractGangTask {
private:
  ShenandoahNMethodPurgeClosure       _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahNMethodPurgeTask() :
    AbstractGangTask("ShenandoahNMethodPurgeTask"),
    _cl(),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahNMethodPurgeTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    _iterator.nmethods_do(&_cl);
  }
};

void ShenandoahCodeRoots::purge(WorkGang* workers) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  ShenandoahNMethodPurgeTask task;
  workers->run_task(&task);
}

ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
        _par_iterator(CodeCache::heaps()),
        _table_snapshot(NULL) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers");
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
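      // Hold CodeCache_lock across the entire iteration; it is released
      // in the destructor, after the snapshot is finished.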
      CodeCache_lock->lock_without_safepoint_check();
      _table_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      ShenandoahCodeRoots::table()->finish_iteration(_table_snapshot);
      _table_snapshot = NULL;
      CodeCache_lock->unlock();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

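// Dispatch to the scanning strategy selected by ShenandoahCodeRootsStyle:
// 0 - a single claiming thread walks the whole code cache; 1 - parallel
// code cache walk; 2 - parallel walk over the table snapshot, where
// CSET_FILTER allows skipping nmethods without collection set oops.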
template<bool CSET_FILTER>
void ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(CodeBlobClosure *f) {
  switch (ShenandoahCodeRootsStyle) {
    case 0: {
      if (_seq_claimed.try_set()) {
        CodeCache::blobs_do(f);
      }
      break;
    }
    case 1: {
      _par_iterator.parallel_blobs_do(f);
      break;
    }
    case 2: {
      ShenandoahCodeRootsIterator::fast_parallel_blobs_do<CSET_FILTER>(f);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

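// Visits all code roots, without collection set filtering.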
void ShenandoahAllCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<false>(f);
}

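// Visits only code roots that may point into the collection set.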
void ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<true>(f);
}

template <bool CSET_FILTER>
void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(_table_snapshot != NULL, "Sanity");
  _table_snapshot->parallel_blobs_do<CSET_FILTER>(f);
}