/*
 * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"

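// Parallel walk over the whole code cache: the cache-level iterator owns one
// heap-level iterator per code heap, and each worker thread drives all of
// them in turn (see parallel_blobs_do).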
ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) {
  _length = heaps->length();
  _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC);
  for (int h = 0; h < _length; h++) {
    _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h));
  }
}

ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() {
  FREE_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _iters);
}

void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) {
  for (int c = 0; c < _length; c++) {
    _iters[c].parallel_blobs_do(f);
  }
}

ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) :
        _heap(heap), _claimed_idx(0), _finished(false) {
}

void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");

  /*
   * Parallel code heap walk.
   *
   * This code makes all threads scan all code heaps, but only one thread
   * executes the closure on a given blob. This is achieved by recording the
   * "claimed" blocks: the thread that has claimed a block processes all blobs
   * in it, while other threads fast-forward past the block without processing.
   * For example, with a stride of 256, the thread that wins the claim at blob
   * index 0 processes blobs 0..255, the winner at index 256 processes blobs
   * 256..511, and so on.
   *
   * Threads arriving late return immediately if the iterator is already finished.
   */

  if (_finished) {
    return;
  }

  int stride = 256; // educated guess
  int stride_mask = stride - 1;
  assert(is_power_of_2(stride), "sanity");

  int count = 0;
  bool process_block = true;

  for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) {
    int current = count++;
    if ((current & stride_mask) == 0) {
      // At a block boundary: try to claim the next stride of blobs. The thread
      // whose CAS advances _claimed_idx from "current" owns this block.
      process_block = (current >= _claimed_idx) &&
                      (Atomic::cmpxchg(current + stride, &_claimed_idx, current) == current);
    }
    if (process_block) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif
      }
    }
  }

  _finished = true;
}

ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table;
int ShenandoahCodeRoots::_disarmed_value = 1;

void ShenandoahCodeRoots::initialize() {
  _nmethod_table = new ShenandoahNMethodTable();
}

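// ShenandoahCodeRootsStyle selects the code root bookkeeping strategy:
// styles 0 and 1 keep no dedicated records here (scans walk the whole code
// cache, serially or in parallel, respectively), while style 2 tracks
// nmethods in the ShenandoahNMethodTable so scans visit only known entries.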
void ShenandoahCodeRoots::register_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->register_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->unregister_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::flush_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->flush_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

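// Arm all nmethods for the next unloading cycle: bump the global disarmed
// value and publish it to every Java thread. nmethods still carrying an older
// value are considered armed, and get healed and disarmed before they can be
// entered (compare heal_nmethod/disarm_nmethod in the unlink closure below).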
void ShenandoahCodeRoots::prepare_concurrent_unloading() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  _disarmed_value++;
  // 0 is reserved for new nmethods, so skip it when the counter wraps around
  if (_disarmed_value == 0) {
    _disarmed_value = 1;
  }

  JavaThreadIteratorWithHandle jtiwh;
  for (JavaThread *thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
    ShenandoahThreadLocalData::set_disarmed_value(thr, _disarmed_value);
  }
}

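// First pass of concurrent class unloading: unlink nmethods that are
// unloading, and heal + disarm the ones that remain alive. Clearing compiled
// ICs may run out of transitional IC stubs; this is reported via failed() so
// the caller can refill the stubs and retry (see ShenandoahCodeRoots::unlink).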
class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
private:
  bool            _unloading_occurred;
  volatile bool   _failed;
  ShenandoahHeap* _heap;

  void set_failed() {
    Atomic::store(true, &_failed);
  }

  void unlink(nmethod* nm) {
    // Unlinking of the dependencies must happen before the
    // handshake separating unlink and purge.
    nm->flush_dependencies(false /* delete_immediately */);

    // unlink_from_method will take the CompiledMethod_lock.
    // In this case we don't strictly need it when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per-nmethod lock.
    nm->unlink_from_method();

    if (nm->is_osr_method()) {
      // Invalidate the osr nmethod only once
      nm->invalidate_osr_method();
    }
  }

public:
  ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false),
      _heap(ShenandoahHeap::heap()) {}

  virtual void do_nmethod(nmethod* nm) {
    if (failed()) {
      return;
    }

    ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm);
    assert(!nm_data->is_unregistered(), "Should not see unregistered entry");

    if (!nm->is_alive()) {
      return;
    }

    ShenandoahReentrantLocker locker(nm_data->lock());

    if (nm->is_unloading()) {
      unlink(nm);
      return;
    }

    // Heal oops and disarm
    ShenandoahEvacOOMScope scope;
    ShenandoahNMethod::heal_nmethod(nm);
    ShenandoahNMethod::disarm_nmethod(nm);

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};

class ShenandoahUnlinkTask : public AbstractGangTask {
private:
  ShenandoahNMethodUnlinkClosure      _cl;
  ICRefillVerifier*                   _verifier;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
    AbstractGangTask("ShenandoahNMethodUnlinkTask"),
    _cl(unloading_occurred),
    _verifier(verifier),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahUnlinkTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ICRefillVerifierMark mark(_verifier);
    _iterator.nmethods_do(&_cl);
  }

  bool success() const {
    return !_cl.failed();
  }
};

void ShenandoahCodeRoots::unlink(WorkGang* workers, bool unloading_occurred) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  for (;;) {
    ICRefillVerifier verifier;

    {
      ShenandoahUnlinkTask task(unloading_occurred, &verifier);
      workers->run_task(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}

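// Second pass of concurrent class unloading: after the handshake that
// separates unlink and purge, nmethods still found to be unloading can safely
// be made unloaded.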
class ShenandoahNMethodPurgeClosure : public NMethodClosure {
public:
  virtual void do_nmethod(nmethod* nm) {
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};

class ShenandoahNMethodPurgeTask : public AbstractGangTask {
private:
  ShenandoahNMethodPurgeClosure       _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahNMethodPurgeTask() :
    AbstractGangTask("ShenandoahNMethodPurgeTask"),
    _cl(),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahNMethodPurgeTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    _iterator.nmethods_do(&_cl);
  }
};

void ShenandoahCodeRoots::purge(WorkGang* workers) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  ShenandoahNMethodPurgeTask task;
  workers->run_task(&task);
}

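// Safepoint iterator over the code roots. With style 2 it holds CodeCache_lock
// and an nmethod table snapshot for its whole lifetime, keeping the set of
// registered nmethods stable while workers iterate.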
ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
        _par_iterator(CodeCache::heaps()),
        _table_snapshot(NULL) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers");
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      CodeCache_lock->lock_without_safepoint_check();
      _table_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      ShenandoahCodeRoots::table()->finish_iteration(_table_snapshot);
      _table_snapshot = NULL;
      CodeCache_lock->unlock();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

template <bool CSET_FILTER>
void ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(CodeBlobClosure *f) {
  switch (ShenandoahCodeRootsStyle) {
    case 0: {
      if (_seq_claimed.try_set()) {
        CodeCache::blobs_do(f);
      }
      break;
    }
    case 1: {
      _par_iterator.parallel_blobs_do(f);
      break;
    }
    case 2: {
      ShenandoahCodeRootsIterator::fast_parallel_blobs_do<CSET_FILTER>(f);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahAllCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<false>(f);
}

void ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<true>(f);
}

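// Style 2 fast path: walk only the nmethods captured in the table snapshot.
// CSET_FILTER selects whether the walk is filtered against the collection
// set, as used by ShenandoahCsetCodeRootsIterator above.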
template <bool CSET_FILTER>
void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(_table_snapshot != NULL, "Sanity");
  _table_snapshot->parallel_blobs_do<CSET_FILTER>(f);
}