
src/hotspot/share/gc/shenandoah/shenandoahLock.cpp

Old version:

#include "gc/shenandoah/shenandoahLock.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.inline.hpp"

void ShenandoahLock::contended_lock(bool allow_block_for_safepoint) {
  Thread* thread = Thread::current();
  if (allow_block_for_safepoint && thread->is_Java_thread()) {
    contended_lock_internal<true>(JavaThread::cast(thread));
  } else {
    contended_lock_internal<false>(nullptr);
  }
}

template<bool ALLOW_BLOCK>
void ShenandoahLock::contended_lock_internal(JavaThread* java_thread) {
  assert(!ALLOW_BLOCK || java_thread != nullptr, "Must have a Java thread when allowing block.");
  // Spin this much, but only on multi-processor systems.
  int ctr = os::is_MP() ? 0xFF : 0;
  // Apply TTAS to avoid more expensive CAS calls if the lock is still held by another thread.
  while (Atomic::load(&_state) == locked ||
         Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
    if (ctr > 0 && !SafepointSynchronize::is_synchronizing()) {
      // Lightly contended, spin a little if no safepoint is pending.
      SpinPause();
      ctr--;
    } else if (ALLOW_BLOCK) {
      ThreadBlockInVM block(java_thread);
      if (SafepointSynchronize::is_synchronizing()) {
        // If a safepoint is pending, we want to block and allow the safepoint to proceed.
        // Normally, the TBIVM above would block us in its destructor.
        //
        // But that blocking only happens when TBIVM knows the thread poll is armed.
        // There is a window between announcing a safepoint and arming the thread poll
        // during which trying to continuously enter TBIVM is counter-productive.
        // Under high contention, we may end up going in circles thousands of times.
        // To avoid that, we wait here until the local poll is armed and then proceed
        // to the TBIVM exit for blocking. We do not SpinPause, but yield to let the
        // VM thread arm the poll sooner.
        while (SafepointSynchronize::is_synchronizing() &&
               !SafepointMechanism::local_poll_armed(java_thread)) {
          os::naked_yield();
        }
      } else {
        os::naked_yield();
      }
    } else {
      os::naked_yield();
    }
  }
}

ShenandoahSimpleLock::ShenandoahSimpleLock() {
  assert(os::mutex_init_done(), "Too early!");
}

void ShenandoahSimpleLock::lock() {
  _lock.lock();
}

void ShenandoahSimpleLock::unlock() {
  _lock.unlock();
}

ShenandoahReentrantLock::ShenandoahReentrantLock() :
  ShenandoahSimpleLock(), _owner(nullptr), _count(0) {
  assert(os::mutex_init_done(), "Too early!");
}

ShenandoahReentrantLock::~ShenandoahReentrantLock() {
  assert(_count == 0, "Unbalance");
}

New version:

#include "gc/shenandoah/shenandoahLock.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.inline.hpp"

void ShenandoahLock::contended_lock(bool allow_block_for_safepoint) {
  Thread* thread = Thread::current();
  if (allow_block_for_safepoint && thread->is_Java_thread()) {
    contended_lock_internal<true>(JavaThread::cast(thread));
  } else {
    contended_lock_internal<false>(nullptr);
  }
}

template<bool ALLOW_BLOCK>
void ShenandoahLock::contended_lock_internal(JavaThread* java_thread) {
  assert(!ALLOW_BLOCK || java_thread != nullptr, "Must have a Java thread when allowing block.");
  // Spin this much, but only on multi-processor systems.
  int ctr = os::is_MP() ? 0xFF : 0;
  int yields = 0;
  // Apply TTAS to avoid more expensive CAS calls if the lock is still held by another thread.
  while (Atomic::load(&_state) == locked ||
         Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
    if (ctr > 0 && !SafepointSynchronize::is_synchronizing()) {
      // Lightly contended, spin a little if no safepoint is pending.
      SpinPause();
      ctr--;
    } else if (ALLOW_BLOCK) {
      ThreadBlockInVM block(java_thread);
      if (SafepointSynchronize::is_synchronizing()) {
        // If a safepoint is pending, we want to block and allow the safepoint to proceed.
        // Normally, the TBIVM above would block us in its destructor.
        //
        // But that blocking only happens when TBIVM knows the thread poll is armed.
        // There is a window between announcing a safepoint and arming the thread poll
        // during which trying to continuously enter TBIVM is counter-productive.
        // Under high contention, we may end up going in circles thousands of times.
        // To avoid that, we wait here until the local poll is armed and then proceed
        // to the TBIVM exit for blocking. We do not SpinPause, but yield to let the
        // VM thread arm the poll sooner.
        while (SafepointSynchronize::is_synchronizing() &&
               !SafepointMechanism::local_poll_armed(java_thread)) {
          yield_or_sleep(yields);
        }
      } else {
        yield_or_sleep(yields);
      }
    } else {
      yield_or_sleep(yields);
    }
  }
}
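
The load-before-CAS pattern above is test-and-test-and-set (TTAS): spin on a cheap
load that keeps the cache line in shared state, and only attempt the read-modify-write
CAS once the lock looks free. A minimal standalone sketch of the same idea, using
std::atomic rather than HotSpot's Atomic wrappers (illustrative only, not part of
this change):

#include <atomic>

class TTASLock {
  std::atomic<int> _state{0};  // 0 = unlocked, 1 = locked
public:
  void lock() {
    int expected = 0;
    // "Test": spin on the plain load while the lock is held;
    // "test-and-set": CAS only once it looked free.
    while (_state.load(std::memory_order_relaxed) == 1 ||
           !_state.compare_exchange_weak(expected, 1,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed)) {
      expected = 0;  // a failed CAS overwrites 'expected'
    }
  }
  void unlock() {
    _state.store(0, std::memory_order_release);
  }
};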

void ShenandoahLock::yield_or_sleep(int &yields) {
  // Simple yield-sleep policy: do one 100us sleep after every N yields.
  // Tested with different values of N; N = 3 gave the best performance.
  if (yields < 3) {
    os::naked_yield();
    yields++;
  } else {
    os::naked_short_nanosleep(100000);
    yields = 0;
  }
}
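
For context, the same yield-then-sleep cadence in portable C++, with std::this_thread
standing in for os::naked_yield and os::naked_short_nanosleep (a sketch, not the
HotSpot code; the threshold of 3 mirrors the comment above):

#include <chrono>
#include <cstdio>
#include <thread>

static void yield_or_sleep_sketch(int& yields) {
  if (yields < 3) {
    std::this_thread::yield();  // cheap: give up the rest of the timeslice
    yields++;
  } else {
    std::this_thread::sleep_for(std::chrono::microseconds(100));  // back off harder
    yields = 0;
  }
}

int main() {
  int yields = 0;
  // Cadence over 8 rounds: yield, yield, yield, sleep, yield, yield, yield, sleep.
  for (int i = 0; i < 8; i++) {
    std::printf("round %d: %s\n", i, (yields == 3) ? "sleep" : "yield");
    yield_or_sleep_sketch(yields);
  }
  return 0;
}

Each waiter pays three cheap yields before every 100us sleep, which keeps wakeup
latency low under short contention while capping CPU burn when the lock stays held
for a long time.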

ShenandoahSimpleLock::ShenandoahSimpleLock() {
  assert(os::mutex_init_done(), "Too early!");
}

void ShenandoahSimpleLock::lock() {
  _lock.lock();
}

void ShenandoahSimpleLock::unlock() {
  _lock.unlock();
}

ShenandoahReentrantLock::ShenandoahReentrantLock() :
  ShenandoahSimpleLock(), _owner(nullptr), _count(0) {
  assert(os::mutex_init_done(), "Too early!");
}

ShenandoahReentrantLock::~ShenandoahReentrantLock() {
  assert(_count == 0, "Unbalance");
}
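
For orientation: contended_lock() is the slow path. The inline fast path lives in
shenandoahLock.hpp and tries a single CAS first; roughly (a paraphrase of the caller
shape, not the exact header code):

void ShenandoahLock::lock(bool allow_block_for_safepoint) {
  // Fast path: one CAS. Fall into the contended slow path (spin,
  // yield/sleep, safepoint-aware blocking) only when it fails.
  if (Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
    contended_lock(allow_block_for_safepoint);
  }
}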