
src/hotspot/share/gc/shenandoah/shenandoahLock.hpp


#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHLOCK_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHLOCK_HPP

#include "gc/shenandoah/shenandoahPadding.hpp"
#include "memory/allocation.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepoint.hpp"

class ShenandoahLock {
private:
  enum LockState { unlocked = 0, locked = 1 };

  shenandoah_padding(0);
  volatile LockState _state;
  shenandoah_padding(1);
  Thread* volatile _owner;
  shenandoah_padding(2);

  template<bool ALLOW_BLOCK>
  void contended_lock_internal(JavaThread* java_thread);
  static void yield_or_sleep(int &yields);

public:
  ShenandoahLock() : _state(unlocked), _owner(nullptr) {};

  void lock(bool allow_block_for_safepoint) {
    assert(Atomic::load(&_owner) != Thread::current(), "reentrant locking attempt, would deadlock");

    if ((allow_block_for_safepoint && SafepointSynchronize::is_synchronizing()) ||
        (Atomic::cmpxchg(&_state, unlocked, locked) != unlocked)) {
      // 1. Java thread, and there is a pending safepoint. Dive into contended locking
      //    immediately without trying anything else, and block.
      // 2. Fast lock fails, dive into contended lock handling.
      contended_lock(allow_block_for_safepoint);
    }

    assert(Atomic::load(&_state) == locked, "must be locked");
    assert(Atomic::load(&_owner) == nullptr, "must not be owned");
    DEBUG_ONLY(Atomic::store(&_owner, Thread::current());)
  }

  void unlock() {
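
The newly declared yield_or_sleep(int &yields) helper is only declared in this header; its body lives in shenandoahLock.cpp, which is not shown on this page. As a rough illustration of the kind of backoff such a helper typically implements (yield the CPU for the first few failed attempts, then fall back to short sleeps), here is a minimal standalone sketch using standard C++ threading facilities instead of HotSpot's os:: layer. The function name, the threshold of 5 yields, and the 1 ms sleep are assumptions for this sketch, not values taken from the actual implementation.

#include <chrono>
#include <thread>

// Illustrative backoff in the spirit of yield_or_sleep(int &yields): the caller
// keeps a per-acquisition counter and calls this after every failed attempt to
// take the lock. Threshold and sleep length are assumed values.
static void yield_or_sleep_sketch(int& yields) {
  if (yields < 5) {
    // Early on, just give up the time slice; this is cheap and keeps latency
    // low when the lock is released quickly.
    std::this_thread::yield();
    yields++;
  } else {
    // After repeated failures, back off harder with a short sleep so the
    // waiter stops burning CPU while the owner makes progress.
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
}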
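
For context on how lock(bool) and unlock() are intended to be paired: in HotSpot this kind of lock is typically taken through a small stack-allocated guard so the unlock cannot be missed. The guard below is a hypothetical illustration of that RAII pattern against the ShenandoahLock class above; the name ShenandoahScopedLock and its exact shape are assumptions for this sketch, not the guard defined in the real sources.

// Hypothetical RAII guard for ShenandoahLock, shown only to illustrate the
// intended lock()/unlock() pairing.
class ShenandoahScopedLock {
private:
  ShenandoahLock* const _lock;
public:
  ShenandoahScopedLock(ShenandoahLock* lock, bool allow_block_for_safepoint)
    : _lock(lock) {
    _lock->lock(allow_block_for_safepoint);
  }
  ~ShenandoahScopedLock() {
    _lock->unlock();
  }
};

// Usage sketch: a Java thread would typically pass true so it can block if a
// safepoint is pending (matching case 1 in the comment inside lock()), while a
// non-Java GC thread would pass false.
//
// {
//   ShenandoahScopedLock guard(heap_lock, /* allow_block_for_safepoint */ true);
//   // ... critical section ...
// }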