src/hotspot/share/runtime/objectMonitor.hpp

*** 26,37 ***
  #define SHARE_RUNTIME_OBJECTMONITOR_HPP
  
  #include "memory/allocation.hpp"
  #include "memory/padded.hpp"
  #include "oops/markWord.hpp"
  #include "oops/weakHandle.hpp"
  #include "runtime/perfDataTypes.hpp"
  #include "utilities/checkedCast.hpp"
  
  class ObjectMonitor;
  class ParkEvent;
  
  // ObjectWaiter serves as a "proxy" or surrogate thread.
  // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
  // ParkEvent instead.  Beware, however, that the JVMTI code
  // knows about ObjectWaiters, so we'll have to reconcile that code.
  // See next_waiter(), first_waiter(), etc.
  
! class ObjectWaiter : public StackObj {
   public:
    enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ };
    ObjectWaiter* volatile _next;
    ObjectWaiter* volatile _prev;
    JavaThread*   _thread;
    uint64_t      _notifier_tid;
    ParkEvent *   _event;
    volatile int  _notified;
    volatile TStates TState;
    bool          _active;           // Contention monitoring is enabled
   public:
    ObjectWaiter(JavaThread* current);
! 
    void wait_reenter_begin(ObjectMonitor *mon);
    void wait_reenter_end(ObjectMonitor *mon);
  };
  
  // The ObjectMonitor class implements the heavyweight version of a
--- 26,49 ---
  #define SHARE_RUNTIME_OBJECTMONITOR_HPP
  
  #include "memory/allocation.hpp"
  #include "memory/padded.hpp"
  #include "oops/markWord.hpp"
+ #include "oops/oopHandle.inline.hpp"
  #include "oops/weakHandle.hpp"
+ #include "runtime/javaThread.hpp"
  #include "runtime/perfDataTypes.hpp"
  #include "utilities/checkedCast.hpp"
  
  class ObjectMonitor;
  class ParkEvent;
+ class BasicLock;
  
  // ObjectWaiter serves as a "proxy" or surrogate thread.
  // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
  // ParkEvent instead.  Beware, however, that the JVMTI code
  // knows about ObjectWaiters, so we'll have to reconcile that code.
  // See next_waiter(), first_waiter(), etc.
  
! class ObjectWaiter : public CHeapObj<mtThread> {
   public:
    enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ };
    ObjectWaiter* volatile _next;
    ObjectWaiter* volatile _prev;
    JavaThread*   _thread;
+   OopHandle _vthread;
    uint64_t      _notifier_tid;
    ParkEvent *   _event;
    volatile int  _notified;
    volatile TStates TState;
    bool          _active;           // Contention monitoring is enabled
   public:
    ObjectWaiter(JavaThread* current);
!   ObjectWaiter(oop vthread);
+   ~ObjectWaiter() {
+     if (is_vthread()) {
+       assert(_vthread.resolve() != nullptr, "invariant");
+       _vthread.release(JavaThread::thread_oop_storage());
+     }
+   }
+   oop vthread() { return _vthread.resolve(); }
+   bool is_vthread() { return _thread == nullptr; }
    void wait_reenter_begin(ObjectMonitor *mon);
    void wait_reenter_end(ObjectMonitor *mon);
  };
  
  // The ObjectMonitor class implements the heavyweight version of a

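A minimal sketch of the new ObjectWaiter(oop vthread) constructor implied by the destructor above, assuming it allocates its OopHandle from the same JavaThread::thread_oop_storage() that ~ObjectWaiter() releases into; the field defaults are illustrative, not the patch's actual implementation:

    // Sketch only: a vthread waiter has no JavaThread, so _thread stays null
    // (which is exactly what is_vthread() tests) and the vthread oop is kept
    // alive through an OopHandle in the thread oop storage.
    ObjectWaiter::ObjectWaiter(oop vthread)
      : _next(nullptr), _prev(nullptr),
        _thread(nullptr),
        _vthread(JavaThread::thread_oop_storage(), vthread),
        _notifier_tid(0),
        _event(nullptr),          // no ParkEvent: an unmounted vthread is not parked on one
        _notified(0),
        TState(TS_RUN),
        _active(false) {}
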
*** 125,14 ***
--- 137,18 ---
  
  class ObjectMonitor : public CHeapObj<mtObjectMonitor> {
    friend class ObjectSynchronizer;
    friend class ObjectWaiter;
    friend class VMStructs;
+   friend class MonitorList;
    JVMCI_ONLY(friend class JVMCIVMStructs;)
  
    static OopStorage* _oop_storage;
  
+   static OopHandle _vthread_cxq_head;
+   static ParkEvent* _vthread_unparker_ParkEvent;
+ 
    // The sync code expects the header field to be at offset zero (0).
    // Enforced by the assert() in header_addr().
    volatile markWord _header;        // displaced object header word - mark
    WeakHandle _object;               // backward object pointer
    // Separate _header and _owner on different cache lines since both can

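A short sketch of the invariant the "offset zero" comment describes, assuming header_addr() keeps its usual form (hedged; the actual assert lives in the existing header_addr() implementation):

    // Sketch: _header must remain the first field so that the address of an
    // ObjectMonitor and the address of its displaced header coincide.
    volatile markWord* ObjectMonitor::header_addr() {
      assert((intptr_t)this == (intptr_t)&_header, "sync code expects header at offset zero");
      return &_header;
    }
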
*** 156,16 ***
  
  private:
    static void* anon_owner_ptr() { return reinterpret_cast<void*>(ANONYMOUS_OWNER); }
  
    void* volatile _owner;            // pointer to owning thread OR BasicLock
    volatile uint64_t _previous_owner_tid;  // thread id of the previous owner of the monitor
    // Separate _owner and _next_om on different cache lines since
    // both can have busy multi-threaded access. _previous_owner_tid is only
    // changed by ObjectMonitor::exit() so it is a good choice to share the
    // cache line with _owner.
!   DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(void* volatile) +
                          sizeof(volatile uint64_t));
    ObjectMonitor* _next_om;          // Next ObjectMonitor* linkage
    volatile intx _recursions;        // recursion count, 0 for first entry
    ObjectWaiter* volatile _EntryList;  // Threads blocked on entry or reentry.
                                        // The list is actually composed of WaitNodes,
--- 172,17 ---
  
  private:
    static void* anon_owner_ptr() { return reinterpret_cast<void*>(ANONYMOUS_OWNER); }
  
    void* volatile _owner;            // pointer to owning thread OR BasicLock
+   BasicLock* volatile _stack_locker;      // can this share a cache line with owner? they're used together
    volatile uint64_t _previous_owner_tid;  // thread id of the previous owner of the monitor
    // Separate _owner and _next_om on different cache lines since
    // both can have busy multi-threaded access. _previous_owner_tid is only
    // changed by ObjectMonitor::exit() so it is a good choice to share the
    // cache line with _owner.
!   DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, 2 * sizeof(void* volatile) +
                          sizeof(volatile uint64_t));
    ObjectMonitor* _next_om;          // Next ObjectMonitor* linkage
    volatile intx _recursions;        // recursion count, 0 for first entry
    ObjectWaiter* volatile _EntryList;  // Threads blocked on entry or reentry.
                                        // The list is actually composed of WaitNodes,

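For reference, a worked example of the pad-size change above, assuming LP64 pointers and a 64-byte OM_CACHE_LINE_SIZE (both platform-dependent assumptions):

    // DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, size) emits (line - size) pad bytes.
    //   before: 64 - (    sizeof(void*) + sizeof(uint64_t)) = 64 - 16 = 48 pad bytes
    //   after:  64 - (2 * sizeof(void*) + sizeof(uint64_t)) = 64 - 24 = 40 pad bytes
    // i.e. the new _stack_locker pointer is carved out of the old pad, keeping
    // _owner, _stack_locker and _previous_owner_tid together on one cache line.
    static_assert(64 - (2 * sizeof(void*) + sizeof(uint64_t)) == 40, "LP64 example only");
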
*** 188,10 ***
--- 205,14 ---
    volatile int _WaitSetLock;        // protects Wait Queue - simple spinlock
  
   public:
  
    static void Initialize();
+   static void Initialize2();
+ 
+   static OopHandle& vthread_cxq_head() { return _vthread_cxq_head; }
+   static ParkEvent* vthread_unparker_ParkEvent() { return _vthread_unparker_ParkEvent; }
  
    // Only perform a PerfData operation if the PerfData object has been
    // allocated and if the PerfDataManager has not freed the PerfData
    // objects which can happen at normal VM shutdown.
    //

*** 216,10 ***
--- 237,11 ---
    static ByteSize owner_offset()       { return byte_offset_of(ObjectMonitor, _owner); }
    static ByteSize recursions_offset()  { return byte_offset_of(ObjectMonitor, _recursions); }
    static ByteSize cxq_offset()         { return byte_offset_of(ObjectMonitor, _cxq); }
    static ByteSize succ_offset()        { return byte_offset_of(ObjectMonitor, _succ); }
    static ByteSize EntryList_offset()   { return byte_offset_of(ObjectMonitor, _EntryList); }
+   static ByteSize stack_locker_offset(){ return byte_offset_of(ObjectMonitor, _stack_locker); }
  
    // ObjectMonitor references can be ORed with markWord::monitor_value
    // as part of the ObjectMonitor tagging mechanism. When we combine an
    // ObjectMonitor reference with an offset, we need to remove the tag
    // value in order to generate the proper address.

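A sketch of the tag-stripping arithmetic the comment above describes, applied to the new stack_locker_offset(); the helper name is hypothetical, only in_bytes(), markWord::monitor_value and the offset accessor are existing names:

    // Hypothetical helper: when the monitor pointer in hand still carries the
    // markWord::monitor_value tag, subtract the tag from the field offset so
    // that (tagged_ptr + adjusted_offset) yields the proper field address.
    static intptr_t stack_locker_offset_minus_tag() {
      return in_bytes(ObjectMonitor::stack_locker_offset()) - (intptr_t)markWord::monitor_value;
    }
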
*** 250,55 ***
      return ret_code != 0;
    }
    const char* is_busy_to_string(stringStream* ss);
  
    bool is_entered(JavaThread* current) const;
  
    // Returns true if this OM has an owner, false otherwise.
!   bool      has_owner() const;
!   void*     owner() const;  // Returns null if DEFLATER_MARKER is observed.
    void*     owner_raw() const;
    // Returns true if owner field == DEFLATER_MARKER and false otherwise.
    bool      owner_is_DEFLATER_MARKER() const;
    // Returns true if 'this' is being async deflated and false otherwise.
    bool      is_being_async_deflated();
    // Clear _owner field; current value must match old_value.
!   void      release_clear_owner(void* old_value);
    // Simply set _owner field to new_value; current value must match old_value.
!   void      set_owner_from(void* old_value, void* new_value);
    // Simply set _owner field to current; current value must match basic_lock_p.
!   void      set_owner_from_BasicLock(void* basic_lock_p, JavaThread* current);
    // Try to set _owner field to new_value if the current value matches
    // old_value, using Atomic::cmpxchg(). Otherwise, does not change the
    // _owner field. Returns the prior value of the _owner field.
!   void*     try_set_owner_from(void* old_value, void* new_value);
  
!   void set_owner_anonymous() {
-     set_owner_from(nullptr, anon_owner_ptr());
-   }
  
!   bool is_owner_anonymous() const {
!     return owner_raw() == anon_owner_ptr();
    }
  
!   void set_owner_from_anonymous(Thread* owner) {
      set_owner_from(anon_owner_ptr(), owner);
    }
  
    // Simply get _next_om field.
    ObjectMonitor* next_om() const;
    // Simply set _next_om field to new_value.
    void set_next_om(ObjectMonitor* new_value);
  
-   int       waiters() const;
- 
-   int       contentions() const;
    void      add_to_contentions(int value);
    intx      recursions() const                                         { return _recursions; }
    void      set_recursions(size_t recursions);
  
    // JVM/TI GetObjectMonitorUsage() needs this:
    ObjectWaiter* first_waiter()                                         { return _WaitSet; }
    ObjectWaiter* next_waiter(ObjectWaiter* o)                           { return o->_next; }
    JavaThread* thread_of_waiter(ObjectWaiter* o)                        { return o->_thread; }
  
    ObjectMonitor(oop object);
--- 272,62 ---
      return ret_code != 0;
    }
    const char* is_busy_to_string(stringStream* ss);
  
    bool is_entered(JavaThread* current) const;
+   int contentions() const;
  
    // Returns true if this OM has an owner, false otherwise.
!   bool   has_owner() const;
!   void*  owner() const;  // Returns null if DEFLATER_MARKER is observed.
+   bool   is_owner(JavaThread* thread) const { return owner() == owner_for(thread); }
+   bool   is_owner_anonymous() const { return owner_raw() == anon_owner_ptr(); }
+   bool   is_stack_locker(JavaThread* current);
+   BasicLock* stack_locker() const;
+ 
+  private:
    void*     owner_raw() const;
+   void*     owner_for(JavaThread* thread) const;
    // Returns true if owner field == DEFLATER_MARKER and false otherwise.
    bool      owner_is_DEFLATER_MARKER() const;
    // Returns true if 'this' is being async deflated and false otherwise.
    bool      is_being_async_deflated();
    // Clear _owner field; current value must match old_value.
!   void      release_clear_owner(JavaThread* old_value);
    // Simply set _owner field to new_value; current value must match old_value.
!   void      set_owner_from_raw(void* old_value, void* new_value);
+   void      set_owner_from(void* old_value, JavaThread* current);
    // Simply set _owner field to current; current value must match basic_lock_p.
!   void      set_owner_from_BasicLock(JavaThread* current);
    // Try to set _owner field to new_value if the current value matches
    // old_value, using Atomic::cmpxchg(). Otherwise, does not change the
    // _owner field. Returns the prior value of the _owner field.
!   void*     try_set_owner_from_raw(void* old_value, void* new_value);
+   void*     try_set_owner_from(void* old_value, JavaThread* current);
  
!   void set_stack_locker(BasicLock* locker);
  
!   void set_owner_anonymous() {
!     set_owner_from_raw(nullptr, anon_owner_ptr());
    }
  
!   void set_owner_from_anonymous(JavaThread* owner) {
      set_owner_from(anon_owner_ptr(), owner);
    }
  
    // Simply get _next_om field.
    ObjectMonitor* next_om() const;
    // Simply set _next_om field to new_value.
    void set_next_om(ObjectMonitor* new_value);
  
    void      add_to_contentions(int value);
    intx      recursions() const                                         { return _recursions; }
    void      set_recursions(size_t recursions);
  
+  public:
    // JVM/TI GetObjectMonitorUsage() needs this:
+   int waiters() const;
    ObjectWaiter* first_waiter()                                         { return _WaitSet; }
    ObjectWaiter* next_waiter(ObjectWaiter* o)                           { return o->_next; }
    JavaThread* thread_of_waiter(ObjectWaiter* o)                        { return o->_thread; }
  
    ObjectMonitor(oop object);

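A hedged sketch of walking the wait set with the accessors that are public here (the helper itself is hypothetical, and assumes the caller inspects the monitor at a safepoint or while owning it, as JVMTI does):

    // Hypothetical helper: count wait-set entries that map to a mounted JavaThread.
    // The wait set is walked by count; with this patch, thread_of_waiter() can
    // return null for an entry that represents an unmounted virtual thread.
    static int mounted_waiters(ObjectMonitor* mon) {
      int mounted = 0;
      ObjectWaiter* w = mon->first_waiter();
      for (int i = 0; i < mon->waiters(); i++, w = mon->next_waiter(w)) {
        if (mon->thread_of_waiter(w) != nullptr) {
          mounted++;
        }
      }
      return mounted;
    }
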
*** 329,10 ***
--- 358,11 ---
      void operator()(JavaThread* current);
    };
   public:
    bool      enter_for(JavaThread* locking_thread);
    bool      enter(JavaThread* current);
+   void      redo_enter(JavaThread* current);
    void      exit(JavaThread* current, bool not_suspended = true);
    void      wait(jlong millis, bool interruptible, TRAPS);
    void      notify(TRAPS);
    void      notifyAll(TRAPS);
  

*** 350,11 ***
    void      INotify(JavaThread* current);
    ObjectWaiter* DequeueWaiter();
    void      DequeueSpecificWaiter(ObjectWaiter* waiter);
    void      EnterI(JavaThread* current);
    void      ReenterI(JavaThread* current, ObjectWaiter* current_node);
!   void      UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* current_node);
  
  
    enum class TryLockResult { Interference = -1, HasOwner = 0, Success = 1 };
  
    TryLockResult  TryLock(JavaThread* current);
--- 380,14 ---
    void      INotify(JavaThread* current);
    ObjectWaiter* DequeueWaiter();
    void      DequeueSpecificWaiter(ObjectWaiter* waiter);
    void      EnterI(JavaThread* current);
    void      ReenterI(JavaThread* current, ObjectWaiter* current_node);
!   bool      HandlePreemptedVThread(JavaThread* current);
+   void      VThreadEpilog(JavaThread* current);
+   void      UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* current_node, oop vthread = nullptr);
+   ObjectWaiter* LookupWaiter(int64_t threadid);
  
  
    enum class TryLockResult { Interference = -1, HasOwner = 0, Success = 1 };
  
    TryLockResult  TryLock(JavaThread* current);