< prev index next >

src/hotspot/share/runtime/thread.hpp

Print this page
@@ -30,10 +30,12 @@
  #include "gc/shared/gcThreadLocalData.hpp"
  #include "gc/shared/threadLocalAllocBuffer.hpp"
  #include "memory/allocation.hpp"
  #include "oops/oop.hpp"
  #include "oops/oopHandle.hpp"
+ #include "oops/weakHandle.hpp"
+ #include "runtime/continuation.hpp"
  #include "runtime/frame.hpp"
  #include "runtime/globals.hpp"
  #include "runtime/handshake.hpp"
  #include "runtime/javaFrameAnchor.hpp"
  #include "runtime/mutexLocker.hpp"

@@ -137,10 +139,26 @@
  #ifndef USE_LIBRARY_BASED_TLS_ONLY
    // Current thread is maintained as a thread-local variable
    static THREAD_LOCAL Thread* _thr_current;
  #endif
  
+   int _nmethod_disarm_value;
+ 
+  public:
+   int nmethod_disarm_value() {
+     return _nmethod_disarm_value;
+   }
+ 
+   void set_nmethod_disarm_value(int value) {
+     _nmethod_disarm_value = value;
+   }
+ 
+   static ByteSize nmethod_disarmed_offset() {
+     return byte_offset_of(Thread, _nmethod_disarm_value);
+   }
+ 
+  private:
    // Thread local data area available to the GC. The internal
    // structure and contents of this data area is GC-specific.
    // Only GC and GC barrier code should access this data area.
    GCThreadLocalData _gc_data;
  

@@ -686,13 +704,16 @@
    friend class VMStructs;
    friend class JVMCIVMStructs;
    friend class WhiteBox;
    friend class ThreadsSMRSupport; // to access _threadObj for exiting_threads_oops_do
    friend class HandshakeState;
+   friend class Continuation;
   private:
    bool           _on_thread_list;                // Is set when this JavaThread is added to the Threads list
    OopHandle      _threadObj;                     // The Java level thread object
+   OopHandle      _vthread;
+   OopHandle      _scopeLocalCache;
  
  #ifdef ASSERT
   private:
    int _java_call_counter;
  

@@ -720,10 +741,11 @@
    vframeArray*  _vframe_array_last;              // Holds last vFrameArray we popped
    // Holds updates by JVMTI agents for compiled frames that cannot be performed immediately. They
    // will be carried out as soon as possible which, in most cases, is just before deoptimization of
    // the frame, when control returns to it.
    JvmtiDeferredUpdates* _jvmti_deferred_updates;
+   GrowableArray<WeakHandle>* _keepalive_cleanup;
  
    // Handshake value for fixing 6243940. We need a place for the i2c
    // adapter to store the callee Method*. This value is NEVER live
    // across a gc point so it does NOT have to be gc'd
    // The handshake is open ended since we can't be certain that it will

@@ -780,11 +802,12 @@
    enum SuspendFlags {
      // NOTE: avoid using the sign-bit as cc generates different test code
      //       when the sign-bit is used, and sometimes incorrectly - see CR 6398077
      _has_async_exception    = 0x00000001U, // there is a pending async exception
      _trace_flag             = 0x00000004U, // call tracing backend
-     _obj_deopt              = 0x00000008U  // suspend for object reallocation and relocking for JVMTI agent
+     _obj_deopt              = 0x00000008U, // suspend for object reallocation and relocking for JVMTI agent
+     _thread_suspended       = 0x00000010U  // non-virtual thread is externally suspended
    };
  
    // various suspension related flags - atomically updated
    // overloaded with async exceptions so that we do a single check when transitioning from native->Java
    volatile uint32_t _suspend_flags;

@@ -885,10 +908,14 @@
    jint                  _in_deopt_handler;       // count of deoptimization
                                                   // handlers thread is in
    volatile bool         _doing_unsafe_access;    // Thread may fault due to unsafe access
    bool                  _do_not_unlock_if_synchronized;  // Do not unlock the receiver of a synchronized method (since it was
                                                           // never locked) when throwing an exception. Used by interpreter only.
+ #if INCLUDE_JVMTI
+   bool                  _is_in_VTMT;             // thread is in virtual thread mount transition
+   bool                  _is_VTMT_disabler;       // this thread has disabled virtual thread mount transitions (VTMT)
+ #endif
  
    // JNI attach states:
    enum JNIAttachStates {
      _not_attaching_via_jni = 1,  // thread is not attaching via JNI
      _attaching_via_jni,          // thread is attaching via JNI

@@ -1005,20 +1032,34 @@
    // and during exception propagation, pop the top
    // _frames_to_pop_failed_realloc frames, the ones that reference
    // failed reallocations.
    int _frames_to_pop_failed_realloc;
  
+   ContinuationEntry* _cont_entry;
+   bool _cont_yield; // a continuation yield is in progress
+   bool _cont_preempt;
+   int _cont_fastpath_thread_state; // whether global thread state allows continuation fastpath (JVMTI)
+   intptr_t* _cont_fastpath; // sp of the oldest known interpreted/call_stub frame inside the continuation
+   int _held_monitor_count; // used by continuations for fast lock detection
+ private:
+ 
    friend class VMThread;
    friend class ThreadWaitTransition;
    friend class VM_Exit;
  
    // Stack watermark barriers.
    StackWatermarks _stack_watermarks;
  
   public:
    inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; }
  
+  public:
+   oop _mounted_vthread;
+   jlong _scopeLocal_hash_table_shift;
+ 
+   void allocate_scopeLocal_hash_table(int count);
+ 
   public:
    // Constructor
    JavaThread();                            // delegating constructor
    JavaThread(bool is_attaching_via_jni);   // for main thread and JNI attached threads
    JavaThread(ThreadFunction entry_point, size_t stack_size = 0);

@@ -1064,11 +1105,17 @@
    }
  
    // Thread oop. threadObj() can be NULL for initial JavaThread
    // (or for threads attached via JNI)
    oop threadObj() const;
-   void set_threadObj(oop p);
+   void set_threadOopHandles(oop p);
+   oop vthread() const;
+   void set_vthread(oop p);
+   oop scopeLocalCache() const;
+   void set_scopeLocalCache(oop p);
+   oop mounted_vthread() const                    { return _mounted_vthread; }
+   void set_mounted_vthread(oop p)                { _mounted_vthread = p; }
  
    // Prepare thread and add to priority queue.  If a priority is
    // not specified, use the priority of the thread object. Threads_lock
    // must be held while this function is called.
    void prepare(jobject jni_thread, ThreadPriority prio=NoPriority);

@@ -1127,10 +1174,28 @@
  
    SafepointMechanism::ThreadData* poll_data() { return &_poll_data; }
  
    void set_requires_cross_modify_fence(bool val) PRODUCT_RETURN NOT_PRODUCT({ _requires_cross_modify_fence = val; })
  
+   // Continuation support
+   ContinuationEntry* last_continuation() const { return _cont_entry; }
+   ContinuationEntry* last_continuation(oop cont_scope) const { return Continuation::last_continuation(this, cont_scope); }
+   bool cont_yield() { return _cont_yield; }
+   void set_cont_yield(bool x) { _cont_yield = x; }
+   void set_cont_fastpath(intptr_t* x) { _cont_fastpath = x; }
+   void push_cont_fastpath(intptr_t* sp) { if (sp > _cont_fastpath) _cont_fastpath = sp; }
+   void set_cont_fastpath_thread_state(bool x) { _cont_fastpath_thread_state = (int)x; }
+   intptr_t* raw_cont_fastpath() { return _cont_fastpath; }
+   bool cont_fastpath() { return ((_cont_fastpath == NULL) & _cont_fastpath_thread_state) != 0; }
+   bool cont_fastpath_thread_state() { return _cont_fastpath_thread_state != 0; }
+   bool cont_preempt() { return _cont_preempt; }
+   void set_cont_preempt(bool x) { _cont_preempt = x; }
+   int held_monitor_count() { return _held_monitor_count; }
+   void reset_held_monitor_count() { _held_monitor_count = 0; }
+   void inc_held_monitor_count() { _held_monitor_count++; }
+   void dec_held_monitor_count() { assert (_held_monitor_count > 0, ""); _held_monitor_count--; }
+ 
   private:
    DEBUG_ONLY(void verify_frame_info();)
  
    // Support for thread handshake operations
    HandshakeState _handshake;

@@ -1142,28 +1207,52 @@
    bool is_handshake_safe_for(Thread* th) const {
      return _handshake.active_handshaker() == th || this == th;
    }
  
    // Suspend/resume support for JavaThread
-   bool java_suspend(); // higher-level suspension logic called by the public APIs
-   bool java_resume();  // higher-level resume logic called by the public APIs
+   // higher-level suspension/resume logic called by the public APIs
+   bool java_suspend();
+   bool java_resume();
    bool is_suspended()     { return _handshake.is_suspended(); }
  
+   // lower-level blocking logic called by the JVM.  The caller suspends this
+   // thread, does something, and then releases it.
+   bool block_suspend(JavaThread* caller);
+   bool continue_resume(JavaThread* caller);
+ 
    // Check for async exception in addition to safepoint.
    static void check_special_condition_for_native_trans(JavaThread *thread);
  
    // Synchronize with another thread that is deoptimizing objects of the
    // current thread, i.e. reverts optimizations based on escape analysis.
    void wait_for_object_deoptimization();
  
+   inline void set_thread_suspended();
+   inline void clear_thread_suspended();
+ 
+   bool is_thread_suspended() const {
+     return (_suspend_flags & _thread_suspended) != 0;
+   }
+ 
+ #if INCLUDE_JVMTI
+   bool is_VTMT_disabler() const                  { return _is_VTMT_disabler; }
+   bool is_in_VTMT() const                        { return _is_in_VTMT; }
+ 
+   void set_is_in_VTMT(bool val);
+   void set_is_VTMT_disabler(bool val);
+ #endif
+ 
+   bool is_cont_force_yield() { return cont_preempt(); }
+ 
    // these next two are also used for self-suspension and async exception support
    void handle_special_runtime_exit_condition(bool check_asyncs = true);
  
    // Return true if JavaThread has an asynchronous condition or
    // if external suspension is requested.
    bool has_special_runtime_exit_condition() {
-     return (_suspend_flags & (_has_async_exception | _obj_deopt JFR_ONLY(| _trace_flag))) != 0;
+     return (_suspend_flags & (_has_async_exception | _obj_deopt JFR_ONLY(| _trace_flag))) != 0
+            || is_cont_force_yield();
    }
  
    // Fast-locking support
    bool is_lock_owned(address adr) const;
  

@@ -1175,10 +1264,13 @@
  
    // Side structure for deferring update of java frame locals until deopt occurs
    JvmtiDeferredUpdates* deferred_updates() const      { return _jvmti_deferred_updates; }
    void set_deferred_updates(JvmtiDeferredUpdates* du) { _jvmti_deferred_updates = du; }
  
+   void set_keepalive_cleanup(GrowableArray<WeakHandle>* lst) { _keepalive_cleanup = lst; }
+   GrowableArray<WeakHandle>* keepalive_cleanup() const { return _keepalive_cleanup; }
+ 
    // These only really exist to make debugging deopt problems simpler
  
    void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; }
    vframeArray* vframe_array_last() const         { return _vframe_array_last;  }
  

@@ -1244,12 +1336,15 @@
    // Misc. accessors/mutators
    void set_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = true; }
    void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
    bool do_not_unlock(void)                       { return _do_not_unlock_if_synchronized; }
  
+   static ByteSize scopeLocalCache_offset()       { return byte_offset_of(JavaThread, _scopeLocalCache); }
+ 
    // For assembly stub generation
    static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj); }
+   static ByteSize vthread_offset()               { return byte_offset_of(JavaThread, _vthread); }
    static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment); }
    static ByteSize pending_jni_exception_check_fn_offset() {
      return byte_offset_of(JavaThread, _pending_jni_exception_check_fn);
    }
    static ByteSize last_Java_sp_offset() {

@@ -1300,10 +1395,15 @@
      return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
    }
    static ByteSize doing_unsafe_access_offset() { return byte_offset_of(JavaThread, _doing_unsafe_access); }
    NOT_PRODUCT(static ByteSize requires_cross_modify_fence_offset()  { return byte_offset_of(JavaThread, _requires_cross_modify_fence); })
  
+   static ByteSize cont_entry_offset()         { return byte_offset_of(JavaThread, _cont_entry); }
+   static ByteSize cont_fastpath_offset()      { return byte_offset_of(JavaThread, _cont_fastpath); }
+   static ByteSize cont_preempt_offset()       { return byte_offset_of(JavaThread, _cont_preempt); }
+   static ByteSize held_monitor_count_offset() { return byte_offset_of(JavaThread, _held_monitor_count); }
+ 
    // Returns the jni environment for this thread
    JNIEnv* jni_environment()                      { return &_jni_environment; }
  
    static JavaThread* thread_from_jni_environment(JNIEnv* env) {
      JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));

@@ -1387,21 +1487,27 @@
    static const char* name_for(oop thread_obj);
  
    void print_on(outputStream* st, bool print_extended_info) const;
    void print_on(outputStream* st) const { print_on(st, false); }
    void print() const;
-   void print_thread_state_on(outputStream*) const      PRODUCT_RETURN;
+   void print_thread_state_on(outputStream*) const;
+   const char* thread_state_name() const;
    void print_on_error(outputStream* st, char* buf, int buflen) const;
    void print_name_on_error(outputStream* st, char* buf, int buflen) const;
    void verify();
  
    // Accessing frames
    frame last_frame() {
      _anchor.make_walkable(this);
      return pd_last_frame();
    }
-   javaVFrame* last_java_vframe(RegisterMap* reg_map);
+   javaVFrame* last_java_vframe(RegisterMap* reg_map) { return last_java_vframe(last_frame(), reg_map); }
+ 
+   frame vthread_carrier_last_frame(RegisterMap* reg_map);
+   javaVFrame* vthread_carrier_last_java_vframe(RegisterMap* reg_map) { return last_java_vframe(vthread_carrier_last_frame(reg_map), reg_map); }
+ 
+   javaVFrame* last_java_vframe(const frame f, RegisterMap* reg_map);
  
    // Returns method at 'depth' java or native frames down the stack
    // Used for security checks
    Klass* security_get_caller_class(int depth);
  

@@ -1423,10 +1529,11 @@
    // Function for testing deoptimization
    void deoptimize();
    void make_zombies();
  
    void deoptimize_marked_methods();
+   void deoptimize_marked_methods_only_anchors();
  
   public:
    // Returns the running thread as a JavaThread
    static JavaThread* current() {
      return JavaThread::cast(Thread::current());

@@ -1469,10 +1576,15 @@
    // returns it. JvmtiThreadState::state_for() will return NULL only if
    // the specified JavaThread is exiting.
    JvmtiThreadState *jvmti_thread_state() const                                   { return _jvmti_thread_state; }
    static ByteSize jvmti_thread_state_offset()                                    { return byte_offset_of(JavaThread, _jvmti_thread_state); }
  
+ #if INCLUDE_JVMTI
+   // Rebind JVMTI thread state from carrier to virtual or from virtual to carrier.
+   JvmtiThreadState *rebind_to_jvmti_thread_state_of(oop thread_oop);
+ #endif
+ 
    // JVMTI PopFrame support
    // Setting and clearing popframe_condition
    // All of these enumerated values are bits. popframe_pending
    // indicates that a PopFrame() has been requested and not yet been
    // completed. popframe_processing indicates that that PopFrame() is in

@@ -1531,10 +1643,11 @@
   public:
    // used by the interpreter for fullspeed debugging support (see above)
    static ByteSize interp_only_mode_offset() { return byte_offset_of(JavaThread, _interp_only_mode); }
    bool is_interp_only_mode()                { return (_interp_only_mode != 0); }
    int get_interp_only_mode()                { return _interp_only_mode; }
+   int set_interp_only_mode(int val)         { return _interp_only_mode = val; }
    void increment_interp_only_mode()         { ++_interp_only_mode; }
    void decrement_interp_only_mode()         { --_interp_only_mode; }
  
    // support for cached flag that indicates whether exceptions need to be posted for this thread
    // if this is false, we can avoid deoptimizing when events are thrown
< prev index next >