src/hotspot/share/runtime/javaThread.hpp

  24  */
  25 
  26 #ifndef SHARE_RUNTIME_JAVATHREAD_HPP
  27 #define SHARE_RUNTIME_JAVATHREAD_HPP
  28 
  29 #include "jni.h"
  30 #include "memory/allocation.hpp"
  31 #include "oops/oop.hpp"
  32 #include "oops/oopHandle.hpp"
  33 #include "runtime/frame.hpp"
  34 #include "runtime/globals.hpp"
  35 #include "runtime/handshake.hpp"
  36 #include "runtime/javaFrameAnchor.hpp"
  37 #include "runtime/lockStack.hpp"
  38 #include "runtime/park.hpp"
  39 #include "runtime/safepointMechanism.hpp"
  40 #include "runtime/stackWatermarkSet.hpp"
  41 #include "runtime/stackOverflow.hpp"
  42 #include "runtime/thread.hpp"
  43 #include "runtime/threadHeapSampler.hpp"

  44 #include "runtime/threadStatisticalInfo.hpp"
  45 #include "utilities/exceptions.hpp"
  46 #include "utilities/globalDefinitions.hpp"
  47 #include "utilities/macros.hpp"
  48 #if INCLUDE_JFR
  49 #include "jfr/support/jfrThreadExtension.hpp"
  50 #endif
  51 
  52 class AsyncExceptionHandshake;
  53 class ContinuationEntry;
  54 class DeoptResourceMark;
  55 class InternalOOMEMark;
  56 class JNIHandleBlock;
  57 class JVMCIRuntime;
  58 
  59 class JvmtiDeferredUpdates;
  60 class JvmtiSampledObjectAllocEventCollector;
  61 class JvmtiThreadState;
  62 
  63 class Metadata;

 142 
 143   // Used to pass back results to the interpreter or generated code running Java code.
 144   oop           _vm_result;    // oop result is GC-preserved
 145   Metadata*     _vm_result_2;  // non-oop result
 146 
 147   // See ReduceInitialCardMarks: this holds the precise space interval of
 148   // the most recent slow path allocation for which compiled code has
 149   // elided card-marks for performance along the fast-path.
 150   MemRegion     _deferred_card_mark;
 151 
 152   ObjectMonitor* volatile _current_pending_monitor;     // ObjectMonitor this thread is waiting to lock
 153   bool           _current_pending_monitor_is_from_java; // locking is from Java code
 154   ObjectMonitor* volatile _current_waiting_monitor;     // ObjectMonitor on which this thread called Object.wait()
 155 
 156   // Active_handles points to a block of handles
 157   JNIHandleBlock* _active_handles;
 158 
 159   // One-element thread local free list
 160   JNIHandleBlock* _free_handle_block;
 161 
 162  public:
 163   // For tracking the heavyweight monitor the thread is pending on.
 164   ObjectMonitor* current_pending_monitor() {
 165     // Use Atomic::load() to prevent data race between concurrent modification and
 166     // concurrent readers, e.g. ThreadService::get_current_contended_monitor().
 167     // In particular, the pointer must not be reloaded from the thread after a null check.
 168     return Atomic::load(&_current_pending_monitor);
 169   }
 170   void set_current_pending_monitor(ObjectMonitor* monitor) {
 171     Atomic::store(&_current_pending_monitor, monitor);
 172   }
 173   void set_current_pending_monitor_is_from_java(bool from_java) {
 174     _current_pending_monitor_is_from_java = from_java;
 175   }
 176   bool current_pending_monitor_is_from_java() {
 177     return _current_pending_monitor_is_from_java;
 178   }
 179   ObjectMonitor* current_waiting_monitor() {
 180     // See the comment in current_pending_monitor() above.
 181     return Atomic::load(&_current_waiting_monitor);
 182   }

 299   //
 300   // _vm_exited is a special value to cover the case of a JavaThread
 301   // executing native code after the VM itself is terminated.
 302   //
 303   // A JavaThread that fails to JNI attach has these _terminated field transitions:
 304   //   _not_terminated => _thread_terminated
 305   //
 306   volatile TerminatedTypes _terminated;
 307 
 308   jint                  _in_deopt_handler;       // count of deoptimization
 309                                                  // handlers thread is in
 310   volatile bool         _doing_unsafe_access;    // Thread may fault due to unsafe access
 311   bool                  _do_not_unlock_if_synchronized;  // Do not unlock the receiver of a synchronized method (since it was
 312                                                          // never locked) when throwing an exception. Used by interpreter only.
 313 #if INCLUDE_JVMTI
 314   volatile bool         _carrier_thread_suspended;       // Carrier thread is externally suspended
 315   bool                  _is_in_VTMS_transition;          // thread is in virtual thread mount state transition
 316   bool                  _is_in_tmp_VTMS_transition;      // thread is in temporary virtual thread mount state transition
 317   bool                  _is_disable_suspend;             // JVMTI suspend is temporarily disabled; used on current thread only
 318   bool                  _VTMS_transition_mark;           // used for sync between VTMS transitions and disablers
 319 #ifdef ASSERT
 320   bool                  _is_VTMS_transition_disabler;    // thread currently disabled VTMS transitions
 321 #endif
 322 #endif
 323 
 324   // JNI attach states:
 325   enum JNIAttachStates {
 326     _not_attaching_via_jni = 1,  // thread is not attaching via JNI
 327     _attaching_via_jni,          // thread is attaching via JNI
 328     _attached_via_jni            // thread has attached via JNI
 329   };
 330 
 331   // A regular JavaThread's _jni_attach_state is _not_attaching_via_jni.
 332   // A native thread that is attaching via JNI starts with a value
 333   // of _attaching_via_jni and transitions to _attached_via_jni.
 334   volatile JNIAttachStates _jni_attach_state;
 335 
 336   // In scope of an InternalOOMEMark?
 337   bool _is_in_internal_oome_mark;
 338 

 449   // JVMTI PopFrame support
 450   // This is set to popframe_pending to signal that the top Java frame should be popped immediately
 451   int _popframe_condition;
 452 
 453   // If reallocation of scalar replaced objects fails, we throw OOM
 454   // and during exception propagation, pop the top
 455   // _frames_to_pop_failed_realloc frames, the ones that reference
 456   // failed reallocations.
 457   int _frames_to_pop_failed_realloc;
 458 
 459   ContinuationEntry* _cont_entry;
 460   intptr_t* _cont_fastpath; // the sp of the oldest known interpreted/call_stub frame inside the
 461                             // continuation that we know about
 462   int _cont_fastpath_thread_state; // whether global thread state allows continuation fastpath (JVMTI)
 463 
 464   // It's signed for error detection.
 465   intx _held_monitor_count;  // used by continuations for fast lock detection
 466   intx _jni_monitor_count;
 467   ObjectMonitor* _unlocked_inflated_monitor;
 468 
 469 private:
 470 
 471   friend class VMThread;
 472   friend class ThreadWaitTransition;
 473   friend class VM_Exit;
 474 
 475   // Stack watermark barriers.
 476   StackWatermarks _stack_watermarks;
 477 
 478  public:
 479   inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; }
 480 
 481  public:
 482   // Constructor
 483   JavaThread(MemTag mem_tag = mtThread);   // delegating constructor
 484   JavaThread(ThreadFunction entry_point, size_t stack_size = 0, MemTag mem_tag = mtThread);
 485   ~JavaThread();
 486 
 487   // Factory method to create a new JavaThread whose attach state is "is attaching"
 488   static JavaThread* create_attaching_thread();

 594   void set_doing_unsafe_access(bool val)         { _doing_unsafe_access = val; }
 595 
 596   bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
 597   void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
 598 
 599   SafepointMechanism::ThreadData* poll_data() { return &_poll_data; }
 600 
 601   void set_requires_cross_modify_fence(bool val) PRODUCT_RETURN NOT_PRODUCT({ _requires_cross_modify_fence = val; })
 602 
 603   // Continuation support
 604   ContinuationEntry* last_continuation() const { return _cont_entry; }
 605   void set_cont_fastpath(intptr_t* x)          { _cont_fastpath = x; }
 606   void push_cont_fastpath(intptr_t* sp)        { if (sp > _cont_fastpath) _cont_fastpath = sp; }
 607   void set_cont_fastpath_thread_state(bool x)  { _cont_fastpath_thread_state = (int)x; }
 608   intptr_t* raw_cont_fastpath() const          { return _cont_fastpath; }
 609   bool cont_fastpath() const                   { return _cont_fastpath == nullptr && _cont_fastpath_thread_state != 0; }
 610   bool cont_fastpath_thread_state() const      { return _cont_fastpath_thread_state != 0; }
 611 
 612   void inc_held_monitor_count(intx i = 1, bool jni = false);
 613   void dec_held_monitor_count(intx i = 1, bool jni = false);
 614 
 615   intx held_monitor_count() { return _held_monitor_count; }

 616   intx jni_monitor_count()  { return _jni_monitor_count;  }
 617   void clear_jni_monitor_count() { _jni_monitor_count = 0;   }
 618 
 619   // Support for SharedRuntime::monitor_exit_helper()
 620   ObjectMonitor* unlocked_inflated_monitor() const { return _unlocked_inflated_monitor; }
 621   void clear_unlocked_inflated_monitor() {
 622     _unlocked_inflated_monitor = nullptr;
 623   }
 624 
 625   inline bool is_vthread_mounted() const;
 626   inline const ContinuationEntry* vthread_continuation() const;
 627 
 628  private:
 629   DEBUG_ONLY(void verify_frame_info();)
 630 
 631   // Support for thread handshake operations
 632   HandshakeState _handshake;
 633  public:
 634   HandshakeState* handshake_state() { return &_handshake; }
 635 
 636   // A JavaThread can always safely operate on itself, and other threads
 637   // can do so safely if they are the active handshaker.
 638   bool is_handshake_safe_for(Thread* th) const {
 639     return _handshake.active_handshaker() == th || this == th;
 640   }
 641 
 642   // Suspend/resume support for JavaThread
 643   // higher-level suspension/resume logic called by the public APIs
 644   bool java_suspend();
 645   bool java_resume();
 646   bool is_suspended()     { return _handshake.is_suspended(); }
 647 

 656   inline void set_carrier_thread_suspended();
 657   inline void clear_carrier_thread_suspended();
 658 
 659   bool is_carrier_thread_suspended() const {
 660     return _carrier_thread_suspended;
 661   }
 662 
 663   bool is_in_VTMS_transition() const             { return _is_in_VTMS_transition; }
 664   bool is_in_tmp_VTMS_transition() const         { return _is_in_tmp_VTMS_transition; }
 665   bool is_in_any_VTMS_transition() const         { return _is_in_VTMS_transition || _is_in_tmp_VTMS_transition; }
 666 
 667   void set_is_in_VTMS_transition(bool val);
 668   void toggle_is_in_tmp_VTMS_transition()        { _is_in_tmp_VTMS_transition = !_is_in_tmp_VTMS_transition; };
 669 
 670   bool is_disable_suspend() const                { return _is_disable_suspend; }
 671   void toggle_is_disable_suspend()               { _is_disable_suspend = !_is_disable_suspend; };
 672 
 673   bool VTMS_transition_mark() const              { return Atomic::load(&_VTMS_transition_mark); }
 674   void set_VTMS_transition_mark(bool val)        { Atomic::store(&_VTMS_transition_mark, val); }
 675 
 676 #ifdef ASSERT
 677   bool is_VTMS_transition_disabler() const       { return _is_VTMS_transition_disabler; }
 678   void set_is_VTMS_transition_disabler(bool val);
 679 #endif
 680 #endif
 681 
 682   // Support for object deoptimization and JFR suspension
 683   void handle_special_runtime_exit_condition();
 684   bool has_special_runtime_exit_condition() {
 685     return (_suspend_flags & (_obj_deopt JFR_ONLY(| _trace_flag))) != 0;
 686   }
 687 
 688   // Stack-locking support (not for LM_LIGHTWEIGHT)
 689   bool is_lock_owned(address adr) const;
 690 
 691   // Accessors for vframe array top
 692   // The linked list of vframe arrays is sorted on sp. This means that when we
 693   // unpack, the head must contain the vframe array to unpack.
 694   void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
 695   vframeArray* vframe_array_head() const         { return _vframe_array_head;  }
 696 
 697   // Side structure for deferring update of java frame locals until deopt occurs
 698   JvmtiDeferredUpdates* deferred_updates() const      { return _jvmti_deferred_updates; }
 699   void set_deferred_updates(JvmtiDeferredUpdates* du) { _jvmti_deferred_updates = du; }
 700 
 701   // These only really exist to make debugging deopt problems simpler

 814   }
 815   static ByteSize reserved_stack_activation_offset() {
 816     return byte_offset_of(JavaThread, _stack_overflow_state._reserved_stack_activation);
 817   }
 818   static ByteSize shadow_zone_safe_limit()  {
 819     return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_safe_limit);
 820   }
 821   static ByteSize shadow_zone_growth_watermark()  {
 822     return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_growth_watermark);
 823   }
 824 
 825   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }
 826 
 827   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
 828   static ByteSize should_post_on_exceptions_flag_offset() {
 829     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
 830   }
 831   static ByteSize doing_unsafe_access_offset() { return byte_offset_of(JavaThread, _doing_unsafe_access); }
 832   NOT_PRODUCT(static ByteSize requires_cross_modify_fence_offset()  { return byte_offset_of(JavaThread, _requires_cross_modify_fence); })
 833 
 834   static ByteSize cont_entry_offset()         { return byte_offset_of(JavaThread, _cont_entry); }
 835   static ByteSize cont_fastpath_offset()      { return byte_offset_of(JavaThread, _cont_fastpath); }
 836   static ByteSize held_monitor_count_offset() { return byte_offset_of(JavaThread, _held_monitor_count); }
 837   static ByteSize jni_monitor_count_offset()  { return byte_offset_of(JavaThread, _jni_monitor_count); }

 838   static ByteSize unlocked_inflated_monitor_offset() { return byte_offset_of(JavaThread, _unlocked_inflated_monitor); }
 839 
 840 #if INCLUDE_JVMTI
 841   static ByteSize is_in_VTMS_transition_offset()     { return byte_offset_of(JavaThread, _is_in_VTMS_transition); }
 842   static ByteSize is_in_tmp_VTMS_transition_offset() { return byte_offset_of(JavaThread, _is_in_tmp_VTMS_transition); }
 843   static ByteSize is_disable_suspend_offset()        { return byte_offset_of(JavaThread, _is_disable_suspend); }
 844 #endif
 845 
 846   // Returns the jni environment for this thread
 847   JNIEnv* jni_environment()                      { return &_jni_environment; }
 848 
 849   // Returns the current thread as indicated by the given JNIEnv.
 850   // We don't assert it is Thread::current here as that is done at the
 851   // external JNI entry points where the JNIEnv is passed into the VM.
 852   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
 853     JavaThread* current = reinterpret_cast<JavaThread*>(((intptr_t)env - in_bytes(jni_environment_offset())));
 854     // We can't normally get here in a thread that has completed its
 855     // execution and so "is_terminated", except when the call is from
 856     // AsyncGetCallTrace, which can be triggered by a signal at any point in
 857     // a thread's lifecycle. A thread is also considered terminated if the VM

1238   public:
1239     UnlockFlagSaver(JavaThread* t) {
1240       _thread = t;
1241       _do_not_unlock = t->do_not_unlock_if_synchronized();
1242       t->set_do_not_unlock_if_synchronized(false);
1243     }
1244     ~UnlockFlagSaver() {
1245       _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
1246     }
1247 };
1248 
1249 class JNIHandleMark : public StackObj {
1250   JavaThread* _thread;
1251  public:
1252   JNIHandleMark(JavaThread* thread) : _thread(thread) {
1253     thread->push_jni_handle_block();
1254   }
1255   ~JNIHandleMark() { _thread->pop_jni_handle_block(); }
1256 };
1257 
1258 #endif // SHARE_RUNTIME_JAVATHREAD_HPP

  24  */
  25 
  26 #ifndef SHARE_RUNTIME_JAVATHREAD_HPP
  27 #define SHARE_RUNTIME_JAVATHREAD_HPP
  28 
  29 #include "jni.h"
  30 #include "memory/allocation.hpp"
  31 #include "oops/oop.hpp"
  32 #include "oops/oopHandle.hpp"
  33 #include "runtime/frame.hpp"
  34 #include "runtime/globals.hpp"
  35 #include "runtime/handshake.hpp"
  36 #include "runtime/javaFrameAnchor.hpp"
  37 #include "runtime/lockStack.hpp"
  38 #include "runtime/park.hpp"
  39 #include "runtime/safepointMechanism.hpp"
  40 #include "runtime/stackWatermarkSet.hpp"
  41 #include "runtime/stackOverflow.hpp"
  42 #include "runtime/thread.hpp"
  43 #include "runtime/threadHeapSampler.hpp"
  44 #include "runtime/threadIdentifier.hpp"
  45 #include "runtime/threadStatisticalInfo.hpp"
  46 #include "utilities/exceptions.hpp"
  47 #include "utilities/globalDefinitions.hpp"
  48 #include "utilities/macros.hpp"
  49 #if INCLUDE_JFR
  50 #include "jfr/support/jfrThreadExtension.hpp"
  51 #endif
  52 
  53 class AsyncExceptionHandshake;
  54 class ContinuationEntry;
  55 class DeoptResourceMark;
  56 class InternalOOMEMark;
  57 class JNIHandleBlock;
  58 class JVMCIRuntime;
  59 
  60 class JvmtiDeferredUpdates;
  61 class JvmtiSampledObjectAllocEventCollector;
  62 class JvmtiThreadState;
  63 
  64 class Metadata;

 143 
 144   // Used to pass back results to the interpreter or generated code running Java code.
 145   oop           _vm_result;    // oop result is GC-preserved
 146   Metadata*     _vm_result_2;  // non-oop result
 147 
 148   // See ReduceInitialCardMarks: this holds the precise space interval of
 149   // the most recent slow path allocation for which compiled code has
 150   // elided card-marks for performance along the fast-path.
 151   MemRegion     _deferred_card_mark;
 152 
 153   ObjectMonitor* volatile _current_pending_monitor;     // ObjectMonitor this thread is waiting to lock
 154   bool           _current_pending_monitor_is_from_java; // locking is from Java code
 155   ObjectMonitor* volatile _current_waiting_monitor;     // ObjectMonitor on which this thread called Object.wait()
 156 
 157   // Active_handles points to a block of handles
 158   JNIHandleBlock* _active_handles;
 159 
 160   // One-element thread local free list
 161   JNIHandleBlock* _free_handle_block;
 162 
 163   // ID used as owner for inflated monitors. Same as the tid field of the current
 164   // _vthread object, except during creation of the primordial and JNI attached
 165   // thread cases, where this field can have a temporary value.
 166   int64_t _lock_id;
 167 
 168  public:
 169   bool _on_monitorenter;
 170 
 171   bool is_on_monitorenter() { return _on_monitorenter; }
 172   void set_on_monitorenter(bool val) { _on_monitorenter = val; }
 173 
 174   void set_lock_id(int64_t tid) {
 175     assert(tid >= ThreadIdentifier::initial() && tid < ThreadIdentifier::current(), "invalid tid");
 176     _lock_id = tid;
 177   }
 178   int64_t lock_id() const { return _lock_id; }
 179 
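
An illustrative sketch only (not part of this change) of how a caller might keep _lock_id in sync with the virtual thread being mounted; example_update_lock_id is a made-up name, and java_lang_Thread::thread_id() is assumed from the existing JDK sources:

    // Hypothetical helper: copy the j.l.Thread tid of the vthread being mounted
    // into the carrier's _lock_id so it can serve as the monitor owner ID.
    static void example_update_lock_id(JavaThread* current, oop vthread) {
      int64_t tid = java_lang_Thread::thread_id(vthread);  // assumed accessor
      current->set_lock_id(tid);  // asserts initial() <= tid < current()
    }
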
 180   // For tracking the heavyweight monitor the thread is pending on.
 181   ObjectMonitor* current_pending_monitor() {
 182     // Use Atomic::load() to prevent data race between concurrent modification and
 183     // concurrent readers, e.g. ThreadService::get_current_contended_monitor().
 184     // In particular, the pointer must not be reloaded from the thread after a null check.
 185     return Atomic::load(&_current_pending_monitor);
 186   }
 187   void set_current_pending_monitor(ObjectMonitor* monitor) {
 188     Atomic::store(&_current_pending_monitor, monitor);
 189   }
 190   void set_current_pending_monitor_is_from_java(bool from_java) {
 191     _current_pending_monitor_is_from_java = from_java;
 192   }
 193   bool current_pending_monitor_is_from_java() {
 194     return _current_pending_monitor_is_from_java;
 195   }
 196   ObjectMonitor* current_waiting_monitor() {
 197     // See the comment in current_pending_monitor() above.
 198     return Atomic::load(&_current_waiting_monitor);
 199   }
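
As a hedged illustration of the comment above, a concurrent reader (such as monitoring code) should load the monitor once through the accessor and only use the local copy afterwards; example_report_pending is a hypothetical name:

    // Illustrative reader: one Atomic::load via the accessor, and no re-read of
    // the field after the null check.
    static ObjectMonitor* example_report_pending(JavaThread* jt) {
      ObjectMonitor* m = jt->current_pending_monitor();
      if (m != nullptr) {
        // inspect m here; do not reload jt->current_pending_monitor()
      }
      return m;
    }
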

 316   //
 317   // _vm_exited is a special value to cover the case of a JavaThread
 318   // executing native code after the VM itself is terminated.
 319   //
 320   // A JavaThread that fails to JNI attach has these _terminated field transitions:
 321   //   _not_terminated => _thread_terminated
 322   //
 323   volatile TerminatedTypes _terminated;
 324 
 325   jint                  _in_deopt_handler;       // count of deoptimization
 326                                                  // handlers thread is in
 327   volatile bool         _doing_unsafe_access;    // Thread may fault due to unsafe access
 328   bool                  _do_not_unlock_if_synchronized;  // Do not unlock the receiver of a synchronized method (since it was
 329                                                          // never locked) when throwing an exception. Used by interpreter only.
 330 #if INCLUDE_JVMTI
 331   volatile bool         _carrier_thread_suspended;       // Carrier thread is externally suspended
 332   bool                  _is_in_VTMS_transition;          // thread is in virtual thread mount state transition
 333   bool                  _is_in_tmp_VTMS_transition;      // thread is in temporary virtual thread mount state transition
 334   bool                  _is_disable_suspend;             // JVMTI suspend is temporarily disabled; used on current thread only
 335   bool                  _VTMS_transition_mark;           // used for sync between VTMS transitions and disablers
 336   bool                  _pending_jvmti_unmount_event;    // When preempting we post unmount event at unmount end rather than start
 337   bool                  _on_monitor_waited_event;        // Avoid callee arg processing for enterSpecial when posting waited event
 338   ObjectMonitor*        _contended_entered_monitor;      // Monitor for pending monitor_contended_entered callback
 339 #ifdef ASSERT
 340   bool                  _is_VTMS_transition_disabler;    // thread currently disabled VTMS transitions
 341 #endif
 342 #endif
 343 
 344   // JNI attach states:
 345   enum JNIAttachStates {
 346     _not_attaching_via_jni = 1,  // thread is not attaching via JNI
 347     _attaching_via_jni,          // thread is attaching via JNI
 348     _attached_via_jni            // thread has attached via JNI
 349   };
 350 
 351   // A regular JavaThread's _jni_attach_state is _not_attaching_via_jni.
 352   // A native thread that is attaching via JNI starts with a value
 353   // of _attaching_via_jni and transitions to _attached_via_jni.
 354   volatile JNIAttachStates _jni_attach_state;
 355 
 356   // In scope of an InternalOOMEMark?
 357   bool _is_in_internal_oome_mark;
 358 

 469   // JVMTI PopFrame support
 470   // This is set to popframe_pending to signal that the top Java frame should be popped immediately
 471   int _popframe_condition;
 472 
 473   // If reallocation of scalar replaced objects fails, we throw OOM
 474   // and during exception propagation, pop the top
 475   // _frames_to_pop_failed_realloc frames, the ones that reference
 476   // failed reallocations.
 477   int _frames_to_pop_failed_realloc;
 478 
 479   ContinuationEntry* _cont_entry;
 480   intptr_t* _cont_fastpath; // the sp of the oldest known interpreted/call_stub frame inside the
 481                             // continuation that we know about
 482   int _cont_fastpath_thread_state; // whether global thread state allows continuation fastpath (JVMTI)
 483 
 484   // It's signed for error detection.
 485   intx _held_monitor_count;  // used by continuations for fast lock detection
 486   intx _jni_monitor_count;
 487   ObjectMonitor* _unlocked_inflated_monitor;
 488 
 489   bool _preempting;
 490   bool _preemption_cancelled;
 491   bool _pending_interrupted_exception;
 492   address _preempt_alternate_return; // used when preempting a thread
 493 
 494 #ifdef ASSERT
 495   intx _obj_locker_count;
 496 
 497  public:
 498   intx obj_locker_count() { return _obj_locker_count; }
 499   void inc_obj_locker_count() {
 500     assert(_obj_locker_count >= 0, "Must always be greater than or equal to 0: " INTX_FORMAT, _obj_locker_count);
 501     _obj_locker_count++;
 502   }
 503   void dec_obj_locker_count() {
 504     _obj_locker_count--;
 505     assert(_obj_locker_count >= 0, "Must always be greater than or equal to 0: " INTX_FORMAT, _obj_locker_count);
 506   }
 507 #endif // ASSERT
 508 
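
A minimal sketch, assuming a scoped locker wants to keep the debug-only counter balanced; ExampleScopedObjLocker is illustrative and not part of this header:

    // Debug-only bookkeeping: increment on construction, decrement on destruction.
    // DEBUG_ONLY expands to nothing in product builds, matching the #ifdef ASSERT
    // guard around the counter above.
    class ExampleScopedObjLocker {
      JavaThread* _thread;
     public:
      ExampleScopedObjLocker(JavaThread* t) : _thread(t) {
        DEBUG_ONLY(_thread->inc_obj_locker_count();)
      }
      ~ExampleScopedObjLocker() {
        DEBUG_ONLY(_thread->dec_obj_locker_count();)
      }
    };
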
 509 private:
 510 
 511   friend class VMThread;
 512   friend class ThreadWaitTransition;
 513   friend class VM_Exit;
 514 
 515   // Stack watermark barriers.
 516   StackWatermarks _stack_watermarks;
 517 
 518  public:
 519   inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; }
 520 
 521  public:
 522   // Constructor
 523   JavaThread(MemTag mem_tag = mtThread);   // delegating constructor
 524   JavaThread(ThreadFunction entry_point, size_t stack_size = 0, MemTag mem_tag = mtThread);
 525   ~JavaThread();
 526 
 527   // Factory method to create a new JavaThread whose attach state is "is attaching"
 528   static JavaThread* create_attaching_thread();

 634   void set_doing_unsafe_access(bool val)         { _doing_unsafe_access = val; }
 635 
 636   bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
 637   void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
 638 
 639   SafepointMechanism::ThreadData* poll_data() { return &_poll_data; }
 640 
 641   void set_requires_cross_modify_fence(bool val) PRODUCT_RETURN NOT_PRODUCT({ _requires_cross_modify_fence = val; })
 642 
 643   // Continuation support
 644   ContinuationEntry* last_continuation() const { return _cont_entry; }
 645   void set_cont_fastpath(intptr_t* x)          { _cont_fastpath = x; }
 646   void push_cont_fastpath(intptr_t* sp)        { if (sp > _cont_fastpath) _cont_fastpath = sp; }
 647   void set_cont_fastpath_thread_state(bool x)  { _cont_fastpath_thread_state = (int)x; }
 648   intptr_t* raw_cont_fastpath() const          { return _cont_fastpath; }
 649   bool cont_fastpath() const                   { return _cont_fastpath == nullptr && _cont_fastpath_thread_state != 0; }
 650   bool cont_fastpath_thread_state() const      { return _cont_fastpath_thread_state != 0; }
 651 
 652   void inc_held_monitor_count(intx i = 1, bool jni = false);
 653   void dec_held_monitor_count(intx i = 1, bool jni = false);

 654   intx held_monitor_count() { return _held_monitor_count; }
 655 
 656   intx jni_monitor_count()  { return _jni_monitor_count;  }
 657   void clear_jni_monitor_count() { _jni_monitor_count = 0; }
 658 
 659   // Support for SharedRuntime::monitor_exit_helper()
 660   ObjectMonitor* unlocked_inflated_monitor() const { return _unlocked_inflated_monitor; }
 661   void clear_unlocked_inflated_monitor() {
 662     _unlocked_inflated_monitor = nullptr;
 663   }
 664 
 665   inline bool is_vthread_mounted() const;
 666   inline const ContinuationEntry* vthread_continuation() const;
 667 
 668   bool preempting()           { return _preempting; }
 669   void set_preempting(bool b) { _preempting = b; }
 670 
 671   bool preemption_cancelled()           { return _preemption_cancelled; }
 672   void set_preemption_cancelled(bool b) { _preemption_cancelled = b; }
 673 
 674   bool pending_interrupted_exception()           { return _pending_interrupted_exception; }
 675   void set_pending_interrupted_exception(bool b) { _pending_interrupted_exception = b; }
 676 
 677   void set_preempt_alternate_return(address val) { _preempt_alternate_return = val; }
 678 
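
A rough, hedged sketch of how these flags interact; the surrounding freeze logic is only assumed here and is not part of this header:

    // Illustrative flow: mark the preemption attempt, and cancel it again if
    // freezing the continuation fails for some reason.
    static void example_try_preempt(JavaThread* target, bool freeze_failed) {
      target->set_preempting(true);
      if (freeze_failed) {
        target->set_preemption_cancelled(true);
        target->set_preempting(false);
      }
    }
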
 679  private:
 680   DEBUG_ONLY(void verify_frame_info();)
 681 
 682   // Support for thread handshake operations
 683   HandshakeState _handshake;
 684  public:
 685   HandshakeState* handshake_state() { return &_handshake; }
 686 
 687   // A JavaThread can always safely operate on itself, and other threads
 688   // can do so safely if they are the active handshaker.
 689   bool is_handshake_safe_for(Thread* th) const {
 690     return _handshake.active_handshaker() == th || this == th;
 691   }
 692 
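
A minimal usage sketch, assuming a caller that wants to touch another JavaThread's state directly; example_inspect is a hypothetical name:

    // Only touch the target directly if we are the target itself or its active
    // handshaker; otherwise a handshake or safepoint operation would be needed.
    static void example_inspect(JavaThread* target) {
      Thread* self = Thread::current();
      if (target->is_handshake_safe_for(self)) {
        // safe to read or update the target's thread-local state here
      }
    }
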
 693   // Suspend/resume support for JavaThread
 694   // higher-level suspension/resume logic called by the public APIs
 695   bool java_suspend();
 696   bool java_resume();
 697   bool is_suspended()     { return _handshake.is_suspended(); }
 698 

 707   inline void set_carrier_thread_suspended();
 708   inline void clear_carrier_thread_suspended();
 709 
 710   bool is_carrier_thread_suspended() const {
 711     return _carrier_thread_suspended;
 712   }
 713 
 714   bool is_in_VTMS_transition() const             { return _is_in_VTMS_transition; }
 715   bool is_in_tmp_VTMS_transition() const         { return _is_in_tmp_VTMS_transition; }
 716   bool is_in_any_VTMS_transition() const         { return _is_in_VTMS_transition || _is_in_tmp_VTMS_transition; }
 717 
 718   void set_is_in_VTMS_transition(bool val);
 719   void toggle_is_in_tmp_VTMS_transition()        { _is_in_tmp_VTMS_transition = !_is_in_tmp_VTMS_transition; };
 720 
 721   bool is_disable_suspend() const                { return _is_disable_suspend; }
 722   void toggle_is_disable_suspend()               { _is_disable_suspend = !_is_disable_suspend; };
 723 
 724   bool VTMS_transition_mark() const              { return Atomic::load(&_VTMS_transition_mark); }
 725   void set_VTMS_transition_mark(bool val)        { Atomic::store(&_VTMS_transition_mark, val); }
 726 
 727   bool pending_jvmti_unmount_event()             { return _pending_jvmti_unmount_event; }
 728   void set_pending_jvmti_unmount_event(bool val) { _pending_jvmti_unmount_event = val; }
 729 
 730   bool on_monitor_waited_event()             { return _on_monitor_waited_event; }
 731   void set_on_monitor_waited_event(bool val) { _on_monitor_waited_event = val; }
 732 
 733   bool pending_contended_entered_event()         { return _contended_entered_monitor != nullptr; }
 734   ObjectMonitor* contended_entered_monitor()     { return _contended_entered_monitor; }
 735 #ifdef ASSERT
 736   bool is_VTMS_transition_disabler() const       { return _is_VTMS_transition_disabler; }
 737   void set_is_VTMS_transition_disabler(bool val);
 738 #endif
 739 #endif
 740 
 741   void set_contended_entered_monitor(ObjectMonitor* val) NOT_JVMTI_RETURN JVMTI_ONLY({ _contended_entered_monitor = val; })
 742 
 743   // Support for object deoptimization and JFR suspension
 744   void handle_special_runtime_exit_condition();
 745   bool has_special_runtime_exit_condition() {
 746     return (_suspend_flags & (_obj_deopt JFR_ONLY(| _trace_flag))) != 0;
 747   }
 748 
 749   // Stack-locking support (not for LM_LIGHTWEIGHT)
 750   bool is_lock_owned(address adr) const;
 751 
 752   // Accessors for vframe array top
 753   // The linked list of vframe arrays is sorted on sp. This means that when we
 754   // unpack, the head must contain the vframe array to unpack.
 755   void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
 756   vframeArray* vframe_array_head() const         { return _vframe_array_head;  }
 757 
 758   // Side structure for deferring update of java frame locals until deopt occurs
 759   JvmtiDeferredUpdates* deferred_updates() const      { return _jvmti_deferred_updates; }
 760   void set_deferred_updates(JvmtiDeferredUpdates* du) { _jvmti_deferred_updates = du; }
 761 
 762   // These only really exist to make debugging deopt problems simpler

 875   }
 876   static ByteSize reserved_stack_activation_offset() {
 877     return byte_offset_of(JavaThread, _stack_overflow_state._reserved_stack_activation);
 878   }
 879   static ByteSize shadow_zone_safe_limit()  {
 880     return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_safe_limit);
 881   }
 882   static ByteSize shadow_zone_growth_watermark()  {
 883     return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_growth_watermark);
 884   }
 885 
 886   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }
 887 
 888   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
 889   static ByteSize should_post_on_exceptions_flag_offset() {
 890     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
 891   }
 892   static ByteSize doing_unsafe_access_offset() { return byte_offset_of(JavaThread, _doing_unsafe_access); }
 893   NOT_PRODUCT(static ByteSize requires_cross_modify_fence_offset()  { return byte_offset_of(JavaThread, _requires_cross_modify_fence); })
 894 
 895   static ByteSize lock_id_offset()            { return byte_offset_of(JavaThread, _lock_id); }
 896 
 897   static ByteSize cont_entry_offset()         { return byte_offset_of(JavaThread, _cont_entry); }
 898   static ByteSize cont_fastpath_offset()      { return byte_offset_of(JavaThread, _cont_fastpath); }
 899   static ByteSize held_monitor_count_offset() { return byte_offset_of(JavaThread, _held_monitor_count); }
 900   static ByteSize jni_monitor_count_offset()  { return byte_offset_of(JavaThread, _jni_monitor_count); }
 901   static ByteSize preempting_offset()         { return byte_offset_of(JavaThread, _preempting); }
 902   static ByteSize preemption_cancelled_offset()  { return byte_offset_of(JavaThread, _preemption_cancelled); }
 903   static ByteSize preempt_alternate_return_offset() { return byte_offset_of(JavaThread, _preempt_alternate_return); }
 904   static ByteSize unlocked_inflated_monitor_offset() { return byte_offset_of(JavaThread, _unlocked_inflated_monitor); }
 905 
 906 #if INCLUDE_JVMTI
 907   static ByteSize is_in_VTMS_transition_offset()     { return byte_offset_of(JavaThread, _is_in_VTMS_transition); }
 908   static ByteSize is_in_tmp_VTMS_transition_offset() { return byte_offset_of(JavaThread, _is_in_tmp_VTMS_transition); }
 909   static ByteSize is_disable_suspend_offset()        { return byte_offset_of(JavaThread, _is_disable_suspend); }
 910 #endif
 911 
 912   // Returns the jni environment for this thread
 913   JNIEnv* jni_environment()                      { return &_jni_environment; }
 914 
 915   // Returns the current thread as indicated by the given JNIEnv.
 916   // We don't assert it is Thread::current here as that is done at the
 917   // external JNI entry points where the JNIEnv is passed into the VM.
 918   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
 919     JavaThread* current = reinterpret_cast<JavaThread*>(((intptr_t)env - in_bytes(jni_environment_offset())));
 920     // We can't normally get here in a thread that has completed its
 921     // execution and so "is_terminated", except when the call is from
 922     // AsyncGetCallTrace, which can be triggered by a signal at any point in
 923     // a thread's lifecycle. A thread is also considered terminated if the VM

1304   public:
1305     UnlockFlagSaver(JavaThread* t) {
1306       _thread = t;
1307       _do_not_unlock = t->do_not_unlock_if_synchronized();
1308       t->set_do_not_unlock_if_synchronized(false);
1309     }
1310     ~UnlockFlagSaver() {
1311       _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
1312     }
1313 };
1314 
1315 class JNIHandleMark : public StackObj {
1316   JavaThread* _thread;
1317  public:
1318   JNIHandleMark(JavaThread* thread) : _thread(thread) {
1319     thread->push_jni_handle_block();
1320   }
1321   ~JNIHandleMark() { _thread->pop_jni_handle_block(); }
1322 };
1323 
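
Usage sketch for the two RAII helpers above; the enclosing function is illustrative:

    // Both guards restore state automatically when the scope ends.
    static void example_scoped_helpers(JavaThread* current) {
      UnlockFlagSaver ufs(current);  // saves and clears do_not_unlock_if_synchronized
      JNIHandleMark jhm(current);    // pushes a JNI handle block, popped in ~JNIHandleMark
      // ... work that creates temporary JNI handles ...
    }
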
1324 class ThreadOnMonitorEnter {
1325   JavaThread* _thread;
1326  public:
1327   ThreadOnMonitorEnter(JavaThread* thread) : _thread(thread) {
1328     _thread->set_on_monitorenter(true);
1329   }
1330   ~ThreadOnMonitorEnter() { _thread->set_on_monitorenter(false); }
1331 };
1332 
1333 class ThreadOnMonitorWaitedEvent {
1334   JavaThread* _thread;
1335  public:
1336   ThreadOnMonitorWaitedEvent(JavaThread* thread) : _thread(thread) {
1337     JVMTI_ONLY(_thread->set_on_monitor_waited_event(true);)
1338   }
1339   ~ThreadOnMonitorWaitedEvent() { JVMTI_ONLY(_thread->set_on_monitor_waited_event(false);) }
1340 };
1341 
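
Similarly, a hedged usage sketch for the two new marker guards; the caller shown is hypothetical:

    // ThreadOnMonitorEnter flags the thread while it performs a contended enter;
    // ThreadOnMonitorWaitedEvent only has an effect when JVMTI is included in the build.
    static void example_contended_enter(JavaThread* current) {
      ThreadOnMonitorEnter tome(current);
      // ... enter the monitor while _on_monitorenter is true ...
    }
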
1342 #endif // SHARE_RUNTIME_JAVATHREAD_HPP