
src/hotspot/share/runtime/javaThread.hpp


  24  */
  25 
  26 #ifndef SHARE_RUNTIME_JAVATHREAD_HPP
  27 #define SHARE_RUNTIME_JAVATHREAD_HPP
  28 
  29 #include "jni.h"
  30 #include "memory/allocation.hpp"
  31 #include "oops/oop.hpp"
  32 #include "oops/oopHandle.hpp"
  33 #include "runtime/frame.hpp"
  34 #include "runtime/globals.hpp"
  35 #include "runtime/handshake.hpp"
  36 #include "runtime/javaFrameAnchor.hpp"
  37 #include "runtime/lockStack.hpp"
  38 #include "runtime/park.hpp"
  39 #include "runtime/safepointMechanism.hpp"
  40 #include "runtime/stackWatermarkSet.hpp"
  41 #include "runtime/stackOverflow.hpp"
  42 #include "runtime/thread.hpp"
  43 #include "runtime/threadHeapSampler.hpp"

  44 #include "runtime/threadStatisticalInfo.hpp"
  45 #include "utilities/exceptions.hpp"
  46 #include "utilities/globalDefinitions.hpp"
  47 #include "utilities/macros.hpp"
  48 #if INCLUDE_JFR
  49 #include "jfr/support/jfrThreadExtension.hpp"
  50 #endif
  51 
  52 class AsyncExceptionHandshake;
  53 class ContinuationEntry;
  54 class DeoptResourceMark;
  55 class JNIHandleBlock;
  56 class JVMCIRuntime;
  57 
  58 class JvmtiDeferredUpdates;
  59 class JvmtiSampledObjectAllocEventCollector;
  60 class JvmtiThreadState;
  61 
  62 class Metadata;
  63 class OopHandleList;

 140 
 141   // Used to pass back results to the interpreter or generated code running Java code.
 142   oop           _vm_result;    // oop result is GC-preserved
 143   Metadata*     _vm_result_2;  // non-oop result
 144 
 145   // See ReduceInitialCardMarks: this holds the precise space interval of
 146   // the most recent slow path allocation for which compiled code has
 147   // elided card-marks for performance along the fast-path.
 148   MemRegion     _deferred_card_mark;
 149 
 150   ObjectMonitor* volatile _current_pending_monitor;     // ObjectMonitor this thread is waiting to lock
 151   bool           _current_pending_monitor_is_from_java; // locking is from Java code
 152   ObjectMonitor* volatile _current_waiting_monitor;     // ObjectMonitor on which this thread called Object.wait()
 153 
 154   // Active_handles points to a block of handles
 155   JNIHandleBlock* _active_handles;
 156 
 157   // One-element thread local free list
 158   JNIHandleBlock* _free_handle_block;
 159 





 160  public:
 161   volatile intptr_t _Stalled;










 162 
 163   // For tracking the heavyweight monitor the thread is pending on.
 164   ObjectMonitor* current_pending_monitor() {
 165     // Use Atomic::load() to prevent data race between concurrent modification and
 166     // concurrent readers, e.g. ThreadService::get_current_contended_monitor().
 167     // Especially, reloading pointer from thread after null check must be prevented.
 168     return Atomic::load(&_current_pending_monitor);
 169   }
 170   void set_current_pending_monitor(ObjectMonitor* monitor) {
 171     Atomic::store(&_current_pending_monitor, monitor);
 172   }
 173   void set_current_pending_monitor_is_from_java(bool from_java) {
 174     _current_pending_monitor_is_from_java = from_java;
 175   }
 176   bool current_pending_monitor_is_from_java() {
 177     return _current_pending_monitor_is_from_java;
 178   }
 179   ObjectMonitor* current_waiting_monitor() {
 180     // See the comment in current_pending_monitor() above.
 181     return Atomic::load(&_current_waiting_monitor);

 439   int _depth_first_number;
 440 
 441   // JVMTI PopFrame support
  442   // This is set to popframe_pending to signal that the top Java frame should be popped immediately
 443   int _popframe_condition;
 444 
 445   // If reallocation of scalar replaced objects fails, we throw OOM
 446   // and during exception propagation, pop the top
 447   // _frames_to_pop_failed_realloc frames, the ones that reference
 448   // failed reallocations.
 449   int _frames_to_pop_failed_realloc;
 450 
 451   ContinuationEntry* _cont_entry;
  452   intptr_t* _cont_fastpath; // the sp of the oldest interpreted/call_stub frame inside the
 453                             // continuation that we know about
 454   int _cont_fastpath_thread_state; // whether global thread state allows continuation fastpath (JVMTI)
 455 
 456   // It's signed for error detection.
 457   intx _held_monitor_count;  // used by continuations for fast lock detection
 458   intx _jni_monitor_count;




















 459 
 460 private:
 461 
 462   friend class VMThread;
 463   friend class ThreadWaitTransition;
 464   friend class VM_Exit;
 465 
 466   // Stack watermark barriers.
 467   StackWatermarks _stack_watermarks;
 468 
 469  public:
 470   inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; }
 471 
 472  public:
 473   // Constructor
 474   JavaThread();                            // delegating constructor
 475   JavaThread(bool is_attaching_via_jni);   // for main thread and JNI attached threads
 476   JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
 477   ~JavaThread();
 478 

 584   bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
 585   void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
 586 
 587   SafepointMechanism::ThreadData* poll_data() { return &_poll_data; }
 588 
 589   void set_requires_cross_modify_fence(bool val) PRODUCT_RETURN NOT_PRODUCT({ _requires_cross_modify_fence = val; })
 590 
 591   // Continuation support
 592   ContinuationEntry* last_continuation() const { return _cont_entry; }
 593   void set_cont_fastpath(intptr_t* x)          { _cont_fastpath = x; }
 594   void push_cont_fastpath(intptr_t* sp)        { if (sp > _cont_fastpath) _cont_fastpath = sp; }
 595   void set_cont_fastpath_thread_state(bool x)  { _cont_fastpath_thread_state = (int)x; }
 596   intptr_t* raw_cont_fastpath() const          { return _cont_fastpath; }
 597   bool cont_fastpath() const                   { return _cont_fastpath == nullptr && _cont_fastpath_thread_state != 0; }
 598   bool cont_fastpath_thread_state() const      { return _cont_fastpath_thread_state != 0; }
 599 
 600   void inc_held_monitor_count(intx i = 1, bool jni = false);
 601   void dec_held_monitor_count(intx i = 1, bool jni = false);
 602 
 603   intx held_monitor_count() { return _held_monitor_count; }


 604   intx jni_monitor_count()  { return _jni_monitor_count;  }
 605   void clear_jni_monitor_count() { _jni_monitor_count = 0;   }
 606 
 607   inline bool is_vthread_mounted() const;
 608   inline const ContinuationEntry* vthread_continuation() const;
 609 











 610  private:
 611   DEBUG_ONLY(void verify_frame_info();)
 612 
 613   // Support for thread handshake operations
 614   HandshakeState _handshake;
 615  public:
 616   HandshakeState* handshake_state() { return &_handshake; }
 617 
  618   // A JavaThread can always safely operate on itself, and other threads
  619   // can do so safely if they are the active handshaker.
 620   bool is_handshake_safe_for(Thread* th) const {
 621     return _handshake.active_handshaker() == th || this == th;
 622   }
 623 
 624   // Suspend/resume support for JavaThread
 625   // higher-level suspension/resume logic called by the public APIs
 626   bool java_suspend();
 627   bool java_resume();
 628   bool is_suspended()     { return _handshake.is_suspended(); }
 629 

 792   }
 793   static ByteSize reserved_stack_activation_offset() {
 794     return byte_offset_of(JavaThread, _stack_overflow_state._reserved_stack_activation);
 795   }
 796   static ByteSize shadow_zone_safe_limit()  {
 797     return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_safe_limit);
 798   }
 799   static ByteSize shadow_zone_growth_watermark()  {
 800     return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_growth_watermark);
 801   }
 802 
 803   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }
 804 
 805   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
 806   static ByteSize should_post_on_exceptions_flag_offset() {
 807     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
 808   }
 809   static ByteSize doing_unsafe_access_offset() { return byte_offset_of(JavaThread, _doing_unsafe_access); }
 810   NOT_PRODUCT(static ByteSize requires_cross_modify_fence_offset()  { return byte_offset_of(JavaThread, _requires_cross_modify_fence); })
 811 


 812   static ByteSize cont_entry_offset()         { return byte_offset_of(JavaThread, _cont_entry); }
 813   static ByteSize cont_fastpath_offset()      { return byte_offset_of(JavaThread, _cont_fastpath); }
 814   static ByteSize held_monitor_count_offset() { return byte_offset_of(JavaThread, _held_monitor_count); }





 815 
 816 #if INCLUDE_JVMTI
 817   static ByteSize is_in_VTMS_transition_offset()     { return byte_offset_of(JavaThread, _is_in_VTMS_transition); }
 818   static ByteSize is_in_tmp_VTMS_transition_offset() { return byte_offset_of(JavaThread, _is_in_tmp_VTMS_transition); }
 819   static ByteSize is_disable_suspend_offset()        { return byte_offset_of(JavaThread, _is_disable_suspend); }
 820 #endif
 821 
 822   // Returns the jni environment for this thread
 823   JNIEnv* jni_environment()                      { return &_jni_environment; }
 824 
 825   // Returns the current thread as indicated by the given JNIEnv.
 826   // We don't assert it is Thread::current here as that is done at the
 827   // external JNI entry points where the JNIEnv is passed into the VM.
 828   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
 829     JavaThread* current = reinterpret_cast<JavaThread*>(((intptr_t)env - in_bytes(jni_environment_offset())));
 830     // We can't normally get here in a thread that has completed its
 831     // execution and so "is_terminated", except when the call is from
 832     // AsyncGetCallTrace, which can be triggered by a signal at any point in
 833     // a thread's lifecycle. A thread is also considered terminated if the VM
 834     // has exited, so we have to check this and block in case this is a daemon

1208   public:
1209     UnlockFlagSaver(JavaThread* t) {
1210       _thread = t;
1211       _do_not_unlock = t->do_not_unlock_if_synchronized();
1212       t->set_do_not_unlock_if_synchronized(false);
1213     }
1214     ~UnlockFlagSaver() {
1215       _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
1216     }
1217 };
1218 
1219 class JNIHandleMark : public StackObj {
1220   JavaThread* _thread;
1221  public:
1222   JNIHandleMark(JavaThread* thread) : _thread(thread) {
1223     thread->push_jni_handle_block();
1224   }
1225   ~JNIHandleMark() { _thread->pop_jni_handle_block(); }
1226 };
1227 









1228 #endif // SHARE_RUNTIME_JAVATHREAD_HPP

  24  */
  25 
  26 #ifndef SHARE_RUNTIME_JAVATHREAD_HPP
  27 #define SHARE_RUNTIME_JAVATHREAD_HPP
  28 
  29 #include "jni.h"
  30 #include "memory/allocation.hpp"
  31 #include "oops/oop.hpp"
  32 #include "oops/oopHandle.hpp"
  33 #include "runtime/frame.hpp"
  34 #include "runtime/globals.hpp"
  35 #include "runtime/handshake.hpp"
  36 #include "runtime/javaFrameAnchor.hpp"
  37 #include "runtime/lockStack.hpp"
  38 #include "runtime/park.hpp"
  39 #include "runtime/safepointMechanism.hpp"
  40 #include "runtime/stackWatermarkSet.hpp"
  41 #include "runtime/stackOverflow.hpp"
  42 #include "runtime/thread.hpp"
  43 #include "runtime/threadHeapSampler.hpp"
  44 #include "runtime/threadIdentifier.hpp"
  45 #include "runtime/threadStatisticalInfo.hpp"
  46 #include "utilities/exceptions.hpp"
  47 #include "utilities/globalDefinitions.hpp"
  48 #include "utilities/macros.hpp"
  49 #if INCLUDE_JFR
  50 #include "jfr/support/jfrThreadExtension.hpp"
  51 #endif
  52 
  53 class AsyncExceptionHandshake;
  54 class ContinuationEntry;
  55 class DeoptResourceMark;
  56 class JNIHandleBlock;
  57 class JVMCIRuntime;
  58 
  59 class JvmtiDeferredUpdates;
  60 class JvmtiSampledObjectAllocEventCollector;
  61 class JvmtiThreadState;
  62 
  63 class Metadata;
  64 class OopHandleList;

 141 
 142   // Used to pass back results to the interpreter or generated code running Java code.
 143   oop           _vm_result;    // oop result is GC-preserved
 144   Metadata*     _vm_result_2;  // non-oop result
 145 
 146   // See ReduceInitialCardMarks: this holds the precise space interval of
 147   // the most recent slow path allocation for which compiled code has
 148   // elided card-marks for performance along the fast-path.
 149   MemRegion     _deferred_card_mark;
 150 
 151   ObjectMonitor* volatile _current_pending_monitor;     // ObjectMonitor this thread is waiting to lock
 152   bool           _current_pending_monitor_is_from_java; // locking is from Java code
 153   ObjectMonitor* volatile _current_waiting_monitor;     // ObjectMonitor on which this thread called Object.wait()
 154 
 155   // Active_handles points to a block of handles
 156   JNIHandleBlock* _active_handles;
 157 
 158   // One-element thread local free list
 159   JNIHandleBlock* _free_handle_block;
 160 
  161   // ID used as owner for inflated monitors. Same as the tid field of the current
  162   // _vthread object, except during creation of the primordial and JNI attached
  163   // threads, where this field can have a temporary value.
 164   int64_t _lock_id;
 165 
 166  public:
 167   volatile intptr_t _Stalled;
 168   bool _on_monitorenter;
 169 
 170   bool is_on_monitorenter() { return _on_monitorenter; }
 171   void set_on_monitorenter(bool val) { _on_monitorenter = val; }
 172 
 173   void set_lock_id(int64_t tid) {
 174     assert(tid >= ThreadIdentifier::initial() && tid < ThreadIdentifier::current(), "invalid tid");
 175     _lock_id = tid;
 176   }
 177   int64_t lock_id() const { return _lock_id; }
 178 
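The _lock_id accessors above identify a monitor owner by the Java-level thread id rather than by JavaThread*. As a minimal sketch (not part of this header, and assuming a monitor that records its owner as an int64_t id), an ownership test would compare against lock_id():

// Hedged illustration only; is_owned_by and owner_id are hypothetical names.
static bool is_owned_by(int64_t owner_id, JavaThread* current) {
  // With virtual threads mounted on carrier threads, the stable identity is
  // the lock id (the java.lang.Thread tid of the mounted thread), not the
  // JavaThread* of the carrier.
  return owner_id == current->lock_id();
}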
 179   // For tracking the heavyweight monitor the thread is pending on.
 180   ObjectMonitor* current_pending_monitor() {
 181     // Use Atomic::load() to prevent data race between concurrent modification and
 182     // concurrent readers, e.g. ThreadService::get_current_contended_monitor().
 183     // Especially, reloading pointer from thread after null check must be prevented.
 184     return Atomic::load(&_current_pending_monitor);
 185   }
 186   void set_current_pending_monitor(ObjectMonitor* monitor) {
 187     Atomic::store(&_current_pending_monitor, monitor);
 188   }
 189   void set_current_pending_monitor_is_from_java(bool from_java) {
 190     _current_pending_monitor_is_from_java = from_java;
 191   }
 192   bool current_pending_monitor_is_from_java() {
 193     return _current_pending_monitor_is_from_java;
 194   }
 195   ObjectMonitor* current_waiting_monitor() {
 196     // See the comment in current_pending_monitor() above.
 197     return Atomic::load(&_current_waiting_monitor);

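The comment in current_pending_monitor() is the key detail here: the pointer must be read exactly once. A hedged sketch of the pattern the accessor enables (the reporting function is illustrative, not part of this header):

// Illustrative only. Reading the field twice (null check, then use) allows the
// compiler or a concurrent writer to change the value in between; loading it
// once through the Atomic::load()-based accessor avoids that.
static void print_contended_monitor(JavaThread* jt, outputStream* st) {
  ObjectMonitor* m = jt->current_pending_monitor();  // single load
  if (m != nullptr) {
    st->print_cr("pending monitor: " PTR_FORMAT, p2i(m));  // use the local copy only
  }
}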
 455   int _depth_first_number;
 456 
 457   // JVMTI PopFrame support
  458   // This is set to popframe_pending to signal that the top Java frame should be popped immediately
 459   int _popframe_condition;
 460 
 461   // If reallocation of scalar replaced objects fails, we throw OOM
 462   // and during exception propagation, pop the top
 463   // _frames_to_pop_failed_realloc frames, the ones that reference
 464   // failed reallocations.
 465   int _frames_to_pop_failed_realloc;
 466 
 467   ContinuationEntry* _cont_entry;
  468   intptr_t* _cont_fastpath; // the sp of the oldest interpreted/call_stub frame inside the
 469                             // continuation that we know about
 470   int _cont_fastpath_thread_state; // whether global thread state allows continuation fastpath (JVMTI)
 471 
 472   // It's signed for error detection.
 473   intx _held_monitor_count;  // used by continuations for fast lock detection
 474   intx _jni_monitor_count;
 475   bool _preempting;
 476   bool _preemption_cancelled;
 477   bool _jvmti_unmount_event_pending;
 478   address _preempt_alternate_return; // used when preempting a thread
 479   address _preempt_alternate_return_sp;
 480 
 481 #ifdef ASSERT
 482   intx _obj_locker_count;
 483 
 484  public:
 485   intx obj_locker_count() { return _obj_locker_count; }
 486   void inc_obj_locker_count() {
  487     assert(_obj_locker_count >= 0, "Must never be negative: " INTX_FORMAT, _obj_locker_count);
 488     _obj_locker_count++;
 489   }
 490   void dec_obj_locker_count() {
 491     _obj_locker_count--;
  492     assert(_obj_locker_count >= 0, "Must never be negative: " INTX_FORMAT, _obj_locker_count);
 493   }
 494 #endif // ASSERT
 495 
 496 private:
 497 
 498   friend class VMThread;
 499   friend class ThreadWaitTransition;
 500   friend class VM_Exit;
 501 
 502   // Stack watermark barriers.
 503   StackWatermarks _stack_watermarks;
 504 
 505  public:
 506   inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; }
 507 
 508  public:
 509   // Constructor
 510   JavaThread();                            // delegating constructor
 511   JavaThread(bool is_attaching_via_jni);   // for main thread and JNI attached threads
 512   JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
 513   ~JavaThread();
 514 

 620   bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
 621   void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
 622 
 623   SafepointMechanism::ThreadData* poll_data() { return &_poll_data; }
 624 
 625   void set_requires_cross_modify_fence(bool val) PRODUCT_RETURN NOT_PRODUCT({ _requires_cross_modify_fence = val; })
 626 
 627   // Continuation support
 628   ContinuationEntry* last_continuation() const { return _cont_entry; }
 629   void set_cont_fastpath(intptr_t* x)          { _cont_fastpath = x; }
 630   void push_cont_fastpath(intptr_t* sp)        { if (sp > _cont_fastpath) _cont_fastpath = sp; }
 631   void set_cont_fastpath_thread_state(bool x)  { _cont_fastpath_thread_state = (int)x; }
 632   intptr_t* raw_cont_fastpath() const          { return _cont_fastpath; }
 633   bool cont_fastpath() const                   { return _cont_fastpath == nullptr && _cont_fastpath_thread_state != 0; }
 634   bool cont_fastpath_thread_state() const      { return _cont_fastpath_thread_state != 0; }
 635 
 636   void inc_held_monitor_count(intx i = 1, bool jni = false);
 637   void dec_held_monitor_count(intx i = 1, bool jni = false);
 638 
 639   intx held_monitor_count() { return _held_monitor_count; }
 640   void clear_held_monitor_count() { _held_monitor_count = 0; }
 641   void set_held_monitor_count(int val) { _held_monitor_count = val; }
 642   intx jni_monitor_count()  { return _jni_monitor_count;  }
 643   void clear_jni_monitor_count() { _jni_monitor_count = 0; }
 644 
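inc_held_monitor_count()/dec_held_monitor_count() keep the per-thread balance that continuations use for fast lock detection; the jni parameter suggests JNI-acquired monitors are additionally tracked in _jni_monitor_count, though the definitions are not in this header. A hedged usage sketch:

// Hypothetical call sites only; the enter/exit helpers are placeholders.
static void jni_enter_example(JavaThread* current /*, acquired monitor */) {
  // after successfully entering a monitor on behalf of JNI MonitorEnter
  current->inc_held_monitor_count(1, /* jni= */ true);
}
static void jni_exit_example(JavaThread* current /*, released monitor */) {
  // after exiting that monitor from JNI MonitorExit
  current->dec_held_monitor_count(1, /* jni= */ true);
}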
 645   inline bool is_vthread_mounted() const;
 646   inline const ContinuationEntry* vthread_continuation() const;
 647 
 648   bool preempting()           { return _preempting; }
 649   void set_preempting(bool b) { _preempting = b; }
 650 
 651   bool preemption_cancelled() { return _preemption_cancelled; }
 652   void set_preemption_cancelled(bool val) { _preemption_cancelled = val; }
 653 
 654   bool jvmti_unmount_event_pending() { return _jvmti_unmount_event_pending; }
 655   void set_jvmti_unmount_event_pending(bool val) { _jvmti_unmount_event_pending = val; }
 656 
 657   void set_preempt_alternate_return(address val) { _preempt_alternate_return = val; }
 658   void set_preempt_alternate_return_sp(address val) { _preempt_alternate_return_sp = val; }
 659  private:
 660   DEBUG_ONLY(void verify_frame_info();)
 661 
 662   // Support for thread handshake operations
 663   HandshakeState _handshake;
 664  public:
 665   HandshakeState* handshake_state() { return &_handshake; }
 666 
  667   // A JavaThread can always safely operate on itself, and other threads
  668   // can do so safely if they are the active handshaker.
 669   bool is_handshake_safe_for(Thread* th) const {
 670     return _handshake.active_handshaker() == th || this == th;
 671   }
 672 
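is_handshake_safe_for() encodes the rule spelled out in the comment: a thread may touch its own state, and another thread may only do so while it is the active handshaker. A minimal sketch of the guard (the inspection function is hypothetical):

// Sketch only; inspect_thread is not part of this header.
static void inspect_thread(JavaThread* target) {
  Thread* current = Thread::current();
  assert(target->is_handshake_safe_for(current),
         "only the target itself or its active handshaker may inspect it");
  // ... safe to walk target's frames or monitor state here ...
}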
 673   // Suspend/resume support for JavaThread
 674   // higher-level suspension/resume logic called by the public APIs
 675   bool java_suspend();
 676   bool java_resume();
 677   bool is_suspended()     { return _handshake.is_suspended(); }
 678 

 841   }
 842   static ByteSize reserved_stack_activation_offset() {
 843     return byte_offset_of(JavaThread, _stack_overflow_state._reserved_stack_activation);
 844   }
 845   static ByteSize shadow_zone_safe_limit()  {
 846     return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_safe_limit);
 847   }
 848   static ByteSize shadow_zone_growth_watermark()  {
 849     return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_growth_watermark);
 850   }
 851 
 852   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }
 853 
 854   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
 855   static ByteSize should_post_on_exceptions_flag_offset() {
 856     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
 857   }
 858   static ByteSize doing_unsafe_access_offset() { return byte_offset_of(JavaThread, _doing_unsafe_access); }
 859   NOT_PRODUCT(static ByteSize requires_cross_modify_fence_offset()  { return byte_offset_of(JavaThread, _requires_cross_modify_fence); })
 860 
 861   static ByteSize lock_id_offset()            { return byte_offset_of(JavaThread, _lock_id); }
 862 
 863   static ByteSize cont_entry_offset()         { return byte_offset_of(JavaThread, _cont_entry); }
 864   static ByteSize cont_fastpath_offset()      { return byte_offset_of(JavaThread, _cont_fastpath); }
 865   static ByteSize held_monitor_count_offset() { return byte_offset_of(JavaThread, _held_monitor_count); }
 866   static ByteSize preempting_offset()         { return byte_offset_of(JavaThread, _preempting); }
 867   static ByteSize preemption_cancelled_offset()  { return byte_offset_of(JavaThread, _preemption_cancelled); }
 868   static ByteSize preempt_alternate_return_offset() {
 869     return byte_offset_of(JavaThread, _preempt_alternate_return);
 870   }
 871 
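The ByteSize accessors above exist so generated code and assembly stubs can address JavaThread fields at a fixed offset from the thread register. In C++ terms the equivalent computation looks roughly like this (the helper name is illustrative):

// Sketch only: how an offset accessor translates into an address computation.
static ContinuationEntry* load_cont_entry_by_offset(JavaThread* thread) {
  address base = (address) thread;                                  // thread register
  address slot = base + in_bytes(JavaThread::cont_entry_offset());  // field address
  return *(ContinuationEntry**) slot;                               // load the field
}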
 872 #if INCLUDE_JVMTI
 873   static ByteSize is_in_VTMS_transition_offset()     { return byte_offset_of(JavaThread, _is_in_VTMS_transition); }
 874   static ByteSize is_in_tmp_VTMS_transition_offset() { return byte_offset_of(JavaThread, _is_in_tmp_VTMS_transition); }
 875   static ByteSize is_disable_suspend_offset()        { return byte_offset_of(JavaThread, _is_disable_suspend); }
 876 #endif
 877 
 878   // Returns the jni environment for this thread
 879   JNIEnv* jni_environment()                      { return &_jni_environment; }
 880 
 881   // Returns the current thread as indicated by the given JNIEnv.
 882   // We don't assert it is Thread::current here as that is done at the
 883   // external JNI entry points where the JNIEnv is passed into the VM.
 884   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
 885     JavaThread* current = reinterpret_cast<JavaThread*>(((intptr_t)env - in_bytes(jni_environment_offset())));
 886     // We can't normally get here in a thread that has completed its
 887     // execution and so "is_terminated", except when the call is from
 888     // AsyncGetCallTrace, which can be triggered by a signal at any point in
 889     // a thread's lifecycle. A thread is also considered terminated if the VM
 890     // has exited, so we have to check this and block in case this is a daemon

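thread_from_jni_environment() is the inverse of jni_environment(): the JNIEnv handed to native code is the _jni_environment field embedded in the JavaThread, so subtracting that field's offset recovers the owning thread (a container_of-style computation). A hedged round-trip sketch:

// Illustration only; round_trip is not part of this header.
static void round_trip(JavaThread* jt) {
  JNIEnv* env = jt->jni_environment();  // address of the embedded _jni_environment
  JavaThread* back = JavaThread::thread_from_jni_environment(env);
  assert(back == jt, "offset arithmetic must invert jni_environment()");
}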
1264   public:
1265     UnlockFlagSaver(JavaThread* t) {
1266       _thread = t;
1267       _do_not_unlock = t->do_not_unlock_if_synchronized();
1268       t->set_do_not_unlock_if_synchronized(false);
1269     }
1270     ~UnlockFlagSaver() {
1271       _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
1272     }
1273 };
1274 
1275 class JNIHandleMark : public StackObj {
1276   JavaThread* _thread;
1277  public:
1278   JNIHandleMark(JavaThread* thread) : _thread(thread) {
1279     thread->push_jni_handle_block();
1280   }
1281   ~JNIHandleMark() { _thread->pop_jni_handle_block(); }
1282 };
1283 
1284 class ThreadOnMonitorEnter {
1285   JavaThread* _thread;
1286  public:
1287   ThreadOnMonitorEnter(JavaThread* thread) : _thread(thread) {
1288     _thread->set_on_monitorenter(true);
1289   }
1290   ~ThreadOnMonitorEnter() { _thread->set_on_monitorenter(false); }
1291 };
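UnlockFlagSaver, JNIHandleMark and ThreadOnMonitorEnter are all scope guards: the constructor flips per-thread state and the destructor restores it, so early returns cannot leak the state. A hedged usage sketch (the entry function is hypothetical):

// Illustration only.
static void runtime_entry_example(JavaThread* current) {
  UnlockFlagSaver fs(current);        // clear the do-not-unlock flag, restore on exit
  JNIHandleMark jhm(current);         // push a JNI handle block, popped on exit
  ThreadOnMonitorEnter tme(current);  // mark the thread as inside monitorenter
  // ... code that may allocate JNI handles or block entering a monitor ...
}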
1292 
1293 #endif // SHARE_RUNTIME_JAVATHREAD_HPP