24 */
25
26 #ifndef SHARE_RUNTIME_JAVATHREAD_HPP
27 #define SHARE_RUNTIME_JAVATHREAD_HPP
28
29 #include "jni.h"
30 #include "memory/allocation.hpp"
31 #include "oops/oop.hpp"
32 #include "oops/oopHandle.hpp"
33 #include "runtime/frame.hpp"
34 #include "runtime/globals.hpp"
35 #include "runtime/handshake.hpp"
36 #include "runtime/javaFrameAnchor.hpp"
37 #include "runtime/lockStack.hpp"
38 #include "runtime/park.hpp"
39 #include "runtime/safepointMechanism.hpp"
40 #include "runtime/stackWatermarkSet.hpp"
41 #include "runtime/stackOverflow.hpp"
42 #include "runtime/thread.hpp"
43 #include "runtime/threadHeapSampler.hpp"
44 #include "runtime/threadStatisticalInfo.hpp"
45 #include "utilities/exceptions.hpp"
46 #include "utilities/globalDefinitions.hpp"
47 #include "utilities/macros.hpp"
48 #if INCLUDE_JFR
49 #include "jfr/support/jfrThreadExtension.hpp"
50 #endif
51
52 class AsyncExceptionHandshake;
53 class ContinuationEntry;
54 class DeoptResourceMark;
55 class JNIHandleBlock;
56 class JVMCIRuntime;
57
58 class JvmtiDeferredUpdates;
59 class JvmtiSampledObjectAllocEventCollector;
60 class JvmtiThreadState;
61
62 class Metadata;
63 class OopHandleList;
140
141 // Used to pass back results to the interpreter or generated code running Java code.
142 oop _vm_result; // oop result is GC-preserved
143 Metadata* _vm_result_2; // non-oop result
144
145 // See ReduceInitialCardMarks: this holds the precise space interval of
146 // the most recent slow path allocation for which compiled code has
147 // elided card-marks for performance along the fast-path.
148 MemRegion _deferred_card_mark;
149
150 ObjectMonitor* volatile _current_pending_monitor; // ObjectMonitor this thread is waiting to lock
151 bool _current_pending_monitor_is_from_java; // locking is from Java code
152 ObjectMonitor* volatile _current_waiting_monitor; // ObjectMonitor on which this thread called Object.wait()
153
154 // Active_handles points to a block of handles
155 JNIHandleBlock* _active_handles;
156
157 // One-element thread local free list
158 JNIHandleBlock* _free_handle_block;
159
160 public:
161   // For tracking the heavyweight monitor the thread is pending on.
162   ObjectMonitor* current_pending_monitor() {
163     // Use Atomic::load() to prevent data race between concurrent modification and
164     // concurrent readers, e.g. ThreadService::get_current_contended_monitor().
165     // Especially, reloading pointer from thread after null check must be prevented.
166     return Atomic::load(&_current_pending_monitor);
167   }
  // Store counterpart of current_pending_monitor(); Atomic::store pairs with
  // the Atomic::load in concurrent readers.
168   void set_current_pending_monitor(ObjectMonitor* monitor) {
169     Atomic::store(&_current_pending_monitor, monitor);
170   }
  // Records whether the pending lock request came from Java code
  // (see the _current_pending_monitor_is_from_java field comment).
171   void set_current_pending_monitor_is_from_java(bool from_java) {
172     _current_pending_monitor_is_from_java = from_java;
173   }
174   bool current_pending_monitor_is_from_java() {
175     return _current_pending_monitor_is_from_java;
176   }
  // Monitor on which this thread called Object.wait(); same data-race caveat.
177   ObjectMonitor* current_waiting_monitor() {
178     // See the comment in current_pending_monitor() above.
179     return Atomic::load(&_current_waiting_monitor);
180   }
450 int _depth_first_number;
451
452 // JVMTI PopFrame support
453 // This is set to popframe_pending to signal that top Java frame should be popped immediately
454 int _popframe_condition;
455
456 // If reallocation of scalar replaced objects fails, we throw OOM
457 // and during exception propagation, pop the top
458 // _frames_to_pop_failed_realloc frames, the ones that reference
459 // failed reallocations.
460 int _frames_to_pop_failed_realloc;
461
462 ContinuationEntry* _cont_entry;
463 intptr_t* _cont_fastpath; // the sp of the oldest known interpreted/call_stub frame inside the
464 // continuation that we know about
465 int _cont_fastpath_thread_state; // whether global thread state allows continuation fastpath (JVMTI)
466
467 // It's signed for error detection.
468 intx _held_monitor_count; // used by continuations for fast lock detection
469 intx _jni_monitor_count;
470
471 private:
472
473 friend class VMThread;
474 friend class ThreadWaitTransition;
475 friend class VM_Exit;
476
477 // Stack watermark barriers.
478 StackWatermarks _stack_watermarks;
479
480 public:
481 inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; }
482
483 public:
484 // Constructor
485 JavaThread(); // delegating constructor
486 JavaThread(bool is_attaching_via_jni); // for main thread and JNI attached threads
487 JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
488 ~JavaThread();
489
594 void set_doing_unsafe_access(bool val) { _doing_unsafe_access = val; }
595
596 bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; }
597 void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
598
599 SafepointMechanism::ThreadData* poll_data() { return &_poll_data; }
600
601 void set_requires_cross_modify_fence(bool val) PRODUCT_RETURN NOT_PRODUCT({ _requires_cross_modify_fence = val; })
602
603 // Continuation support
604   ContinuationEntry* last_continuation() const { return _cont_entry; }
605   void set_cont_fastpath(intptr_t* x)   { _cont_fastpath = x; }
  // Keeps the numerically largest sp seen — per the _cont_fastpath field
  // comment, the sp of the oldest known interpreted/call_stub frame
  // (assumes a downward-growing stack — TODO confirm per platform).
606   void push_cont_fastpath(intptr_t* sp) { if (sp > _cont_fastpath) _cont_fastpath = sp; }
607   void set_cont_fastpath_thread_state(bool x) { _cont_fastpath_thread_state = (int)x; }
608   intptr_t* raw_cont_fastpath() const         { return _cont_fastpath; }
  // Fastpath is available only when no interpreted/call_stub frame has been
  // recorded (nullptr) AND the global thread state allows it
  // (_cont_fastpath_thread_state, see field comment).
609   bool cont_fastpath() const                  { return _cont_fastpath == nullptr && _cont_fastpath_thread_state != 0; }
610   bool cont_fastpath_thread_state() const     { return _cont_fastpath_thread_state != 0; }
611
612 void inc_held_monitor_count(intx i = 1, bool jni = false);
613 void dec_held_monitor_count(intx i = 1, bool jni = false);
614
615 intx held_monitor_count() { return _held_monitor_count; }
616 intx jni_monitor_count() { return _jni_monitor_count; }
617 void clear_jni_monitor_count() { _jni_monitor_count = 0; }
618
619 inline bool is_vthread_mounted() const;
620 inline const ContinuationEntry* vthread_continuation() const;
621
622 private:
623 DEBUG_ONLY(void verify_frame_info();)
624
625 // Support for thread handshake operations
626 HandshakeState _handshake;
627 public:
628 HandshakeState* handshake_state() { return &_handshake; }
629
630 // A JavaThread can always safely operate on it self and other threads
631 // can do it safely if they are the active handshaker.
632 bool is_handshake_safe_for(Thread* th) const {
633 return _handshake.active_handshaker() == th || this == th;
634 }
635
636 // Suspend/resume support for JavaThread
637 // higher-level suspension/resume logic called by the public APIs
638 bool java_suspend();
639 bool java_resume();
640 bool is_suspended() { return _handshake.is_suspended(); }
641
804 }
805 static ByteSize reserved_stack_activation_offset() {
806 return byte_offset_of(JavaThread, _stack_overflow_state._reserved_stack_activation);
807 }
808 static ByteSize shadow_zone_safe_limit() {
809 return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_safe_limit);
810 }
811 static ByteSize shadow_zone_growth_watermark() {
812 return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_growth_watermark);
813 }
814
815 static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags); }
816
817 static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
818 static ByteSize should_post_on_exceptions_flag_offset() {
819 return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
820 }
821 static ByteSize doing_unsafe_access_offset() { return byte_offset_of(JavaThread, _doing_unsafe_access); }
822 NOT_PRODUCT(static ByteSize requires_cross_modify_fence_offset() { return byte_offset_of(JavaThread, _requires_cross_modify_fence); })
823
824 static ByteSize cont_entry_offset() { return byte_offset_of(JavaThread, _cont_entry); }
825 static ByteSize cont_fastpath_offset() { return byte_offset_of(JavaThread, _cont_fastpath); }
826 static ByteSize held_monitor_count_offset() { return byte_offset_of(JavaThread, _held_monitor_count); }
827 static ByteSize jni_monitor_count_offset() { return byte_offset_of(JavaThread, _jni_monitor_count); }
828
829 #if INCLUDE_JVMTI
830 static ByteSize is_in_VTMS_transition_offset() { return byte_offset_of(JavaThread, _is_in_VTMS_transition); }
831 static ByteSize is_in_tmp_VTMS_transition_offset() { return byte_offset_of(JavaThread, _is_in_tmp_VTMS_transition); }
832 static ByteSize is_disable_suspend_offset() { return byte_offset_of(JavaThread, _is_disable_suspend); }
833 #endif
834
835 // Returns the jni environment for this thread
836 JNIEnv* jni_environment() { return &_jni_environment; }
837
838 // Returns the current thread as indicated by the given JNIEnv.
839 // We don't assert it is Thread::current here as that is done at the
840 // external JNI entry points where the JNIEnv is passed into the VM.
841 static JavaThread* thread_from_jni_environment(JNIEnv* env) {
842 JavaThread* current = reinterpret_cast<JavaThread*>(((intptr_t)env - in_bytes(jni_environment_offset())));
843 // We can't normally get here in a thread that has completed its
844 // execution and so "is_terminated", except when the call is from
845 // AsyncGetCallTrace, which can be triggered by a signal at any point in
846 // a thread's lifecycle. A thread is also considered terminated if the VM
847 // has exited, so we have to check this and block in case this is a daemon
1225 public:
1226 UnlockFlagSaver(JavaThread* t) {
1227 _thread = t;
1228 _do_not_unlock = t->do_not_unlock_if_synchronized();
1229 t->set_do_not_unlock_if_synchronized(false);
1230 }
1231 ~UnlockFlagSaver() {
1232 _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
1233 }
1234 };
1235
// RAII helper: pushes a fresh JNI handle block onto the thread on
// construction and pops it again when the scope exits.
1236 class JNIHandleMark : public StackObj {
1237   JavaThread* _thread;
1238  public:
1239   JNIHandleMark(JavaThread* thread) : _thread(thread) {
1240     thread->push_jni_handle_block();
1241   }
1242   ~JNIHandleMark() { _thread->pop_jni_handle_block(); }
1243 };
1244
1245 #endif // SHARE_RUNTIME_JAVATHREAD_HPP
|
24 */
25
26 #ifndef SHARE_RUNTIME_JAVATHREAD_HPP
27 #define SHARE_RUNTIME_JAVATHREAD_HPP
28
29 #include "jni.h"
30 #include "memory/allocation.hpp"
31 #include "oops/oop.hpp"
32 #include "oops/oopHandle.hpp"
33 #include "runtime/frame.hpp"
34 #include "runtime/globals.hpp"
35 #include "runtime/handshake.hpp"
36 #include "runtime/javaFrameAnchor.hpp"
37 #include "runtime/lockStack.hpp"
38 #include "runtime/park.hpp"
39 #include "runtime/safepointMechanism.hpp"
40 #include "runtime/stackWatermarkSet.hpp"
41 #include "runtime/stackOverflow.hpp"
42 #include "runtime/thread.hpp"
43 #include "runtime/threadHeapSampler.hpp"
44 #include "runtime/threadIdentifier.hpp"
45 #include "runtime/threadStatisticalInfo.hpp"
46 #include "utilities/exceptions.hpp"
47 #include "utilities/globalDefinitions.hpp"
48 #include "utilities/macros.hpp"
49 #if INCLUDE_JFR
50 #include "jfr/support/jfrThreadExtension.hpp"
51 #endif
52
53 class AsyncExceptionHandshake;
54 class ContinuationEntry;
55 class DeoptResourceMark;
56 class JNIHandleBlock;
57 class JVMCIRuntime;
58
59 class JvmtiDeferredUpdates;
60 class JvmtiSampledObjectAllocEventCollector;
61 class JvmtiThreadState;
62
63 class Metadata;
64 class OopHandleList;
141
142 // Used to pass back results to the interpreter or generated code running Java code.
143 oop _vm_result; // oop result is GC-preserved
144 Metadata* _vm_result_2; // non-oop result
145
146 // See ReduceInitialCardMarks: this holds the precise space interval of
147 // the most recent slow path allocation for which compiled code has
148 // elided card-marks for performance along the fast-path.
149 MemRegion _deferred_card_mark;
150
151 ObjectMonitor* volatile _current_pending_monitor; // ObjectMonitor this thread is waiting to lock
152 bool _current_pending_monitor_is_from_java; // locking is from Java code
153 ObjectMonitor* volatile _current_waiting_monitor; // ObjectMonitor on which this thread called Object.wait()
154
155 // Active_handles points to a block of handles
156 JNIHandleBlock* _active_handles;
157
158 // One-element thread local free list
159 JNIHandleBlock* _free_handle_block;
160
161   // ID used as owner for inflated monitors. Same as the tid field of the current
162   // _vthread object, except during creation of the primordial and JNI attached
163   // thread cases where this field can have a temporary value.
164 int64_t _lock_id;
165
166 public:
  // Set while this thread is entering a monitor; managed RAII-style by
  // ThreadOnMonitorEnter (declared near the end of this file).
167   bool _on_monitorenter;
168
169   bool is_on_monitorenter() { return _on_monitorenter; }
170   void set_on_monitorenter(bool val) { _on_monitorenter = val; }
171
  // Sets the owner ID used for inflated monitors. The assert confines tid to
  // the range of IDs handed out so far by ThreadIdentifier.
172   void set_lock_id(int64_t tid) {
173     assert(tid >= ThreadIdentifier::initial() && tid < ThreadIdentifier::current(), "invalid tid");
174     _lock_id = tid;
175   }
176   int64_t lock_id() const { return _lock_id; }
177
178 // For tracking the heavyweight monitor the thread is pending on.
179 ObjectMonitor* current_pending_monitor() {
180 // Use Atomic::load() to prevent data race between concurrent modification and
181 // concurrent readers, e.g. ThreadService::get_current_contended_monitor().
182 // Especially, reloading pointer from thread after null check must be prevented.
183 return Atomic::load(&_current_pending_monitor);
184 }
185 void set_current_pending_monitor(ObjectMonitor* monitor) {
186 Atomic::store(&_current_pending_monitor, monitor);
187 }
188 void set_current_pending_monitor_is_from_java(bool from_java) {
189 _current_pending_monitor_is_from_java = from_java;
190 }
191 bool current_pending_monitor_is_from_java() {
192 return _current_pending_monitor_is_from_java;
193 }
194 ObjectMonitor* current_waiting_monitor() {
195 // See the comment in current_pending_monitor() above.
196 return Atomic::load(&_current_waiting_monitor);
197 }
467 int _depth_first_number;
468
469 // JVMTI PopFrame support
470 // This is set to popframe_pending to signal that top Java frame should be popped immediately
471 int _popframe_condition;
472
473 // If reallocation of scalar replaced objects fails, we throw OOM
474 // and during exception propagation, pop the top
475 // _frames_to_pop_failed_realloc frames, the ones that reference
476 // failed reallocations.
477 int _frames_to_pop_failed_realloc;
478
479 ContinuationEntry* _cont_entry;
480 intptr_t* _cont_fastpath; // the sp of the oldest known interpreted/call_stub frame inside the
481 // continuation that we know about
482 int _cont_fastpath_thread_state; // whether global thread state allows continuation fastpath (JVMTI)
483
484 // It's signed for error detection.
485 intx _held_monitor_count; // used by continuations for fast lock detection
486 intx _jni_monitor_count;
487 bool _preempting;
488 bool _preemption_cancelled;
489 bool _jvmti_unmount_event_pending;
490 address _preempt_alternate_return; // used when preempting a thread
491 address _preempt_alternate_return_sp;
492
493 #ifdef ASSERT
494 intx _obj_locker_count;
495
496 public:
497 intx obj_locker_count() { return _obj_locker_count; }
498 void inc_obj_locker_count() {
499 assert(_obj_locker_count >= 0, "Must always be greater than 0: " INTX_FORMAT, _obj_locker_count);
500 _obj_locker_count++;
501 }
502 void dec_obj_locker_count() {
503 _obj_locker_count--;
504 assert(_obj_locker_count >= 0, "Must always be greater than 0: " INTX_FORMAT, _obj_locker_count);
505 }
506 #endif // ASSERT
507
508 private:
509
510 friend class VMThread;
511 friend class ThreadWaitTransition;
512 friend class VM_Exit;
513
514 // Stack watermark barriers.
515 StackWatermarks _stack_watermarks;
516
517 public:
518 inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; }
519
520 public:
521 // Constructor
522 JavaThread(); // delegating constructor
523 JavaThread(bool is_attaching_via_jni); // for main thread and JNI attached threads
524 JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
525 ~JavaThread();
526
631 void set_doing_unsafe_access(bool val) { _doing_unsafe_access = val; }
632
633 bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; }
634 void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
635
636 SafepointMechanism::ThreadData* poll_data() { return &_poll_data; }
637
638 void set_requires_cross_modify_fence(bool val) PRODUCT_RETURN NOT_PRODUCT({ _requires_cross_modify_fence = val; })
639
640 // Continuation support
641 ContinuationEntry* last_continuation() const { return _cont_entry; }
642 void set_cont_fastpath(intptr_t* x) { _cont_fastpath = x; }
643 void push_cont_fastpath(intptr_t* sp) { if (sp > _cont_fastpath) _cont_fastpath = sp; }
644 void set_cont_fastpath_thread_state(bool x) { _cont_fastpath_thread_state = (int)x; }
645 intptr_t* raw_cont_fastpath() const { return _cont_fastpath; }
646 bool cont_fastpath() const { return _cont_fastpath == nullptr && _cont_fastpath_thread_state != 0; }
647 bool cont_fastpath_thread_state() const { return _cont_fastpath_thread_state != 0; }
648
649 void inc_held_monitor_count(intx i = 1, bool jni = false);
650 void dec_held_monitor_count(intx i = 1, bool jni = false);
651 intx held_monitor_count() { return _held_monitor_count; }
652
653 intx jni_monitor_count() { return _jni_monitor_count; }
654 void clear_jni_monitor_count() { _jni_monitor_count = 0; }
655
656 inline bool is_vthread_mounted() const;
657 inline const ContinuationEntry* vthread_continuation() const;
658
  // Virtual-thread preemption support; backing fields are declared above
  // (_preempting, _preemption_cancelled, _jvmti_unmount_event_pending,
  // _preempt_alternate_return / _preempt_alternate_return_sp).
659   bool preempting()           { return _preempting; }
660   void set_preempting(bool b) { _preempting = b; }
661
662   bool preemption_cancelled()           { return _preemption_cancelled; }
663   void set_preemption_cancelled(bool val) { _preemption_cancelled = val; }
664
  // Presumably a JVMTI unmount event still needs posting when set — confirm
  // against the JVMTI event-posting code.
665   bool jvmti_unmount_event_pending()           { return _jvmti_unmount_event_pending; }
666   void set_jvmti_unmount_event_pending(bool val) { _jvmti_unmount_event_pending = val; }
667
  // Alternate return address/sp installed when preempting a thread
  // (per the field comment above).
668   void set_preempt_alternate_return(address val) { _preempt_alternate_return = val; }
669   void set_preempt_alternate_return_sp(address val) { _preempt_alternate_return_sp = val; }
670 private:
671 DEBUG_ONLY(void verify_frame_info();)
672
673 // Support for thread handshake operations
674 HandshakeState _handshake;
675 public:
676 HandshakeState* handshake_state() { return &_handshake; }
677
678 // A JavaThread can always safely operate on it self and other threads
679 // can do it safely if they are the active handshaker.
680 bool is_handshake_safe_for(Thread* th) const {
681 return _handshake.active_handshaker() == th || this == th;
682 }
683
684 // Suspend/resume support for JavaThread
685 // higher-level suspension/resume logic called by the public APIs
686 bool java_suspend();
687 bool java_resume();
688 bool is_suspended() { return _handshake.is_suspended(); }
689
852 }
853 static ByteSize reserved_stack_activation_offset() {
854 return byte_offset_of(JavaThread, _stack_overflow_state._reserved_stack_activation);
855 }
856 static ByteSize shadow_zone_safe_limit() {
857 return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_safe_limit);
858 }
859 static ByteSize shadow_zone_growth_watermark() {
860 return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_growth_watermark);
861 }
862
863 static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags); }
864
865 static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
866 static ByteSize should_post_on_exceptions_flag_offset() {
867 return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
868 }
869 static ByteSize doing_unsafe_access_offset() { return byte_offset_of(JavaThread, _doing_unsafe_access); }
870 NOT_PRODUCT(static ByteSize requires_cross_modify_fence_offset() { return byte_offset_of(JavaThread, _requires_cross_modify_fence); })
871
872 static ByteSize lock_id_offset() { return byte_offset_of(JavaThread, _lock_id); }
873
874 static ByteSize cont_entry_offset() { return byte_offset_of(JavaThread, _cont_entry); }
875 static ByteSize cont_fastpath_offset() { return byte_offset_of(JavaThread, _cont_fastpath); }
876 static ByteSize held_monitor_count_offset() { return byte_offset_of(JavaThread, _held_monitor_count); }
877 static ByteSize jni_monitor_count_offset() { return byte_offset_of(JavaThread, _jni_monitor_count); }
878 static ByteSize preempting_offset() { return byte_offset_of(JavaThread, _preempting); }
879 static ByteSize preemption_cancelled_offset() { return byte_offset_of(JavaThread, _preemption_cancelled); }
880 static ByteSize preempt_alternate_return_offset() { return byte_offset_of(JavaThread, _preempt_alternate_return); }
881
882 #if INCLUDE_JVMTI
883 static ByteSize is_in_VTMS_transition_offset() { return byte_offset_of(JavaThread, _is_in_VTMS_transition); }
884 static ByteSize is_in_tmp_VTMS_transition_offset() { return byte_offset_of(JavaThread, _is_in_tmp_VTMS_transition); }
885 static ByteSize is_disable_suspend_offset() { return byte_offset_of(JavaThread, _is_disable_suspend); }
886 #endif
887
888 // Returns the jni environment for this thread
889 JNIEnv* jni_environment() { return &_jni_environment; }
890
891 // Returns the current thread as indicated by the given JNIEnv.
892 // We don't assert it is Thread::current here as that is done at the
893 // external JNI entry points where the JNIEnv is passed into the VM.
894 static JavaThread* thread_from_jni_environment(JNIEnv* env) {
895 JavaThread* current = reinterpret_cast<JavaThread*>(((intptr_t)env - in_bytes(jni_environment_offset())));
896 // We can't normally get here in a thread that has completed its
897 // execution and so "is_terminated", except when the call is from
898 // AsyncGetCallTrace, which can be triggered by a signal at any point in
899 // a thread's lifecycle. A thread is also considered terminated if the VM
900 // has exited, so we have to check this and block in case this is a daemon
1278 public:
1279 UnlockFlagSaver(JavaThread* t) {
1280 _thread = t;
1281 _do_not_unlock = t->do_not_unlock_if_synchronized();
1282 t->set_do_not_unlock_if_synchronized(false);
1283 }
1284 ~UnlockFlagSaver() {
1285 _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
1286 }
1287 };
1288
1289 class JNIHandleMark : public StackObj {
1290 JavaThread* _thread;
1291 public:
1292 JNIHandleMark(JavaThread* thread) : _thread(thread) {
1293 thread->push_jni_handle_block();
1294 }
1295 ~JNIHandleMark() { _thread->pop_jni_handle_block(); }
1296 };
1297
// RAII helper that flags the thread as being inside a monitorenter for the
// enclosing scope (toggles _on_monitorenter via set_on_monitorenter()).
// NOTE(review): unlike JNIHandleMark above this does not derive from
// StackObj — confirm whether that is intentional.
1298 class ThreadOnMonitorEnter {
1299   JavaThread* _thread;
1300  public:
1301   ThreadOnMonitorEnter(JavaThread* thread) : _thread(thread) {
1302     _thread->set_on_monitorenter(true);
1303   }
1304   ~ThreadOnMonitorEnter() { _thread->set_on_monitorenter(false); }
1305 };
1306
1307 #endif // SHARE_RUNTIME_JAVATHREAD_HPP
|