44 #include "jfr/support/jfrThreadExtension.hpp"
45 #endif
46
47 class CompilerThread;
48 class HandleArea;
49 class HandleMark;
50 class JvmtiRawMonitor;
51 class NMethodClosure;
52 class Metadata;
53 class OopClosure;
54 class OSThread;
55 class ParkEvent;
56 class ResourceArea;
57 class SafeThreadsListPtr;
58 class ThreadClosure;
59 class ThreadsList;
60 class ThreadsSMRSupport;
61 class VMErrorCallback;
62
63
64 DEBUG_ONLY(class ResourceMark;)
65
66 class WorkerThread;
67
68 class JavaThread;
69
70 // Class hierarchy
71 // - Thread
72 // - JavaThread
73 // - various subclasses eg CompilerThread, ServiceThread
74 // - NonJavaThread
75 // - NamedThread
76 // - VMThread
77 // - ConcurrentGCThread
78 // - WorkerThread
79 // - WatcherThread
80 // - JfrThreadSampler
81 // - LogAsyncWriter
82 //
83 // All Thread subclasses must be either JavaThread or NonJavaThread.
// Apple Silicon (macOS/AArch64) W^X support — these members exist only under
// the __APPLE__ && AARCH64 guard closed by the #endif below.
// Initializes this thread's W^X bookkeeping; declaration only here.
616 void init_wx();
// Switches the thread's W^X mode; returns a WXMode (presumably the previous
// state — declaration only here, TODO confirm at the definition).
617 WXMode enable_wx(WXMode new_state);
618
// Debug-only check that the cached _wx_state matches 'expected'.
619 void assert_wx_state(WXMode expected) {
620 assert(_wx_state == expected, "wrong state");
621 }
622 #endif // __APPLE__ && AARCH64
623
// --- AsyncGetCallTrace (asgct) support ---
624 private:
// True while this thread is executing inside AsyncGetCallTrace;
// maintained by the ThreadInAsgct scope guard below.
625 bool _in_asgct = false;
626 public:
627 bool in_asgct() const { return _in_asgct; }
628 void set_in_asgct(bool value) { _in_asgct = value; }
// Returns true iff a current thread exists and it is inside asgct.
// The current_or_null_safe() variant tolerates the no-attached-thread case.
629 static bool current_in_asgct() {
630 Thread *cur = Thread::current_or_null_safe();
631 return cur != nullptr && cur->in_asgct();
632 }
633
634 private:
// Head of the VM-error callback chain; VMErrorCallback is only
// forward-declared above, so its semantics are defined elsewhere.
635 VMErrorCallback* _vm_error_callbacks;
636 };
637
638 class ThreadInAsgct {
639 private:
640 Thread* _thread;
641 bool _saved_in_asgct;
642 public:
643 ThreadInAsgct(Thread* thread) : _thread(thread) {
644 assert(thread != nullptr, "invariant");
645 // Allow AsyncGetCallTrace to be reentrant - save the previous state.
646 _saved_in_asgct = thread->in_asgct();
647 thread->set_in_asgct(true);
648 }
649 ~ThreadInAsgct() {
650 assert(_thread->in_asgct(), "invariant");
651 _thread->set_in_asgct(_saved_in_asgct);
652 }
653 };
654
655 // Inline implementation of Thread::current()
|
44 #include "jfr/support/jfrThreadExtension.hpp"
45 #endif
46
47 class CompilerThread;
48 class HandleArea;
49 class HandleMark;
50 class JvmtiRawMonitor;
51 class NMethodClosure;
52 class Metadata;
53 class OopClosure;
54 class OSThread;
55 class ParkEvent;
56 class ResourceArea;
57 class SafeThreadsListPtr;
58 class ThreadClosure;
59 class ThreadsList;
60 class ThreadsSMRSupport;
61 class VMErrorCallback;
62
63
64 class PerfTraceTime;
65
66 DEBUG_ONLY(class ResourceMark;)
67
68 class WorkerThread;
69
70 class JavaThread;
71
72 // Class hierarchy
73 // - Thread
74 // - JavaThread
75 // - various subclasses eg CompilerThread, ServiceThread
76 // - NonJavaThread
77 // - NamedThread
78 // - VMThread
79 // - ConcurrentGCThread
80 // - WorkerThread
81 // - WatcherThread
82 // - JfrThreadSampler
83 // - LogAsyncWriter
84 //
85 // All Thread subclasses must be either JavaThread or NonJavaThread.
// Apple Silicon (macOS/AArch64) W^X support — these members exist only under
// the __APPLE__ && AARCH64 guard closed by the #endif below.
// Initializes this thread's W^X bookkeeping; declaration only here.
618 void init_wx();
// Switches the thread's W^X mode; returns a WXMode (presumably the previous
// state — declaration only here, TODO confirm at the definition).
619 WXMode enable_wx(WXMode new_state);
620
// Debug-only check that the cached _wx_state matches 'expected'.
621 void assert_wx_state(WXMode expected) {
622 assert(_wx_state == expected, "wrong state");
623 }
624 #endif // __APPLE__ && AARCH64
625
// --- AsyncGetCallTrace (asgct) support ---
626 private:
// True while this thread is executing inside AsyncGetCallTrace;
// maintained by the ThreadInAsgct scope guard below.
627 bool _in_asgct = false;
628 public:
629 bool in_asgct() const { return _in_asgct; }
630 void set_in_asgct(bool value) { _in_asgct = value; }
// Returns true iff a current thread exists and it is inside asgct.
// The current_or_null_safe() variant tolerates the no-attached-thread case.
631 static bool current_in_asgct() {
632 Thread *cur = Thread::current_or_null_safe();
633 return cur != nullptr && cur->in_asgct();
634 }
635
636 private:
// Head of the VM-error callback chain (type forward-declared above).
637 VMErrorCallback* _vm_error_callbacks;
638
// --- VM profiling support ---
// Per-thread flags selecting which categories of VM activity are profiled.
639 bool _profile_vm_locks;
640 bool _profile_vm_calls;
641 bool _profile_vm_ops;
642 bool _profile_rt_calls;
643 bool _profile_upcalls;
644
// Bytecode counters: total, and the portion attributed to <clinit> execution
// (names suggest this; counters are only incremented/read here, not defined).
645 jlong _all_bc_counter_value;
646 jlong _clinit_bc_counter_value;
647
// Timer for the runtime call currently being profiled on this thread,
// or nullptr when none is active (see ProfileVMCallContext below).
648 PerfTraceTime* _current_rt_call_timer;
649 public:
// Plain accessors for the profiling flags above.
650 bool profile_vm_locks() const { return _profile_vm_locks; }
651 void set_profile_vm_locks(bool v) { _profile_vm_locks = v; }
652
653 bool profile_vm_calls() const { return _profile_vm_calls; }
654 void set_profile_vm_calls(bool v) { _profile_vm_calls = v; }
655
656 bool profile_vm_ops() const { return _profile_vm_ops; }
657 void set_profile_vm_ops(bool v) { _profile_vm_ops = v; }
658
659 bool profile_rt_calls() const { return _profile_rt_calls; }
660 void set_profile_rt_calls(bool v) { _profile_rt_calls = v; }
661
662 bool profile_upcalls() const { return _profile_upcalls; }
663 void set_profile_upcalls(bool v) { _profile_upcalls = v; }
664
665 PerfTraceTime* current_rt_call_timer() const { return _current_rt_call_timer; }
666 void set_current_rt_call_timer(PerfTraceTime* c) { _current_rt_call_timer = c; }
667 bool has_current_rt_call_timer() const { return _current_rt_call_timer != nullptr; }
668
// Profile this runtime call only if the global switch and the per-thread flag
// are on and no other runtime-call timer is already active (no nesting).
669 bool do_profile_rt_call() const {
670 return ProfileRuntimeCalls && profile_rt_calls() && !has_current_rt_call_timer();
671 }
672
673 jlong bc_counter_value() const { return _all_bc_counter_value; }
674
675 jlong clinit_bc_counter_value() const { return _clinit_bc_counter_value; }
676
// Adds 'l' to the <clinit> bytecode counter.
677 void inc_clinit_bc_counter_value(jlong l) { _clinit_bc_counter_value += l; }
678
// Field offset of the total counter — presumably for code that accesses the
// field directly (e.g. generated code); TODO confirm against callers.
679 static ByteSize bc_counter_offset() { return byte_offset_of(Thread, _all_bc_counter_value); }
680 };
681
682 class ProfileVMCallContext : StackObj {
683 private:
684 Thread* _thread;
685 bool _enabled;
686 PerfTraceTime* _timer;
687
688 static int _perf_nested_runtime_calls_count;
689
690 static const char* name(PerfTraceTime* t);
691 static void notify_nested_rt_call(PerfTraceTime* current, PerfTraceTime* inner_timer);
692 public:
693 inline ProfileVMCallContext(Thread* current, PerfTraceTime* timer, bool is_on)
694 : _thread(current), _enabled(is_on), _timer(timer) {
695 if (_enabled) {
696 assert(timer != nullptr, "");
697 assert(_thread->current_rt_call_timer() == nullptr, "%s", name(_thread->current_rt_call_timer()));
698 _thread->set_current_rt_call_timer(timer);
699 } else if (current->profile_rt_calls()) {
700 notify_nested_rt_call(current->current_rt_call_timer(), timer);
701 }
702 }
703
704 inline ~ProfileVMCallContext() {
705 if (_enabled) {
706 assert(_timer == _thread->current_rt_call_timer(),
707 "%s vs %s", name(_timer), name(_thread->current_rt_call_timer()));
708 _thread->set_current_rt_call_timer(nullptr);
709 }
710 }
711
712 static int nested_runtime_calls_count() { return _perf_nested_runtime_calls_count; };
713 };
714
715 class PauseRuntimeCallProfiling : public StackObj {
716 protected:
717 Thread* _thread;
718 bool _enabled;
719 PerfTraceTime* _timer;
720
721 public:
722 inline PauseRuntimeCallProfiling(Thread* current, bool is_on)
723 : _thread(current), _enabled(is_on), _timer(nullptr) {
724 if (_enabled) {
725 _timer = _thread->current_rt_call_timer();
726 _thread->set_current_rt_call_timer(nullptr);
727 }
728 }
729
730 inline ~PauseRuntimeCallProfiling () {
731 if (_enabled) {
732 guarantee(_thread->current_rt_call_timer() == nullptr, "");
733 _thread->set_current_rt_call_timer(_timer); // restore
734 }
735 }
736 };
737
738 class ThreadInAsgct {
739 private:
740 Thread* _thread;
741 bool _saved_in_asgct;
742 public:
743 ThreadInAsgct(Thread* thread) : _thread(thread) {
744 assert(thread != nullptr, "invariant");
745 // Allow AsyncGetCallTrace to be reentrant - save the previous state.
746 _saved_in_asgct = thread->in_asgct();
747 thread->set_in_asgct(true);
748 }
749 ~ThreadInAsgct() {
750 assert(_thread->in_asgct(), "invariant");
751 _thread->set_in_asgct(_saved_in_asgct);
752 }
753 };
754
755 // Inline implementation of Thread::current()
|