45 #include "jfr/support/jfrThreadExtension.hpp"
46 #endif
47
48 class CompilerThread;
49 class HandleArea;
50 class HandleMark;
51 class JvmtiRawMonitor;
52 class NMethodClosure;
53 class Metadata;
54 class OopClosure;
55 class OSThread;
56 class ParkEvent;
57 class ResourceArea;
58 class SafeThreadsListPtr;
59 class ThreadClosure;
60 class ThreadsList;
61 class ThreadsSMRSupport;
62 class VMErrorCallback;
63
64
65 DEBUG_ONLY(class ResourceMark;)
66
67 class WorkerThread;
68
69 class JavaThread;
70
71 // Class hierarchy
72 // - Thread
73 // - JavaThread
74 // - various subclasses eg CompilerThread, ServiceThread
75 // - NonJavaThread
76 // - NamedThread
77 // - VMThread
78 // - ConcurrentGCThread
79 // - WorkerThread
80 // - WatcherThread
81 // - JfrThreadSampler
82 // - JfrCPUSamplerThread
83 // - LogAsyncWriter
84 //
  // Asserts that this thread's cached W^X state matches 'expected'.
  // (Guarded by MACOS_AARCH64 — see the #endif below.)
  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
  // Returns the thread's currently cached W^X mode.
  WXMode get_wx_state() {
    return _wx_state;
  }
620 #endif // MACOS_AARCH64
621
 private:
  // True while this thread is executing inside AsyncGetCallTrace (ASGCT).
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  // Returns true if the current thread is inside AsyncGetCallTrace.
  // Uses current_or_null_safe(), which — per its name — is presumably the
  // variant safe to call from a signal context; confirm at its definition.
  static bool current_in_asgct() {
    Thread *cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }

 private:
  // NOTE(review): presumably the callbacks run during VM error reporting —
  // confirm against VMErrorCallback's usage (declared above).
  VMErrorCallback* _vm_error_callbacks;
634 };
635
636 class ThreadInAsgct {
637 private:
638 Thread* _thread;
639 bool _saved_in_asgct;
640 public:
641 ThreadInAsgct(Thread* thread) : _thread(thread) {
642 assert(thread != nullptr, "invariant");
643 // Allow AsyncGetCallTrace to be reentrant - save the previous state.
644 _saved_in_asgct = thread->in_asgct();
645 thread->set_in_asgct(true);
646 }
647 ~ThreadInAsgct() {
648 assert(_thread->in_asgct(), "invariant");
649 _thread->set_in_asgct(_saved_in_asgct);
650 }
651 };
652
653 // Inline implementation of Thread::current()
|
45 #include "jfr/support/jfrThreadExtension.hpp"
46 #endif
47
48 class CompilerThread;
49 class HandleArea;
50 class HandleMark;
51 class JvmtiRawMonitor;
52 class NMethodClosure;
53 class Metadata;
54 class OopClosure;
55 class OSThread;
56 class ParkEvent;
57 class ResourceArea;
58 class SafeThreadsListPtr;
59 class ThreadClosure;
60 class ThreadsList;
61 class ThreadsSMRSupport;
62 class VMErrorCallback;
63
64
65 class PerfTraceTime;
66
67 DEBUG_ONLY(class ResourceMark;)
68
69 class WorkerThread;
70
71 class JavaThread;
72
73 // Class hierarchy
74 // - Thread
75 // - JavaThread
76 // - various subclasses eg CompilerThread, ServiceThread
77 // - NonJavaThread
78 // - NamedThread
79 // - VMThread
80 // - ConcurrentGCThread
81 // - WorkerThread
82 // - WatcherThread
83 // - JfrThreadSampler
84 // - JfrCPUSamplerThread
85 // - LogAsyncWriter
86 //
  // Asserts that this thread's cached W^X state matches 'expected'.
  // (Guarded by MACOS_AARCH64 — see the #endif below.)
  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
  // Returns the thread's currently cached W^X mode.
  WXMode get_wx_state() {
    return _wx_state;
  }
622 #endif // MACOS_AARCH64
623
 private:
  // True while this thread is executing inside AsyncGetCallTrace (ASGCT).
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  // Returns true if the current thread is inside AsyncGetCallTrace.
  // Uses current_or_null_safe(), which — per its name — is presumably the
  // variant safe to call from a signal context; confirm at its definition.
  static bool current_in_asgct() {
    Thread *cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }

 private:
  // NOTE(review): presumably the callbacks run during VM error reporting —
  // confirm against VMErrorCallback's usage (declared above).
  VMErrorCallback* _vm_error_callbacks;
636
  // Per-thread flags enabling the various runtime-profiling categories.
  bool _profile_vm_locks;
  bool _profile_vm_calls;
  bool _profile_vm_ops;
  bool _profile_rt_calls;
  bool _profile_upcalls;

  // Bytecode counters. NOTE(review): names suggest cumulative bytecode
  // counts, total vs. <clinit>-attributed — confirm at the update sites.
  jlong _all_bc_counter_value;
  jlong _clinit_bc_counter_value;

  // Timer for the runtime call currently being profiled on this thread,
  // or null when no profiled call is in progress.
  PerfTraceTime* _current_rt_call_timer;
 public:
  bool profile_vm_locks() const { return _profile_vm_locks; }
  void set_profile_vm_locks(bool v) { _profile_vm_locks = v; }

  bool profile_vm_calls() const { return _profile_vm_calls; }
  void set_profile_vm_calls(bool v) { _profile_vm_calls = v; }

  bool profile_vm_ops() const { return _profile_vm_ops; }
  void set_profile_vm_ops(bool v) { _profile_vm_ops = v; }

  bool profile_rt_calls() const { return _profile_rt_calls; }
  void set_profile_rt_calls(bool v) { _profile_rt_calls = v; }

  bool profile_upcalls() const { return _profile_upcalls; }
  void set_profile_upcalls(bool v) { _profile_upcalls = v; }

  PerfTraceTime* current_rt_call_timer() const { return _current_rt_call_timer; }
  void set_current_rt_call_timer(PerfTraceTime* c) { _current_rt_call_timer = c; }
  bool has_current_rt_call_timer() const { return _current_rt_call_timer != nullptr; }

  // True when a runtime call should be profiled now: the global flag and the
  // per-thread flag are both on, and no other profiled call is in progress.
  bool do_profile_rt_call() const {
    return ProfileRuntimeCalls && profile_rt_calls() && !has_current_rt_call_timer();
  }

  jlong bc_counter_value() const { return _all_bc_counter_value; }

  jlong clinit_bc_counter_value() const { return _clinit_bc_counter_value; }

  void inc_clinit_bc_counter_value(jlong l) { _clinit_bc_counter_value += l; }

  // Field offset of _all_bc_counter_value within Thread, presumably for
  // direct access from generated code — confirm at the uses.
  static ByteSize bc_counter_offset() { return byte_offset_of(Thread, _all_bc_counter_value); }
678 };
679
680 class ProfileVMCallContext : StackObj {
681 private:
682 Thread* _thread;
683 bool _enabled;
684 PerfTraceTime* _timer;
685
686 static int _perf_nested_runtime_calls_count;
687
688 static const char* name(PerfTraceTime* t);
689 static void notify_nested_rt_call(PerfTraceTime* current, PerfTraceTime* inner_timer);
690 public:
691 inline ProfileVMCallContext(Thread* current, PerfTraceTime* timer, bool is_on)
692 : _thread(current), _enabled(is_on), _timer(timer) {
693 if (_enabled) {
694 assert(timer != nullptr, "");
695 assert(_thread->current_rt_call_timer() == nullptr, "%s", name(_thread->current_rt_call_timer()));
696 _thread->set_current_rt_call_timer(timer);
697 } else if (current->profile_rt_calls()) {
698 notify_nested_rt_call(current->current_rt_call_timer(), timer);
699 }
700 }
701
702 inline ~ProfileVMCallContext() {
703 if (_enabled) {
704 assert(_timer == _thread->current_rt_call_timer(),
705 "%s vs %s", name(_timer), name(_thread->current_rt_call_timer()));
706 _thread->set_current_rt_call_timer(nullptr);
707 }
708 }
709
710 static int nested_runtime_calls_count() { return _perf_nested_runtime_calls_count; };
711 };
712
713 class PauseRuntimeCallProfiling : public StackObj {
714 protected:
715 Thread* _thread;
716 bool _enabled;
717 PerfTraceTime* _timer;
718
719 public:
720 inline PauseRuntimeCallProfiling(Thread* current, bool is_on)
721 : _thread(current), _enabled(is_on), _timer(nullptr) {
722 if (_enabled) {
723 _timer = _thread->current_rt_call_timer();
724 _thread->set_current_rt_call_timer(nullptr);
725 }
726 }
727
728 inline ~PauseRuntimeCallProfiling () {
729 if (_enabled) {
730 guarantee(_thread->current_rt_call_timer() == nullptr, "");
731 _thread->set_current_rt_call_timer(_timer); // restore
732 }
733 }
734 };
735
736 class ThreadInAsgct {
737 private:
738 Thread* _thread;
739 bool _saved_in_asgct;
740 public:
741 ThreadInAsgct(Thread* thread) : _thread(thread) {
742 assert(thread != nullptr, "invariant");
743 // Allow AsyncGetCallTrace to be reentrant - save the previous state.
744 _saved_in_asgct = thread->in_asgct();
745 thread->set_in_asgct(true);
746 }
747 ~ThreadInAsgct() {
748 assert(_thread->in_asgct(), "invariant");
749 _thread->set_in_asgct(_saved_in_asgct);
750 }
751 };
752
753 // Inline implementation of Thread::current()
|