44 #include "jfr/support/jfrThreadExtension.hpp"
45 #endif
46
47 class CompilerThread;
48 class HandleArea;
49 class HandleMark;
50 class JvmtiRawMonitor;
51 class NMethodClosure;
52 class Metadata;
53 class OopClosure;
54 class OSThread;
55 class ParkEvent;
56 class ResourceArea;
57 class SafeThreadsListPtr;
58 class ThreadClosure;
59 class ThreadsList;
60 class ThreadsSMRSupport;
61 class VMErrorCallback;
62
63
64 DEBUG_ONLY(class ResourceMark;)
65
66 class WorkerThread;
67
68 class JavaThread;
69
70 // Class hierarchy
71 // - Thread
72 // - JavaThread
73 //       - various subclasses, e.g. CompilerThread, ServiceThread
74 // - NonJavaThread
75 // - NamedThread
76 // - VMThread
77 // - ConcurrentGCThread
78 // - WorkerThread
79 // - WatcherThread
80 // - JfrThreadSampler
81 // - LogAsyncWriter
82 //
83 // All Thread subclasses must be either JavaThread or NonJavaThread.
  // Per-thread W^X (write-xor-execute) mode support (macOS/AArch64 only,
  // per the #endif guard below). enable_wx() switches the mode and returns
  // the previous one so callers can restore it.
  void init_wx();
  WXMode enable_wx(WXMode new_state);

  // Debug check that this thread's cached W^X state matches 'expected'.
  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
#endif // __APPLE__ && AARCH64

 private:
  // Set while this thread is executing inside AsyncGetCallTrace
  // (see the ThreadInAsgct RAII guard below).
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  // True if there is a current thread and it is inside AsyncGetCallTrace.
  // Uses current_or_null_safe(), so the absence of a current thread yields
  // false instead of asserting.
  static bool current_in_asgct() {
    Thread *cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }

 private:
  // Callbacks invoked during VM error reporting — presumably a chain
  // managed by VMErrorCallback; confirm against its definition.
  VMErrorCallback* _vm_error_callbacks;
};
638
639 class ThreadInAsgct {
640 private:
641 Thread* _thread;
642 bool _saved_in_asgct;
643 public:
644 ThreadInAsgct(Thread* thread) : _thread(thread) {
645 assert(thread != nullptr, "invariant");
646 // Allow AsyncGetCallTrace to be reentrant - save the previous state.
647 _saved_in_asgct = thread->in_asgct();
648 thread->set_in_asgct(true);
649 }
650 ~ThreadInAsgct() {
651 assert(_thread->in_asgct(), "invariant");
652 _thread->set_in_asgct(_saved_in_asgct);
653 }
654 };
655
656 // Inline implementation of Thread::current()
|
44 #include "jfr/support/jfrThreadExtension.hpp"
45 #endif
46
47 class CompilerThread;
48 class HandleArea;
49 class HandleMark;
50 class JvmtiRawMonitor;
51 class NMethodClosure;
52 class Metadata;
53 class OopClosure;
54 class OSThread;
55 class ParkEvent;
56 class ResourceArea;
57 class SafeThreadsListPtr;
58 class ThreadClosure;
59 class ThreadsList;
60 class ThreadsSMRSupport;
61 class VMErrorCallback;
62
63
64 class PerfTraceTime;
65
66 DEBUG_ONLY(class ResourceMark;)
67
68 class WorkerThread;
69
70 class JavaThread;
71
72 // Class hierarchy
73 // - Thread
74 // - JavaThread
75 //       - various subclasses, e.g. CompilerThread, ServiceThread
76 // - NonJavaThread
77 // - NamedThread
78 // - VMThread
79 // - ConcurrentGCThread
80 // - WorkerThread
81 // - WatcherThread
82 // - JfrThreadSampler
83 // - LogAsyncWriter
84 //
85 // All Thread subclasses must be either JavaThread or NonJavaThread.
  // Per-thread W^X (write-xor-execute) mode support (macOS/AArch64 only,
  // per the #endif guard below). enable_wx() switches the mode and returns
  // the previous one so callers can restore it.
  void init_wx();
  WXMode enable_wx(WXMode new_state);

  // Debug check that this thread's cached W^X state matches 'expected'.
  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
#endif // __APPLE__ && AARCH64

 private:
  // Set while this thread is executing inside AsyncGetCallTrace
  // (see the ThreadInAsgct RAII guard below).
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  // True if there is a current thread and it is inside AsyncGetCallTrace.
  // Uses current_or_null_safe(), so the absence of a current thread yields
  // false instead of asserting.
  static bool current_in_asgct() {
    Thread *cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }

 private:
  // Callbacks invoked during VM error reporting — presumably a chain
  // managed by VMErrorCallback; confirm against its definition.
  VMErrorCallback* _vm_error_callbacks;

  // Per-thread flags selecting which VM activities are profiled for this
  // thread (locks, VM-to-Java calls, VM operations, runtime calls, upcalls).
  bool _profile_vm_locks;
  bool _profile_vm_calls;
  bool _profile_vm_ops;
  bool _profile_rt_calls;
  bool _profile_upcalls;

  // Bytecode execution counters: the total, and the portion attributed —
  // judging by the name — to class initializers; TODO confirm at update sites.
  jlong _all_bc_counter_value;
  jlong _clinit_bc_counter_value;

  // Timer of the runtime call currently being profiled on this thread,
  // or null when no runtime call is being timed.
  PerfTraceTime* _current_rt_call_timer;
 public:
  bool profile_vm_locks() const { return _profile_vm_locks; }
  void set_profile_vm_locks(bool v) { _profile_vm_locks = v; }

  bool profile_vm_calls() const { return _profile_vm_calls; }
  void set_profile_vm_calls(bool v) { _profile_vm_calls = v; }

  bool profile_vm_ops() const { return _profile_vm_ops; }
  void set_profile_vm_ops(bool v) { _profile_vm_ops = v; }

  bool profile_rt_calls() const { return _profile_rt_calls; }
  void set_profile_rt_calls(bool v) { _profile_rt_calls = v; }

  bool profile_upcalls() const { return _profile_upcalls; }
  void set_profile_upcalls(bool v) { _profile_upcalls = v; }

  PerfTraceTime* current_rt_call_timer() const { return _current_rt_call_timer; }
  void set_current_rt_call_timer(PerfTraceTime* c) { _current_rt_call_timer = c; }
  bool has_current_rt_call_timer() const { return _current_rt_call_timer != nullptr; }

  // Profile this runtime call only if profiling is globally enabled
  // (ProfileRuntimeCalls), enabled for this thread, and no runtime call
  // is already being timed (no nested timers).
  bool do_profile_rt_call() const {
    return ProfileRuntimeCalls && profile_rt_calls() && !has_current_rt_call_timer();
  }

  jlong bc_counter_value() const { return _all_bc_counter_value; }

  jlong clinit_bc_counter_value() const { return _clinit_bc_counter_value; }

  void inc_clinit_bc_counter_value(jlong l) { _clinit_bc_counter_value += l; }

  // Byte offset of the total bytecode counter within Thread — presumably
  // for direct access from generated code; confirm at call sites.
  static ByteSize bc_counter_offset() { return byte_offset_of(Thread, _all_bc_counter_value); }
};
682
683 class ProfileVMCallContext : StackObj {
684 private:
685 Thread* _thread;
686 bool _enabled;
687 PerfTraceTime* _timer;
688
689 static int _perf_nested_runtime_calls_count;
690
691 static const char* name(PerfTraceTime* t);
692 static void notify_nested_rt_call(PerfTraceTime* current, PerfTraceTime* inner_timer);
693 public:
694 inline ProfileVMCallContext(Thread* current, PerfTraceTime* timer, bool is_on)
695 : _thread(current), _enabled(is_on), _timer(timer) {
696 if (_enabled) {
697 assert(timer != nullptr, "");
698 assert(_thread->current_rt_call_timer() == nullptr, "%s", name(_thread->current_rt_call_timer()));
699 _thread->set_current_rt_call_timer(timer);
700 } else if (current->profile_rt_calls()) {
701 notify_nested_rt_call(current->current_rt_call_timer(), timer);
702 }
703 }
704
705 inline ~ProfileVMCallContext() {
706 if (_enabled) {
707 assert(_timer == _thread->current_rt_call_timer(),
708 "%s vs %s", name(_timer), name(_thread->current_rt_call_timer()));
709 _thread->set_current_rt_call_timer(nullptr);
710 }
711 }
712
713 static int nested_runtime_calls_count() { return _perf_nested_runtime_calls_count; };
714 };
715
716 class PauseRuntimeCallProfiling : public StackObj {
717 protected:
718 Thread* _thread;
719 bool _enabled;
720 PerfTraceTime* _timer;
721
722 public:
723 inline PauseRuntimeCallProfiling(Thread* current, bool is_on)
724 : _thread(current), _enabled(is_on), _timer(nullptr) {
725 if (_enabled) {
726 _timer = _thread->current_rt_call_timer();
727 _thread->set_current_rt_call_timer(nullptr);
728 }
729 }
730
731 inline ~PauseRuntimeCallProfiling () {
732 if (_enabled) {
733 guarantee(_thread->current_rt_call_timer() == nullptr, "");
734 _thread->set_current_rt_call_timer(_timer); // restore
735 }
736 }
737 };
738
739 class ThreadInAsgct {
740 private:
741 Thread* _thread;
742 bool _saved_in_asgct;
743 public:
744 ThreadInAsgct(Thread* thread) : _thread(thread) {
745 assert(thread != nullptr, "invariant");
746 // Allow AsyncGetCallTrace to be reentrant - save the previous state.
747 _saved_in_asgct = thread->in_asgct();
748 thread->set_in_asgct(true);
749 }
750 ~ThreadInAsgct() {
751 assert(_thread->in_asgct(), "invariant");
752 _thread->set_in_asgct(_saved_in_asgct);
753 }
754 };
755
756 // Inline implementation of Thread::current()
|