44 #include "jfr/support/jfrThreadExtension.hpp"
45 #endif
46
47 class CompilerThread;
48 class HandleArea;
49 class HandleMark;
50 class JvmtiRawMonitor;
51 class NMethodClosure;
52 class Metadata;
53 class OopClosure;
54 class OSThread;
55 class ParkEvent;
56 class ResourceArea;
57 class SafeThreadsListPtr;
58 class ThreadClosure;
59 class ThreadsList;
60 class ThreadsSMRSupport;
61 class VMErrorCallback;
62
63
64 DEBUG_ONLY(class ResourceMark;)
65
66 class WorkerThread;
67
68 class JavaThread;
69
70 // Class hierarchy
71 // - Thread
72 // - JavaThread
73 // - various subclasses eg CompilerThread, ServiceThread
74 // - NonJavaThread
75 // - NamedThread
76 // - VMThread
77 // - ConcurrentGCThread
78 // - WorkerThread
79 // - WatcherThread
80 // - JfrThreadSampler
81 // - LogAsyncWriter
82 //
83 // All Thread subclasses must be either JavaThread or NonJavaThread.
614 void init_wx();
615 WXMode enable_wx(WXMode new_state);
616
// Debug check: the thread's cached W^X mode must match `expected`
// (Apple Silicon write-xor-execute handling; see enable_wx() above).
617   void assert_wx_state(WXMode expected) {
618     assert(_wx_state == expected, "wrong state");
619   }
620 #endif // __APPLE__ && AARCH64
621
622  private:
  // Set while this thread is executing inside AsyncGetCallTrace (ASGCT).
623   bool _in_asgct = false;
624  public:
  // Accessors for the AsyncGetCallTrace re-entrancy flag.
625   bool in_asgct() const { return _in_asgct; }
626   void set_in_asgct(bool value) { _in_asgct = value; }
  // True iff there is a current thread and it is inside AsyncGetCallTrace.
  // current_or_null_safe() tolerates being called when no Thread is attached.
627   static bool current_in_asgct() {
628     Thread *cur = Thread::current_or_null_safe();
629     return cur != nullptr && cur->in_asgct();
630   }
631
632  private:
  // Chain of VMErrorCallback objects (type only forward-declared here);
  // presumably invoked during VM error reporting — TODO(review): confirm.
633   VMErrorCallback* _vm_error_callbacks;
634 };
635
636 class ThreadInAsgct {
637 private:
638 Thread* _thread;
639 bool _saved_in_asgct;
640 public:
641 ThreadInAsgct(Thread* thread) : _thread(thread) {
642 assert(thread != nullptr, "invariant");
643 // Allow AsyncGetCallTrace to be reentrant - save the previous state.
644 _saved_in_asgct = thread->in_asgct();
645 thread->set_in_asgct(true);
646 }
647 ~ThreadInAsgct() {
648 assert(_thread->in_asgct(), "invariant");
649 _thread->set_in_asgct(_saved_in_asgct);
650 }
651 };
652
653 // Inline implementation of Thread::current()
|
44 #include "jfr/support/jfrThreadExtension.hpp"
45 #endif
46
47 class CompilerThread;
48 class HandleArea;
49 class HandleMark;
50 class JvmtiRawMonitor;
51 class NMethodClosure;
52 class Metadata;
53 class OopClosure;
54 class OSThread;
55 class ParkEvent;
56 class ResourceArea;
57 class SafeThreadsListPtr;
58 class ThreadClosure;
59 class ThreadsList;
60 class ThreadsSMRSupport;
61 class VMErrorCallback;
62
63
64 class PerfTraceTime;
65
66 DEBUG_ONLY(class ResourceMark;)
67
68 class WorkerThread;
69
70 class JavaThread;
71
72 // Class hierarchy
73 // - Thread
74 // - JavaThread
75 // - various subclasses eg CompilerThread, ServiceThread
76 // - NonJavaThread
77 // - NamedThread
78 // - VMThread
79 // - ConcurrentGCThread
80 // - WorkerThread
81 // - WatcherThread
82 // - JfrThreadSampler
83 // - LogAsyncWriter
84 //
85 // All Thread subclasses must be either JavaThread or NonJavaThread.
616 void init_wx();
617 WXMode enable_wx(WXMode new_state);
618
// Debug check: the thread's cached W^X mode must match `expected`
// (Apple Silicon write-xor-execute handling; see enable_wx() above).
619   void assert_wx_state(WXMode expected) {
620     assert(_wx_state == expected, "wrong state");
621   }
622 #endif // __APPLE__ && AARCH64
623
624  private:
  // Set while this thread is executing inside AsyncGetCallTrace (ASGCT).
625   bool _in_asgct = false;
626  public:
  // Accessors for the AsyncGetCallTrace re-entrancy flag.
627   bool in_asgct() const { return _in_asgct; }
628   void set_in_asgct(bool value) { _in_asgct = value; }
  // True iff there is a current thread and it is inside AsyncGetCallTrace.
  // current_or_null_safe() tolerates being called when no Thread is attached.
629   static bool current_in_asgct() {
630     Thread *cur = Thread::current_or_null_safe();
631     return cur != nullptr && cur->in_asgct();
632   }
633
634  private:
  // Chain of VMErrorCallback objects (type only forward-declared here);
  // presumably invoked during VM error reporting — TODO(review): confirm.
635   VMErrorCallback* _vm_error_callbacks;
636
  // --- Runtime/VM-call profiling state ---
  // Per-thread enable flags for profiling the corresponding VM activity;
  // read and written only through the accessors below.
637   bool _profile_vm_locks;
638   bool _profile_vm_calls;
639   bool _profile_vm_ops;
640   bool _profile_rt_calls;
641   bool _profile_upcalls;
642
  // Accumulated "bc" counters (presumably bytecode counts — TODO confirm):
  // a total, plus the portion attributed to class initializers (<clinit>).
643   jlong _all_bc_counter_value;
644   jlong _clinit_bc_counter_value;
645
  // Timer of the runtime call currently being profiled on this thread;
  // nullptr when no call is active. See ProfileVMCallContext below.
646   PerfTraceTime* _current_rt_call_timer;
647  public:
648   bool profile_vm_locks() const { return _profile_vm_locks; }
649   void set_profile_vm_locks(bool v) { _profile_vm_locks = v; }
650
651   bool profile_vm_calls() const { return _profile_vm_calls; }
652   void set_profile_vm_calls(bool v) { _profile_vm_calls = v; }
653
654   bool profile_vm_ops() const { return _profile_vm_ops; }
655   void set_profile_vm_ops(bool v) { _profile_vm_ops = v; }
656
657   bool profile_rt_calls() const { return _profile_rt_calls; }
658   void set_profile_rt_calls(bool v) { _profile_rt_calls = v; }
659
660   bool profile_upcalls() const { return _profile_upcalls; }
661   void set_profile_upcalls(bool v) { _profile_upcalls = v; }
662
663   PerfTraceTime* current_rt_call_timer() const { return _current_rt_call_timer; }
664   void set_current_rt_call_timer(PerfTraceTime* c) { _current_rt_call_timer = c; }
665   bool has_current_rt_call_timer() const { return _current_rt_call_timer != nullptr; }
666
  // Profile this runtime call only when the global ProfileRuntimeCalls flag
  // is on, this thread has rt-call profiling enabled, and no enclosing call
  // is already being timed (prevents nested timers).
667   bool do_profile_rt_call() const {
668     return ProfileRuntimeCalls && profile_rt_calls() && !has_current_rt_call_timer();
669   }
670
671   jlong bc_counter_value() const { return _all_bc_counter_value; }
672
673   jlong clinit_bc_counter_value() const { return _clinit_bc_counter_value; }
674
675   void inc_clinit_bc_counter_value(jlong l) { _clinit_bc_counter_value += l; }
676
  // Byte offset of _all_bc_counter_value within Thread, for direct field
  // access — presumably from generated code; confirm at call sites.
677   static ByteSize bc_counter_offset() { return byte_offset_of(Thread, _all_bc_counter_value); }
678 };
679
680 class ProfileVMCallContext : StackObj {
681 private:
682 Thread* _thread;
683 bool _enabled;
684 PerfTraceTime* _timer;
685
686 static int _perf_nested_runtime_calls_count;
687
688 static const char* name(PerfTraceTime* t);
689 static void notify_nested_rt_call(PerfTraceTime* current, PerfTraceTime* inner_timer);
690 public:
691 inline ProfileVMCallContext(Thread* current, PerfTraceTime* timer, bool is_on)
692 : _thread(current), _enabled(is_on), _timer(timer) {
693 if (_enabled) {
694 assert(timer != nullptr, "");
695 assert(_thread->current_rt_call_timer() == nullptr, "%s", name(_thread->current_rt_call_timer()));
696 _thread->set_current_rt_call_timer(timer);
697 } else if (current->profile_rt_calls()) {
698 notify_nested_rt_call(current->current_rt_call_timer(), timer);
699 }
700 }
701
702 inline ~ProfileVMCallContext() {
703 if (_enabled) {
704 assert(_timer == _thread->current_rt_call_timer(),
705 "%s vs %s", name(_timer), name(_thread->current_rt_call_timer()));
706 _thread->set_current_rt_call_timer(nullptr);
707 }
708 }
709
710 static int nested_runtime_calls_count() { return _perf_nested_runtime_calls_count; };
711 };
712
713 class PauseRuntimeCallProfiling : public StackObj {
714 protected:
715 Thread* _thread;
716 bool _enabled;
717 PerfTraceTime* _timer;
718
719 public:
720 inline PauseRuntimeCallProfiling(Thread* current, bool is_on)
721 : _thread(current), _enabled(is_on), _timer(nullptr) {
722 if (_enabled) {
723 _timer = _thread->current_rt_call_timer();
724 _thread->set_current_rt_call_timer(nullptr);
725 }
726 }
727
728 inline ~PauseRuntimeCallProfiling () {
729 if (_enabled) {
730 guarantee(_thread->current_rt_call_timer() == nullptr, "");
731 _thread->set_current_rt_call_timer(_timer); // restore
732 }
733 }
734 };
735
736 class ThreadInAsgct {
737 private:
738 Thread* _thread;
739 bool _saved_in_asgct;
740 public:
741 ThreadInAsgct(Thread* thread) : _thread(thread) {
742 assert(thread != nullptr, "invariant");
743 // Allow AsyncGetCallTrace to be reentrant - save the previous state.
744 _saved_in_asgct = thread->in_asgct();
745 thread->set_in_asgct(true);
746 }
747 ~ThreadInAsgct() {
748 assert(_thread->in_asgct(), "invariant");
749 _thread->set_in_asgct(_saved_in_asgct);
750 }
751 };
752
753 // Inline implementation of Thread::current()
|