43 #include "jfr/support/jfrThreadExtension.hpp"
44 #endif
45
46 class CompilerThread;
47 class HandleArea;
48 class HandleMark;
49 class JvmtiRawMonitor;
50 class NMethodClosure;
51 class Metadata;
52 class OopClosure;
53 class OSThread;
54 class ParkEvent;
55 class ResourceArea;
56 class SafeThreadsListPtr;
57 class ThreadClosure;
58 class ThreadsList;
59 class ThreadsSMRSupport;
60 class VMErrorCallback;
61
62
63 DEBUG_ONLY(class ResourceMark;)
64
65 class WorkerThread;
66
67 class JavaThread;
68
69 // Class hierarchy
70 // - Thread
71 // - JavaThread
72 // - various subclasses eg CompilerThread, ServiceThread
73 // - NonJavaThread
74 // - NamedThread
75 // - VMThread
76 // - ConcurrentGCThread
77 // - WorkerThread
78 // - WatcherThread
79 // - JfrThreadSampler
80 // - LogAsyncWriter
81 //
82 // All Thread subclasses must be either JavaThread or NonJavaThread.
607 void init_wx();
608 WXMode enable_wx(WXMode new_state);
609
  // Assert that this thread's cached W^X state matches 'expected'.
  // Only compiled on macOS/AArch64 (see the #endif just below), where
  // MAP_JIT pages toggle between writable and executable per thread.
  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
613 #endif // __APPLE__ && AARCH64
614
 private:
  // Set while this thread is executing inside AsyncGetCallTrace (ASGCT).
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  // Returns true iff the current thread is inside AsyncGetCallTrace.
  // current_or_null_safe() is used so this is callable even where no
  // current thread is attached (e.g. from a signal handler — TODO confirm).
  static bool current_in_asgct() {
    Thread *cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }

 private:
  // Head of the VMErrorCallback list registered on this thread
  // (presumably invoked during VM error reporting — confirm at the
  // VMErrorCallback definition site).
  VMErrorCallback* _vm_error_callbacks;
};
628
629 class ThreadInAsgct {
630 private:
631 Thread* _thread;
632 bool _saved_in_asgct;
633 public:
634 ThreadInAsgct(Thread* thread) : _thread(thread) {
635 assert(thread != nullptr, "invariant");
636 // Allow AsyncGetCallTrace to be reentrant - save the previous state.
637 _saved_in_asgct = thread->in_asgct();
638 thread->set_in_asgct(true);
639 }
640 ~ThreadInAsgct() {
641 assert(_thread->in_asgct(), "invariant");
642 _thread->set_in_asgct(_saved_in_asgct);
643 }
644 };
645
646 // Inline implementation of Thread::current()
|
43 #include "jfr/support/jfrThreadExtension.hpp"
44 #endif
45
46 class CompilerThread;
47 class HandleArea;
48 class HandleMark;
49 class JvmtiRawMonitor;
50 class NMethodClosure;
51 class Metadata;
52 class OopClosure;
53 class OSThread;
54 class ParkEvent;
55 class ResourceArea;
56 class SafeThreadsListPtr;
57 class ThreadClosure;
58 class ThreadsList;
59 class ThreadsSMRSupport;
60 class VMErrorCallback;
61
62
63 class PerfTraceTime;
64
65 DEBUG_ONLY(class ResourceMark;)
66
67 class WorkerThread;
68
69 class JavaThread;
70
71 // Class hierarchy
72 // - Thread
73 // - JavaThread
74 // - various subclasses eg CompilerThread, ServiceThread
75 // - NonJavaThread
76 // - NamedThread
77 // - VMThread
78 // - ConcurrentGCThread
79 // - WorkerThread
80 // - WatcherThread
81 // - JfrThreadSampler
82 // - LogAsyncWriter
83 //
84 // All Thread subclasses must be either JavaThread or NonJavaThread.
609 void init_wx();
610 WXMode enable_wx(WXMode new_state);
611
  // Assert that this thread's cached W^X state matches 'expected'.
  // Only compiled on macOS/AArch64 (see the #endif just below), where
  // MAP_JIT pages toggle between writable and executable per thread.
  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
615 #endif // __APPLE__ && AARCH64
616
 private:
  // Set while this thread is executing inside AsyncGetCallTrace (ASGCT).
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  // Returns true iff the current thread is inside AsyncGetCallTrace.
  // current_or_null_safe() is used so this is callable even where no
  // current thread is attached (e.g. from a signal handler — TODO confirm).
  static bool current_in_asgct() {
    Thread *cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }
626
 private:
  // Head of the VMErrorCallback list registered on this thread
  // (presumably invoked during VM error reporting — confirm at the
  // VMErrorCallback definition site).
  VMErrorCallback* _vm_error_callbacks;

  // --- Runtime-call profiling support (guarded by ProfileRuntimeCalls) ---
  // Per-thread opt-in flags for the individual profiling categories.
  bool _profile_vm_locks;
  bool _profile_vm_calls;
  bool _profile_vm_ops;
  bool _profile_rt_calls;
  bool _profile_upcalls;

  // Bytecode counters — presumably cumulative bytecodes executed, for all
  // code vs. <clinit> only; TODO confirm at the sites that update them.
  jlong _all_bc_counter_value;
  jlong _clinit_bc_counter_value;

  // Timer of the runtime call currently being profiled on this thread;
  // nullptr when no runtime call is being timed.
  PerfTraceTime* _current_rt_call_timer;
 public:
  bool profile_vm_locks() const { return _profile_vm_locks; }
  void set_profile_vm_locks(bool v) { _profile_vm_locks = v; }

  bool profile_vm_calls() const { return _profile_vm_calls; }
  void set_profile_vm_calls(bool v) { _profile_vm_calls = v; }

  bool profile_vm_ops() const { return _profile_vm_ops; }
  void set_profile_vm_ops(bool v) { _profile_vm_ops = v; }

  bool profile_rt_calls() const { return _profile_rt_calls; }
  void set_profile_rt_calls(bool v) { _profile_rt_calls = v; }

  bool profile_upcalls() const { return _profile_upcalls; }
  void set_profile_upcalls(bool v) { _profile_upcalls = v; }

  PerfTraceTime* current_rt_call_timer() const { return _current_rt_call_timer; }
  void set_current_rt_call_timer(PerfTraceTime* c) { _current_rt_call_timer = c; }
  bool has_current_rt_call_timer() const { return _current_rt_call_timer != nullptr; }

  // True when a new runtime call should be timed: the global flag is on,
  // this thread opted in, and no outer runtime call is already being timed
  // (nested calls are not separately timed).
  bool do_profile_rt_call() const {
    return ProfileRuntimeCalls && profile_rt_calls() && !has_current_rt_call_timer();
  }

  jlong bc_counter_value() const { return _all_bc_counter_value; }

  jlong clinit_bc_counter_value() const { return _clinit_bc_counter_value; }

  void inc_clinit_bc_counter_value(jlong l) { _clinit_bc_counter_value += l; }

  // Offset of _all_bc_counter_value within Thread — presumably for direct
  // access from generated code; confirm at the uses of this offset.
  static ByteSize bc_counter_offset() { return byte_offset_of(Thread, _all_bc_counter_value); }
};
672
673 class ProfileVMCallContext : StackObj {
674 private:
675 Thread* _thread;
676 bool _enabled;
677 PerfTraceTime* _timer;
678
679 static int _perf_nested_runtime_calls_count;
680
681 static const char* name(PerfTraceTime* t);
682 static void notify_nested_rt_call(PerfTraceTime* current, PerfTraceTime* inner_timer);
683 public:
684 inline ProfileVMCallContext(Thread* current, PerfTraceTime* timer, bool is_on)
685 : _thread(current), _enabled(is_on), _timer(timer) {
686 if (_enabled) {
687 assert(timer != nullptr, "");
688 assert(_thread->current_rt_call_timer() == nullptr, "%s", name(_thread->current_rt_call_timer()));
689 _thread->set_current_rt_call_timer(timer);
690 } else if (current->profile_rt_calls()) {
691 notify_nested_rt_call(current->current_rt_call_timer(), timer);
692 }
693 }
694
695 inline ~ProfileVMCallContext() {
696 if (_enabled) {
697 assert(_timer == _thread->current_rt_call_timer(),
698 "%s vs %s", name(_timer), name(_thread->current_rt_call_timer()));
699 _thread->set_current_rt_call_timer(nullptr);
700 }
701 }
702
703 static int nested_runtime_calls_count() { return _perf_nested_runtime_calls_count; };
704 };
705
706 class PauseRuntimeCallProfiling : public StackObj {
707 protected:
708 Thread* _thread;
709 bool _enabled;
710 PerfTraceTime* _timer;
711
712 public:
713 inline PauseRuntimeCallProfiling(Thread* current, bool is_on)
714 : _thread(current), _enabled(is_on), _timer(nullptr) {
715 if (_enabled) {
716 _timer = _thread->current_rt_call_timer();
717 _thread->set_current_rt_call_timer(nullptr);
718 }
719 }
720
721 inline ~PauseRuntimeCallProfiling () {
722 if (_enabled) {
723 guarantee(_thread->current_rt_call_timer() == nullptr, "");
724 _thread->set_current_rt_call_timer(_timer); // restore
725 }
726 }
727 };
728
729 class ThreadInAsgct {
730 private:
731 Thread* _thread;
732 bool _saved_in_asgct;
733 public:
734 ThreadInAsgct(Thread* thread) : _thread(thread) {
735 assert(thread != nullptr, "invariant");
736 // Allow AsyncGetCallTrace to be reentrant - save the previous state.
737 _saved_in_asgct = thread->in_asgct();
738 thread->set_in_asgct(true);
739 }
740 ~ThreadInAsgct() {
741 assert(_thread->in_asgct(), "invariant");
742 _thread->set_in_asgct(_saved_in_asgct);
743 }
744 };
745
746 // Inline implementation of Thread::current()
|