44 #endif
45
46 class CompilerThread;
47 class HandleArea;
48 class HandleMark;
49 class ICRefillVerifier;
50 class JvmtiRawMonitor;
51 class NMethodClosure;
52 class Metadata;
53 class OopClosure;
54 class OSThread;
55 class ParkEvent;
56 class ResourceArea;
57 class SafeThreadsListPtr;
58 class ThreadClosure;
59 class ThreadsList;
60 class ThreadsSMRSupport;
61 class VMErrorCallback;
62
63
64 DEBUG_ONLY(class ResourceMark;)
65
66 class WorkerThread;
67
68 class JavaThread;
69
70 // Class hierarchy
71 // - Thread
72 // - JavaThread
73 // - various subclasses eg CompilerThread, ServiceThread
74 // - NonJavaThread
75 // - NamedThread
76 // - VMThread
77 // - ConcurrentGCThread
78 // - WorkerThread
79 // - WatcherThread
80 // - JfrThreadSampler
81 // - LogAsyncWriter
82 //
83 // All Thread subclasses must be either JavaThread or NonJavaThread.
  // Set up this thread's initial W^X (write xor execute) mode; only built
  // on macOS/AArch64 (see the #endif below).
  void init_wx();
  // Switch this thread's W^X mode to new_state; returns a WXMode —
  // presumably the previous mode so callers can restore it. Definition is
  // not visible here; confirm at the .cpp.
  WXMode enable_wx(WXMode new_state);

  // Debug check that the cached W^X state matches the caller's expectation.
  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
#endif // __APPLE__ && AARCH64
631
 private:
  // True while this thread is executing inside AsyncGetCallTrace (ASGCT).
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  // True iff there is a current thread and it is inside AsyncGetCallTrace.
  // Uses current_or_null_safe(), so the absence of a current thread yields
  // false rather than failing.
  static bool current_in_asgct() {
    Thread *cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }

 private:
  // Callbacks to run when a VM error occurs (see VMErrorCallback; the list
  // structure is defined elsewhere).
  VMErrorCallback* _vm_error_callbacks;
};
645
646 class ThreadInAsgct {
647 private:
648 Thread* _thread;
649 bool _saved_in_asgct;
650 public:
651 ThreadInAsgct(Thread* thread) : _thread(thread) {
652 assert(thread != nullptr, "invariant");
653 // Allow AsyncGetCallTrace to be reentrant - save the previous state.
654 _saved_in_asgct = thread->in_asgct();
655 thread->set_in_asgct(true);
656 }
657 ~ThreadInAsgct() {
658 assert(_thread->in_asgct(), "invariant");
659 _thread->set_in_asgct(_saved_in_asgct);
660 }
661 };
662
663 // Inline implementation of Thread::current()
|
44 #endif
45
46 class CompilerThread;
47 class HandleArea;
48 class HandleMark;
49 class ICRefillVerifier;
50 class JvmtiRawMonitor;
51 class NMethodClosure;
52 class Metadata;
53 class OopClosure;
54 class OSThread;
55 class ParkEvent;
56 class ResourceArea;
57 class SafeThreadsListPtr;
58 class ThreadClosure;
59 class ThreadsList;
60 class ThreadsSMRSupport;
61 class VMErrorCallback;
62
63
64 class PerfTraceTime;
65
66 DEBUG_ONLY(class ResourceMark;)
67
68 class WorkerThread;
69
70 class JavaThread;
71
72 // Class hierarchy
73 // - Thread
74 // - JavaThread
75 // - various subclasses eg CompilerThread, ServiceThread
76 // - NonJavaThread
77 // - NamedThread
78 // - VMThread
79 // - ConcurrentGCThread
80 // - WorkerThread
81 // - WatcherThread
82 // - JfrThreadSampler
83 // - LogAsyncWriter
84 //
85 // All Thread subclasses must be either JavaThread or NonJavaThread.
  // Set up this thread's initial W^X (write xor execute) mode; only built
  // on macOS/AArch64 (see the #endif below).
  void init_wx();
  // Switch this thread's W^X mode to new_state; returns a WXMode —
  // presumably the previous mode so callers can restore it. Definition is
  // not visible here; confirm at the .cpp.
  WXMode enable_wx(WXMode new_state);

  // Debug check that the cached W^X state matches the caller's expectation.
  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
#endif // __APPLE__ && AARCH64
633
 private:
  // True while this thread is executing inside AsyncGetCallTrace (ASGCT).
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  // True iff there is a current thread and it is inside AsyncGetCallTrace.
  // Uses current_or_null_safe(), so the absence of a current thread yields
  // false rather than failing.
  static bool current_in_asgct() {
    Thread *cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }

 private:
  // Callbacks to run when a VM error occurs (see VMErrorCallback; the list
  // structure is defined elsewhere).
  VMErrorCallback* _vm_error_callbacks;

  // Per-thread switches for the runtime-profiling instrumentation below.
  bool _profile_vm_locks;
  bool _profile_vm_calls;
  bool _profile_vm_ops;
  bool _profile_rt_calls;
  bool _profile_upcalls;

  // Bytecode counters: total bytecodes, and the subset attributed to class
  // initialization (<clinit>) — names suggest so; confirm at update sites.
  jlong _all_bc_counter_value;
  jlong _clinit_bc_counter_value;

  // Timer for the runtime call currently being profiled on this thread,
  // or nullptr when none is active.
  PerfTraceTime* _current_rt_call_timer;
 public:
  bool profile_vm_locks() const { return _profile_vm_locks; }
  void set_profile_vm_locks(bool v) { _profile_vm_locks = v; }

  bool profile_vm_calls() const { return _profile_vm_calls; }
  void set_profile_vm_calls(bool v) { _profile_vm_calls = v; }

  bool profile_vm_ops() const { return _profile_vm_ops; }
  void set_profile_vm_ops(bool v) { _profile_vm_ops = v; }

  bool profile_rt_calls() const { return _profile_rt_calls; }
  void set_profile_rt_calls(bool v) { _profile_rt_calls = v; }

  bool profile_upcalls() const { return _profile_upcalls; }
  void set_profile_upcalls(bool v) { _profile_upcalls = v; }

  PerfTraceTime* current_rt_call_timer() const { return _current_rt_call_timer; }
  void set_current_rt_call_timer(PerfTraceTime* c) { _current_rt_call_timer = c; }
  bool has_current_rt_call_timer() const { return _current_rt_call_timer != nullptr; }

  // Time a runtime call only when profiling is globally enabled
  // (ProfileRuntimeCalls), enabled for this thread, and no timer is already
  // installed — i.e. timers do not nest.
  bool do_profile_rt_call() const {
    return ProfileRuntimeCalls && profile_rt_calls() && !has_current_rt_call_timer();
  }

  jlong bc_counter_value() const { return _all_bc_counter_value; }

  jlong clinit_bc_counter_value() const { return _clinit_bc_counter_value; }

  void inc_clinit_bc_counter_value(jlong l) { _clinit_bc_counter_value += l; }

  // Offset of the all-bytecodes counter within Thread, for code that reads
  // the field by offset — presumably generated code; confirm at use sites.
  static ByteSize bc_counter_offset() { return byte_offset_of(Thread, _all_bc_counter_value); }
};
689
690 class ProfileVMCallContext : StackObj {
691 private:
692 Thread* _thread;
693 bool _enabled;
694 PerfTraceTime* _timer;
695
696 static int _perf_nested_runtime_calls_count;
697
698 static const char* name(PerfTraceTime* t);
699 static void notify_nested_rt_call(PerfTraceTime* current, PerfTraceTime* inner_timer);
700 public:
701 inline ProfileVMCallContext(Thread* current, PerfTraceTime* timer, bool is_on)
702 : _thread(current), _enabled(is_on), _timer(timer) {
703 if (_enabled) {
704 assert(timer != nullptr, "");
705 assert(_thread->current_rt_call_timer() == nullptr, "%s", name(_thread->current_rt_call_timer()));
706 _thread->set_current_rt_call_timer(timer);
707 } else if (current->profile_rt_calls()) {
708 notify_nested_rt_call(current->current_rt_call_timer(), timer);
709 }
710 }
711
712 inline ~ProfileVMCallContext() {
713 if (_enabled) {
714 assert(_timer == _thread->current_rt_call_timer(),
715 "%s vs %s", name(_timer), name(_thread->current_rt_call_timer()));
716 _thread->set_current_rt_call_timer(nullptr);
717 }
718 }
719
720 static int nested_runtime_calls_count() { return _perf_nested_runtime_calls_count; };
721 };
722
723 class PauseRuntimeCallProfiling : public StackObj {
724 protected:
725 Thread* _thread;
726 bool _enabled;
727 PerfTraceTime* _timer;
728
729 public:
730 inline PauseRuntimeCallProfiling(Thread* current, bool is_on)
731 : _thread(current), _enabled(is_on), _timer(nullptr) {
732 if (_enabled) {
733 _timer = _thread->current_rt_call_timer();
734 _thread->set_current_rt_call_timer(nullptr);
735 }
736 }
737
738 inline ~PauseRuntimeCallProfiling () {
739 if (_enabled) {
740 guarantee(_thread->current_rt_call_timer() == nullptr, "");
741 _thread->set_current_rt_call_timer(_timer); // restore
742 }
743 }
744 };
745
746 class ThreadInAsgct {
747 private:
748 Thread* _thread;
749 bool _saved_in_asgct;
750 public:
751 ThreadInAsgct(Thread* thread) : _thread(thread) {
752 assert(thread != nullptr, "invariant");
753 // Allow AsyncGetCallTrace to be reentrant - save the previous state.
754 _saved_in_asgct = thread->in_asgct();
755 thread->set_in_asgct(true);
756 }
757 ~ThreadInAsgct() {
758 assert(_thread->in_asgct(), "invariant");
759 _thread->set_in_asgct(_saved_in_asgct);
760 }
761 };
762
763 // Inline implementation of Thread::current()
|