44 #endif
45
46 class CompilerThread;
47 class HandleArea;
48 class HandleMark;
49 class ICRefillVerifier;
50 class JvmtiRawMonitor;
51 class NMethodClosure;
52 class Metadata;
53 class OopClosure;
54 class OSThread;
55 class ParkEvent;
56 class ResourceArea;
57 class SafeThreadsListPtr;
58 class ThreadClosure;
59 class ThreadsList;
60 class ThreadsSMRSupport;
61 class VMErrorCallback;
62
63
64 DEBUG_ONLY(class ResourceMark;)
65
66 class WorkerThread;
67
68 class JavaThread;
69
70 // Class hierarchy
71 // - Thread
72 // - JavaThread
73 // - various subclasses eg CompilerThread, ServiceThread
74 // - NonJavaThread
75 // - NamedThread
76 // - VMThread
77 // - ConcurrentGCThread
78 // - WorkerThread
79 // - WatcherThread
80 // - JfrThreadSampler
81 // - LogAsyncWriter
82 //
83 // All Thread subclasses must be either JavaThread or NonJavaThread.
  // Initialize this thread's W^X (write-xor-execute) state; only compiled in
  // the __APPLE__ && AARCH64 region closed by the #endif below.
  void init_wx();
  // Switch this thread's W^X mode and return the previous mode so the caller
  // can restore it when done.
  WXMode enable_wx(WXMode new_state);

  // Debug check that the cached W^X state matches what the caller expects.
  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
#endif // __APPLE__ && AARCH64

 private:
  // True while this thread is executing inside AsyncGetCallTrace (ASGCT);
  // managed by the ThreadInAsgct RAII helper.
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  // Returns whether the current thread (if any) is inside AsyncGetCallTrace.
  // Uses current_or_null_safe(), which presumably tolerates being called
  // when no Thread is attached (e.g. from a signal context) — hence the
  // nullptr check.
  static bool current_in_asgct() {
    Thread *cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }

 private:
  // Callbacks invoked on a VM error; presumably a linked list maintained by
  // VMErrorCallback registration — confirm in the implementation file.
  VMErrorCallback* _vm_error_callbacks;
};
643
// RAII guard marking a thread as being inside AsyncGetCallTrace (ASGCT) for
// the duration of a scope. The previous flag value is saved on entry and
// restored on exit so nested (reentrant) ASGCT invocations behave correctly.
class ThreadInAsgct {
 private:
  Thread* _thread;
  bool _saved_in_asgct;  // flag value on entry; restored by the destructor
 public:
  ThreadInAsgct(Thread* thread) : _thread(thread) {
    assert(thread != nullptr, "invariant");
    // Allow AsyncGetCallTrace to be reentrant - save the previous state.
    _saved_in_asgct = thread->in_asgct();
    thread->set_in_asgct(true);
  }
  ~ThreadInAsgct() {
    // The flag must still be set: the guard set it in the constructor.
    assert(_thread->in_asgct(), "invariant");
    _thread->set_in_asgct(_saved_in_asgct);
  }
};
660
661 // Inline implementation of Thread::current()
|
44 #endif
45
46 class CompilerThread;
47 class HandleArea;
48 class HandleMark;
49 class ICRefillVerifier;
50 class JvmtiRawMonitor;
51 class NMethodClosure;
52 class Metadata;
53 class OopClosure;
54 class OSThread;
55 class ParkEvent;
56 class ResourceArea;
57 class SafeThreadsListPtr;
58 class ThreadClosure;
59 class ThreadsList;
60 class ThreadsSMRSupport;
61 class VMErrorCallback;
62
63
64 class PerfTraceTime;
65
66 DEBUG_ONLY(class ResourceMark;)
67
68 class WorkerThread;
69
70 class JavaThread;
71
72 // Class hierarchy
73 // - Thread
74 // - JavaThread
75 // - various subclasses eg CompilerThread, ServiceThread
76 // - NonJavaThread
77 // - NamedThread
78 // - VMThread
79 // - ConcurrentGCThread
80 // - WorkerThread
81 // - WatcherThread
82 // - JfrThreadSampler
83 // - LogAsyncWriter
84 //
85 // All Thread subclasses must be either JavaThread or NonJavaThread.
  // Initialize this thread's W^X (write-xor-execute) state; only compiled in
  // the __APPLE__ && AARCH64 region closed by the #endif below.
  void init_wx();
  // Switch this thread's W^X mode and return the previous mode so the caller
  // can restore it when done.
  WXMode enable_wx(WXMode new_state);

  // Debug check that the cached W^X state matches what the caller expects.
  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
#endif // __APPLE__ && AARCH64

 private:
  // True while this thread is executing inside AsyncGetCallTrace (ASGCT);
  // managed by the ThreadInAsgct RAII helper.
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  // Returns whether the current thread (if any) is inside AsyncGetCallTrace.
  // Uses current_or_null_safe(), which presumably tolerates being called
  // when no Thread is attached (e.g. from a signal context) — hence the
  // nullptr check.
  static bool current_in_asgct() {
    Thread *cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }

 private:
  // Callbacks invoked on a VM error; presumably a linked list maintained by
  // VMErrorCallback registration — confirm in the implementation file.
  VMErrorCallback* _vm_error_callbacks;

  // Per-thread switches selecting which categories of VM-internal activity
  // are profiled for this thread (consumed by ProfileVMCallContext and the
  // runtime — see the accessors below).
  bool _profile_vm_locks;
  bool _profile_vm_calls;
  bool _profile_vm_ops;
  bool _profile_rt_calls;
  bool _profile_upcalls;

  // Accumulated counters ("bc" presumably bytecode counts — confirm against
  // the code that increments _all_bc_counter_value).
  jlong _all_bc_counter_value;
  jlong _clinit_bc_counter_value;  // portion attributed to class initializers, per the "clinit" name

  // Timer for the runtime call currently being profiled on this thread;
  // nullptr when no runtime-call timer is active.
  PerfTraceTime* _current_rt_call_timer;
 public:
  bool profile_vm_locks() const { return _profile_vm_locks; }
  void set_profile_vm_locks(bool v) { _profile_vm_locks = v; }

  bool profile_vm_calls() const { return _profile_vm_calls; }
  void set_profile_vm_calls(bool v) { _profile_vm_calls = v; }

  bool profile_vm_ops() const { return _profile_vm_ops; }
  void set_profile_vm_ops(bool v) { _profile_vm_ops = v; }

  bool profile_rt_calls() const { return _profile_rt_calls; }
  void set_profile_rt_calls(bool v) { _profile_rt_calls = v; }

  bool profile_upcalls() const { return _profile_upcalls; }
  void set_profile_upcalls(bool v) { _profile_upcalls = v; }

  PerfTraceTime* current_rt_call_timer() const { return _current_rt_call_timer; }
  void set_current_rt_call_timer(PerfTraceTime* c) { _current_rt_call_timer = c; }
  bool has_current_rt_call_timer() const { return _current_rt_call_timer != nullptr; }

  // A runtime call is profiled only when the global ProfileRuntimeCalls flag
  // is on, this thread has rt-call profiling enabled, and no timer is already
  // running (so nested runtime calls are not timed separately).
  bool do_profile_rt_call() const {
    return ProfileRuntimeCalls && profile_rt_calls() && !has_current_rt_call_timer();
  }

  jlong bc_counter_value() const { return _all_bc_counter_value; }

  jlong clinit_bc_counter_value() const { return _clinit_bc_counter_value; }

  void inc_clinit_bc_counter_value(jlong l) { _clinit_bc_counter_value += l; }

  // Field offset of the aggregate counter, presumably for direct access from
  // generated code — confirm at the use sites of bc_counter_offset().
  static ByteSize bc_counter_offset() { return byte_offset_of(Thread, _all_bc_counter_value); }
};
687
688 class ProfileVMCallContext : StackObj {
689 private:
690 Thread* _thread;
691 bool _enabled;
692 PerfTraceTime* _timer;
693
694 static int _perf_nested_runtime_calls_count;
695
696 static const char* name(PerfTraceTime* t);
697 static void notify_nested_rt_call(PerfTraceTime* current, PerfTraceTime* inner_timer);
698 public:
699 inline ProfileVMCallContext(Thread* current, PerfTraceTime* timer, bool is_on)
700 : _thread(current), _enabled(is_on), _timer(timer) {
701 if (_enabled) {
702 assert(timer != nullptr, "");
703 assert(_thread->current_rt_call_timer() == nullptr, "%s", name(_thread->current_rt_call_timer()));
704 _thread->set_current_rt_call_timer(timer);
705 } else if (current->profile_rt_calls()) {
706 notify_nested_rt_call(current->current_rt_call_timer(), timer);
707 }
708 }
709
710 inline ~ProfileVMCallContext() {
711 if (_enabled) {
712 assert(_timer == _thread->current_rt_call_timer(),
713 "%s vs %s", name(_timer), name(_thread->current_rt_call_timer()));
714 _thread->set_current_rt_call_timer(nullptr);
715 }
716 }
717
718 static int nested_runtime_calls_count() { return _perf_nested_runtime_calls_count; };
719 };
720
721 class PauseRuntimeCallProfiling : public StackObj {
722 protected:
723 Thread* _thread;
724 bool _enabled;
725 PerfTraceTime* _timer;
726
727 public:
728 inline PauseRuntimeCallProfiling(Thread* current, bool is_on)
729 : _thread(current), _enabled(is_on), _timer(nullptr) {
730 if (_enabled) {
731 _timer = _thread->current_rt_call_timer();
732 _thread->set_current_rt_call_timer(nullptr);
733 }
734 }
735
736 inline ~PauseRuntimeCallProfiling () {
737 if (_enabled) {
738 guarantee(_thread->current_rt_call_timer() == nullptr, "");
739 _thread->set_current_rt_call_timer(_timer); // restore
740 }
741 }
742 };
743
// RAII guard marking a thread as being inside AsyncGetCallTrace (ASGCT) for
// the duration of a scope. The previous flag value is saved on entry and
// restored on exit so nested (reentrant) ASGCT invocations behave correctly.
class ThreadInAsgct {
 private:
  Thread* _thread;
  bool _saved_in_asgct;  // flag value on entry; restored by the destructor
 public:
  ThreadInAsgct(Thread* thread) : _thread(thread) {
    assert(thread != nullptr, "invariant");
    // Allow AsyncGetCallTrace to be reentrant - save the previous state.
    _saved_in_asgct = thread->in_asgct();
    thread->set_in_asgct(true);
  }
  ~ThreadInAsgct() {
    // The flag must still be set: the guard set it in the constructor.
    assert(_thread->in_asgct(), "invariant");
    _thread->set_in_asgct(_saved_in_asgct);
  }
};
760
761 // Inline implementation of Thread::current()
|