1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_CODE_NMETHOD_HPP
26 #define SHARE_CODE_NMETHOD_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32 #include "runtime/mutexLocker.hpp"
33
34 class AbstractCompiler;
35 class CompiledDirectCall;
36 class CompiledIC;
37 class CompiledICData;
38 class CompileTask;
39 class DepChange;
40 class Dependencies;
41 class DirectiveSet;
42 class DebugInformationRecorder;
43 class ExceptionHandlerTable;
44 class ImplicitExceptionTable;
45 class JvmtiThreadState;
46 class MetadataClosure;
47 class NativeCallWrapper;
48 class OopIterateClosure;
49 class ScopeDesc;
199 };
200
201 // nmethod's read-only data
202 address _immutable_data;
203
204 PcDescContainer* _pc_desc_container;
205 ExceptionCache* volatile _exception_cache;
206
207 void* _gc_data;
208
209 struct oops_do_mark_link; // Opaque data type.
210 static nmethod* volatile _oops_do_mark_nmethods;
211 oops_do_mark_link* volatile _oops_do_mark_link;
212
213 CompiledICData* _compiled_ic_data;
214
215 // offsets for entry points
216 address _osr_entry_point; // entry point for on stack replacement
217 uint16_t _entry_offset; // entry point with class check
218 uint16_t _verified_entry_offset; // entry point without class check
219 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
220 int _immutable_data_size;
221
222 // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
223
224 int _skipped_instructions_size;
225
226 int _stub_offset;
227
228 // Offsets for different stubs section parts
229 int _exception_offset;
230 // All deoptees will resume execution at the location described by
231 // this offset.
232 int _deopt_handler_entry_offset;
233 // Offset (from insts_end) of the unwind handler if it exists
234 int16_t _unwind_handler_offset;
235 // Number of arguments passed on the stack
236 uint16_t _num_stack_arg_slots;
237
238 uint16_t _oops_size;
680 int skipped_instructions_size () const { return _skipped_instructions_size; }
681 int total_size() const;
682
683 // Containment
684 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
685 // Returns true if a given address is in the 'insts' section. The method
686 // insts_contains_inclusive() is end-inclusive.
687 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
688 bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
689 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
690 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
691 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
692 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
693 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
694 bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
695 bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
696
697 // entry points
698 address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
699 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
700
701 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
702 // allowed to advance state
703 in_use = 0, // executable nmethod
704 not_entrant = 1 // marked for deoptimization but activations may still exist
705 };
706
707 // flag accessing and manipulation
708 bool is_not_installed() const { return _state == not_installed; }
709 bool is_in_use() const { return _state <= in_use; } // NOTE: also true while not_installed (_state == -1 <= 0)
710 bool is_not_entrant() const { return _state == not_entrant; }
711 int get_state() const { return _state; } // raw state value; compare against the enum above
712
713 void clear_unloading_state();
714 // Heuristically deduce an nmethod isn't worth keeping around
715 bool is_cold();
716 bool is_unloading();
717 void do_unloading(bool unloading_occurred);
718
719 bool make_in_use() {
746 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
747 void flush_dependencies();
748
749 template<typename T>
750 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
751 template<typename T>
752 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
753
754 bool has_unsafe_access() const { return _has_unsafe_access; }
755 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
756
757 bool has_monitors() const { return _has_monitors; }
758 void set_has_monitors(bool z) { _has_monitors = z; }
759
760 bool has_scoped_access() const { return _has_scoped_access; }
761 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
762
763 bool has_wide_vectors() const { return _has_wide_vectors; }
764 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
765
766 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
767 void set_has_flushed_dependencies(bool z) { // one-shot: the assert below enforces at most one transition
768 assert(!has_flushed_dependencies(), "should only happen once");
769 _has_flushed_dependencies = z;
770 }
771
772 bool is_unlinked() const { return _is_unlinked; }
773 void set_is_unlinked() { // one-way transition; asserted to happen at most once
774 assert(!_is_unlinked, "already unlinked");
775 _is_unlinked = true;
776 }
777
778 int comp_level() const { return _comp_level; }
779
780 // Support for oops in scopes and relocs:
781 // Note: index 0 is reserved for null.
782 oop oop_at(int index) const;
783 oop oop_at_phantom(int index) const; // phantom reference
784 oop* oop_addr_at(int index) const { // for GC
785 // relocation indexes are biased by 1 (because 0 is reserved)
825 address handler_for_exception_and_pc(Handle exception, address pc);
826 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
827 void clean_exception_cache();
828
829 void add_exception_cache_entry(ExceptionCache* new_entry);
830 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
831
832
833 // Deopt
834 // Return true if the PC is one we would expect if the frame is being deopted.
835 inline bool is_deopt_pc(address pc);
836 inline bool is_deopt_entry(address pc);
837
838 // Accessor/mutator for the original pc of a frame before a frame was deopted.
839 address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
840 void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
841
842 const char* state() const;
843
844 bool inlinecache_check_contains(address addr) const { // true iff addr lies in [code_begin(), verified_entry_point()), i.e. before the klass check has passed
845 return (addr >= code_begin() && addr < verified_entry_point());
846 }
847
848 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
849
850 // implicit exceptions support
851 address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
852 address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }
853
854 // Inline cache support for class unloading and nmethod unloading
855 private:
856 void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
857
858 address continuation_for_implicit_exception(address pc, bool for_div0_check);
859
860 public:
861 // Serial version used by whitebox test
862 void cleanup_inline_caches_whitebox();
863
864 void clear_inline_caches();
865
1058
1059 // Logging
1060 void log_identity(xmlStream* log) const;
1061 void log_new_nmethod() const;
1062 void log_relocated_nmethod(nmethod* original) const;
1063 void log_state_change(InvalidationReason invalidation_reason) const;
1064
1065 // Prints block-level comments, including nmethod specific block labels:
1066 void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
1067 const char* nmethod_section_label(address pos) const;
1068
1069 // returns whether this nmethod has code comments.
1070 bool has_code_comment(address begin, address end);
1071 // Prints a comment for one native instruction (reloc info, pc desc)
1072 void print_code_comment_on(outputStream* st, int column, address begin, address end);
1073
1074 // tells if this compiled method is dependent on the given changes,
1075 // and the changes have invalidated it
1076 bool check_dependency_on(DepChange& changes);
1077
1078 // Fast breakpoint support. Tells if this compiled method is
1079 // dependent on the given method. Returns true if this nmethod
1080 // corresponds to the given method as well.
1081 bool is_dependent_on_method(Method* dependee);
1082
1083 // JVMTI's GetLocalInstance() support
1084 ByteSize native_receiver_sp_offset() { // valid only for native-method wrappers (asserted below)
1085 assert(is_native_method(), "sanity");
1086 return _native_receiver_sp_offset;
1087 }
1088 ByteSize native_basic_lock_sp_offset() { // valid only for native-method wrappers (asserted below)
1089 assert(is_native_method(), "sanity");
1090 return _native_basic_lock_sp_offset;
1091 }
1092
1093 // support for code generation
1094 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1095 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1096
1097 void metadata_do(MetadataClosure* f);
1098
1099 address call_instruction_address(address pc) const;
1100
|
1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_CODE_NMETHOD_HPP
26 #define SHARE_CODE_NMETHOD_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "compiler/compilerDefinitions.hpp"
31 #include "oops/metadata.hpp"
32 #include "oops/method.hpp"
33 #include "runtime/mutexLocker.hpp"
34
35 class AbstractCompiler;
36 class CompiledDirectCall;
37 class CompiledIC;
38 class CompiledICData;
39 class CompileTask;
40 class DepChange;
41 class Dependencies;
42 class DirectiveSet;
43 class DebugInformationRecorder;
44 class ExceptionHandlerTable;
45 class ImplicitExceptionTable;
46 class JvmtiThreadState;
47 class MetadataClosure;
48 class NativeCallWrapper;
49 class OopIterateClosure;
50 class ScopeDesc;
200 };
201
202 // nmethod's read-only data
203 address _immutable_data;
204
205 PcDescContainer* _pc_desc_container;
206 ExceptionCache* volatile _exception_cache;
207
208 void* _gc_data;
209
210 struct oops_do_mark_link; // Opaque data type.
211 static nmethod* volatile _oops_do_mark_nmethods;
212 oops_do_mark_link* volatile _oops_do_mark_link;
213
214 CompiledICData* _compiled_ic_data;
215
216 // offsets for entry points
217 address _osr_entry_point; // entry point for on stack replacement
218 uint16_t _entry_offset; // entry point with class check
219 uint16_t _verified_entry_offset; // entry point without class check
220 uint16_t _inline_entry_offset; // inline type entry point (unpack all inline type args) with class check
221 uint16_t _verified_inline_entry_offset; // inline type entry point (unpack all inline type args) without class check
222 uint16_t _verified_inline_ro_entry_offset; // inline type entry point (unpack receiver only) without class check
223 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
224 int _immutable_data_size;
225
226 // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
227
228 int _skipped_instructions_size;
229
230 int _stub_offset;
231
232 // Offsets for different stubs section parts
233 int _exception_offset;
234 // All deoptees will resume execution at the location described by
235 // this offset.
236 int _deopt_handler_entry_offset;
237 // Offset (from insts_end) of the unwind handler if it exists
238 int16_t _unwind_handler_offset;
239 // Number of arguments passed on the stack
240 uint16_t _num_stack_arg_slots;
241
242 uint16_t _oops_size;
684 int skipped_instructions_size () const { return _skipped_instructions_size; }
685 int total_size() const;
686
687 // Containment
688 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
689 // Returns true if a given address is in the 'insts' section. The method
690 // insts_contains_inclusive() is end-inclusive.
691 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
692 bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
693 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
694 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
695 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
696 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
697 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
698 bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
699 bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
700
701 // entry points
702 address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
703 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
704 address inline_entry_point() const { return code_begin() + _inline_entry_offset; } // inline type entry point (unpack all inline type args)
705 address verified_inline_entry_point() const { return code_begin() + _verified_inline_entry_offset; } // inline type entry point (unpack all inline type args) without class check
706 address verified_inline_ro_entry_point() const { return code_begin() + _verified_inline_ro_entry_offset; } // inline type entry point (only unpack receiver) without class check
707
708 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
709 // allowed to advance state
710 in_use = 0, // executable nmethod
711 not_entrant = 1 // marked for deoptimization but activations may still exist
712 };
713
714 // flag accessing and manipulation
715 bool is_not_installed() const { return _state == not_installed; }
716 bool is_in_use() const { return _state <= in_use; } // NOTE: also true while not_installed (_state == -1 <= 0)
717 bool is_not_entrant() const { return _state == not_entrant; }
718 int get_state() const { return _state; } // raw state value; compare against the enum above
719
720 void clear_unloading_state();
721 // Heuristically deduce an nmethod isn't worth keeping around
722 bool is_cold();
723 bool is_unloading();
724 void do_unloading(bool unloading_occurred);
725
726 bool make_in_use() {
753 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
754 void flush_dependencies();
755
756 template<typename T>
757 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
758 template<typename T>
759 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
760
761 bool has_unsafe_access() const { return _has_unsafe_access; }
762 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
763
764 bool has_monitors() const { return _has_monitors; }
765 void set_has_monitors(bool z) { _has_monitors = z; }
766
767 bool has_scoped_access() const { return _has_scoped_access; }
768 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
769
770 bool has_wide_vectors() const { return _has_wide_vectors; }
771 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
772
773 bool needs_stack_repair() const { // whether frames of this nmethod may need stack repair; only C1/C2-compiled code can
774 if (is_compiled_by_c1()) {
775 return method()->c1_needs_stack_repair();
776 } else if (is_compiled_by_c2()) {
777 return method()->c2_needs_stack_repair();
778 } else {
779 // Not C1 or C2 compiled (e.g. other kinds of nmethods): never needs repair.
780 return false;
781 }
782 }
782
783 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
784 void set_has_flushed_dependencies(bool z) { // one-shot: the assert below enforces at most one transition
785 assert(!has_flushed_dependencies(), "should only happen once");
786 _has_flushed_dependencies = z;
787 }
788
789 bool is_unlinked() const { return _is_unlinked; }
790 void set_is_unlinked() { // one-way transition; asserted to happen at most once
791 assert(!_is_unlinked, "already unlinked");
792 _is_unlinked = true;
793 }
794
795 int comp_level() const { return _comp_level; }
796
797 // Support for oops in scopes and relocs:
798 // Note: index 0 is reserved for null.
799 oop oop_at(int index) const;
800 oop oop_at_phantom(int index) const; // phantom reference
801 oop* oop_addr_at(int index) const { // for GC
802 // relocation indexes are biased by 1 (because 0 is reserved)
842 address handler_for_exception_and_pc(Handle exception, address pc);
843 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
844 void clean_exception_cache();
845
846 void add_exception_cache_entry(ExceptionCache* new_entry);
847 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
848
849
850 // Deopt
851 // Return true if the PC is one we would expect if the frame is being deopted.
852 inline bool is_deopt_pc(address pc);
853 inline bool is_deopt_entry(address pc);
854
855 // Accessor/mutator for the original pc of a frame before a frame was deopted.
856 address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
857 void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
858
859 const char* state() const;
860
861 bool inlinecache_check_contains(address addr) const { // true iff addr is below MAX(verified_entry_point(), verified_inline_entry_point()), i.e. before either klass check has passed
862 return (addr >= code_begin() && (addr < verified_entry_point() || addr < verified_inline_entry_point()));
863 }
864
865 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
866
867 // implicit exceptions support
868 address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
869 address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }
870
871 // Inline cache support for class unloading and nmethod unloading
872 private:
873 void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
874
875 address continuation_for_implicit_exception(address pc, bool for_div0_check);
876
877 public:
878 // Serial version used by whitebox test
879 void cleanup_inline_caches_whitebox();
880
881 void clear_inline_caches();
882
1075
1076 // Logging
1077 void log_identity(xmlStream* log) const;
1078 void log_new_nmethod() const;
1079 void log_relocated_nmethod(nmethod* original) const;
1080 void log_state_change(InvalidationReason invalidation_reason) const;
1081
1082 // Prints block-level comments, including nmethod specific block labels:
1083 void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
1084 const char* nmethod_section_label(address pos) const;
1085
1086 // returns whether this nmethod has code comments.
1087 bool has_code_comment(address begin, address end);
1088 // Prints a comment for one native instruction (reloc info, pc desc)
1089 void print_code_comment_on(outputStream* st, int column, address begin, address end);
1090
1091 // tells if this compiled method is dependent on the given changes,
1092 // and the changes have invalidated it
1093 bool check_dependency_on(DepChange& changes);
1094
1095 // Tells if this compiled method is dependent on the given method.
1096 // Returns true if this nmethod corresponds to the given method as well.
1097 // It is used for fast breakpoint support and updating the calling convention
1098 // in case of mismatch.
1099 bool is_dependent_on_method(Method* dependee);
1100
1101 // JVMTI's GetLocalInstance() support
1102 ByteSize native_receiver_sp_offset() { // valid only for native-method wrappers (asserted below)
1103 assert(is_native_method(), "sanity");
1104 return _native_receiver_sp_offset;
1105 }
1106 ByteSize native_basic_lock_sp_offset() { // valid only for native-method wrappers (asserted below)
1107 assert(is_native_method(), "sanity");
1108 return _native_basic_lock_sp_offset;
1109 }
1110
1111 // support for code generation
1112 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1113 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1114
1115 void metadata_do(MetadataClosure* f);
1116
1117 address call_instruction_address(address pc) const;
1118
|