1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_CODE_NMETHOD_HPP
26 #define SHARE_CODE_NMETHOD_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32 #include "runtime/mutexLocker.hpp"
33
34 class AbstractCompiler;
35 class CompiledDirectCall;
36 class CompiledIC;
37 class CompiledICData;
38 class CompileTask;
39 class DepChange;
40 class Dependencies;
41 class DirectiveSet;
42 class DebugInformationRecorder;
43 class ExceptionHandlerTable;
44 class ImplicitExceptionTable;
45 class JvmtiThreadState;
46 class MetadataClosure;
47 class NativeCallWrapper;
48 class OopIterateClosure;
49 class ScopeDesc;
199 };
200
201 // nmethod's read-only data
202 address _immutable_data;
203
204 PcDescContainer* _pc_desc_container;
205 ExceptionCache* volatile _exception_cache;
206
207 void* _gc_data;
208
209 struct oops_do_mark_link; // Opaque data type.
210 static nmethod* volatile _oops_do_mark_nmethods;
211 oops_do_mark_link* volatile _oops_do_mark_link;
212
213 CompiledICData* _compiled_ic_data;
214
215 // offsets for entry points
216 address _osr_entry_point; // entry point for on stack replacement
217 uint16_t _entry_offset; // entry point with class check
218 uint16_t _verified_entry_offset; // entry point without class check
219 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
220 int _immutable_data_size;
221
222 // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
223
224 int _skipped_instructions_size;
225
226 int _stub_offset;
227
228 // Offsets for different stubs section parts
229 int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
232 int _deopt_handler_entry_offset;
233 // Offset (from insts_end) of the unwind handler if it exists
234 int16_t _unwind_handler_offset;
235 // Number of arguments passed on the stack
236 uint16_t _num_stack_arg_slots;
237
238 #if INCLUDE_JVMCI
679 int skipped_instructions_size () const { return _skipped_instructions_size; }
680 int total_size() const;
681
682 // Containment
683 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
684 // Returns true if a given address is in the 'insts' section. The method
685 // insts_contains_inclusive() is end-inclusive.
686 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
687 bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
688 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
689 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
690 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
691 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
692 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
693 bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
694 bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
695
696 // entry points
697 address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
698 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
699
700 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
701 // allowed to advance state
702 in_use = 0, // executable nmethod
703 not_entrant = 1 // marked for deoptimization but activations may still exist
704 };
705
706 // flag accessing and manipulation
707 bool is_not_installed() const { return _state == not_installed; }
  // Note '<=' rather than '==': not_installed (-1) also counts as "in use",
  // so an nmethod still under construction is not treated as dead.
  bool is_in_use() const { return _state <= in_use; }
709 bool is_not_entrant() const { return _state == not_entrant; }
710 int get_state() const { return _state; }
711
712 void clear_unloading_state();
713 // Heuristically deduce an nmethod isn't worth keeping around
714 bool is_cold();
715 bool is_unloading();
716 void do_unloading(bool unloading_occurred);
717
718 bool make_in_use() {
745 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
746 void flush_dependencies();
747
  // Typed accessor for the GC-private word attached to this nmethod
  // (_gc_data). The pointee type is chosen by the active GC; this is a
  // raw reinterpret with no checking.
  template<typename T>
  T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
  // Install GC-private data; stored untyped in _gc_data.
  template<typename T>
  void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
752
753 bool has_unsafe_access() const { return _has_unsafe_access; }
754 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
755
756 bool has_monitors() const { return _has_monitors; }
757 void set_has_monitors(bool z) { _has_monitors = z; }
758
759 bool has_scoped_access() const { return _has_scoped_access; }
760 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
761
762 bool has_wide_vectors() const { return _has_wide_vectors; }
763 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
764
765 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
  // Records that this nmethod's dependencies have been flushed.
  // One-shot transition: asserts the flag was not already set.
  void set_has_flushed_dependencies(bool z) {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = z;
  }
770
771 bool is_unlinked() const { return _is_unlinked; }
  // One-shot: mark this nmethod as unlinked; asserts it was not already set.
  void set_is_unlinked() {
    assert(!_is_unlinked, "already unlinked");
    _is_unlinked = true;
  }
776
777 int comp_level() const { return _comp_level; }
778
779 // Support for oops in scopes and relocs:
780 // Note: index 0 is reserved for null.
781 oop oop_at(int index) const;
782 oop oop_at_phantom(int index) const; // phantom reference
783 oop* oop_addr_at(int index) const { // for GC
784 // relocation indexes are biased by 1 (because 0 is reserved)
824 address handler_for_exception_and_pc(Handle exception, address pc);
825 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
826 void clean_exception_cache();
827
828 void add_exception_cache_entry(ExceptionCache* new_entry);
829 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
830
831
832 // Deopt
  // Return true if the PC is one we would expect when the frame is being deopted.
834 inline bool is_deopt_pc(address pc);
835 inline bool is_deopt_entry(address pc);
836
837 // Accessor/mutator for the original pc of a frame before a frame was deopted.
838 address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
839 void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
840
841 const char* state() const;
842
  // True if 'addr' lies in the prologue region between code_begin() and the
  // verified entry point — i.e. the code that performs the inline-cache class
  // check before the "klass is correct" entry (see entry_point comments above).
  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }
846
847 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
848
849 // implicit exceptions support
850 address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
851 address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }
852
853 // Inline cache support for class unloading and nmethod unloading
854 private:
855 void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
856
857 address continuation_for_implicit_exception(address pc, bool for_div0_check);
858
859 public:
860 // Serial version used by whitebox test
861 void cleanup_inline_caches_whitebox();
862
863 void clear_inline_caches();
864
1057
1058 // Logging
1059 void log_identity(xmlStream* log) const;
1060 void log_new_nmethod() const;
1061 void log_relocated_nmethod(nmethod* original) const;
1062 void log_state_change(InvalidationReason invalidation_reason) const;
1063
1064 // Prints block-level comments, including nmethod specific block labels:
1065 void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
1066 const char* nmethod_section_label(address pos) const;
1067
1068 // returns whether this nmethod has code comments.
1069 bool has_code_comment(address begin, address end);
1070 // Prints a comment for one native instruction (reloc info, pc desc)
1071 void print_code_comment_on(outputStream* st, int column, address begin, address end);
1072
1073 // tells if this compiled method is dependent on the given changes,
1074 // and the changes have invalidated it
1075 bool check_dependency_on(DepChange& changes);
1076
1077 // Fast breakpoint support. Tells if this compiled method is
1078 // dependent on the given method. Returns true if this nmethod
1079 // corresponds to the given method as well.
1080 bool is_dependent_on_method(Method* dependee);
1081
1082 // JVMTI's GetLocalInstance() support
  // JVMTI GetLocalInstance() support: stack offset of the saved receiver in a
  // native-method wrapper frame. Only meaningful for native nmethods (asserted).
  ByteSize native_receiver_sp_offset() {
    assert(is_native_method(), "sanity");
    return _native_receiver_sp_offset;
  }
  // Stack offset of the BasicLock slot in a native-method wrapper frame.
  // Only meaningful for native nmethods (asserted).
  ByteSize native_basic_lock_sp_offset() {
    assert(is_native_method(), "sanity");
    return _native_basic_lock_sp_offset;
  }
1091
1092 // support for code generation
1093 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1094 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1095
1096 void metadata_do(MetadataClosure* f);
1097
1098 address call_instruction_address(address pc) const;
1099
|
1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_CODE_NMETHOD_HPP
26 #define SHARE_CODE_NMETHOD_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "compiler/compilerDefinitions.hpp"
31 #include "oops/metadata.hpp"
32 #include "oops/method.hpp"
33 #include "runtime/mutexLocker.hpp"
34
35 class AbstractCompiler;
36 class CompiledDirectCall;
37 class CompiledIC;
38 class CompiledICData;
39 class CompileTask;
40 class DepChange;
41 class Dependencies;
42 class DirectiveSet;
43 class DebugInformationRecorder;
44 class ExceptionHandlerTable;
45 class ImplicitExceptionTable;
46 class JvmtiThreadState;
47 class MetadataClosure;
48 class NativeCallWrapper;
49 class OopIterateClosure;
50 class ScopeDesc;
200 };
201
202 // nmethod's read-only data
203 address _immutable_data;
204
205 PcDescContainer* _pc_desc_container;
206 ExceptionCache* volatile _exception_cache;
207
208 void* _gc_data;
209
210 struct oops_do_mark_link; // Opaque data type.
211 static nmethod* volatile _oops_do_mark_nmethods;
212 oops_do_mark_link* volatile _oops_do_mark_link;
213
214 CompiledICData* _compiled_ic_data;
215
216 // offsets for entry points
217 address _osr_entry_point; // entry point for on stack replacement
218 uint16_t _entry_offset; // entry point with class check
219 uint16_t _verified_entry_offset; // entry point without class check
220 uint16_t _inline_entry_offset; // inline type entry point (unpack all inline type args) with class check
221 uint16_t _verified_inline_entry_offset; // inline type entry point (unpack all inline type args) without class check
222 uint16_t _verified_inline_ro_entry_offset; // inline type entry point (unpack receiver only) without class check
223 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
224 int _immutable_data_size;
225
226 // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
227
228 int _skipped_instructions_size;
229
230 int _stub_offset;
231
232 // Offsets for different stubs section parts
233 int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
236 int _deopt_handler_entry_offset;
237 // Offset (from insts_end) of the unwind handler if it exists
238 int16_t _unwind_handler_offset;
239 // Number of arguments passed on the stack
240 uint16_t _num_stack_arg_slots;
241
242 #if INCLUDE_JVMCI
683 int skipped_instructions_size () const { return _skipped_instructions_size; }
684 int total_size() const;
685
686 // Containment
687 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
688 // Returns true if a given address is in the 'insts' section. The method
689 // insts_contains_inclusive() is end-inclusive.
690 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
691 bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
692 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
693 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
694 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
695 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
696 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
697 bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
698 bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
699
700 // entry points
701 address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
702 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
703 address inline_entry_point() const { return code_begin() + _inline_entry_offset; } // inline type entry point (unpack all inline type args)
704 address verified_inline_entry_point() const { return code_begin() + _verified_inline_entry_offset; } // inline type entry point (unpack all inline type args) without class check
705 address verified_inline_ro_entry_point() const { return code_begin() + _verified_inline_ro_entry_offset; } // inline type entry point (only unpack receiver) without class check
706
707 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
708 // allowed to advance state
709 in_use = 0, // executable nmethod
710 not_entrant = 1 // marked for deoptimization but activations may still exist
711 };
712
713 // flag accessing and manipulation
714 bool is_not_installed() const { return _state == not_installed; }
  // Note '<=' rather than '==': not_installed (-1) also counts as "in use",
  // so an nmethod still under construction is not treated as dead.
  bool is_in_use() const { return _state <= in_use; }
716 bool is_not_entrant() const { return _state == not_entrant; }
717 int get_state() const { return _state; }
718
719 void clear_unloading_state();
720 // Heuristically deduce an nmethod isn't worth keeping around
721 bool is_cold();
722 bool is_unloading();
723 void do_unloading(bool unloading_occurred);
724
725 bool make_in_use() {
752 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
753 void flush_dependencies();
754
755 template<typename T>
756 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
757 template<typename T>
758 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
759
760 bool has_unsafe_access() const { return _has_unsafe_access; }
761 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
762
763 bool has_monitors() const { return _has_monitors; }
764 void set_has_monitors(bool z) { _has_monitors = z; }
765
766 bool has_scoped_access() const { return _has_scoped_access; }
767 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
768
769 bool has_wide_vectors() const { return _has_wide_vectors; }
770 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
771
772 bool needs_stack_repair() const {
773 if (is_compiled_by_c1()) {
774 return method()->c1_needs_stack_repair();
775 } else if (is_compiled_by_c2()) {
776 return method()->c2_needs_stack_repair();
777 } else {
778 return false;
779 }
780 }
781
782 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
  // Records that this nmethod's dependencies have been flushed.
  // One-shot transition: asserts the flag was not already set.
  void set_has_flushed_dependencies(bool z) {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = z;
  }
787
788 bool is_unlinked() const { return _is_unlinked; }
  // One-shot: mark this nmethod as unlinked; asserts it was not already set.
  void set_is_unlinked() {
    assert(!_is_unlinked, "already unlinked");
    _is_unlinked = true;
  }
793
794 int comp_level() const { return _comp_level; }
795
796 // Support for oops in scopes and relocs:
797 // Note: index 0 is reserved for null.
798 oop oop_at(int index) const;
799 oop oop_at_phantom(int index) const; // phantom reference
800 oop* oop_addr_at(int index) const { // for GC
801 // relocation indexes are biased by 1 (because 0 is reserved)
841 address handler_for_exception_and_pc(Handle exception, address pc);
842 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
843 void clean_exception_cache();
844
845 void add_exception_cache_entry(ExceptionCache* new_entry);
846 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
847
848
849 // Deopt
  // Return true if the PC is one we would expect when the frame is being deopted.
851 inline bool is_deopt_pc(address pc);
852 inline bool is_deopt_entry(address pc);
853
854 // Accessor/mutator for the original pc of a frame before a frame was deopted.
855 address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
856 void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
857
858 const char* state() const;
859
  // True if 'addr' lies in the prologue region that performs the inline-cache
  // class check. 'a < x || a < y' is equivalent to 'a < max(x, y)', so this
  // covers the region up to whichever of the two verified entry points
  // (regular or inline-type) lies later in the code.
  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && (addr < verified_entry_point() || addr < verified_inline_entry_point()));
  }
863
864 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
865
866 // implicit exceptions support
867 address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
868 address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }
869
870 // Inline cache support for class unloading and nmethod unloading
871 private:
872 void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
873
874 address continuation_for_implicit_exception(address pc, bool for_div0_check);
875
876 public:
877 // Serial version used by whitebox test
878 void cleanup_inline_caches_whitebox();
879
880 void clear_inline_caches();
881
1074
1075 // Logging
1076 void log_identity(xmlStream* log) const;
1077 void log_new_nmethod() const;
1078 void log_relocated_nmethod(nmethod* original) const;
1079 void log_state_change(InvalidationReason invalidation_reason) const;
1080
1081 // Prints block-level comments, including nmethod specific block labels:
1082 void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
1083 const char* nmethod_section_label(address pos) const;
1084
1085 // returns whether this nmethod has code comments.
1086 bool has_code_comment(address begin, address end);
1087 // Prints a comment for one native instruction (reloc info, pc desc)
1088 void print_code_comment_on(outputStream* st, int column, address begin, address end);
1089
1090 // tells if this compiled method is dependent on the given changes,
1091 // and the changes have invalidated it
1092 bool check_dependency_on(DepChange& changes);
1093
1094 // Tells if this compiled method is dependent on the given method.
1095 // Returns true if this nmethod corresponds to the given method as well.
1096 // It is used for fast breakpoint support and updating the calling convention
1097 // in case of mismatch.
1098 bool is_dependent_on_method(Method* dependee);
1099
1100 // JVMTI's GetLocalInstance() support
1101 ByteSize native_receiver_sp_offset() {
1102 assert(is_native_method(), "sanity");
1103 return _native_receiver_sp_offset;
1104 }
1105 ByteSize native_basic_lock_sp_offset() {
1106 assert(is_native_method(), "sanity");
1107 return _native_basic_lock_sp_offset;
1108 }
1109
1110 // support for code generation
1111 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1112 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1113
1114 void metadata_do(MetadataClosure* f);
1115
1116 address call_instruction_address(address pc) const;
1117
|