10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_CODE_NMETHOD_HPP
26 #define SHARE_CODE_NMETHOD_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32 #include "runtime/mutexLocker.hpp"
33
34 class AbstractCompiler;
35 class CompiledDirectCall;
36 class CompiledIC;
37 class CompiledICData;
38 class CompileTask;
39 class DepChange;
40 class Dependencies;
41 class DirectiveSet;
42 class DebugInformationRecorder;
43 class ExceptionHandlerTable;
44 class ImplicitExceptionTable;
45 class JvmtiThreadState;
46 class MetadataClosure;
47 class NativeCallWrapper;
48 class OopIterateClosure;
49 class ScopeDesc;
199 };
200
201 // nmethod's read-only data
202 address _immutable_data;
203
204 PcDescContainer* _pc_desc_container;
205 ExceptionCache* volatile _exception_cache;
206
207 void* _gc_data;
208
209 struct oops_do_mark_link; // Opaque data type.
210 static nmethod* volatile _oops_do_mark_nmethods;
211 oops_do_mark_link* volatile _oops_do_mark_link;
212
213 CompiledICData* _compiled_ic_data;
214
215 // offsets for entry points
216 address _osr_entry_point; // entry point for on stack replacement
217 uint16_t _entry_offset; // entry point with class check
218 uint16_t _verified_entry_offset; // entry point without class check
219 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
220 int _immutable_data_size;
221
222 // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
223
224 int _skipped_instructions_size;
225
226 int _stub_offset;
227
228 // Offsets for different stubs section parts
229 int _exception_offset;
230 // All deoptees will resume execution at the location described by
231 // this offset.
232 int _deopt_handler_entry_offset;
233 // Offset (from insts_end) of the unwind handler if it exists
234 int16_t _unwind_handler_offset;
235 // Number of arguments passed on the stack
236 uint16_t _num_stack_arg_slots;
237
238 uint16_t _oops_size;
677 int skipped_instructions_size () const { return _skipped_instructions_size; }
678 int total_size() const;
679
680 // Containment
681 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
682 // Returns true if a given address is in the 'insts' section. The method
683 // insts_contains_inclusive() is end-inclusive.
684 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
685 bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
686 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
687 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
688 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
689 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
690 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
691 bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
692 bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
693
694 // entry points
695 address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
696 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
697
698 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
699 // allowed to advance state
700 in_use = 0, // executable nmethod
701 not_entrant = 1 // marked for deoptimization but activations may still exist
702 };
703
704 // flag accessing and manipulation (state is stored in _state, a signed char)
705 bool is_not_installed() const { return _state == not_installed; }
706 bool is_in_use() const { return _state <= in_use; } // note '<=': true for both not_installed and in_use
707 bool is_not_entrant() const { return _state == not_entrant; }
708 int get_state() const { return _state; }
709
710 void clear_unloading_state();
711 // Heuristically deduce whether an nmethod is no longer worth keeping around
712 bool is_cold();
713 bool is_unloading();
714 void do_unloading(bool unloading_occurred);
715
716 bool make_in_use() {
743 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
744 void flush_dependencies();
745
746 template<typename T>
747 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); } // GC-private word, reinterpreted to the caller's type
748 template<typename T>
749 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); } // stored untyped in _gc_data
750
751 bool has_unsafe_access() const { return _has_unsafe_access; }
752 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
753
754 bool has_monitors() const { return _has_monitors; }
755 void set_has_monitors(bool z) { _has_monitors = z; }
756
757 bool has_scoped_access() const { return _has_scoped_access; }
758 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
759
760 bool has_wide_vectors() const { return _has_wide_vectors; }
761 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
762
763 bool has_flushed_dependencies() const { return _has_flushed_dependencies; } // set once; see flush_dependencies()
764 void set_has_flushed_dependencies(bool z) {
765 assert(!has_flushed_dependencies(), "should only happen once"); // one-way flag: flushing must not repeat
766 _has_flushed_dependencies = z;
767 }
768
769 bool is_unlinked() const { return _is_unlinked; }
770 void set_is_unlinked() { // one-way transition: an nmethod is unlinked at most once
771 assert(!_is_unlinked, "already unlinked");
772 _is_unlinked = true;
773 }
774
775 int comp_level() const { return _comp_level; }
776
777 // Support for oops in scopes and relocs:
778 // Note: index 0 is reserved for null.
779 oop oop_at(int index) const;
780 oop oop_at_phantom(int index) const; // phantom reference
781 oop* oop_addr_at(int index) const { // for GC
782 // relocation indexes are biased by 1 (because 0 is reserved)
1055
1056 // Logging
1057 void log_identity(xmlStream* log) const;
1058 void log_new_nmethod() const;
1059 void log_relocated_nmethod(nmethod* original) const;
1060 void log_state_change(InvalidationReason invalidation_reason) const;
1061
1062 // Prints block-level comments, including nmethod specific block labels:
1063 void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
1064 const char* nmethod_section_label(address pos) const;
1065
1066 // returns whether this nmethod has code comments.
1067 bool has_code_comment(address begin, address end);
1068 // Prints a comment for one native instruction (reloc info, pc desc)
1069 void print_code_comment_on(outputStream* st, int column, address begin, address end);
1070
1071 // tells if this compiled method is dependent on the given changes,
1072 // and the changes have invalidated it
1073 bool check_dependency_on(DepChange& changes);
1074
1075 // Fast breakpoint support. Tells if this compiled method is
1076 // dependent on the given method. Returns true if this nmethod
1077 // corresponds to the given method as well.
1078 bool is_dependent_on_method(Method* dependee);
1079
1080 // JVMTI's GetLocalInstance() support
1081 ByteSize native_receiver_sp_offset() { // SP-relative offset of the saved receiver; native wrappers only (see assert)
1082 assert(is_native_method(), "sanity");
1083 return _native_receiver_sp_offset;
1084 }
1085 ByteSize native_basic_lock_sp_offset() { // SP-relative offset of the BasicLock; native wrappers only (see assert)
1086 assert(is_native_method(), "sanity");
1087 return _native_basic_lock_sp_offset;
1088 }
1089
1090 // support for code generation
1091 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1092 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1093
1094 void metadata_do(MetadataClosure* f);
1095
1096 address call_instruction_address(address pc) const;
1097
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_CODE_NMETHOD_HPP
26 #define SHARE_CODE_NMETHOD_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "compiler/compilerDefinitions.hpp"
31 #include "oops/metadata.hpp"
32 #include "oops/method.hpp"
33 #include "runtime/mutexLocker.hpp"
34
35 class AbstractCompiler;
36 class CompiledDirectCall;
37 class CompiledIC;
38 class CompiledICData;
39 class CompileTask;
40 class DepChange;
41 class Dependencies;
42 class DirectiveSet;
43 class DebugInformationRecorder;
44 class ExceptionHandlerTable;
45 class ImplicitExceptionTable;
46 class JvmtiThreadState;
47 class MetadataClosure;
48 class NativeCallWrapper;
49 class OopIterateClosure;
50 class ScopeDesc;
200 };
201
202 // nmethod's read-only data
203 address _immutable_data;
204
205 PcDescContainer* _pc_desc_container;
206 ExceptionCache* volatile _exception_cache;
207
208 void* _gc_data;
209
210 struct oops_do_mark_link; // Opaque data type.
211 static nmethod* volatile _oops_do_mark_nmethods;
212 oops_do_mark_link* volatile _oops_do_mark_link;
213
214 CompiledICData* _compiled_ic_data;
215
216 // offsets for entry points
217 address _osr_entry_point; // entry point for on stack replacement
218 uint16_t _entry_offset; // entry point with class check
219 uint16_t _verified_entry_offset; // entry point without class check
220 // TODO: can these be uint16_t? They seem to rely on a -1 CodeOffset sentinel; can be changed later.
221 address _inline_entry_point; // inline type entry point (unpack all inline type args) with class check
222 address _verified_inline_entry_point; // inline type entry point (unpack all inline type args) without class check
223 address _verified_inline_ro_entry_point; // inline type entry point (unpack receiver only) without class check
224 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
225 int _immutable_data_size;
226
227 // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
228
229 int _skipped_instructions_size;
230
231 int _stub_offset;
232
233 // Offsets for different stubs section parts
234 int _exception_offset;
235 // All deoptees will resume execution at the location described by
236 // this offset.
237 int _deopt_handler_entry_offset;
238 // Offset (from insts_end) of the unwind handler if it exists
239 int16_t _unwind_handler_offset;
240 // Number of arguments passed on the stack
241 uint16_t _num_stack_arg_slots;
242
243 uint16_t _oops_size;
682 int skipped_instructions_size () const { return _skipped_instructions_size; }
683 int total_size() const;
684
685 // Containment
686 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
687 // Returns true if a given address is in the 'insts' section. The method
688 // insts_contains_inclusive() is end-inclusive.
689 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
690 bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
691 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
692 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
693 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
694 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
695 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
696 bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
697 bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
698
699 // entry points
700 address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
701 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
702 address inline_entry_point() const { return _inline_entry_point; } // inline type entry point (unpack all inline type args)
703 address verified_inline_entry_point() const { return _verified_inline_entry_point; } // inline type entry point (unpack all inline type args) without class check
704 address verified_inline_ro_entry_point() const { return _verified_inline_ro_entry_point; } // inline type entry point (only unpack receiver) without class check
705
706 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
707 // allowed to advance state
708 in_use = 0, // executable nmethod
709 not_entrant = 1 // marked for deoptimization but activations may still exist
710 };
711
712 // flag accessing and manipulation (state is stored in _state, a signed char)
713 bool is_not_installed() const { return _state == not_installed; }
714 bool is_in_use() const { return _state <= in_use; } // note '<=': true for both not_installed and in_use
715 bool is_not_entrant() const { return _state == not_entrant; }
716 int get_state() const { return _state; }
717
718 void clear_unloading_state();
719 // Heuristically deduce whether an nmethod is no longer worth keeping around
720 bool is_cold();
721 bool is_unloading();
722 void do_unloading(bool unloading_occurred);
723
724 bool make_in_use() {
751 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
752 void flush_dependencies();
753
754 template<typename T>
755 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); } // GC-private word, reinterpreted to the caller's type
756 template<typename T>
757 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); } // stored untyped in _gc_data
758
759 bool has_unsafe_access() const { return _has_unsafe_access; }
760 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
761
762 bool has_monitors() const { return _has_monitors; }
763 void set_has_monitors(bool z) { _has_monitors = z; }
764
765 bool has_scoped_access() const { return _has_scoped_access; }
766 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
767
768 bool has_wide_vectors() const { return _has_wide_vectors; }
769 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
770
771 bool needs_stack_repair() const { // compiler-specific query, delegated to the Method; false for other compilers
772 if (is_compiled_by_c1()) {
773 return method()->c1_needs_stack_repair();
774 } else if (is_compiled_by_c2()) {
775 return method()->c2_needs_stack_repair();
776 } else {
777 return false;
778 }
779 }
780
781 bool has_flushed_dependencies() const { return _has_flushed_dependencies; } // set once; see flush_dependencies()
782 void set_has_flushed_dependencies(bool z) {
783 assert(!has_flushed_dependencies(), "should only happen once"); // one-way flag: flushing must not repeat
784 _has_flushed_dependencies = z;
785 }
786
787 bool is_unlinked() const { return _is_unlinked; }
788 void set_is_unlinked() { // one-way transition: an nmethod is unlinked at most once
789 assert(!_is_unlinked, "already unlinked");
790 _is_unlinked = true;
791 }
792
793 int comp_level() const { return _comp_level; }
794
795 // Support for oops in scopes and relocs:
796 // Note: index 0 is reserved for null.
797 oop oop_at(int index) const;
798 oop oop_at_phantom(int index) const; // phantom reference
799 oop* oop_addr_at(int index) const { // for GC
800 // relocation indexes are biased by 1 (because 0 is reserved)
1073
1074 // Logging
1075 void log_identity(xmlStream* log) const;
1076 void log_new_nmethod() const;
1077 void log_relocated_nmethod(nmethod* original) const;
1078 void log_state_change(InvalidationReason invalidation_reason) const;
1079
1080 // Prints block-level comments, including nmethod specific block labels:
1081 void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
1082 const char* nmethod_section_label(address pos) const;
1083
1084 // returns whether this nmethod has code comments.
1085 bool has_code_comment(address begin, address end);
1086 // Prints a comment for one native instruction (reloc info, pc desc)
1087 void print_code_comment_on(outputStream* st, int column, address begin, address end);
1088
1089 // tells if this compiled method is dependent on the given changes,
1090 // and the changes have invalidated it
1091 bool check_dependency_on(DepChange& changes);
1092
1093 // Tells if this compiled method is dependent on the given method.
1094 // Returns true if this nmethod corresponds to the given method as well.
1095 // It is used for fast breakpoint support and updating the calling convention
1096 // in case of mismatch.
1097 bool is_dependent_on_method(Method* dependee);
1098
1099 // JVMTI's GetLocalInstance() support
1100 ByteSize native_receiver_sp_offset() { // SP-relative offset of the saved receiver; native wrappers only (see assert)
1101 assert(is_native_method(), "sanity");
1102 return _native_receiver_sp_offset;
1103 }
1104 ByteSize native_basic_lock_sp_offset() { // SP-relative offset of the BasicLock; native wrappers only (see assert)
1105 assert(is_native_method(), "sanity");
1106 return _native_basic_lock_sp_offset;
1107 }
1108
1109 // support for code generation
1110 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1111 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1112
1113 void metadata_do(MetadataClosure* f);
1114
1115 address call_instruction_address(address pc) const;
1116
|