10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_CODE_NMETHOD_HPP
26 #define SHARE_CODE_NMETHOD_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32 #include "runtime/mutexLocker.hpp"
33
34 class AbstractCompiler;
35 class CompiledDirectCall;
36 class CompiledIC;
37 class CompiledICData;
38 class CompileTask;
39 class DepChange;
40 class Dependencies;
41 class DirectiveSet;
42 class DebugInformationRecorder;
43 class ExceptionHandlerTable;
44 class ImplicitExceptionTable;
45 class JvmtiThreadState;
46 class MetadataClosure;
47 class NativeCallWrapper;
48 class OopIterateClosure;
49 class ScopeDesc;
199 };
200
201 // nmethod's read-only data
202 address _immutable_data;
203
204 PcDescContainer* _pc_desc_container;
205 ExceptionCache* volatile _exception_cache;
206
207 void* _gc_data;
208
209 struct oops_do_mark_link; // Opaque data type.
210 static nmethod* volatile _oops_do_mark_nmethods;
211 oops_do_mark_link* volatile _oops_do_mark_link;
212
213 CompiledICData* _compiled_ic_data;
214
215 // offsets for entry points
216 address _osr_entry_point; // entry point for on stack replacement
217 uint16_t _entry_offset; // entry point with class check
218 uint16_t _verified_entry_offset; // entry point without class check
219 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
220 int _immutable_data_size;
221
222 // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
223
224 int _skipped_instructions_size;
225
226 int _stub_offset;
227
228 // Offsets for different stubs section parts
229 int _exception_offset;
230 // All deoptees will resume execution at the location described by
231 // this offset.
232 int _deopt_handler_entry_offset;
233 // Offset (from insts_end) of the unwind handler if it exists
234 int16_t _unwind_handler_offset;
235 // Number of arguments passed on the stack
236 uint16_t _num_stack_arg_slots;
237
238 uint16_t _oops_size;
680 int skipped_instructions_size () const { return _skipped_instructions_size; }
681 int total_size() const;
682
683 // Containment
684 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
685 // Returns true if a given address is in the 'insts' section. The method
686 // insts_contains_inclusive() is end-inclusive.
687 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
688 bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
689 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
690 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
691 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
692 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
693 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
694 bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
695 bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
696
697 // entry points
698 address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
699 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
700
701 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
702 // allowed to advance state
703 in_use = 0, // executable nmethod
704 not_entrant = 1 // marked for deoptimization but activations may still exist
705 };
706
707 // flag accessing and manipulation
708 bool is_not_installed() const { return _state == not_installed; } // still under construction by the owner
709 bool is_in_use() const { return _state <= in_use; } // note: uses <=, so also true while _state == not_installed (-1)
710 bool is_not_entrant() const { return _state == not_entrant; } // marked for deoptimization; activations may remain
711 int get_state() const { return _state; } // raw state value: not_installed / in_use / not_entrant
712
713 void clear_unloading_state();
714 // Heuristically deduce an nmethod isn't worth keeping around
715 bool is_cold();
716 bool is_unloading();
717 void do_unloading(bool unloading_occurred);
718
719 bool make_in_use() {
746 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
747 void flush_dependencies();
748
749 template<typename T>
750 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
751 template<typename T>
752 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
753
754 bool has_unsafe_access() const { return _has_unsafe_access; }
755 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
756
757 bool has_monitors() const { return _has_monitors; }
758 void set_has_monitors(bool z) { _has_monitors = z; }
759
760 bool has_scoped_access() const { return _has_scoped_access; }
761 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
762
763 bool has_wide_vectors() const { return _has_wide_vectors; }
764 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
765
766 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
767 void set_has_flushed_dependencies(bool z) {
768 assert(!has_flushed_dependencies(), "should only happen once");
769 _has_flushed_dependencies = z;
770 }
771
772 bool is_unlinked() const { return _is_unlinked; }
773 void set_is_unlinked() {
774 assert(!_is_unlinked, "already unlinked");
775 _is_unlinked = true;
776 }
777
778 int comp_level() const { return _comp_level; }
779
780 // Support for oops in scopes and relocs:
781 // Note: index 0 is reserved for null.
782 oop oop_at(int index) const;
783 oop oop_at_phantom(int index) const; // phantom reference
784 oop* oop_addr_at(int index) const { // for GC
785 // relocation indexes are biased by 1 (because 0 is reserved)
1058
1059 // Logging
1060 void log_identity(xmlStream* log) const;
1061 void log_new_nmethod() const;
1062 void log_relocated_nmethod(nmethod* original) const;
1063 void log_state_change(InvalidationReason invalidation_reason) const;
1064
1065 // Prints block-level comments, including nmethod specific block labels:
1066 void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
1067 const char* nmethod_section_label(address pos) const;
1068
1069 // returns whether this nmethod has code comments.
1070 bool has_code_comment(address begin, address end);
1071 // Prints a comment for one native instruction (reloc info, pc desc)
1072 void print_code_comment_on(outputStream* st, int column, address begin, address end);
1073
1074 // tells if this compiled method is dependent on the given changes,
1075 // and the changes have invalidated it
1076 bool check_dependency_on(DepChange& changes);
1077
1078 // Fast breakpoint support. Tells if this compiled method is
1079 // dependent on the given method. Returns true if this nmethod
1080 // corresponds to the given method as well.
1081 bool is_dependent_on_method(Method* dependee);
1082
1083 // JVMTI's GetLocalInstance() support
1084 ByteSize native_receiver_sp_offset() {
1085 assert(is_native_method(), "sanity");
1086 return _native_receiver_sp_offset;
1087 }
1088 ByteSize native_basic_lock_sp_offset() {
1089 assert(is_native_method(), "sanity");
1090 return _native_basic_lock_sp_offset;
1091 }
1092
1093 // support for code generation
1094 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1095 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1096
1097 void metadata_do(MetadataClosure* f);
1098
1099 address call_instruction_address(address pc) const;
1100
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_CODE_NMETHOD_HPP
26 #define SHARE_CODE_NMETHOD_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "compiler/compilerDefinitions.hpp"
31 #include "oops/metadata.hpp"
32 #include "oops/method.hpp"
33 #include "runtime/mutexLocker.hpp"
34
35 class AbstractCompiler;
36 class CompiledDirectCall;
37 class CompiledIC;
38 class CompiledICData;
39 class CompileTask;
40 class DepChange;
41 class Dependencies;
42 class DirectiveSet;
43 class DebugInformationRecorder;
44 class ExceptionHandlerTable;
45 class ImplicitExceptionTable;
46 class JvmtiThreadState;
47 class MetadataClosure;
48 class NativeCallWrapper;
49 class OopIterateClosure;
50 class ScopeDesc;
200 };
201
202 // nmethod's read-only data
203 address _immutable_data;
204
205 PcDescContainer* _pc_desc_container;
206 ExceptionCache* volatile _exception_cache;
207
208 void* _gc_data;
209
210 struct oops_do_mark_link; // Opaque data type.
211 static nmethod* volatile _oops_do_mark_nmethods;
212 oops_do_mark_link* volatile _oops_do_mark_link;
213
214 CompiledICData* _compiled_ic_data;
215
216 // offsets for entry points
217 address _osr_entry_point; // entry point for on stack replacement
218 uint16_t _entry_offset; // entry point with class check
219 uint16_t _verified_entry_offset; // entry point without class check
220 // TODO: can these be uint16_t? They seem to rely on a -1 CodeOffset; can be changed later...
221 address _inline_entry_point; // inline type entry point (unpack all inline type args) with class check
222 address _verified_inline_entry_point; // inline type entry point (unpack all inline type args) without class check
223 address _verified_inline_ro_entry_point; // inline type entry point (unpack receiver only) without class check
224 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
225 int _immutable_data_size;
226
227 // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
228
229 int _skipped_instructions_size;
230
231 int _stub_offset;
232
233 // Offsets for different stubs section parts
234 int _exception_offset;
235 // All deoptees will resume execution at the location described by
236 // this offset.
237 int _deopt_handler_entry_offset;
238 // Offset (from insts_end) of the unwind handler if it exists
239 int16_t _unwind_handler_offset;
240 // Number of arguments passed on the stack
241 uint16_t _num_stack_arg_slots;
242
243 uint16_t _oops_size;
685 int skipped_instructions_size () const { return _skipped_instructions_size; }
686 int total_size() const;
687
688 // Containment
689 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
690 // Returns true if a given address is in the 'insts' section. The method
691 // insts_contains_inclusive() is end-inclusive.
692 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
693 bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
694 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
695 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
696 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
697 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
698 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
699 bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
700 bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
701
702 // entry points
703 address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
704 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
705 address inline_entry_point() const { return _inline_entry_point; } // inline type entry point (unpack all inline type args) with class check
706 address verified_inline_entry_point() const { return _verified_inline_entry_point; } // inline type entry point (unpack all inline type args) without class check
707 address verified_inline_ro_entry_point() const { return _verified_inline_ro_entry_point; } // inline type entry point (only unpack receiver) without class check
708
709 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
710 // allowed to advance state
711 in_use = 0, // executable nmethod
712 not_entrant = 1 // marked for deoptimization but activations may still exist
713 };
714
715 // flag accessing and manipulation
716 bool is_not_installed() const { return _state == not_installed; } // still under construction by the owner
717 bool is_in_use() const { return _state <= in_use; } // note: uses <=, so also true while _state == not_installed (-1)
718 bool is_not_entrant() const { return _state == not_entrant; } // marked for deoptimization; activations may remain
719 int get_state() const { return _state; } // raw state value: not_installed / in_use / not_entrant
720
721 void clear_unloading_state();
722 // Heuristically deduce an nmethod isn't worth keeping around
723 bool is_cold();
724 bool is_unloading();
725 void do_unloading(bool unloading_occurred);
726
727 bool make_in_use() {
754 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
755 void flush_dependencies();
756
757 template<typename T>
758 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
759 template<typename T>
760 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
761
762 bool has_unsafe_access() const { return _has_unsafe_access; }
763 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
764
765 bool has_monitors() const { return _has_monitors; }
766 void set_has_monitors(bool z) { _has_monitors = z; }
767
768 bool has_scoped_access() const { return _has_scoped_access; }
769 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
770
771 bool has_wide_vectors() const { return _has_wide_vectors; }
772 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
773
774 bool needs_stack_repair() const {
775 if (is_compiled_by_c1()) {
776 return method()->c1_needs_stack_repair();
777 } else if (is_compiled_by_c2()) {
778 return method()->c2_needs_stack_repair();
779 } else {
780 return false;
781 }
782 }
783
784 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
785 void set_has_flushed_dependencies(bool z) {
786 assert(!has_flushed_dependencies(), "should only happen once");
787 _has_flushed_dependencies = z;
788 }
789
790 bool is_unlinked() const { return _is_unlinked; }
791 void set_is_unlinked() {
792 assert(!_is_unlinked, "already unlinked");
793 _is_unlinked = true;
794 }
795
796 int comp_level() const { return _comp_level; }
797
798 // Support for oops in scopes and relocs:
799 // Note: index 0 is reserved for null.
800 oop oop_at(int index) const;
801 oop oop_at_phantom(int index) const; // phantom reference
802 oop* oop_addr_at(int index) const { // for GC
803 // relocation indexes are biased by 1 (because 0 is reserved)
1076
1077 // Logging
1078 void log_identity(xmlStream* log) const;
1079 void log_new_nmethod() const;
1080 void log_relocated_nmethod(nmethod* original) const;
1081 void log_state_change(InvalidationReason invalidation_reason) const;
1082
1083 // Prints block-level comments, including nmethod specific block labels:
1084 void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
1085 const char* nmethod_section_label(address pos) const;
1086
1087 // returns whether this nmethod has code comments.
1088 bool has_code_comment(address begin, address end);
1089 // Prints a comment for one native instruction (reloc info, pc desc)
1090 void print_code_comment_on(outputStream* st, int column, address begin, address end);
1091
1092 // tells if this compiled method is dependent on the given changes,
1093 // and the changes have invalidated it
1094 bool check_dependency_on(DepChange& changes);
1095
1096 // Tells if this compiled method is dependent on the given method.
1097 // Returns true if this nmethod corresponds to the given method as well.
1098 // It is used for fast breakpoint support and updating the calling convention
1099 // in case of mismatch.
1100 bool is_dependent_on_method(Method* dependee);
1101
1102 // JVMTI's GetLocalInstance() support
1103 ByteSize native_receiver_sp_offset() {
1104 assert(is_native_method(), "sanity");
1105 return _native_receiver_sp_offset;
1106 }
1107 ByteSize native_basic_lock_sp_offset() {
1108 assert(is_native_method(), "sanity");
1109 return _native_basic_lock_sp_offset;
1110 }
1111
1112 // support for code generation
1113 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1114 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1115
1116 void metadata_do(MetadataClosure* f);
1117
1118 address call_instruction_address(address pc) const;
1119
|