10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_CODE_NMETHOD_HPP
26 #define SHARE_CODE_NMETHOD_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32
33 class AbstractCompiler;
34 class CompiledDirectCall;
35 class CompiledIC;
36 class CompiledICData;
37 class CompileTask;
38 class DepChange;
39 class Dependencies;
40 class DirectiveSet;
41 class DebugInformationRecorder;
42 class ExceptionHandlerTable;
43 class ImplicitExceptionTable;
44 class JvmtiThreadState;
45 class MetadataClosure;
46 class NativeCallWrapper;
47 class OopIterateClosure;
48 class ScopeDesc;
49 class xmlStream;
198 };
199
200 // nmethod's read-only data
// Immutable part of the nmethod (size recorded in _immutable_data_size below).
// NOTE(review): presumably allocated separately from the mutable code blob — confirm.
201 address _immutable_data;
202
203 PcDescContainer* _pc_desc_container;
204 ExceptionCache* volatile _exception_cache;
205
// Opaque per-nmethod slot owned by the GC; accessed only through the
// gc_data<T>()/set_gc_data<T>() templates further down.
206 void* _gc_data;
207
208 struct oops_do_mark_link; // Opaque data type.
209 static nmethod* volatile _oops_do_mark_nmethods;
210 oops_do_mark_link* volatile _oops_do_mark_link;
211
212 CompiledICData* _compiled_ic_data;
213
214 // offsets for entry points
215 address _osr_entry_point; // entry point for on stack replacement
// The two 16-bit offsets below are relative to code_begin(); see entry_point()
// and verified_entry_point().
216 uint16_t _entry_offset; // entry point with class check
217 uint16_t _verified_entry_offset; // entry point without class check
218 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
219 int _immutable_data_size;
220
221 // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
222
223 int _skipped_instructions_size;
224
225 int _stub_offset;
226
227 // Offsets for different stubs section parts
228 int _exception_offset;
229 // All deoptee's will resume execution at this location described by
230 // this offset.
231 int _deopt_handler_offset;
232 // Offset (from insts_end) of the unwind handler if it exists
233 int16_t _unwind_handler_offset;
234 // Number of arguments passed on the stack
235 uint16_t _num_stack_arg_slots;
236
// NOTE(review): presumably an element count for the oops section, not bytes — confirm.
237 uint16_t _oops_size;
676 int skipped_instructions_size () const { return _skipped_instructions_size; }
677 int total_size() const;
678
679 // Containment
// All *_contains() checks below are begin-inclusive / end-exclusive, with the
// single exception of insts_contains_inclusive(), which also accepts insts_end().
680 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
681 // Returns true if a given address is in the 'insts' section. The method
682 // insts_contains_inclusive() is end-inclusive.
683 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
684 bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
685 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
686 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
687 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
688 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
689 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
690 bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
691 bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
692
693 // entry points
// Entry points are materialized from the 16-bit offsets stored relative to code_begin().
694 address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
695 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
696
// Lifecycle states stored in _state (a signed char); only forward transitions are made.
697 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
698 // allowed to advance state
699 in_use = 0, // executable nmethod
700 not_entrant = 1 // marked for deoptimization but activations may still exist
701 };
702
703 // flag accessing and manipulation
704 bool is_not_installed() const { return _state == not_installed; }
// NOTE: '<= in_use' makes this true for not_installed (-1) as well — presumably so the
// constructing owner can already treat the nmethod as usable; confirm against callers.
705 bool is_in_use() const { return _state <= in_use; }
706 bool is_not_entrant() const { return _state == not_entrant; }
707 int get_state() const { return _state; }
708
709 void clear_unloading_state();
710 // Heuristically deduce an nmethod isn't worth keeping around
711 bool is_cold();
712 bool is_unloading();
713 void do_unloading(bool unloading_occurred);
714
714
715 bool make_in_use() {
742 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
743 void flush_dependencies();
744
// Type-punned access to the GC-owned _gc_data slot. Callers must read the slot
// back with the same T it was stored with; no type check is possible here.
745 template<typename T>
746 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
747 template<typename T>
748 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
749
// Simple compilation-property flags recorded for this nmethod.
750 bool has_unsafe_access() const { return _has_unsafe_access; }
751 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
752
753 bool has_monitors() const { return _has_monitors; }
754 void set_has_monitors(bool z) { _has_monitors = z; }
755
756 bool has_scoped_access() const { return _has_scoped_access; }
757 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
758
759 bool has_wide_vectors() const { return _has_wide_vectors; }
760 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
761
762 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
// One-shot setter: asserts the flag has never been set before.
763 void set_has_flushed_dependencies(bool z) {
764 assert(!has_flushed_dependencies(), "should only happen once");
765 _has_flushed_dependencies = z;
766 }
767
768 bool is_unlinked() const { return _is_unlinked; }
// One-shot transition; an nmethod is unlinked at most once.
769 void set_is_unlinked() {
770 assert(!_is_unlinked, "already unlinked");
771 _is_unlinked = true;
772 }
773
774 int comp_level() const { return _comp_level; }
775
776 // Support for oops in scopes and relocs:
777 // Note: index 0 is reserved for null.
778 oop oop_at(int index) const;
779 oop oop_at_phantom(int index) const; // phantom reference
780 oop* oop_addr_at(int index) const { // for GC
781 // relocation indexes are biased by 1 (because 0 is reserved)
1037
1038 // Logging
1039 void log_identity(xmlStream* log) const;
1040 void log_new_nmethod() const;
1041 void log_relocated_nmethod(nmethod* original) const;
1042 void log_state_change(InvalidationReason invalidation_reason) const;
1043
1044 // Prints block-level comments, including nmethod specific block labels:
1045 void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
1046 const char* nmethod_section_label(address pos) const;
1047
1048 // returns whether this nmethod has code comments.
1049 bool has_code_comment(address begin, address end);
1050 // Prints a comment for one native instruction (reloc info, pc desc)
1051 void print_code_comment_on(outputStream* st, int column, address begin, address end);
1052
1053 // tells if this compiled method is dependent on the given changes,
1054 // and the changes have invalidated it
1055 bool check_dependency_on(DepChange& changes);
1056
1057 // Fast breakpoint support. Tells if this compiled method is
1058 // dependent on the given method. Returns true if this nmethod
1059 // corresponds to the given method as well.
1060 bool is_dependent_on_method(Method* dependee);
1061
1062 // JVMTI's GetLocalInstance() support
// Frame offsets below are only meaningful for native-method wrappers (asserted).
1063 ByteSize native_receiver_sp_offset() {
1064 assert(is_native_method(), "sanity");
1065 return _native_receiver_sp_offset;
1066 }
1067 ByteSize native_basic_lock_sp_offset() {
1068 assert(is_native_method(), "sanity");
1069 return _native_basic_lock_sp_offset;
1070 }
1071
1072 // support for code generation
// Field offsets so generated code can address nmethod state directly.
1073 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1074 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1075
1076 void metadata_do(MetadataClosure* f);
1077
1078 address call_instruction_address(address pc) const;
1079
// ==== extraction artifact: end of first chunk; a second version of the same header follows ====
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_CODE_NMETHOD_HPP
26 #define SHARE_CODE_NMETHOD_HPP
27
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "compiler/compilerDefinitions.hpp"
31 #include "oops/metadata.hpp"
32 #include "oops/method.hpp"
33
34 class AbstractCompiler;
35 class CompiledDirectCall;
36 class CompiledIC;
37 class CompiledICData;
38 class CompileTask;
39 class DepChange;
40 class Dependencies;
41 class DirectiveSet;
42 class DebugInformationRecorder;
43 class ExceptionHandlerTable;
44 class ImplicitExceptionTable;
45 class JvmtiThreadState;
46 class MetadataClosure;
47 class NativeCallWrapper;
48 class OopIterateClosure;
49 class ScopeDesc;
50 class xmlStream;
199 };
200
201 // nmethod's read-only data
// Immutable part of the nmethod (size recorded in _immutable_data_size below).
202 address _immutable_data;
203
204 PcDescContainer* _pc_desc_container;
205 ExceptionCache* volatile _exception_cache;
206
// Opaque per-nmethod slot owned by the GC; accessed via gc_data<T>()/set_gc_data<T>().
207 void* _gc_data;
208
209 struct oops_do_mark_link; // Opaque data type.
210 static nmethod* volatile _oops_do_mark_nmethods;
211 oops_do_mark_link* volatile _oops_do_mark_link;
212
213 CompiledICData* _compiled_ic_data;
214
215 // offsets for entry points
216 address _osr_entry_point; // entry point for on stack replacement
217 uint16_t _entry_offset; // entry point with class check
218 uint16_t _verified_entry_offset; // entry point without class check
219 // TODO: could these be uint16_t offsets like _entry_offset above? They appear to
220 // rely on a -1 ("no entry") CodeOffset sentinel, which an unsigned offset cannot
221 // represent; revisit later.
// Inline-type (value class) entry points; stored as absolute addresses, unlike
// the offset-encoded entries above.
222 address _inline_entry_point; // inline type entry point (unpack all inline type args) with class check
223 address _verified_inline_entry_point; // inline type entry point (unpack all inline type args) without class check
224 address _verified_inline_ro_entry_point; // inline type entry point (unpack receiver only) without class check
225 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
226 int _immutable_data_size;
227
228 // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
229
230 int _skipped_instructions_size;
231
232 int _stub_offset;
233
234 // Offsets for different stubs section parts
235 int _exception_offset;
236 // All deoptee's will resume execution at this location described by
237 // this offset.
238 int _deopt_handler_offset;
239 // Offset (from insts_end) of the unwind handler if it exists
240 int16_t _unwind_handler_offset;
241 // Number of arguments passed on the stack
242 uint16_t _num_stack_arg_slots;
243
// NOTE(review): presumably an element count for the oops section, not bytes — confirm.
244 uint16_t _oops_size;
681 int skipped_instructions_size () const { return _skipped_instructions_size; }
682 int total_size() const;
683
684 // Containment
// All *_contains() checks below are begin-inclusive / end-exclusive, with the
// single exception of insts_contains_inclusive(), which also accepts insts_end().
685 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
686 // Returns true if a given address is in the 'insts' section. The method
687 // insts_contains_inclusive() is end-inclusive.
688 bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
689 bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
690 bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
691 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
692 bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
693 bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
694 bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
695 bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
696 bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
697
698 // entry points
// Normal entries are offset-encoded from code_begin(); the inline-type entries
// below return the stored absolute addresses.
699 address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
700 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
701 address inline_entry_point() const { return _inline_entry_point; } // inline type entry point (unpack all inline type args)
702 address verified_inline_entry_point() const { return _verified_inline_entry_point; } // inline type entry point (unpack all inline type args) without class check
703 address verified_inline_ro_entry_point() const { return _verified_inline_ro_entry_point; } // inline type entry point (only unpack receiver) without class check
704
// Lifecycle states stored in _state (a signed char); only forward transitions are made.
705 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
706 // allowed to advance state
707 in_use = 0, // executable nmethod
708 not_entrant = 1 // marked for deoptimization but activations may still exist
709 };
710
711 // flag accessing and manipulation
712 bool is_not_installed() const { return _state == not_installed; }
// NOTE: '<= in_use' makes this true for not_installed (-1) as well — presumably so the
// constructing owner can already treat the nmethod as usable; confirm against callers.
713 bool is_in_use() const { return _state <= in_use; }
714 bool is_not_entrant() const { return _state == not_entrant; }
715 int get_state() const { return _state; }
716
717 void clear_unloading_state();
718 // Heuristically deduce an nmethod isn't worth keeping around
719 bool is_cold();
720 bool is_unloading();
721 void do_unloading(bool unloading_occurred);
722
723 bool make_in_use() {
750 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
751 void flush_dependencies();
752
// Type-punned access to the GC-owned _gc_data slot. Callers must read the slot
// back with the same T it was stored with; no type check is possible here.
753 template<typename T>
754 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
755 template<typename T>
756 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
757
// Simple compilation-property flags recorded for this nmethod.
758 bool has_unsafe_access() const { return _has_unsafe_access; }
759 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
760
761 bool has_monitors() const { return _has_monitors; }
762 void set_has_monitors(bool z) { _has_monitors = z; }
763
764 bool has_scoped_access() const { return _has_scoped_access; }
765 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
766
767 bool has_wide_vectors() const { return _has_wide_vectors; }
768 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
769
770 bool needs_stack_repair() const {
771 if (is_compiled_by_c1()) {
772 return method()->c1_needs_stack_repair();
773 } else if (is_compiled_by_c2()) {
774 return method()->c2_needs_stack_repair();
775 } else {
776 return false;
777 }
778 }
779
780 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
// One-shot setter: asserts the flag has never been set before.
781 void set_has_flushed_dependencies(bool z) {
782 assert(!has_flushed_dependencies(), "should only happen once");
783 _has_flushed_dependencies = z;
784 }
785
786 bool is_unlinked() const { return _is_unlinked; }
// One-shot transition; an nmethod is unlinked at most once.
787 void set_is_unlinked() {
788 assert(!_is_unlinked, "already unlinked");
789 _is_unlinked = true;
790 }
791
792 int comp_level() const { return _comp_level; }
793
794 // Support for oops in scopes and relocs:
795 // Note: index 0 is reserved for null.
796 oop oop_at(int index) const;
797 oop oop_at_phantom(int index) const; // phantom reference
798 oop* oop_addr_at(int index) const { // for GC
799 // relocation indexes are biased by 1 (because 0 is reserved)
1055
1056 // Logging
1057 void log_identity(xmlStream* log) const;
1058 void log_new_nmethod() const;
1059 void log_relocated_nmethod(nmethod* original) const;
1060 void log_state_change(InvalidationReason invalidation_reason) const;
1061
1062 // Prints block-level comments, including nmethod specific block labels:
1063 void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
1064 const char* nmethod_section_label(address pos) const;
1065
1066 // returns whether this nmethod has code comments.
1067 bool has_code_comment(address begin, address end);
1068 // Prints a comment for one native instruction (reloc info, pc desc)
1069 void print_code_comment_on(outputStream* st, int column, address begin, address end);
1070
1071 // tells if this compiled method is dependent on the given changes,
1072 // and the changes have invalidated it
1073 bool check_dependency_on(DepChange& changes);
1074
1075 // Tells if this compiled method is dependent on the given method.
1076 // Returns true if this nmethod corresponds to the given method as well.
1077 // It is used for fast breakpoint support and updating the calling convention
1078 // in case of mismatch.
1079 bool is_dependent_on_method(Method* dependee);
1080
1081 // JVMTI's GetLocalInstance() support
// Frame offsets below are only meaningful for native-method wrappers (asserted).
1082 ByteSize native_receiver_sp_offset() {
1083 assert(is_native_method(), "sanity");
1084 return _native_receiver_sp_offset;
1085 }
1086 ByteSize native_basic_lock_sp_offset() {
1087 assert(is_native_method(), "sanity");
1088 return _native_basic_lock_sp_offset;
1089 }
1090
1091 // support for code generation
// Field offsets so generated code can address nmethod state directly.
1092 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1093 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1094
1095 void metadata_do(MetadataClosure* f);
1096
1097 address call_instruction_address(address pc) const;
1098
1098
// ==== extraction artifact: end of second chunk / end of extracted content ====