29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32 #include "runtime/mutexLocker.hpp"
33
34 class AbstractCompiler;
35 class CompiledDirectCall;
36 class CompiledIC;
37 class CompiledICData;
38 class CompileTask;
39 class DepChange;
40 class Dependencies;
41 class DirectiveSet;
42 class DebugInformationRecorder;
43 class ExceptionHandlerTable;
44 class ImplicitExceptionTable;
45 class JvmtiThreadState;
46 class MetadataClosure;
47 class NativeCallWrapper;
48 class OopIterateClosure;
49 class ScopeDesc;
50 class xmlStream;
51
52 // This class is used internally by nmethods, to cache
53 // exception/pc/handler information.
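//
// A minimal sketch of the intended lookup pattern (hypothetical caller code,
// not part of this header): on an exception raised at a given pc, the nmethod
// consults the cache first and only falls back to the handler table on a miss.
//
//   address handler = nm->handler_for_exception_and_pc(exception, pc);
//   if (handler == nullptr) {
//     handler = ...;   // compute from the exception handler table
//     nm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }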
54
55 class ExceptionCache : public CHeapObj<mtCode> {
56 friend class VMStructs;
57 private:
58 enum { cache_size = 16 };
59 Klass* _exception_type;
60 address _pc[cache_size];
61 address _handler[cache_size];
62 volatile int _count;
63 ExceptionCache* volatile _next;
64 ExceptionCache* _purge_list_next;
65
66 inline address pc_at(int index);
67 void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
68
161 class FailedSpeculation;
162 class JVMCINMethodData;
163 #endif
164
165 class nmethod : public CodeBlob {
166 friend class VMStructs;
167 friend class JVMCIVMStructs;
168 friend class CodeCache; // scavengable oops
169 friend class JVMCINMethodData;
170 friend class DeoptimizationScope;
171
172 #define ImmutableDataRefCountSize ((int)sizeof(int))
173
174 private:
175
176 // Used to track the deoptimization handshake in which this method will be deoptimized.
177 uint64_t _deoptimization_generation;
178
179 uint64_t _gc_epoch;
180
181 Method* _method;
182
183 // To reduce header size, union fields whose usages do not overlap.
184 union {
185 // To support simple linked-list chaining of nmethods:
186 nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
187 struct {
188 // These are used for compiled synchronized native methods to
189 // locate the owner and stack slot for the BasicLock. They are
190 // needed because there is no debug information for compiled native
191 // wrappers and the oop maps are insufficient to allow
192 // frame::retrieve_receiver() to work. Currently they are expected
193 // to be byte offsets from the Java stack pointer for maximum code
194 // sharing between platforms. JVMTI's GetLocalInstance() uses these
195 // offsets to find the receiver for non-static native wrapper frames.
196 ByteSize _native_receiver_sp_offset;
197 ByteSize _native_basic_lock_sp_offset;
198 };
199 };
200
244
245 // Offsets into the immutable data section
246 // (_dependencies_offset == 0, so no separate field is needed for it)
247 uint16_t _nul_chk_table_offset;
248 uint16_t _handler_table_offset; // This table could be big in C1 code
249 int _scopes_pcs_offset;
250 int _scopes_data_offset;
251 #if INCLUDE_JVMCI
252 int _speculations_offset;
253 #endif
254 int _immutable_data_ref_count_offset;
255
256 // location in frame (offset from sp) where deopt can store the original
257 // pc during a deopt.
258 int _orig_pc_offset;
259
260 int _compile_id; // which compilation made this nmethod
261 CompLevel _comp_level; // compilation level (s1)
262 CompilerType _compiler_type; // which compiler made this nmethod (u1)
263
264 // Local state used to keep track of whether unloading is happening or not
265 volatile uint8_t _is_unloading_state;
266
267 // Protected by NMethodState_lock
268 volatile signed char _state; // {not_installed, in_use, not_entrant}
269
270 // set during construction
271 uint8_t _has_unsafe_access:1, // May fault due to unsafe access.
272 _has_wide_vectors:1, // Preserve wide vectors at safepoints
273 _has_monitors:1, // Fastpath monitor detection for continuations
274 _has_scoped_access:1,        // used for the shared scope closure (scopedMemoryAccess.cpp)
275 _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
276 _is_unlinked:1, // mark during class unloading
277 _load_reported:1; // used by jvmti to track if an event has been posted for this nmethod
278
279 enum DeoptimizationStatus : u1 {
280 not_marked,
281 deoptimize,
282 deoptimize_noupdate,
283 deoptimize_done
284 };
285
286 volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
287
288 DeoptimizationStatus deoptimization_status() const {
289 return AtomicAccess::load(&_deoptimization_status);
290 }
291
292 // Initialize fields to their default values
293 void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets);
294
295 // Post initialization
296 void post_init();
297
456 // Attempt Unclaimed -> N|WR transition. Returns true if successful.
457 bool oops_do_try_claim_weak_request();
458
459 // Attempt Unclaimed -> N|SD transition. Returns the current link.
460 oops_do_mark_link* oops_do_try_claim_strong_done();
461 // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
462 nmethod* oops_do_try_add_to_list_as_weak_done();
463
464 // Attempt X|WD -> N|SR transition. Returns the current link.
465 oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
466 // Attempt X|WD -> X|SD transition. Returns true if successful.
467 bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);
468
469 // Do the N|SD -> X|SD transition.
470 void oops_do_add_to_list_as_strong_done();
471
472 // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
473 // transitions).
474 void oops_do_set_strong_done(nmethod* old_head);
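  // Summary (editorial, inferred from the transition comments above): the link
  // value acts as a small claim state machine used during GC oops iteration.
  // "N"/"X" denote whether the stored link is null or some other nmethod, and
  // WR/WD/SR/SD stand for Weak Request/Weak Done/Strong Request/Strong Done:
  //
  //   Unclaimed -> N|WR -> X|WD -> N|SR -> X|SD
  //   Unclaimed -> N|WR -> X|WD -> X|SD
  //   Unclaimed -> N|SD -> X|SD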
475
476 public:
477 // If you change anything in this enum please patch
478 // vmStructs_jvmci.cpp accordingly.
479 enum class InvalidationReason : s1 {
480 NOT_INVALIDATED = -1,
481 C1_CODEPATCH,
482 C1_DEOPTIMIZE,
483 C1_DEOPTIMIZE_FOR_PATCHING,
484 C1_PREDICATE_FAILED_TRAP,
485 CI_REPLAY,
486 UNLOADING,
487 UNLOADING_COLD,
488 JVMCI_INVALIDATE,
489 JVMCI_MATERIALIZE_VIRTUAL_OBJECT,
490 JVMCI_REPLACED_WITH_NEW_CODE,
491 JVMCI_REPROFILE,
492 MARKED_FOR_DEOPTIMIZATION,
493 MISSING_EXCEPTION_HANDLER,
494 NOT_USED,
495 OSR_INVALIDATION_BACK_BRANCH,
496 OSR_INVALIDATION_FOR_COMPILING_WITH_C1,
576 );
577
578 // Relocate the nmethod to the code heap identified by code_blob_type.
579 // Returns nullptr if the code heap does not have enough space, the
580 // nmethod is unrelocatable, or the nmethod is invalidated during relocation;
581 // otherwise returns the relocated nmethod. The original nmethod will be marked not entrant.
582 nmethod* relocate(CodeBlobType code_blob_type);
583
584 static nmethod* new_native_nmethod(const methodHandle& method,
585 int compile_id,
586 CodeBuffer *code_buffer,
587 int vep_offset,
588 int frame_complete,
589 int frame_size,
590 ByteSize receiver_sp_offset,
591 ByteSize basic_lock_sp_offset,
592 OopMapSet* oop_maps,
593 int exception_handler = -1);
594
595 Method* method () const { return _method; }
596 bool is_native_method() const { return _method != nullptr && _method->is_native(); }
597 bool is_java_method () const { return _method != nullptr && !_method->is_native(); }
598 bool is_osr_method () const { return _entry_bci != InvocationEntryBci; }
599
600 bool is_relocatable();
601
602 // Compiler task identification. Note that all OSR methods
603 // are numbered in an independent sequence if CICountOSR is true,
604 // and native method wrappers are also numbered independently if
605 // CICountNative is true.
606 int compile_id() const { return _compile_id; }
607 const char* compile_kind() const;
608
609 inline bool is_compiled_by_c1 () const { return _compiler_type == compiler_c1; }
610 inline bool is_compiled_by_c2 () const { return _compiler_type == compiler_c2; }
611 inline bool is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
612 CompilerType compiler_type () const { return _compiler_type; }
613 const char* compiler_name () const;
614
615 // boundaries for different parts
616 address consts_begin () const { return content_begin(); }
617 address consts_end () const { return code_begin() ; }
618 address insts_begin () const { return code_begin() ; }
619 address insts_end () const { return header_begin() + _stub_offset ; }
620 address stub_begin () const { return header_begin() + _stub_offset ; }
621 address stub_end () const { return code_end() ; }
622 address exception_begin () const { return header_begin() + _exception_offset ; }
623 address deopt_handler_entry () const { return header_begin() + _deopt_handler_entry_offset ; }
624 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
625 oop* oops_begin () const { return (oop*) data_begin(); }
626 oop* oops_end () const { return (oop*) data_end(); }
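  // Summary of the section boundaries implied by the accessors above (derived
  // from this header only; the exact contents of each slice are defined by the
  // nmethod constructor):
  //
  //   consts : [content_begin(), code_begin())
  //   insts  : [code_begin(),    stub_begin())
  //   stubs  : [stub_begin(),    code_end())
  //   oops   : [data_begin(),    data_end())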
627
628 // mutable data
629 Metadata** metadata_begin () const { return (Metadata**) (mutable_data_begin() + _relocation_size); }
630 #if INCLUDE_JVMCI
631 Metadata** metadata_end () const { return (Metadata**) (mutable_data_begin() + _relocation_size + _metadata_size); }
632 address jvmci_data_begin () const { return mutable_data_begin() + _relocation_size + _metadata_size; }
633 address jvmci_data_end () const { return mutable_data_end(); }
634 #else
635 Metadata** metadata_end () const { return (Metadata**) mutable_data_end(); }
636 #endif
637
638 // immutable data
639 address immutable_data_begin () const { return _immutable_data; }
640 address immutable_data_end () const { return _immutable_data + _immutable_data_size ; }
641 address dependencies_begin () const { return _immutable_data; }
642 address dependencies_end () const { return _immutable_data + _nul_chk_table_offset; }
643 address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; }
644 address nul_chk_table_end () const { return _immutable_data + _handler_table_offset; }
645 address handler_table_begin () const { return _immutable_data + _handler_table_offset; }
646 address handler_table_end () const { return _immutable_data + _scopes_pcs_offset ; }
647 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset) ; }
648 PcDesc* scopes_pcs_end () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
649 address scopes_data_begin () const { return _immutable_data + _scopes_data_offset ; }
650
651 #if INCLUDE_JVMCI
652 address scopes_data_end () const { return _immutable_data + _speculations_offset ; }
653 address speculations_begin () const { return _immutable_data + _speculations_offset ; }
654 address speculations_end () const { return _immutable_data + _immutable_data_ref_count_offset ; }
655 #else
656 address scopes_data_end () const { return _immutable_data + _immutable_data_ref_count_offset ; }
657 #endif
658 address immutable_data_ref_count_begin () const { return _immutable_data + _immutable_data_ref_count_offset ; }
699 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
700
701 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
702 // allowed to advance state
703 in_use = 0, // executable nmethod
704 not_entrant = 1 // marked for deoptimization but activations may still exist
705 };
706
707 // flag accessing and manipulation
708 bool is_not_installed() const { return _state == not_installed; }
709 bool is_in_use() const { return _state <= in_use; }
710 bool is_not_entrant() const { return _state == not_entrant; }
711 int get_state() const { return _state; }
712
713 void clear_unloading_state();
714 // Heuristically deduce whether an nmethod is no longer worth keeping around
715 bool is_cold();
716 bool is_unloading();
717 void do_unloading(bool unloading_occurred);
718
719 bool make_in_use() {
720 return try_transition(in_use);
721 }
722 // Make the nmethod non-entrant. The nmethod will continue to be
723 // alive. It is used when an uncommon trap happens. Returns true
724 // if this thread changed the state of the nmethod or false if
725 // another thread performed the transition.
726 bool make_not_entrant(InvalidationReason invalidation_reason);
727 bool make_not_used() { return make_not_entrant(InvalidationReason::NOT_USED); }
728
729 bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
730 bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
731 void set_deoptimized_done();
732
733 bool update_recompile_counts() const {
734 // Update recompile counts when either the update is explicitly requested (deoptimize)
735 // or the nmethod is not marked for deoptimization at all (not_marked).
736 // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
737 DeoptimizationStatus status = deoptimization_status();
738 return status != deoptimize_noupdate && status != deoptimize_done;
739 }
740
741 // tells whether frames described by this nmethod can be deoptimized
742 // note: native wrappers cannot be deoptimized.
743 bool can_be_deoptimized() const { return is_java_method(); }
744
745 bool has_dependencies() { return dependencies_size() != 0; }
746 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
747 void flush_dependencies();
748
749 template<typename T>
750 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
751 template<typename T>
752 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
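  // Sketch of how a collector might use the hooks above ('MyGCNMethodData' is
  // a hypothetical type, not part of this header):
  //
  //   nm->set_gc_data(new MyGCNMethodData());
  //   ...
  //   MyGCNMethodData* data = nm->gc_data<MyGCNMethodData>();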
753
754 bool has_unsafe_access() const { return _has_unsafe_access; }
755 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
756
757 bool has_monitors() const { return _has_monitors; }
758 void set_has_monitors(bool z) { _has_monitors = z; }
759
760 bool has_scoped_access() const { return _has_scoped_access; }
761 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
762
763 bool has_wide_vectors() const { return _has_wide_vectors; }
764 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
765
766 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
767 void set_has_flushed_dependencies(bool z) {
768 assert(!has_flushed_dependencies(), "should only happen once");
769 _has_flushed_dependencies = z;
770 }
771
772 bool is_unlinked() const { return _is_unlinked; }
773 void set_is_unlinked() {
774 assert(!_is_unlinked, "already unlinked");
775 _is_unlinked = true;
776 }
777
778 int comp_level() const { return _comp_level; }
779
780 // Support for oops in scopes and relocs:
781 // Note: index 0 is reserved for null.
782 oop oop_at(int index) const;
783 oop oop_at_phantom(int index) const; // phantom reference
784 oop* oop_addr_at(int index) const { // for GC
785 // relocation indexes are biased by 1 (because 0 is reserved)
786 assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
787 return &oops_begin()[index - 1];
788 }
789
790 // Support for meta data in scopes and relocs:
791 // Note: index 0 is reserved for null.
792 Metadata* metadata_at(int index) const { return index == 0 ? nullptr: *metadata_addr_at(index); }
793 Metadata** metadata_addr_at(int index) const { // for GC
794 // relocation indexes are biased by 1 (because 0 is reserved)
795 assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
796 return &metadata_begin()[index - 1];
797 }
798
799 void copy_values(GrowableArray<jobject>* oops);
800 void copy_values(GrowableArray<Metadata*>* metadata);
801 void copy_values(GrowableArray<address>* metadata) {} // Nothing to do
802
803 // Relocation support
804 private:
805 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
806 inline void initialize_immediate_oop(oop* dest, jobject handle);
807
808 protected:
809 address oops_reloc_begin() const;
810
811 public:
812 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
813 void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); }
814
815 bool is_at_poll_return(address pc);
816 bool is_at_poll_or_poll_return(address pc);
817
818 protected:
819 // Exception cache support
820 // Note: _exception_cache may be read and cleaned concurrently.
821 ExceptionCache* exception_cache() const { return _exception_cache; }
822 ExceptionCache* exception_cache_acquire() const;
823
824 public:
825 address handler_for_exception_and_pc(Handle exception, address pc);
826 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
827 void clean_exception_cache();
828
829 void add_exception_cache_entry(ExceptionCache* new_entry);
830 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
831
832
833 // Deopt
834 // Return true if the PC is one we would expect if the frame is being deopted.
984 assert(*ref_count > 0, "Must be positive");
985 return --(*ref_count);
986 }
987
988 static void add_delayed_compiled_method_load_event(nmethod* nm) NOT_CDS_RETURN;
989
990 public:
991 // ScopeDesc retrieval operation
992 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
993 // pc_desc_near returns the first PcDesc at or after the given pc.
994 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
995
996 // ScopeDesc for an instruction
997 ScopeDesc* scope_desc_at(address pc);
998 ScopeDesc* scope_desc_near(address pc);
999
1000 // copying of debugging information
1001 void copy_scopes_pcs(PcDesc* pcs, int count);
1002 void copy_scopes_data(address buffer, int size);
1003
1004 int orig_pc_offset() { return _orig_pc_offset; }
1005
1006 // Post successful compilation
1007 void post_compiled_method(CompileTask* task);
1008
1009 // jvmti support:
1010 void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);
1011
1012 // verify operations
1013 void verify();
1014 void verify_scopes();
1015 void verify_interrupt_point(address interrupt_point, bool is_inline_cache);
1016
1017 // Disassemble this nmethod with additional debug information, e.g. information about blocks.
1018 void decode2(outputStream* st) const;
1019 void print_constant_pool(outputStream* st);
1020
1021 // Avoid hiding of parent's 'decode(outputStream*)' method.
1022 void decode(outputStream* st) const { decode2(st); } // just delegate here.
1023
1024 // AOT cache support
1025 static void post_delayed_compiled_method_load_events() NOT_CDS_RETURN;
1026
1027 // printing support
1028 void print_on_impl(outputStream* st) const;
1029 void print_code();
1030 void print_value_on_impl(outputStream* st) const;
1031 void print_code_snippet(outputStream* st, address addr) const;
1032
1033 #if defined(SUPPORT_DATA_STRUCTS)
1034 // print output in opt build for disassembler library
1035 void print_relocations() PRODUCT_RETURN;
1036 void print_pcs_on(outputStream* st);
1037 void print_scopes() { print_scopes_on(tty); }
1038 void print_scopes_on(outputStream* st) PRODUCT_RETURN;
1039 void print_handler_table();
1040 void print_nul_chk_table();
1041 void print_recorded_oop(int log_n, int index);
1042 void print_recorded_oops();
1043 void print_recorded_metadata();
1044
1045 void print_oops(outputStream* st); // oops from the underlying CodeBlob.
1046 void print_metadata(outputStream* st); // metadata in metadata pool.
1047 #else
1048 void print_pcs_on(outputStream* st) { return; }
1049 #endif
1050
1051 void print_calls(outputStream* st) PRODUCT_RETURN;
1052 static void print_statistics() PRODUCT_RETURN;
1053
1054 void maybe_print_nmethod(const DirectiveSet* directive);
1055 void print_nmethod(bool print_code);
1084 ByteSize native_receiver_sp_offset() {
1085 assert(is_native_method(), "sanity");
1086 return _native_receiver_sp_offset;
1087 }
1088 ByteSize native_basic_lock_sp_offset() {
1089 assert(is_native_method(), "sanity");
1090 return _native_basic_lock_sp_offset;
1091 }
1092
1093 // support for code generation
1094 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1095 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1096
1097 void metadata_do(MetadataClosure* f);
1098
1099 address call_instruction_address(address pc) const;
1100
1101 void make_deoptimized();
1102 void finalize_relocations();
1103
1104 class Vptr : public CodeBlob::Vptr {
1105 void print_on(const CodeBlob* instance, outputStream* st) const override {
1106 ttyLocker ttyl;
1107 instance->as_nmethod()->print_on_impl(st);
1108 }
1109 void print_value_on(const CodeBlob* instance, outputStream* st) const override {
1110 instance->as_nmethod()->print_value_on_impl(st);
1111 }
1112 };
1113
1114 static const Vptr _vpntr;
1115 };
1116
1117 struct NMethodMarkingScope : StackObj {
1118 NMethodMarkingScope() {
1119 nmethod::oops_do_marking_prologue();
1120 }
1121 ~NMethodMarkingScope() {
1122 nmethod::oops_do_marking_epilogue();
1123 }
1124 };
1125
1126 #endif // SHARE_CODE_NMETHOD_HPP
|
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32 #include "runtime/mutexLocker.hpp"
33
34 class AbstractCompiler;
35 class CompiledDirectCall;
36 class CompiledIC;
37 class CompiledICData;
38 class CompileTask;
39 class DepChange;
40 class Dependencies;
41 class DirectiveSet;
42 class DebugInformationRecorder;
43 class ExceptionHandlerTable;
44 class ImplicitExceptionTable;
45 class JvmtiThreadState;
46 class MetadataClosure;
47 class NativeCallWrapper;
48 class OopIterateClosure;
49 class AOTCodeReader;
50 class AOTCodeEntry;
51 class ScopeDesc;
52 class xmlStream;
53
54 // This class is used internally by nmethods, to cache
55 // exception/pc/handler information.
56
57 class ExceptionCache : public CHeapObj<mtCode> {
58 friend class VMStructs;
59 private:
60 enum { cache_size = 16 };
61 Klass* _exception_type;
62 address _pc[cache_size];
63 address _handler[cache_size];
64 volatile int _count;
65 ExceptionCache* volatile _next;
66 ExceptionCache* _purge_list_next;
67
68 inline address pc_at(int index);
69 void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
70
163 class FailedSpeculation;
164 class JVMCINMethodData;
165 #endif
166
167 class nmethod : public CodeBlob {
168 friend class VMStructs;
169 friend class JVMCIVMStructs;
170 friend class CodeCache; // scavengable oops
171 friend class JVMCINMethodData;
172 friend class DeoptimizationScope;
173
174 #define ImmutableDataRefCountSize ((int)sizeof(int))
175
176 private:
177
178 // Used to track the deoptimization handshake in which this method will be deoptimized.
179 uint64_t _deoptimization_generation;
180
181 uint64_t _gc_epoch;
182
183 // Profiling counter used to identify the hottest nmethods to record into the CDS archive
184 volatile uint64_t _method_profiling_count;
185
186 Method* _method;
187
188 // To reduce header size, union fields whose usages do not overlap.
189 union {
190 // To support simple linked-list chaining of nmethods:
191 nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
192 struct {
193 // These are used for compiled synchronized native methods to
194 // locate the owner and stack slot for the BasicLock. They are
195 // needed because there is no debug information for compiled native
196 // wrappers and the oop maps are insufficient to allow
197 // frame::retrieve_receiver() to work. Currently they are expected
198 // to be byte offsets from the Java stack pointer for maximum code
199 // sharing between platforms. JVMTI's GetLocalInstance() uses these
200 // offsets to find the receiver for non-static native wrapper frames.
201 ByteSize _native_receiver_sp_offset;
202 ByteSize _native_basic_lock_sp_offset;
203 };
204 };
205
249
250 // Offsets into the immutable data section
251 // (_dependencies_offset == 0, so no separate field is needed for it)
252 uint16_t _nul_chk_table_offset;
253 uint16_t _handler_table_offset; // This table could be big in C1 code
254 int _scopes_pcs_offset;
255 int _scopes_data_offset;
256 #if INCLUDE_JVMCI
257 int _speculations_offset;
258 #endif
259 int _immutable_data_ref_count_offset;
260
261 // location in frame (offset from sp) where deopt can store the original
262 // pc during a deopt.
263 int _orig_pc_offset;
264
265 int _compile_id; // which compilation made this nmethod
266 CompLevel _comp_level; // compilation level (s1)
267 CompilerType _compiler_type; // which compiler made this nmethod (u1)
268
269 AOTCodeEntry* _aot_code_entry;
270
271 bool _used; // has this nmethod ever been invoked?
272
273 // Local state used to keep track of whether unloading is happening or not
274 volatile uint8_t _is_unloading_state;
275
276 // Protected by NMethodState_lock
277 volatile signed char _state; // {not_installed, in_use, not_entrant}
278
279 // set during construction
280 uint8_t _has_unsafe_access:1, // May fault due to unsafe access.
281 _has_wide_vectors:1, // Preserve wide vectors at safepoints
282 _has_monitors:1, // Fastpath monitor detection for continuations
283 _has_scoped_access:1,        // used for the shared scope closure (scopedMemoryAccess.cpp)
284 _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
285 _is_unlinked:1, // mark during class unloading
286 _load_reported:1, // used by jvmti to track if an event has been posted for this nmethod
287 _preloaded:1,
288 _has_clinit_barriers:1;
289
290 enum DeoptimizationStatus : u1 {
291 not_marked,
292 deoptimize,
293 deoptimize_noupdate,
294 deoptimize_done
295 };
296
297 volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
298
299 DeoptimizationStatus deoptimization_status() const {
300 return AtomicAccess::load(&_deoptimization_status);
301 }
302
303 // Initialize fields to their default values
304 void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets);
305
306 // Post initialization
307 void post_init();
308
467 // Attempt Unclaimed -> N|WR transition. Returns true if successful.
468 bool oops_do_try_claim_weak_request();
469
470 // Attempt Unclaimed -> N|SD transition. Returns the current link.
471 oops_do_mark_link* oops_do_try_claim_strong_done();
472 // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
473 nmethod* oops_do_try_add_to_list_as_weak_done();
474
475 // Attempt X|WD -> N|SR transition. Returns the current link.
476 oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
477 // Attempt X|WD -> X|SD transition. Returns true if successful.
478 bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);
479
480 // Do the N|SD -> X|SD transition.
481 void oops_do_add_to_list_as_strong_done();
482
483 // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
484 // transitions).
485 void oops_do_set_strong_done(nmethod* old_head);
486
487 void record_nmethod_dependency();
488
489 nmethod* restore(address code_cache_buffer,
490 const methodHandle& method,
491 int compile_id,
492 address reloc_data,
493 GrowableArray<Handle>& oop_list,
494 GrowableArray<Metadata*>& metadata_list,
495 ImmutableOopMapSet* oop_maps,
496 address immutable_data,
497 GrowableArray<Handle>& reloc_imm_oop_list,
498 GrowableArray<Metadata*>& reloc_imm_metadata_list,
499 AOTCodeReader* aot_code_reader);
500
501 public:
502 // Create an nmethod from an archived nmethod in the AOT code cache
503 static nmethod* new_nmethod(nmethod* archived_nm,
504 const methodHandle& method,
505 AbstractCompiler* compiler,
506 int compile_id,
507 address reloc_data,
508 GrowableArray<Handle>& oop_list,
509 GrowableArray<Metadata*>& metadata_list,
510 ImmutableOopMapSet* oop_maps,
511 address immutable_data,
512 GrowableArray<Handle>& reloc_imm_oop_list,
513 GrowableArray<Metadata*>& reloc_imm_metadata_list,
514 AOTCodeReader* aot_code_reader);
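  // Expected load path (a sketch inferred from the declarations above, not an
  // authoritative description): an AOTCodeReader reads an archived nmethod plus
  // its relocation data, oop/metadata lists and immutable data, then calls
  // new_nmethod(...), which uses restore(...) to materialize the nmethod in a
  // code cache buffer.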
515
516 // If you change anything in this enum please patch
517 // vmStructs_jvmci.cpp accordingly.
518 enum class InvalidationReason : s1 {
519 NOT_INVALIDATED = -1,
520 C1_CODEPATCH,
521 C1_DEOPTIMIZE,
522 C1_DEOPTIMIZE_FOR_PATCHING,
523 C1_PREDICATE_FAILED_TRAP,
524 CI_REPLAY,
525 UNLOADING,
526 UNLOADING_COLD,
527 JVMCI_INVALIDATE,
528 JVMCI_MATERIALIZE_VIRTUAL_OBJECT,
529 JVMCI_REPLACED_WITH_NEW_CODE,
530 JVMCI_REPROFILE,
531 MARKED_FOR_DEOPTIMIZATION,
532 MISSING_EXCEPTION_HANDLER,
533 NOT_USED,
534 OSR_INVALIDATION_BACK_BRANCH,
535 OSR_INVALIDATION_FOR_COMPILING_WITH_C1,
615 );
616
617 // Relocate the nmethod to the code heap identified by code_blob_type.
618 // Returns nullptr if the code heap does not have enough space, the
619 // nmethod is unrelocatable, or the nmethod is invalidated during relocation;
620 // otherwise returns the relocated nmethod. The original nmethod will be marked not entrant.
621 nmethod* relocate(CodeBlobType code_blob_type);
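  // A minimal usage sketch (hypothetical caller; CodeBlobType::MethodNonProfiled
  // is just an example target heap). Callers must handle a nullptr result:
  //
  //   nmethod* moved = nm->relocate(CodeBlobType::MethodNonProfiled);
  //   if (moved != nullptr) {
  //     // 'nm' is now not entrant; continue with 'moved'
  //   }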
622
623 static nmethod* new_native_nmethod(const methodHandle& method,
624 int compile_id,
625 CodeBuffer *code_buffer,
626 int vep_offset,
627 int frame_complete,
628 int frame_size,
629 ByteSize receiver_sp_offset,
630 ByteSize basic_lock_sp_offset,
631 OopMapSet* oop_maps,
632 int exception_handler = -1);
633
634 Method* method () const { return _method; }
635 uint16_t entry_bci () const { return _entry_bci; }
636 bool is_native_method() const { return _method != nullptr && _method->is_native(); }
637 bool is_java_method () const { return _method != nullptr && !_method->is_native(); }
638 bool is_osr_method () const { return _entry_bci != InvocationEntryBci; }
639
640 int orig_pc_offset() { return _orig_pc_offset; }
641 bool is_relocatable();
642
643 // Compiler task identification. Note that all OSR methods
644 // are numbered in an independent sequence if CICountOSR is true,
645 // and native method wrappers are also numbered independently if
646 // CICountNative is true.
647 int compile_id() const { return _compile_id; }
648 int comp_level() const { return _comp_level; }
649 const char* compile_kind() const;
650
651 inline bool is_compiled_by_c1 () const { return _compiler_type == compiler_c1; }
652 inline bool is_compiled_by_c2 () const { return _compiler_type == compiler_c2; }
653 inline bool is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
654 CompilerType compiler_type () const { return _compiler_type; }
655 const char* compiler_name () const;
656
657 // boundaries for different parts
658 address consts_begin () const { return content_begin(); }
659 address consts_end () const { return code_begin() ; }
660 address insts_begin () const { return code_begin() ; }
661 address insts_end () const { return header_begin() + _stub_offset ; }
662 address stub_begin () const { return header_begin() + _stub_offset ; }
663 address stub_end () const { return code_end() ; }
664 address exception_begin () const { return header_begin() + _exception_offset ; }
665 address deopt_handler_entry () const { return header_begin() + _deopt_handler_entry_offset ; }
666 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
667 oop* oops_begin () const { return (oop*) data_begin(); }
668 oop* oops_end () const { return (oop*) data_end(); }
669
670 // mutable data
671 Metadata** metadata_begin () const { return (Metadata**) (mutable_data_begin() + _relocation_size); }
672 #if INCLUDE_JVMCI
673 Metadata** metadata_end () const { return (Metadata**) (mutable_data_begin() + _relocation_size + _metadata_size); }
674 address jvmci_data_begin () const { return mutable_data_begin() + _relocation_size + _metadata_size; }
675 address jvmci_data_end () const { return mutable_data_end(); }
676 #else
677 Metadata** metadata_end () const { return (Metadata**) mutable_data_end(); }
678 #endif
679
680 // immutable data
681 void set_immutable_data(address data) { _immutable_data = data; }
682 address immutable_data_begin () const { return _immutable_data; }
683 address immutable_data_end () const { return _immutable_data + _immutable_data_size ; }
684 address dependencies_begin () const { return _immutable_data; }
685 address dependencies_end () const { return _immutable_data + _nul_chk_table_offset; }
686 address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; }
687 address nul_chk_table_end () const { return _immutable_data + _handler_table_offset; }
688 address handler_table_begin () const { return _immutable_data + _handler_table_offset; }
689 address handler_table_end () const { return _immutable_data + _scopes_pcs_offset ; }
690 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset) ; }
691 PcDesc* scopes_pcs_end () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
692 address scopes_data_begin () const { return _immutable_data + _scopes_data_offset ; }
693
694 #if INCLUDE_JVMCI
695 address scopes_data_end () const { return _immutable_data + _speculations_offset ; }
696 address speculations_begin () const { return _immutable_data + _speculations_offset ; }
697 address speculations_end () const { return _immutable_data + _immutable_data_ref_count_offset ; }
698 #else
699 address scopes_data_end () const { return _immutable_data + _immutable_data_ref_count_offset ; }
700 #endif
701 address immutable_data_ref_count_begin () const { return _immutable_data + _immutable_data_ref_count_offset ; }
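  // Layout of the immutable data section implied by the accessors above
  // (offsets are relative to _immutable_data; the speculations slice is present
  // only when INCLUDE_JVMCI is set):
  //
  //   [ dependencies | nul_chk_table | handler_table | scopes_pcs | scopes_data
  //     | (speculations) | immutable_data_ref_count ]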
742 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
743
744 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
745 // allowed to advance state
746 in_use = 0, // executable nmethod
747 not_entrant = 1 // marked for deoptimization but activations may still exist
748 };
749
750 // flag accessing and manipulation
751 bool is_not_installed() const { return _state == not_installed; }
752 bool is_in_use() const { return _state <= in_use; }
753 bool is_not_entrant() const { return _state == not_entrant; }
754 int get_state() const { return _state; }
755
756 void clear_unloading_state();
757 // Heuristically deduce whether an nmethod is no longer worth keeping around
758 bool is_cold();
759 bool is_unloading();
760 void do_unloading(bool unloading_occurred);
761
762 void inc_method_profiling_count();
763 uint64_t method_profiling_count();
764
765 bool make_in_use() {
766 return try_transition(in_use);
767 }
768 // Make the nmethod non-entrant. The nmethod will continue to be
769 // alive. It is used when an uncommon trap happens. Returns true
770 // if this thread changed the state of the nmethod or false if
771 // another thread performed the transition.
772 bool make_not_entrant(InvalidationReason invalidation_reason, bool keep_aot_entry = false);
773 bool make_not_used() { return make_not_entrant(InvalidationReason::NOT_USED, true /* keep AOT entry */); }
774
775 bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
776 bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
777 void set_deoptimized_done();
778
779 bool update_recompile_counts() const {
780 // Update recompile counts when either the update is explicitly requested (deoptimize)
781 // or the nmethod is not marked for deoptimization at all (not_marked).
782 // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
783 DeoptimizationStatus status = deoptimization_status();
784 return status != deoptimize_noupdate && status != deoptimize_done;
785 }
786
787 // tells whether frames described by this nmethod can be deoptimized
788 // note: native wrappers cannot be deoptimized.
789 bool can_be_deoptimized() const { return is_java_method(); }
790
791 bool has_dependencies() { return dependencies_size() != 0; }
792 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
793 void flush_dependencies();
794
795 template<typename T>
796 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
797 template<typename T>
798 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
799
800 bool has_unsafe_access() const { return _has_unsafe_access; }
801 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
802
803 bool has_monitors() const { return _has_monitors; }
804 void set_has_monitors(bool z) { _has_monitors = z; }
805
806 bool has_scoped_access() const { return _has_scoped_access; }
807 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
808
809 bool has_wide_vectors() const { return _has_wide_vectors; }
810 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
811
812 bool has_clinit_barriers() const { return _has_clinit_barriers; }
813 void set_has_clinit_barriers(bool z) { _has_clinit_barriers = z; }
814
815 bool preloaded() const { return _preloaded; }
816 void set_preloaded(bool z) { _preloaded = z; }
817
818 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
819 void set_has_flushed_dependencies(bool z) {
820 assert(!has_flushed_dependencies(), "should only happen once");
821 _has_flushed_dependencies = z;
822 }
823
824 bool is_unlinked() const { return _is_unlinked; }
825 void set_is_unlinked() {
826 assert(!_is_unlinked, "already unlinked");
827 _is_unlinked = true;
828 }
829
830 bool used() const { return _used; }
831 void set_used() { _used = true; }
832
833 bool is_aot() const { return _aot_code_entry != nullptr; }
834 void set_aot_code_entry(AOTCodeEntry* entry) { _aot_code_entry = entry; }
835 AOTCodeEntry* aot_code_entry() const { return _aot_code_entry; }
836
837 // Support for oops in scopes and relocs:
838 // Note: index 0 is reserved for null.
839 oop oop_at(int index) const;
840 oop oop_at_phantom(int index) const; // phantom reference
841 oop* oop_addr_at(int index) const { // for GC
842 // relocation indexes are biased by 1 (because 0 is reserved)
843 assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
844 return &oops_begin()[index - 1];
845 }
846
847 // Support for meta data in scopes and relocs:
848 // Note: index 0 is reserved for null.
849 Metadata* metadata_at(int index) const { return index == 0 ? nullptr: *metadata_addr_at(index); }
850 Metadata** metadata_addr_at(int index) const { // for GC
851 // relocation indexes are biased by 1 (because 0 is reserved)
852 assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
853 return &metadata_begin()[index - 1];
854 }
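  // Sketch of the 1-biased indexing used above (hypothetical caller code):
  // indices recorded in relocations and debug info use 0 for null, so pool
  // slot i holds the entry for index i + 1.
  //
  //   oop o       = nm->oop_at(index);       // nullptr when index == 0
  //   Metadata* m = nm->metadata_at(index);  // nullptr when index == 0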
855
856 void copy_values(GrowableArray<Handle>* array);
857 void copy_values(GrowableArray<jobject>* oops);
858 void copy_values(GrowableArray<Metadata*>* metadata);
859 void copy_values(GrowableArray<address>* metadata) {} // Nothing to do
860
861 // Relocation support
862 private:
863 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
864 inline void initialize_immediate_oop(oop* dest, jobject handle);
865
866 protected:
867 address oops_reloc_begin() const;
868
869 public:
870 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
871 void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); }
872
873 void create_reloc_immediates_list(JavaThread* thread, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list);
874
875 bool is_at_poll_return(address pc);
876 bool is_at_poll_or_poll_return(address pc);
877
878 protected:
879 // Exception cache support
880 // Note: _exception_cache may be read and cleaned concurrently.
881 ExceptionCache* exception_cache() const { return _exception_cache; }
882 ExceptionCache* exception_cache_acquire() const;
883
884 public:
885 address handler_for_exception_and_pc(Handle exception, address pc);
886 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
887 void clean_exception_cache();
888
889 void add_exception_cache_entry(ExceptionCache* new_entry);
890 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
891
892
893 // Deopt
894 // Return true if the PC is one we would expect if the frame is being deopted.
1044 assert(*ref_count > 0, "Must be positive");
1045 return --(*ref_count);
1046 }
1047
1048 static void add_delayed_compiled_method_load_event(nmethod* nm) NOT_CDS_RETURN;
1049
1050 public:
1051 // ScopeDesc retrieval operation
1052 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
1053 // pc_desc_near returns the first PcDesc at or after the given pc.
1054 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
1055
1056 // ScopeDesc for an instruction
1057 ScopeDesc* scope_desc_at(address pc);
1058 ScopeDesc* scope_desc_near(address pc);
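  // Typical use (hypothetical caller code): map a frame pc back to the
  // Java-level scope chain recorded at compile time, e.g. during
  // deoptimization or stack walking.
  //
  //   ScopeDesc* scope = nm->scope_desc_at(pc);  // pc must have a PcDesc
  //   // scope->sender() walks outward through inlined scopes (assumed API)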
1059
1060 // copying of debugging information
1061 void copy_scopes_pcs(PcDesc* pcs, int count);
1062 void copy_scopes_data(address buffer, int size);
1063
1064 // Post successful compilation
1065 void post_compiled_method(CompileTask* task);
1066
1067 // jvmti support:
1068 void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);
1069
1070 // verify operations
1071 void verify();
1072 void verify_scopes();
1073 void verify_interrupt_point(address interrupt_point, bool is_inline_cache);
1074
1075 // Disassemble this nmethod with additional debug information, e.g. information about blocks.
1076 void decode2(outputStream* st) const;
1077 void print_constant_pool(outputStream* st);
1078
1079 // Avoid hiding of parent's 'decode(outputStream*)' method.
1080 void decode(outputStream* st) const { decode2(st); } // just delegate here.
1081
1082 // AOT cache support
1083 static void post_delayed_compiled_method_load_events() NOT_CDS_RETURN;
1084
1085 // printing support
1086 void print_on_impl(outputStream* st) const;
1087 void print_code();
1088 void print_value_on_impl(outputStream* st) const;
1089 void print_code_snippet(outputStream* st, address addr) const;
1090
1091 #if defined(SUPPORT_DATA_STRUCTS)
1092 // print output in opt build for disassembler library
1093 void print_relocations_on(outputStream* st) PRODUCT_RETURN;
1094 void print_pcs_on(outputStream* st);
1095 void print_scopes() { print_scopes_on(tty); }
1096 void print_scopes_on(outputStream* st) PRODUCT_RETURN;
1097 void print_handler_table();
1098 void print_nul_chk_table();
1099 void print_recorded_oop(int log_n, int index);
1100 void print_recorded_oops();
1101 void print_recorded_metadata();
1102
1103 void print_oops(outputStream* st); // oops from the underlying CodeBlob.
1104 void print_metadata(outputStream* st); // metadata in metadata pool.
1105 #else
1106 void print_pcs_on(outputStream* st) { return; }
1107 #endif
1108
1109 void print_calls(outputStream* st) PRODUCT_RETURN;
1110 static void print_statistics() PRODUCT_RETURN;
1111
1112 void maybe_print_nmethod(const DirectiveSet* directive);
1113 void print_nmethod(bool print_code);
1142 ByteSize native_receiver_sp_offset() {
1143 assert(is_native_method(), "sanity");
1144 return _native_receiver_sp_offset;
1145 }
1146 ByteSize native_basic_lock_sp_offset() {
1147 assert(is_native_method(), "sanity");
1148 return _native_basic_lock_sp_offset;
1149 }
1150
1151 // support for code generation
1152 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1153 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1154
1155 void metadata_do(MetadataClosure* f);
1156
1157 address call_instruction_address(address pc) const;
1158
1159 void make_deoptimized();
1160 void finalize_relocations();
1161
1162 void prepare_for_archiving_impl();
1163
1164 class Vptr : public CodeBlob::Vptr {
1165 void print_on(const CodeBlob* instance, outputStream* st) const override {
1166 ttyLocker ttyl;
1167 instance->as_nmethod()->print_on_impl(st);
1168 }
1169 void print_value_on(const CodeBlob* instance, outputStream* st) const override {
1170 instance->as_nmethod()->print_value_on_impl(st);
1171 }
1172 void prepare_for_archiving(CodeBlob* instance) const override {
1173 ((nmethod*)instance)->prepare_for_archiving_impl();
1174 };
1175 };
1176
1177 static const Vptr _vpntr;
1178 };
1179
1180 struct NMethodMarkingScope : StackObj {
1181 NMethodMarkingScope() {
1182 nmethod::oops_do_marking_prologue();
1183 }
1184 ~NMethodMarkingScope() {
1185 nmethod::oops_do_marking_epilogue();
1186 }
1187 };
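// Minimal usage sketch (hypothetical GC-side code): the scope brackets an
// oops-do marking phase so that the prologue/epilogue pair always runs, even
// on early return.
//
//   {
//     NMethodMarkingScope marking_scope;
//     // ... claim nmethods and process their oops ...
//   }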
1188
1189 #endif // SHARE_CODE_NMETHOD_HPP