28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32
33 class AbstractCompiler;
34 class CompiledDirectCall;
35 class CompiledIC;
36 class CompiledICData;
37 class CompileTask;
38 class DepChange;
39 class Dependencies;
40 class DirectiveSet;
41 class DebugInformationRecorder;
42 class ExceptionHandlerTable;
43 class ImplicitExceptionTable;
44 class JvmtiThreadState;
45 class MetadataClosure;
46 class NativeCallWrapper;
47 class OopIterateClosure;
48 class AOTCodeReader;
49 class AOTCodeEntry;
50 class ScopeDesc;
51 class xmlStream;
52
53 // This class is used internally by nmethods, to cache
54 // exception/pc/handler information.
55
56 class ExceptionCache : public CHeapObj<mtCode> {
57 friend class VMStructs;
58 private:
59 enum { cache_size = 16 };
60 Klass* _exception_type;
61 address _pc[cache_size];
62 address _handler[cache_size];
63 volatile int _count;
64 ExceptionCache* volatile _next;
65 ExceptionCache* _purge_list_next;
66
67 inline address pc_at(int index);
68 void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
69
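  // A minimal usage sketch (not normative; 'nm', 'exception', 'ret_pc' and
  // 'computed_handler' are assumed caller-side context): each ExceptionCache node
  // covers one exception Klass and records up to cache_size (pc, handler) pairs in
  // the parallel _pc/_handler arrays, with _count giving the number of valid pairs.
  // The nmethod-side entry points declared later in this file consult it roughly like:
  //
  //   address handler = nm->handler_for_exception_and_pc(exception, ret_pc);
  //   if (handler == nullptr) {
  //     // slow path computes the handler, then caches it for the next throw
  //     nm->add_handler_for_exception_and_pc(exception, ret_pc, computed_handler);
  //   }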
162 class FailedSpeculation;
163 class JVMCINMethodData;
164 #endif
165
166 class nmethod : public CodeBlob {
167 friend class VMStructs;
168 friend class JVMCIVMStructs;
169 friend class CodeCache; // scavengable oops
170 friend class JVMCINMethodData;
171 friend class DeoptimizationScope;
172
173 #define ImmutableDataReferencesCounterSize ((int)sizeof(int))
174
175 private:
176
177 // Used to track in which deoptimize handshake this method will be deoptimized.
178 uint64_t _deoptimization_generation;
179
180 uint64_t _gc_epoch;
181
182 // Profiling counter used to figure out the hottest nmethods to record into CDS
183 volatile uint64_t _method_profiling_count;
184
185 Method* _method;
186
187 // To reduce the header size, union fields whose usages do not overlap.
188 union {
189 // To support simple linked-list chaining of nmethods:
190 nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
191 struct {
192 // These are used for compiled synchronized native methods to
193 // locate the owner and stack slot for the BasicLock. They are
194 // needed because there is no debug information for compiled native
195 // wrappers and the oop maps are insufficient to allow
196 // frame::retrieve_receiver() to work. Currently they are expected
197 // to be byte offsets from the Java stack pointer for maximum code
198 // sharing between platforms. JVMTI's GetLocalInstance() uses these
199 // offsets to find the receiver for non-static native wrapper frames.
200 ByteSize _native_receiver_sp_offset;
201 ByteSize _native_basic_lock_sp_offset;
202 };
203 };
204
247 #endif
248
249 // Offsets into the immutable data section.
250 // _dependencies_offset == 0 (the dependencies are stored at the start of the section)
251 uint16_t _nul_chk_table_offset;
252 uint16_t _handler_table_offset; // This table could be big in C1 code
253 int _scopes_pcs_offset;
254 int _scopes_data_offset;
255 #if INCLUDE_JVMCI
256 int _speculations_offset;
257 #endif
258
259 // Location in the frame (offset from sp) where deopt can store the original
260 // pc during a deopt.
261 int _orig_pc_offset;
262
263 int _compile_id; // which compilation made this nmethod
264 CompLevel _comp_level; // compilation level (s1)
265 CompilerType _compiler_type; // which compiler made this nmethod (u1)
266
267 AOTCodeEntry* _aot_code_entry;
268
269 bool _used; // has this nmethod ever been invoked?
270
271 // Local state used to keep track of whether unloading is happening or not
272 volatile uint8_t _is_unloading_state;
273
274 // Protected by NMethodState_lock
275 volatile signed char _state; // {not_installed, in_use, not_entrant}
276
277 // set during construction
278 uint8_t _has_unsafe_access:1, // May fault due to unsafe access.
279 _has_wide_vectors:1, // Preserve wide vectors at safepoints
280 _has_monitors:1, // Fastpath monitor detection for continuations
281 _has_scoped_access:1, // used for the shared scope closure (scopedMemoryAccess.cpp)
282 _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
283 _is_unlinked:1, // mark during class unloading
284 _load_reported:1, // used by jvmti to track if an event has been posted for this nmethod
285 _preloaded:1, // nmethod was preloaded from the AOT code cache during startup
286 _has_clinit_barriers:1; // AOT code compiled with class initialization barriers
287
288 enum DeoptimizationStatus : u1 {
289 not_marked,
290 deoptimize,
291 deoptimize_noupdate,
292 deoptimize_done
293 };
294
295 volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
296
297 DeoptimizationStatus deoptimization_status() const {
298 return AtomicAccess::load(&_deoptimization_status);
299 }
300
301 // Initialize fields to their default values
302 void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets);
303
304 // Post initialization
305 void post_init();
306
465 // Attempt Unclaimed -> N|WR transition. Returns true if successful.
466 bool oops_do_try_claim_weak_request();
467
468 // Attempt Unclaimed -> N|SD transition. Returns the current link.
469 oops_do_mark_link* oops_do_try_claim_strong_done();
470 // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
471 nmethod* oops_do_try_add_to_list_as_weak_done();
472
473 // Attempt X|WD -> N|SR transition. Returns the current link.
474 oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
475 // Attempt X|WD -> X|SD transition. Returns true if successful.
476 bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);
477
478 // Do the N|SD -> X|SD transition.
479 void oops_do_add_to_list_as_strong_done();
480
481 // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
482 // transitions).
483 void oops_do_set_strong_done(nmethod* old_head);
484
485 void record_nmethod_dependency();
486
487 nmethod* restore(address code_cache_buffer,
488 const methodHandle& method,
489 int compile_id,
490 address reloc_data,
491 GrowableArray<Handle>& oop_list,
492 GrowableArray<Metadata*>& metadata_list,
493 ImmutableOopMapSet* oop_maps,
494 address immutable_data,
495 GrowableArray<Handle>& reloc_imm_oop_list,
496 GrowableArray<Metadata*>& reloc_imm_metadata_list,
497 AOTCodeReader* aot_code_reader);
498
499 public:
500 // Create an nmethod from an archived nmethod stored in the AOT code cache.
501 static nmethod* new_nmethod(nmethod* archived_nm,
502 const methodHandle& method,
503 AbstractCompiler* compiler,
504 int compile_id,
505 address reloc_data,
506 GrowableArray<Handle>& oop_list,
507 GrowableArray<Metadata*>& metadata_list,
508 ImmutableOopMapSet* oop_maps,
509 address immutable_data,
510 GrowableArray<Handle>& reloc_imm_oop_list,
511 GrowableArray<Metadata*>& reloc_imm_metadata_list,
512 AOTCodeReader* aot_code_reader);
513
514 // If you change anything in this enum please patch
515 // vmStructs_jvmci.cpp accordingly.
516 enum class InvalidationReason : s1 {
517 NOT_INVALIDATED = -1,
518 C1_CODEPATCH,
519 C1_DEOPTIMIZE,
520 C1_DEOPTIMIZE_FOR_PATCHING,
521 C1_PREDICATE_FAILED_TRAP,
522 CI_REPLAY,
523 UNLOADING,
524 UNLOADING_COLD,
525 JVMCI_INVALIDATE,
526 JVMCI_MATERIALIZE_VIRTUAL_OBJECT,
527 JVMCI_REPLACED_WITH_NEW_CODE,
528 JVMCI_REPROFILE,
529 MARKED_FOR_DEOPTIMIZATION,
530 MISSING_EXCEPTION_HANDLER,
531 NOT_USED,
532 OSR_INVALIDATION_BACK_BRANCH,
533 OSR_INVALIDATION_FOR_COMPILING_WITH_C1,
610 );
611
612 // Relocate the nmethod to the code heap identified by code_blob_type.
613 // Returns the relocated nmethod, or nullptr if the code heap does not have
614 // enough space, the nmethod is not relocatable, or the nmethod is invalidated
615 // during relocation. The original nmethod will be marked not entrant.
616 nmethod* relocate(CodeBlobType code_blob_type);
617
618 static nmethod* new_native_nmethod(const methodHandle& method,
619 int compile_id,
620 CodeBuffer *code_buffer,
621 int vep_offset,
622 int frame_complete,
623 int frame_size,
624 ByteSize receiver_sp_offset,
625 ByteSize basic_lock_sp_offset,
626 OopMapSet* oop_maps,
627 int exception_handler = -1);
628
629 Method* method () const { return _method; }
630 uint16_t entry_bci () const { return _entry_bci; }
631 bool is_native_method() const { return _method != nullptr && _method->is_native(); }
632 bool is_java_method () const { return _method != nullptr && !_method->is_native(); }
633 bool is_osr_method () const { return _entry_bci != InvocationEntryBci; }
634
635 int orig_pc_offset() { return _orig_pc_offset; }
636 bool is_relocatable();
637
638 // Compiler task identification. Note that all OSR methods
639 // are numbered in an independent sequence if CICountOSR is true,
640 // and native method wrappers are also numbered independently if
641 // CICountNative is true.
642 int compile_id() const { return _compile_id; }
643 int comp_level() const { return _comp_level; }
644 const char* compile_kind() const;
645
646 inline bool is_compiled_by_c1 () const { return _compiler_type == compiler_c1; }
647 inline bool is_compiled_by_c2 () const { return _compiler_type == compiler_c2; }
648 inline bool is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
649 CompilerType compiler_type () const { return _compiler_type; }
650 const char* compiler_name () const;
651
652 // boundaries for different parts
653 address consts_begin () const { return content_begin(); }
654 address consts_end () const { return code_begin() ; }
655 address insts_begin () const { return code_begin() ; }
656 address insts_end () const { return header_begin() + _stub_offset ; }
657 address stub_begin () const { return header_begin() + _stub_offset ; }
658 address stub_end () const { return code_end() ; }
659 address exception_begin () const { return header_begin() + _exception_offset ; }
660 address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
661 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
662 oop* oops_begin () const { return (oop*) data_begin(); }
663 oop* oops_end () const { return (oop*) data_end(); }
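  // Rough layout implied by the accessors above (a sketch derived from this header,
  // not a normative description): consts_end() == insts_begin() == code_begin(),
  // the stubs run from header_begin() + _stub_offset to code_end(), and the oop
  // pool occupies the data part:
  //
  //   [header][constants][instructions][stubs]     <- content/code part
  //   [oops_begin() .. oops_end())                 <- data part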
664
665 // mutable data
666 Metadata** metadata_begin () const { return (Metadata**) (mutable_data_begin() + _relocation_size); }
667 #if INCLUDE_JVMCI
668 Metadata** metadata_end () const { return (Metadata**) (mutable_data_begin() + _relocation_size + _metadata_size); }
669 address jvmci_data_begin () const { return mutable_data_begin() + _relocation_size + _metadata_size; }
670 address jvmci_data_end () const { return mutable_data_end(); }
671 #else
672 Metadata** metadata_end () const { return (Metadata**) mutable_data_end(); }
673 #endif
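  // Mutable data layout implied by the accessors above (sketch):
  //
  //   mutable_data_begin()
  //     [relocation info, _relocation_size bytes]
  //     [metadata_begin() .. metadata_end())
  //     [jvmci_data_begin() .. jvmci_data_end())   // JVMCI builds only
  //   mutable_data_end()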
674
675 // immutable data
676 void set_immutable_data(address data) { _immutable_data = data; }
677 address immutable_data_begin () const { return _immutable_data; }
678 address immutable_data_end () const { return _immutable_data + _immutable_data_size ; }
679 address dependencies_begin () const { return _immutable_data; }
680 address dependencies_end () const { return _immutable_data + _nul_chk_table_offset; }
681 address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; }
682 address nul_chk_table_end () const { return _immutable_data + _handler_table_offset; }
683 address handler_table_begin () const { return _immutable_data + _handler_table_offset; }
684 address handler_table_end () const { return _immutable_data + _scopes_pcs_offset ; }
685 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset) ; }
686 PcDesc* scopes_pcs_end () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
687 address scopes_data_begin () const { return _immutable_data + _scopes_data_offset ; }
688
689 #if INCLUDE_JVMCI
690 address scopes_data_end () const { return _immutable_data + _speculations_offset ; }
691 address speculations_begin () const { return _immutable_data + _speculations_offset ; }
692 address speculations_end () const { return immutable_data_end() - ImmutableDataReferencesCounterSize ; }
693 #else
694 address scopes_data_end () const { return immutable_data_end() - ImmutableDataReferencesCounterSize ; }
695 #endif
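  // Immutable data layout implied by the accessors above (sketch; offsets are
  // relative to _immutable_data, and the dependencies are stored at offset 0):
  //
  //   [dependencies][nul_chk_table][handler_table][scopes_pcs][scopes_data]
  //   [speculations]                               // JVMCI builds only
  //   [references counter]                         // last ImmutableDataReferencesCounterSize bytes
  //   immutable_data_end()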
696
738 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
739
740 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
741 // allowed to advance state
742 in_use = 0, // executable nmethod
743 not_entrant = 1 // marked for deoptimization but activations may still exist
744 };
745
746 // flag accessing and manipulation
747 bool is_not_installed() const { return _state == not_installed; }
748 bool is_in_use() const { return _state <= in_use; }
749 bool is_not_entrant() const { return _state == not_entrant; }
750 int get_state() const { return _state; }
751
752 void clear_unloading_state();
753 // Heuristically deduce whether this nmethod is no longer worth keeping around
754 bool is_cold();
755 bool is_unloading();
756 void do_unloading(bool unloading_occurred);
757
758 void inc_method_profiling_count();
759 uint64_t method_profiling_count();
760
761 bool make_in_use() {
762 return try_transition(in_use);
763 }
764 // Make the nmethod non-entrant. The nmethod will continue to be
765 // alive. It is used when an uncommon trap happens. Returns true
766 // if this thread changed the state of the nmethod or false if
767 // another thread performed the transition.
768 bool make_not_entrant(InvalidationReason invalidation_reason, bool keep_aot_entry = false);
769 bool make_not_used() { return make_not_entrant(InvalidationReason::NOT_USED, true /* keep AOT entry */); }
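  // A minimal lifecycle sketch (derived from the state enum and the transitions
  // above; 'nm' is an assumed nmethod*, and states are only expected to advance):
  //
  //   // freshly constructed: nm is not_installed
  //   nm->make_in_use();                                                    // -> in_use
  //   nm->make_not_entrant(InvalidationReason::MARKED_FOR_DEOPTIMIZATION);  // -> not_entrant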
770
771 bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
772 bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
773 void set_deoptimized_done();
774
775 bool update_recompile_counts() const {
776 // Update recompile counts when either the update is explicitly requested (deoptimize)
777 // or the nmethod is not marked for deoptimization at all (not_marked).
778 // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
779 DeoptimizationStatus status = deoptimization_status();
780 return status != deoptimize_noupdate && status != deoptimize_done;
781 }
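  // For reference, the mapping encoded above:
  //   not_marked          -> counts are updated (uncommon trap on an unmarked nmethod)
  //   deoptimize          -> counts are updated (update explicitly requested)
  //   deoptimize_noupdate -> counts are not updated
  //   deoptimize_done     -> counts are not updated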
782
783 // tells whether frames described by this nmethod can be deoptimized
784 // note: native wrappers cannot be deoptimized.
785 bool can_be_deoptimized() const { return is_java_method(); }
786
787 bool has_dependencies() { return dependencies_size() != 0; }
788 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
789 void flush_dependencies();
790
791 template<typename T>
792 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
793 template<typename T>
794 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
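  // A minimal usage sketch (the type below is hypothetical, not part of this
  // header): a collector can hang its own per-nmethod bookkeeping off _gc_data.
  //
  //   struct MyGCNMethodData { bool armed; };      // hypothetical GC-side type
  //   nm->set_gc_data<MyGCNMethodData>(new MyGCNMethodData());
  //   MyGCNMethodData* data = nm->gc_data<MyGCNMethodData>();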
795
796 bool has_unsafe_access() const { return _has_unsafe_access; }
797 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
798
799 bool has_monitors() const { return _has_monitors; }
800 void set_has_monitors(bool z) { _has_monitors = z; }
801
802 bool has_scoped_access() const { return _has_scoped_access; }
803 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
804
805 bool has_wide_vectors() const { return _has_wide_vectors; }
806 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
807
808 bool has_clinit_barriers() const { return _has_clinit_barriers; }
809 void set_has_clinit_barriers(bool z) { _has_clinit_barriers = z; }
810
811 bool preloaded() const { return _preloaded; }
812 void set_preloaded(bool z) { _preloaded = z; }
813
814 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
815 void set_has_flushed_dependencies(bool z) {
816 assert(!has_flushed_dependencies(), "should only happen once");
817 _has_flushed_dependencies = z;
818 }
819
820 bool is_unlinked() const { return _is_unlinked; }
821 void set_is_unlinked() {
822 assert(!_is_unlinked, "already unlinked");
823 _is_unlinked = true;
824 }
825
826 bool used() const { return _used; }
827 void set_used() { _used = true; }
828
829 bool is_aot() const { return _aot_code_entry != nullptr; }
830 void set_aot_code_entry(AOTCodeEntry* entry) { _aot_code_entry = entry; }
831 AOTCodeEntry* aot_code_entry() const { return _aot_code_entry; }
832
833 // Support for oops in scopes and relocs:
834 // Note: index 0 is reserved for null.
835 oop oop_at(int index) const;
836 oop oop_at_phantom(int index) const; // phantom reference
837 oop* oop_addr_at(int index) const { // for GC
838 // relocation indexes are biased by 1 (because 0 is reserved)
839 assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
840 return &oops_begin()[index - 1];
841 }
842
843 // Support for meta data in scopes and relocs:
844 // Note: index 0 is reserved for null.
845 Metadata* metadata_at(int index) const { return index == 0 ? nullptr: *metadata_addr_at(index); }
846 Metadata** metadata_addr_at(int index) const { // for GC
847 // relocation indexes are biased by 1 (because 0 is reserved)
848 assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
849 return &metadata_begin()[index - 1];
850 }
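  // A minimal sketch of the 1-based indexing used above ('nm' is an assumed
  // nmethod*; index 0 always denotes null):
  //
  //   for (int i = 1; i <= nm->oops_count(); i++) {
  //     oop* slot = nm->oop_addr_at(i);            // == &oops_begin()[i - 1]
  //   }
  //   Metadata* m = nm->metadata_at(0);            // nullptr by convention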
851
852 void copy_values(GrowableArray<Handle>* array);
853 void copy_values(GrowableArray<jobject>* oops);
854 void copy_values(GrowableArray<Metadata*>* metadata);
855 void copy_values(GrowableArray<address>* metadata) {} // Nothing to do
856
857 // Relocation support
858 private:
859 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
860 inline void initialize_immediate_oop(oop* dest, jobject handle);
861
862 protected:
863 address oops_reloc_begin() const;
864
865 public:
866 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
867 void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); }
868
869 void create_reloc_immediates_list(JavaThread* thread, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list);
870
871 bool is_at_poll_return(address pc);
872 bool is_at_poll_or_poll_return(address pc);
873
874 protected:
875 // Exception cache support
876 // Note: _exception_cache may be read and cleaned concurrently.
877 ExceptionCache* exception_cache() const { return _exception_cache; }
878 ExceptionCache* exception_cache_acquire() const;
879
880 public:
881 address handler_for_exception_and_pc(Handle exception, address pc);
882 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
883 void clean_exception_cache();
884
885 void add_exception_cache_entry(ExceptionCache* new_entry);
886 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
887
888
889 // Deopt
890 // Returns true if the PC is one we would expect if the frame is being deopted.
1022 bool load_reported() const { return _load_reported; }
1023 void set_load_reported() { _load_reported = true; }
1024
1025 inline int get_immutable_data_references_counter() { return *((int*)immutable_data_references_counter_begin()); }
1026 inline void set_immutable_data_references_counter(int count) { *((int*)immutable_data_references_counter_begin()) = count; }
1027
1028 public:
1029 // ScopeDesc retrieval operation
1030 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
1031 // pc_desc_near returns the first PcDesc at or after the given pc.
1032 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
1033
1034 // ScopeDesc for an instruction
1035 ScopeDesc* scope_desc_at(address pc);
1036 ScopeDesc* scope_desc_near(address pc);
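  // A minimal sketch of mapping a pc back to debug info ('return_pc' is an
  // assumed instruction address inside this nmethod):
  //
  //   PcDesc*    pd = nm->pc_desc_at(return_pc);    // exact-match lookup
  //   ScopeDesc* sd = nm->scope_desc_at(return_pc); // scope describing that pc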
1037
1038 // copying of debugging information
1039 void copy_scopes_pcs(PcDesc* pcs, int count);
1040 void copy_scopes_data(address buffer, int size);
1041
1042 // Post successful compilation
1043 void post_compiled_method(CompileTask* task);
1044
1045 // jvmti support:
1046 void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);
1047
1048 // verify operations
1049 void verify();
1050 void verify_scopes();
1051 void verify_interrupt_point(address interrupt_point, bool is_inline_cache);
1052
1053 // Disassemble this nmethod with additional debug information, e.g. information about blocks.
1054 void decode2(outputStream* st) const;
1055 void print_constant_pool(outputStream* st);
1056
1057 // Avoid hiding of parent's 'decode(outputStream*)' method.
1058 void decode(outputStream* st) const { decode2(st); } // just delegate here.
1059
1060 // printing support
1061 void print_on_impl(outputStream* st) const;
1062 void print_code();
1063 void print_value_on_impl(outputStream* st) const;
1064
1065 #if defined(SUPPORT_DATA_STRUCTS)
1066 // print output in opt build for disassembler library
1067 void print_relocations_on(outputStream* st) PRODUCT_RETURN;
1068 void print_pcs_on(outputStream* st);
1069 void print_scopes() { print_scopes_on(tty); }
1070 void print_scopes_on(outputStream* st) PRODUCT_RETURN;
1071 void print_handler_table();
1072 void print_nul_chk_table();
1073 void print_recorded_oop(int log_n, int index);
1074 void print_recorded_oops();
1075 void print_recorded_metadata();
1076
1077 void print_oops(outputStream* st); // oops from the underlying CodeBlob.
1078 void print_metadata(outputStream* st); // metadata in metadata pool.
1079 #else
1080 void print_pcs_on(outputStream* st) { return; }
1081 #endif
1082
1083 void print_calls(outputStream* st) PRODUCT_RETURN;
1084 static void print_statistics() PRODUCT_RETURN;
1085
1086 void maybe_print_nmethod(const DirectiveSet* directive);
1087 void print_nmethod(bool print_code);
1116 ByteSize native_receiver_sp_offset() {
1117 assert(is_native_method(), "sanity");
1118 return _native_receiver_sp_offset;
1119 }
1120 ByteSize native_basic_lock_sp_offset() {
1121 assert(is_native_method(), "sanity");
1122 return _native_basic_lock_sp_offset;
1123 }
1124
1125 // support for code generation
1126 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1127 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1128
1129 void metadata_do(MetadataClosure* f);
1130
1131 address call_instruction_address(address pc) const;
1132
1133 void make_deoptimized();
1134 void finalize_relocations();
1135
1136 void prepare_for_archiving_impl();
1137
1138 class Vptr : public CodeBlob::Vptr {
1139 void print_on(const CodeBlob* instance, outputStream* st) const override {
1140 ttyLocker ttyl;
1141 instance->as_nmethod()->print_on_impl(st);
1142 }
1143 void print_value_on(const CodeBlob* instance, outputStream* st) const override {
1144 instance->as_nmethod()->print_value_on_impl(st);
1145 }
1146 void prepare_for_archiving(CodeBlob* instance) const override {
1147 ((nmethod*)instance)->prepare_for_archiving_impl();
1148 }
1149 };
1150
1151 static const Vptr _vpntr;
1152 };
1153
1154 struct NMethodMarkingScope : StackObj {
1155 NMethodMarkingScope() {
1156 nmethod::oops_do_marking_prologue();
1157 }
1158 ~NMethodMarkingScope() {
1159 nmethod::oops_do_marking_epilogue();
1160 }
1161 };
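// A minimal usage sketch: the scope brackets a marking phase so the
// prologue/epilogue pair is never forgotten or unbalanced.
//
//   {
//     NMethodMarkingScope marking_scope;
//     // ... visit nmethods, e.g. via their oops_do machinery ...
//   }   // epilogue runs when the scope is left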
1162
1163 #endif // SHARE_CODE_NMETHOD_HPP