#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
#include "oops/method.hpp"

class AbstractCompiler;
class CompiledDirectCall;
class CompiledIC;
class CompiledICData;
class CompileTask;
class DepChange;
class Dependencies;
class DirectiveSet;
class DebugInformationRecorder;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class JvmtiThreadState;
class MetadataClosure;
class NativeCallWrapper;
class OopIterateClosure;
class AOTCodeReader;
class AOTCodeEntry;
class ScopeDesc;
class xmlStream;

// This class is used internally by nmethods to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* volatile _next;
  ExceptionCache* _purge_list_next;

  inline address pc_at(int index);
  void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
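
  // Layout note (descriptive): each ExceptionCache node caches handlers for a
  // single exception type. Up to cache_size (pc, handler) pairs are stored in
  // the parallel _pc/_handler arrays, of which _count are valid; nodes for
  // other exception types are chained through _next, and retired nodes are
  // linked for deferred deletion through _purge_list_next.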

  // ... (rest of ExceptionCache and intervening declarations elided) ...

#if INCLUDE_JVMCI
class FailedSpeculation;
class JVMCINMethodData;
#endif

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class CodeCache; // scavengable oops
  friend class JVMCINMethodData;
  friend class DeoptimizationScope;

 private:

  // Tracks the deoptimization handshake in which this nmethod will be deoptimized.
  uint64_t _deoptimization_generation;

  uint64_t _gc_epoch;

  // Profiling counter used to identify the hottest nmethods to record into CDS.
  volatile uint64_t _method_profiling_count;

  Method* _method;

  // To reduce header size, union fields whose usages do not overlap.
  union {
    // To support simple linked-list chaining of nmethods:
    nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
    struct {
      // These are used for compiled synchronized native methods to
      // locate the owner and stack slot for the BasicLock. They are
      // needed because there is no debug information for compiled native
      // wrappers and the oop maps are insufficient to allow
      // frame::retrieve_receiver() to work. Currently they are expected
      // to be byte offsets from the Java stack pointer for maximum code
      // sharing between platforms. JVMTI's GetLocalInstance() uses these
      // offsets to find the receiver for non-static native wrapper frames.
      ByteSize _native_receiver_sp_offset;
      ByteSize _native_basic_lock_sp_offset;
    };
  };
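
  // Note (descriptive): the union is safe because the two members are never
  // live at the same time -- _osr_link is only used for OSR compilations,
  // while the sp offsets are only used for native method wrappers.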

  // ... (fields elided) ...
#endif

  // Offsets in the immutable data section
  // _dependencies_offset == 0
  uint16_t _nul_chk_table_offset;
  uint16_t _handler_table_offset; // This table could be big in C1 code
  int      _scopes_pcs_offset;
  int      _scopes_data_offset;
#if INCLUDE_JVMCI
  int      _speculations_offset;
#endif

  // Location in the frame (offset from sp) where deopt can store the
  // original pc during a deoptimization.
  int _orig_pc_offset;

  int          _compile_id;    // which compilation made this nmethod
  CompLevel    _comp_level;    // compilation level (s1)
  CompilerType _compiler_type; // which compiler made this nmethod (u1)

  AOTCodeEntry* _aot_code_entry;

  bool _used; // has this nmethod ever been invoked?

  // Local state used to keep track of whether unloading is happening or not
  volatile uint8_t _is_unloading_state;

  // Protected by NMethodState_lock
  volatile signed char _state; // {not_installed, in_use, not_entrant}

  // set during construction
  uint8_t _has_unsafe_access:1,         // May fault due to unsafe access.
          _has_method_handle_invokes:1, // Does this method have MethodHandle invokes?
          _has_wide_vectors:1,          // Preserve wide vectors at safepoints
          _has_monitors:1,              // Fastpath monitor detection for continuations
          _has_scoped_access:1,         // used by the shared scope closure (scopedMemoryAccess.cpp)
          _has_flushed_dependencies:1,  // Used for maintenance of dependencies (under CodeCache_lock)
          _is_unlinked:1,               // mark during class unloading
          _load_reported:1,             // used by JVMTI to track if an event has been posted for this nmethod
          _preloaded:1,
          _has_clinit_barriers:1;

  enum DeoptimizationStatus : u1 {
    not_marked,
    deoptimize,
    deoptimize_noupdate,
    deoptimize_done
  };

  volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization

  DeoptimizationStatus deoptimization_status() const {
    return AtomicAccess::load(&_deoptimization_status);
  }

  // Initialize fields to their default values
  void init_defaults(CodeBuffer* code_buffer, CodeOffsets* offsets);

  // Post initialization
  void post_init();

  // ... (members elided) ...

  // Attempt Unclaimed -> N|WR transition. Returns true if successful.
  bool oops_do_try_claim_weak_request();

  // Attempt Unclaimed -> N|SD transition. Returns the current link.
  oops_do_mark_link* oops_do_try_claim_strong_done();
  // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
  nmethod* oops_do_try_add_to_list_as_weak_done();

  // Attempt X|WD -> N|SR transition. Returns the current link.
  oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
  // Attempt X|WD -> X|SD transition. Returns true if successful.
  bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);

  // Do the N|SD -> X|SD transition.
  void oops_do_add_to_list_as_strong_done();

  // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
  // transitions).
  void oops_do_set_strong_done(nmethod* old_head);
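
  // Summary (descriptive, reconstructed from the transition comments above;
  // the oops_do_mark_link definition itself is elided here):
  //   Unclaimed -> N|WR -> X|WD -> N|SR -> X|SD  (weak claim upgraded to strong)
  //   Unclaimed -> N|SD -> X|SD                  (direct strong claim)
  //   X|WD      -> X|SD                          (weak done claimed as strong done)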

  void record_nmethod_dependency();

  nmethod* restore(address code_cache_buffer,
                   const methodHandle& method,
                   int compile_id,
                   address reloc_data,
                   GrowableArray<Handle>& oop_list,
                   GrowableArray<Metadata*>& metadata_list,
                   ImmutableOopMapSet* oop_maps,
                   address immutable_data,
                   GrowableArray<Handle>& reloc_imm_oop_list,
                   GrowableArray<Metadata*>& reloc_imm_metadata_list,
                   AOTCodeReader* aot_code_reader);

 public:
  // Create an nmethod from an nmethod archived in the AOT code cache.
  static nmethod* new_nmethod(nmethod* archived_nm,
                              const methodHandle& method,
                              AbstractCompiler* compiler,
                              int compile_id,
                              address reloc_data,
                              GrowableArray<Handle>& oop_list,
                              GrowableArray<Metadata*>& metadata_list,
                              ImmutableOopMapSet* oop_maps,
                              address immutable_data,
                              GrowableArray<Handle>& reloc_imm_oop_list,
                              GrowableArray<Metadata*>& reloc_imm_metadata_list,
                              AOTCodeReader* aot_code_reader);

  // If you change anything in this enum please patch
  // vmStructs_jvmci.cpp accordingly.
  enum class InvalidationReason : s1 {
    NOT_INVALIDATED = -1,
    C1_CODEPATCH,
    C1_DEOPTIMIZE,
    C1_DEOPTIMIZE_FOR_PATCHING,
    C1_PREDICATE_FAILED_TRAP,
    CI_REPLAY,
    UNLOADING,
    UNLOADING_COLD,
    JVMCI_INVALIDATE,
    JVMCI_MATERIALIZE_VIRTUAL_OBJECT,
    JVMCI_REPLACED_WITH_NEW_CODE,
    JVMCI_REPROFILE,
    MARKED_FOR_DEOPTIMIZATION,
    MISSING_EXCEPTION_HANDLER,
    NOT_USED,
    OSR_INVALIDATION_BACK_BRANCH,
    OSR_INVALIDATION_FOR_COMPILING_WITH_C1,
    // ... (remaining enumerators and intervening declarations elided) ...
                              CompLevel comp_level
#if INCLUDE_JVMCI
                              , char* speculations = nullptr,
                              int speculations_len = 0,
                              JVMCINMethodData* jvmci_data = nullptr
#endif
                              );

  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps,
                                     int exception_handler = -1);
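
  // Note (descriptive): native wrappers carry no scopes or debug information;
  // receiver_sp_offset and basic_lock_sp_offset are recorded instead (see the
  // union of _native_*_sp_offset fields above) so the receiver and BasicLock
  // can still be located in the wrapper's frame.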

  Method* method       () const { return _method; }
  uint16_t entry_bci   () const { return _entry_bci; }
  bool is_native_method() const { return _method != nullptr && _method->is_native(); }
  bool is_java_method  () const { return _method != nullptr && !_method->is_native(); }
  bool is_osr_method   () const { return _entry_bci != InvocationEntryBci; }
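
  // Example (illustrative): an on-stack-replacement compilation entered at
  // bytecode index 42 has entry_bci() == 42; a normal compilation's entry bci
  // is the InvocationEntryBci sentinel, so is_osr_method() returns false.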

  int orig_pc_offset() { return _orig_pc_offset; }

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int compile_id() const { return _compile_id; }
  int comp_level() const { return _comp_level; }
  const char* compile_kind() const;

  inline bool is_compiled_by_c1   () const { return _compiler_type == compiler_c1; }
  inline bool is_compiled_by_c2   () const { return _compiler_type == compiler_c2; }
  inline bool is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
  CompilerType compiler_type      () const { return _compiler_type; }
  const char*  compiler_name      () const;

  // boundaries for different parts
  address consts_begin          () const { return content_begin(); }
  address consts_end            () const { return code_begin(); }
  address insts_begin           () const { return code_begin(); }
  address insts_end             () const { return header_begin() + _stub_offset; }
  address stub_begin            () const { return header_begin() + _stub_offset; }
  address stub_end              () const { return code_end(); }
  address exception_begin       () const { return header_begin() + _exception_offset; }
  address deopt_handler_begin   () const { return header_begin() + _deopt_handler_offset; }
  address deopt_mh_handler_begin() const { return _deopt_mh_handler_offset != -1 ? (header_begin() + _deopt_mh_handler_offset) : nullptr; }
  address unwind_handler_begin  () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
  oop*    oops_begin            () const { return (oop*) data_begin(); }
  oop*    oops_end              () const { return (oop*) data_end(); }

  // mutable data
  Metadata** metadata_begin() const { return (Metadata**) (mutable_data_begin() + _relocation_size); }
#if INCLUDE_JVMCI
  Metadata** metadata_end  () const { return (Metadata**) (mutable_data_begin() + _relocation_size + _metadata_size); }
  address jvmci_data_begin () const { return mutable_data_begin() + _relocation_size + _metadata_size; }
  address jvmci_data_end   () const { return mutable_data_end(); }
#else
  Metadata** metadata_end  () const { return (Metadata**) mutable_data_end(); }
#endif

  // immutable data
  void set_immutable_data(address data) { _immutable_data = data; }
  address immutable_data_begin() const { return _immutable_data; }
  address immutable_data_end  () const { return _immutable_data + _immutable_data_size; }
  address dependencies_begin  () const { return _immutable_data; }
  address dependencies_end    () const { return _immutable_data + _nul_chk_table_offset; }
  address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; }
  address nul_chk_table_end   () const { return _immutable_data + _handler_table_offset; }
  address handler_table_begin () const { return _immutable_data + _handler_table_offset; }
  address handler_table_end   () const { return _immutable_data + _scopes_pcs_offset; }
  PcDesc* scopes_pcs_begin    () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset); }
  PcDesc* scopes_pcs_end      () const { return (PcDesc*)(_immutable_data + _scopes_data_offset); }
  address scopes_data_begin   () const { return _immutable_data + _scopes_data_offset; }

#if INCLUDE_JVMCI
  address scopes_data_end     () const { return _immutable_data + _speculations_offset; }
  address speculations_begin  () const { return _immutable_data + _speculations_offset; }
  address speculations_end    () const { return immutable_data_end(); }
#else
  address scopes_data_end     () const { return immutable_data_end(); }
#endif
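
  // Layout sketch (descriptive, derived from the accessors above):
  //   code blob      : header | consts | insts | stubs | oops (data section)
  //   mutable data   : relocation | metadata JVMCI_ONLY(| JVMCI data)
  //   immutable data : dependencies | nul_chk_table | handler_table |
  //                    scopes_pcs | scopes_data JVMCI_ONLY(| speculations)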

  // ... (members elided) ...

  address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct

  enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
                                           // allowed to advance state
                       in_use        = 0,  // executable nmethod
                       not_entrant   = 1   // marked for deoptimization but activations may still exist
  };

  // flag accessing and manipulation
  bool is_not_installed() const { return _state == not_installed; }
  bool is_in_use()        const { return _state <= in_use; }
  bool is_not_entrant()   const { return _state == not_entrant; }
  int  get_state()        const { return _state; }

  void clear_unloading_state();
  // Heuristically deduce whether this nmethod is cold, i.e. not worth keeping around
  bool is_cold();
  bool is_unloading();
  void do_unloading(bool unloading_occurred);

  void inc_method_profiling_count();
  uint64_t method_profiling_count();

  bool make_in_use() {
    return try_transition(in_use);
  }
  // Make the nmethod not entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant(InvalidationReason invalidation_reason, bool keep_aot_entry = false);
  bool make_not_used() { return make_not_entrant(InvalidationReason::NOT_USED, true /* keep AOT entry */); }

  bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
  bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
  void set_deoptimized_done();

  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
    DeoptimizationStatus status = deoptimization_status();
    return status != deoptimize_noupdate && status != deoptimize_done;
  }

  // Tells whether frames described by this nmethod can be deoptimized.
  // Note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  bool has_dependencies() { return dependencies_size() != 0; }
  void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
  void flush_dependencies();

  template<typename T>
  T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
  template<typename T>
  void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
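
  // Example (illustrative, with a hypothetical per-nmethod bookkeeping type
  // GCNMethodInfo): a collector can stash and later retrieve its data:
  //   nm->set_gc_data<GCNMethodInfo>(info);
  //   GCNMethodInfo* info = nm->gc_data<GCNMethodInfo>();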

  bool has_unsafe_access() const { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }

  bool has_monitors() const { return _has_monitors; }
  void set_has_monitors(bool z) { _has_monitors = z; }

  bool has_scoped_access() const { return _has_scoped_access; }
  void set_has_scoped_access(bool z) { _has_scoped_access = z; }

  bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  bool has_wide_vectors() const { return _has_wide_vectors; }
  void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }

  bool has_clinit_barriers() const { return _has_clinit_barriers; }
  void set_has_clinit_barriers(bool z) { _has_clinit_barriers = z; }

  bool preloaded() const { return _preloaded; }
  void set_preloaded(bool z) { _preloaded = z; }

  bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies(bool z) {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = z;
  }

  bool is_unlinked() const { return _is_unlinked; }
  void set_is_unlinked() {
    assert(!_is_unlinked, "already unlinked");
    _is_unlinked = true;
  }

  bool used() const { return _used; }
  void set_used() { _used = true; }

  bool is_aot() const { return _aot_code_entry != nullptr; }
  void set_aot_code_entry(AOTCodeEntry* entry) { _aot_code_entry = entry; }
  AOTCodeEntry* aot_code_entry() const { return _aot_code_entry; }
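
  // Note (descriptive): an nmethod materialized from the AOT code cache keeps
  // a back-pointer to its AOTCodeEntry; is_aot() is simply a null check on it.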

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop oop_at(int index) const;
  oop oop_at_phantom(int index) const; // phantom reference
  oop* oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    return &oops_begin()[index - 1];
  }
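
  // Example (illustrative): because of the 1-bias, the first recorded oop is
  // at index 1, i.e. *oop_addr_at(1) == oops_begin()[0].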

  // Support for metadata in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata* metadata_at(int index) const { return index == 0 ? nullptr : *metadata_addr_at(index); }
  Metadata** metadata_addr_at(int index) const { // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }

  void copy_values(GrowableArray<Handle>* array);
  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);
  void copy_values(GrowableArray<address>* metadata) {} // Nothing to do

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 protected:
  address oops_reloc_begin() const;

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); }

  void create_reloc_immediates_list(JavaThread* thread, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list);

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

 protected:
  // Exception cache support
  // Note: _exception_cache may be read and cleaned concurrently.
  ExceptionCache* exception_cache() const { return _exception_cache; }
  ExceptionCache* exception_cache_acquire() const;

 public:
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache();

  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // MethodHandle
  bool is_method_handle_return(address return_pc);

  // ... (members elided) ...

  address* orig_pc_addr(const frame* fr);

  // used by JVMTI to track if the load event has been reported
  bool load_reported() const { return _load_reported; }
  void set_load_reported() { _load_reported = true; }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
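
  // Example (illustrative): pc_desc_at(pc) yields a PcDesc only on an exact
  // pc match, while pc_desc_near(pc) accepts the first PcDesc at or after pc,
  // so it can be used when pc does not land exactly on a recorded safepoint.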

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);
  ScopeDesc* scope_desc_near(address pc);

  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Post successful compilation
  void post_compiled_method(CompileTask* task);

  // jvmti support:
  void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point, bool is_inline_cache);

  // Disassemble this nmethod with additional debug information, e.g. information about blocks.
  void decode2(outputStream* st) const;
  void print_constant_pool(outputStream* st);

  // Avoid hiding of parent's 'decode(outputStream*)' method.
  void decode(outputStream* st) const { decode2(st); } // just delegate here.

  // printing support
  void print_on_impl(outputStream* st) const;
  void print_code();
  void print_value_on_impl(outputStream* st) const;

#if defined(SUPPORT_DATA_STRUCTS)
  // print output in opt build for disassembler library
  void print_relocations_on(outputStream* st) PRODUCT_RETURN;
  void print_pcs_on(outputStream* st);
  void print_scopes() { print_scopes_on(tty); }
  void print_scopes_on(outputStream* st) PRODUCT_RETURN;
  void print_handler_table();
  void print_nul_chk_table();
  void print_recorded_oop(int log_n, int index);
  void print_recorded_oops();
  void print_recorded_metadata();

  void print_oops(outputStream* st);     // oops from the underlying CodeBlob.
  void print_metadata(outputStream* st); // metadata in metadata pool.
#else
  void print_pcs_on(outputStream* st) {}
#endif

  void print_calls(outputStream* st) PRODUCT_RETURN;
  static void print_statistics() PRODUCT_RETURN;

  void maybe_print_nmethod(const DirectiveSet* directive);
  void print_nmethod(bool print_code);

  // ... (members elided) ...

  ByteSize native_receiver_sp_offset() {
    assert(is_native_method(), "sanity");
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    assert(is_native_method(), "sanity");
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
  static ByteSize state_offset()           { return byte_offset_of(nmethod, _state); }

  void metadata_do(MetadataClosure* f);

  address call_instruction_address(address pc) const;

  void make_deoptimized();
  void finalize_relocations();

  void prepare_for_archiving_impl();

  class Vptr : public CodeBlob::Vptr {
    void print_on(const CodeBlob* instance, outputStream* st) const override {
      ttyLocker ttyl;
      instance->as_nmethod()->print_on_impl(st);
    }
    void print_value_on(const CodeBlob* instance, outputStream* st) const override {
      instance->as_nmethod()->print_value_on_impl(st);
    }
    void prepare_for_archiving(CodeBlob* instance) const override {
      instance->as_nmethod()->prepare_for_archiving_impl();
    }
  };

  static const Vptr _vpntr;
};

#endif // SHARE_CODE_NMETHOD_HPP