28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32
33 class AbstractCompiler;
34 class CompiledDirectCall;
35 class CompiledIC;
36 class CompiledICData;
37 class CompileTask;
38 class DepChange;
39 class Dependencies;
40 class DirectiveSet;
41 class DebugInformationRecorder;
42 class ExceptionHandlerTable;
43 class ImplicitExceptionTable;
44 class JvmtiThreadState;
45 class MetadataClosure;
46 class NativeCallWrapper;
47 class OopIterateClosure;
48 class ScopeDesc;
49 class xmlStream;
50
51 // This class is used internally by nmethods, to cache
52 // exception/pc/handler information.
53
54 class ExceptionCache : public CHeapObj<mtCode> {
55 friend class VMStructs;
56 private:
57 enum { cache_size = 16 };
58 Klass* _exception_type;
59 address _pc[cache_size];
60 address _handler[cache_size];
61 volatile int _count;
62 ExceptionCache* volatile _next;
63 ExceptionCache* _purge_list_next;
64
65 inline address pc_at(int index);
66 void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size, "index out of bounds"); _pc[index] = a; }
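// Illustrative sketch (not part of the original header): each ExceptionCache
// holds up to cache_size (pc, handler) pairs for a single exception type, so a
// lookup scans _pc[0.._count) for a match and returns the parallel _handler
// entry, roughly:
//
//   for (int i = 0; i < count; i++) {
//     if (pc_at(i) == pc) return handler_at(i);  // handler_at: assumed accessor, elided in this excerpt
//   }
//   return nullptr;  // miss: caller computes the handler and may cache it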
67
158
159 #if INCLUDE_JVMCI
160 class FailedSpeculation;
161 class JVMCINMethodData;
162 #endif
163
164 class nmethod : public CodeBlob {
165 friend class VMStructs;
166 friend class JVMCIVMStructs;
167 friend class CodeCache; // scavengable oops
168 friend class JVMCINMethodData;
169 friend class DeoptimizationScope;
170
171 private:
172
173 // Used to track in which deoptimization handshake this method will be deoptimized.
174 uint64_t _deoptimization_generation;
175
176 uint64_t _gc_epoch;
177
178 Method* _method;
179
180 // To reduce header size, union fields whose usages do not overlap.
181 union {
182 // To support simple linked-list chaining of nmethods:
183 nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
184 struct {
185 // These are used for compiled synchronized native methods to
186 // locate the owner and stack slot for the BasicLock. They are
187 // needed because there is no debug information for compiled native
188 // wrappers and the oop maps are insufficient to allow
189 // frame::retrieve_receiver() to work. Currently they are expected
190 // to be byte offsets from the Java stack pointer for maximum code
191 // sharing between platforms. JVMTI's GetLocalInstance() uses these
192 // offsets to find the receiver for non-static native wrapper frames.
193 ByteSize _native_receiver_sp_offset;
194 ByteSize _native_basic_lock_sp_offset;
195 };
196 };
197
243 #endif
244
245 // Offsets in the immutable data section
246 // (_dependencies_offset == 0, so it needs no field)
247 uint16_t _nul_chk_table_offset;
248 uint16_t _handler_table_offset; // This table could be big in C1 code
249 int _scopes_pcs_offset;
250 int _scopes_data_offset;
251 #if INCLUDE_JVMCI
252 int _speculations_offset;
253 #endif
254
255 // location in frame (offset from sp) where deopt can store the original
256 // pc during a deopt.
257 int _orig_pc_offset;
258
259 int _compile_id; // which compilation made this nmethod
260 CompLevel _comp_level; // compilation level (s1)
261 CompilerType _compiler_type; // which compiler made this nmethod (u1)
262
263 // Local state used to keep track of whether unloading is happening or not
264 volatile uint8_t _is_unloading_state;
265
266 // Protected by NMethodState_lock
267 volatile signed char _state; // {not_installed, in_use, not_entrant}
268
269 // set during construction
270 uint8_t _has_unsafe_access:1, // May fault due to unsafe access.
271 _has_method_handle_invokes:1,// Does this method have MethodHandle invokes?
272 _has_wide_vectors:1, // Preserve wide vectors at safepoints
273 _has_monitors:1, // Fastpath monitor detection for continuations
274 _has_scoped_access:1, // used for the shared scope closure (scopedMemoryAccess.cpp)
275 _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
276 _is_unlinked:1, // set during class unloading
277 _load_reported:1; // used by jvmti to track if an event has been posted for this nmethod
278
279 enum DeoptimizationStatus : u1 {
280 not_marked,
281 deoptimize,
282 deoptimize_noupdate,
283 deoptimize_done
284 };
285
286 volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
287
288 DeoptimizationStatus deoptimization_status() const {
289 return Atomic::load(&_deoptimization_status);
290 }
291
292 // Initialize fields to their default values
293 void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets);
294
295 // Post initialization
296 void post_init();
297
311 // For normal JIT compiled code
312 nmethod(Method* method,
313 CompilerType type,
314 int nmethod_size,
315 int immutable_data_size,
316 int mutable_data_size,
317 int compile_id,
318 int entry_bci,
319 address immutable_data,
320 CodeOffsets* offsets,
321 int orig_pc_offset,
322 DebugInformationRecorder *recorder,
323 Dependencies* dependencies,
324 CodeBuffer *code_buffer,
325 int frame_size,
326 OopMapSet* oop_maps,
327 ExceptionHandlerTable* handler_table,
328 ImplicitExceptionTable* nul_chk_table,
329 AbstractCompiler* compiler,
330 CompLevel comp_level
331 #if INCLUDE_JVMCI
332 , char* speculations = nullptr,
333 int speculations_len = 0,
334 JVMCINMethodData* jvmci_data = nullptr
335 #endif
336 );
337
338 // helper methods
339 void* operator new(size_t size, int nmethod_size, int comp_level) throw();
340
341 // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
342 // Attention: Only allow NonNMethod space for special nmethods which don't need to be
343 // findable by nmethod iterators! In particular, they must not contain oops!
344 void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();
345
346 const char* reloc_string_for(u_char* begin, u_char* end);
347
348 bool try_transition(signed char new_state);
349
350 // Returns true if this thread changed the state of the nmethod or false if another thread performed the transition.
453 // Attempt Unclaimed -> N|WR transition. Returns true if successful.
454 bool oops_do_try_claim_weak_request();
455
456 // Attempt Unclaimed -> N|SD transition. Returns the current link.
457 oops_do_mark_link* oops_do_try_claim_strong_done();
458 // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
459 nmethod* oops_do_try_add_to_list_as_weak_done();
460
461 // Attempt X|WD -> N|SR transition. Returns the current link.
462 oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
463 // Attempt X|WD -> X|SD transition. Returns true if successful.
464 bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);
465
466 // Do the N|SD -> X|SD transition.
467 void oops_do_add_to_list_as_strong_done();
468
469 // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
470 // transitions).
471 void oops_do_set_strong_done(nmethod* old_head);
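// A rough legend for the LINK|CLAIM notation in the transition comments above
// (an inference from the method names, since the explanatory block is elided
// in this excerpt): N is this nmethod's own link entry, X is some other
// nmethod (e.g. the current list head), and the claim tags are WR/WD for
// weak request/done and SR/SD for strong request/done.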
472
473 public:
474 // create nmethod with entry_bci
475 static nmethod* new_nmethod(const methodHandle& method,
476 int compile_id,
477 int entry_bci,
478 CodeOffsets* offsets,
479 int orig_pc_offset,
480 DebugInformationRecorder* recorder,
481 Dependencies* dependencies,
482 CodeBuffer *code_buffer,
483 int frame_size,
484 OopMapSet* oop_maps,
485 ExceptionHandlerTable* handler_table,
486 ImplicitExceptionTable* nul_chk_table,
487 AbstractCompiler* compiler,
488 CompLevel comp_level
489 #if INCLUDE_JVMCI
490 , char* speculations = nullptr,
491 int speculations_len = 0,
492 JVMCINMethodData* jvmci_data = nullptr
493 #endif
494 );
495
496 static nmethod* new_native_nmethod(const methodHandle& method,
497 int compile_id,
498 CodeBuffer *code_buffer,
499 int vep_offset,
500 int frame_complete,
501 int frame_size,
502 ByteSize receiver_sp_offset,
503 ByteSize basic_lock_sp_offset,
504 OopMapSet* oop_maps,
505 int exception_handler = -1);
506
507 Method* method () const { return _method; }
508 bool is_native_method() const { return _method != nullptr && _method->is_native(); }
509 bool is_java_method () const { return _method != nullptr && !_method->is_native(); }
510 bool is_osr_method () const { return _entry_bci != InvocationEntryBci; }
511
512 // Compiler task identification. Note that all OSR methods
513 // are numbered in an independent sequence if CICountOSR is true,
514 // and native method wrappers are also numbered independently if
515 // CICountNative is true.
516 int compile_id() const { return _compile_id; }
517 const char* compile_kind() const;
518
519 inline bool is_compiled_by_c1 () const { return _compiler_type == compiler_c1; }
520 inline bool is_compiled_by_c2 () const { return _compiler_type == compiler_c2; }
521 inline bool is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
522 CompilerType compiler_type () const { return _compiler_type; }
523 const char* compiler_name () const;
524
525 // boundaries for different parts
526 address consts_begin () const { return content_begin(); }
527 address consts_end () const { return code_begin() ; }
528 address insts_begin () const { return code_begin() ; }
529 address insts_end () const { return header_begin() + _stub_offset ; }
530 address stub_begin () const { return header_begin() + _stub_offset ; }
531 address stub_end () const { return code_end() ; }
532 address exception_begin () const { return header_begin() + _exception_offset ; }
533 address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
534 address deopt_mh_handler_begin() const { return header_begin() + _deopt_mh_handler_offset ; }
535 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
536 oop* oops_begin () const { return (oop*) data_begin(); }
537 oop* oops_end () const { return (oop*) data_end(); }
538
539 // mutable data
540 Metadata** metadata_begin () const { return (Metadata**) (mutable_data_begin() + _relocation_size); }
541 #if INCLUDE_JVMCI
542 Metadata** metadata_end () const { return (Metadata**) (mutable_data_begin() + _relocation_size + _metadata_size); }
543 address jvmci_data_begin () const { return mutable_data_begin() + _relocation_size + _metadata_size; }
544 address jvmci_data_end () const { return mutable_data_end(); }
545 #else
546 Metadata** metadata_end () const { return (Metadata**) mutable_data_end(); }
547 #endif
548
549 // immutable data
550 address immutable_data_begin () const { return _immutable_data; }
551 address immutable_data_end () const { return _immutable_data + _immutable_data_size ; }
552 address dependencies_begin () const { return _immutable_data; }
553 address dependencies_end () const { return _immutable_data + _nul_chk_table_offset; }
554 address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; }
555 address nul_chk_table_end () const { return _immutable_data + _handler_table_offset; }
556 address handler_table_begin () const { return _immutable_data + _handler_table_offset; }
557 address handler_table_end () const { return _immutable_data + _scopes_pcs_offset ; }
558 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset) ; }
559 PcDesc* scopes_pcs_end () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
560 address scopes_data_begin () const { return _immutable_data + _scopes_data_offset ; }
561
562 #if INCLUDE_JVMCI
563 address scopes_data_end () const { return _immutable_data + _speculations_offset ; }
564 address speculations_begin () const { return _immutable_data + _speculations_offset ; }
565 address speculations_end () const { return immutable_data_end(); }
566 #else
567 address scopes_data_end () const { return immutable_data_end(); }
568 #endif
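// Layout of the immutable data section implied by the accessors above, from
// offset 0 to _immutable_data_size (speculations exist only #if INCLUDE_JVMCI):
//
//   dependencies | nul_chk_table | handler_table | scopes_pcs | scopes_data | speculations
//
// where each boundary is the corresponding *_offset field and dependencies
// implicitly start at offset 0.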
569
609 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
610
611 enum : signed char { not_installed = -1, // under construction; only the owner doing the construction is
612 // allowed to advance the state
613 in_use = 0, // executable nmethod
614 not_entrant = 1 // marked for deoptimization but activations may still exist
615 };
616
617 // flag accessing and manipulation
618 bool is_not_installed() const { return _state == not_installed; }
619 bool is_in_use() const { return _state <= in_use; }
620 bool is_not_entrant() const { return _state == not_entrant; }
621 int get_state() const { return _state; }
622
623 void clear_unloading_state();
624 // Heuristically deduce whether this nmethod isn't worth keeping around
625 bool is_cold();
626 bool is_unloading();
627 void do_unloading(bool unloading_occurred);
628
629 bool make_in_use() {
630 return try_transition(in_use);
631 }
632 // Make the nmethod non-entrant. The nmethod will continue to be
633 // alive. It is used when an uncommon trap happens. Returns true
634 // if this thread changed the state of the nmethod or false if
635 // another thread performed the transition.
636 bool make_not_entrant(const char* reason);
637 bool make_not_used() { return make_not_entrant("not used"); }
638
639 bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
640 bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
641 void set_deoptimized_done();
642
643 bool update_recompile_counts() const {
644 // Update recompile counts when either the update is explicitly requested (deoptimize)
645 // or the nmethod is not marked for deoptimization at all (not_marked).
646 // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
647 DeoptimizationStatus status = deoptimization_status();
648 return status != deoptimize_noupdate && status != deoptimize_done;
649 }
650
651 // tells whether frames described by this nmethod can be deoptimized
652 // note: native wrappers cannot be deoptimized.
653 bool can_be_deoptimized() const { return is_java_method(); }
654
655 bool has_dependencies() { return dependencies_size() != 0; }
656 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
657 void flush_dependencies();
659 template<typename T>
660 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
661 template<typename T>
662 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
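// Minimal usage sketch (hypothetical type; GCs hang their per-nmethod
// bookkeeping off _gc_data through these accessors):
//
//   struct MyGCNMethodData { int age; };  // assumed GC-side type
//   nm->set_gc_data<MyGCNMethodData>(new MyGCNMethodData{0});
//   MyGCNMethodData* d = nm->gc_data<MyGCNMethodData>();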
663
664 bool has_unsafe_access() const { return _has_unsafe_access; }
665 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
666
667 bool has_monitors() const { return _has_monitors; }
668 void set_has_monitors(bool z) { _has_monitors = z; }
669
670 bool has_scoped_access() const { return _has_scoped_access; }
671 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
672
673 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
674 void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
675
676 bool has_wide_vectors() const { return _has_wide_vectors; }
677 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
678
679 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
680 void set_has_flushed_dependencies(bool z) {
681 assert(!has_flushed_dependencies(), "should only happen once");
682 _has_flushed_dependencies = z;
683 }
684
685 bool is_unlinked() const { return _is_unlinked; }
686 void set_is_unlinked() {
687 assert(!_is_unlinked, "already unlinked");
688 _is_unlinked = true;
689 }
690
691 int comp_level() const { return _comp_level; }
692
693 // Support for oops in scopes and relocs:
694 // Note: index 0 is reserved for null.
695 oop oop_at(int index) const;
696 oop oop_at_phantom(int index) const; // phantom reference
697 oop* oop_addr_at(int index) const { // for GC
698 // relocation indexes are biased by 1 (because 0 is reserved)
699 assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
700 return &oops_begin()[index - 1];
701 }
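// Worked example of the 1-biased indexing (illustrative only): for a recorded
// oop table of size 3, valid indexes are 1..3, and oop_addr_at(1) returns
// &oops_begin()[0], i.e. the first slot; index 0 always denotes null.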
702
703 // Support for meta data in scopes and relocs:
704 // Note: index 0 is reserved for null.
705 Metadata* metadata_at(int index) const { return index == 0 ? nullptr: *metadata_addr_at(index); }
706 Metadata** metadata_addr_at(int index) const { // for GC
707 // relocation indexes are biased by 1 (because 0 is reserved)
708 assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
709 return &metadata_begin()[index - 1];
710 }
711
712 void copy_values(GrowableArray<jobject>* oops);
713 void copy_values(GrowableArray<Metadata*>* metadata);
714 void copy_values(GrowableArray<address>* metadata) {} // Nothing to do
715
716 // Relocation support
717 private:
718 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
719 inline void initialize_immediate_oop(oop* dest, jobject handle);
720
721 protected:
722 address oops_reloc_begin() const;
723
724 public:
725 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
726 void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); }
727
728 bool is_at_poll_return(address pc);
729 bool is_at_poll_or_poll_return(address pc);
730
731 protected:
732 // Exception cache support
733 // Note: _exception_cache may be read and cleaned concurrently.
734 ExceptionCache* exception_cache() const { return _exception_cache; }
735 ExceptionCache* exception_cache_acquire() const;
736
737 public:
738 address handler_for_exception_and_pc(Handle exception, address pc);
739 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
740 void clean_exception_cache();
741
742 void add_exception_cache_entry(ExceptionCache* new_entry);
743 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
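// Typical lookup flow (a sketch of how these pieces fit together, inferred
// from the declarations above rather than quoted from the implementation):
//
//   address h = nm->handler_for_exception_and_pc(exception, pc); // fast path: cache hit
//   if (h == nullptr) {
//     h = /* compute via handler_table / scopes */ nullptr;
//     nm->add_handler_for_exception_and_pc(exception, pc, h);    // cache for next time
//   }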
744
745
746 // MethodHandle
747 bool is_method_handle_return(address return_pc);
876 address* orig_pc_addr(const frame* fr);
877
878 // used by jvmti to track if the load event has been reported
879 bool load_reported() const { return _load_reported; }
880 void set_load_reported() { _load_reported = true; }
881
882 public:
883 // ScopeDesc retrieval operation
884 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
885 // pc_desc_near returns the first PcDesc at or after the given pc.
886 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
887
888 // ScopeDesc for an instruction
889 ScopeDesc* scope_desc_at(address pc);
890 ScopeDesc* scope_desc_near(address pc);
891
892 // copying of debugging information
893 void copy_scopes_pcs(PcDesc* pcs, int count);
894 void copy_scopes_data(address buffer, int size);
895
896 int orig_pc_offset() { return _orig_pc_offset; }
897
898 // Post successful compilation
899 void post_compiled_method(CompileTask* task);
900
901 // jvmti support:
902 void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);
903
904 // verify operations
905 void verify();
906 void verify_scopes();
907 void verify_interrupt_point(address interrupt_point, bool is_inline_cache);
908
909 // Disassemble this nmethod with additional debug information, e.g. information about blocks.
910 void decode2(outputStream* st) const;
911 void print_constant_pool(outputStream* st);
912
913 // Avoid hiding of parent's 'decode(outputStream*)' method.
914 void decode(outputStream* st) const { decode2(st); } // just delegate here.
915
916 // printing support
917 void print_on_impl(outputStream* st) const;
918 void print_code();
919 void print_value_on_impl(outputStream* st) const;
920
921 #if defined(SUPPORT_DATA_STRUCTS)
922 // print output in opt build for disassembler library
923 void print_relocations() PRODUCT_RETURN;
924 void print_pcs_on(outputStream* st);
925 void print_scopes() { print_scopes_on(tty); }
926 void print_scopes_on(outputStream* st) PRODUCT_RETURN;
927 void print_handler_table();
928 void print_nul_chk_table();
929 void print_recorded_oop(int log_n, int index);
930 void print_recorded_oops();
931 void print_recorded_metadata();
932
933 void print_oops(outputStream* st); // oops from the underlying CodeBlob.
934 void print_metadata(outputStream* st); // metadata in metadata pool.
935 #else
936 void print_pcs_on(outputStream* st) { return; }
937 #endif
938
939 void print_calls(outputStream* st) PRODUCT_RETURN;
940 static void print_statistics() PRODUCT_RETURN;
941
942 void maybe_print_nmethod(const DirectiveSet* directive);
943 void print_nmethod(bool print_code);
971 ByteSize native_receiver_sp_offset() {
972 assert(is_native_method(), "sanity");
973 return _native_receiver_sp_offset;
974 }
975 ByteSize native_basic_lock_sp_offset() {
976 assert(is_native_method(), "sanity");
977 return _native_basic_lock_sp_offset;
978 }
979
980 // support for code generation
981 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
982 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
983
984 void metadata_do(MetadataClosure* f);
985
986 address call_instruction_address(address pc) const;
987
988 void make_deoptimized();
989 void finalize_relocations();
990
991 class Vptr : public CodeBlob::Vptr {
992 void print_on(const CodeBlob* instance, outputStream* st) const override {
993 ttyLocker ttyl;
994 instance->as_nmethod()->print_on_impl(st);
995 }
996 void print_value_on(const CodeBlob* instance, outputStream* st) const override {
997 instance->as_nmethod()->print_value_on_impl(st);
998 }
999 };
1000
1001 static const Vptr _vpntr;
1002 };
1003
1004 #endif // SHARE_CODE_NMETHOD_HPP
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32
33 class AbstractCompiler;
34 class CompiledDirectCall;
35 class CompiledIC;
36 class CompiledICData;
37 class CompileTask;
38 class DepChange;
39 class Dependencies;
40 class DirectiveSet;
41 class DebugInformationRecorder;
42 class ExceptionHandlerTable;
43 class ImplicitExceptionTable;
44 class JvmtiThreadState;
45 class MetadataClosure;
46 class NativeCallWrapper;
47 class OopIterateClosure;
48 class AOTCodeReader;
49 class AOTCodeEntry;
50 class ScopeDesc;
51 class xmlStream;
52
53 // This class is used internally by nmethods, to cache
54 // exception/pc/handler information.
55
56 class ExceptionCache : public CHeapObj<mtCode> {
57 friend class VMStructs;
58 private:
59 enum { cache_size = 16 };
60 Klass* _exception_type;
61 address _pc[cache_size];
62 address _handler[cache_size];
63 volatile int _count;
64 ExceptionCache* volatile _next;
65 ExceptionCache* _purge_list_next;
66
67 inline address pc_at(int index);
68 void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size, "index out of bounds"); _pc[index] = a; }
69
160
161 #if INCLUDE_JVMCI
162 class FailedSpeculation;
163 class JVMCINMethodData;
164 #endif
165
166 class nmethod : public CodeBlob {
167 friend class VMStructs;
168 friend class JVMCIVMStructs;
169 friend class CodeCache; // scavengable oops
170 friend class JVMCINMethodData;
171 friend class DeoptimizationScope;
172
173 private:
174
175 // Used to track in which deoptimization handshake this method will be deoptimized.
176 uint64_t _deoptimization_generation;
177
178 uint64_t _gc_epoch;
179
180 // Profiling counter used to identify the hottest nmethods to record into CDS
181 volatile uint64_t _method_profiling_count;
182
183 Method* _method;
184
185 // To reduce header size, union fields whose usages do not overlap.
186 union {
187 // To support simple linked-list chaining of nmethods:
188 nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
189 struct {
190 // These are used for compiled synchronized native methods to
191 // locate the owner and stack slot for the BasicLock. They are
192 // needed because there is no debug information for compiled native
193 // wrappers and the oop maps are insufficient to allow
194 // frame::retrieve_receiver() to work. Currently they are expected
195 // to be byte offsets from the Java stack pointer for maximum code
196 // sharing between platforms. JVMTI's GetLocalInstance() uses these
197 // offsets to find the receiver for non-static native wrapper frames.
198 ByteSize _native_receiver_sp_offset;
199 ByteSize _native_basic_lock_sp_offset;
200 };
201 };
202
248 #endif
249
250 // Offsets in the immutable data section
251 // (_dependencies_offset == 0, so it needs no field)
252 uint16_t _nul_chk_table_offset;
253 uint16_t _handler_table_offset; // This table could be big in C1 code
254 int _scopes_pcs_offset;
255 int _scopes_data_offset;
256 #if INCLUDE_JVMCI
257 int _speculations_offset;
258 #endif
259
260 // location in frame (offset from sp) where deopt can store the original
261 // pc during a deopt.
262 int _orig_pc_offset;
263
264 int _compile_id; // which compilation made this nmethod
265 CompLevel _comp_level; // compilation level (s1)
266 CompilerType _compiler_type; // which compiler made this nmethod (u1)
267
268 AOTCodeEntry* _aot_code_entry;
269
270 bool _used; // has this nmethod ever been invoked?
271
272 // Local state used to keep track of whether unloading is happening or not
273 volatile uint8_t _is_unloading_state;
274
275 // Protected by NMethodState_lock
276 volatile signed char _state; // {not_installed, in_use, not_entrant}
277
278 // set during construction
279 uint8_t _has_unsafe_access:1, // May fault due to unsafe access.
280 _has_method_handle_invokes:1,// Does this method have MethodHandle invokes?
281 _has_wide_vectors:1, // Preserve wide vectors at safepoints
282 _has_monitors:1, // Fastpath monitor detection for continuations
283 _has_scoped_access:1, // used for the shared scope closure (scopedMemoryAccess.cpp)
284 _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
285 _is_unlinked:1, // set during class unloading
286 _load_reported:1, // used by jvmti to track if an event has been posted for this nmethod
287 _preloaded:1, // nmethod was preloaded from the AOT code cache during startup
288 _has_clinit_barriers:1; // code carries class-initialization barriers (AOT code compiled before the class is initialized)
289
290 enum DeoptimizationStatus : u1 {
291 not_marked,
292 deoptimize,
293 deoptimize_noupdate,
294 deoptimize_done
295 };
296
297 volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
298
299 DeoptimizationStatus deoptimization_status() const {
300 return Atomic::load(&_deoptimization_status);
301 }
302
303 // Initialize fields to their default values
304 void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets);
305
306 // Post initialization
307 void post_init();
308
322 // For normal JIT compiled code
323 nmethod(Method* method,
324 CompilerType type,
325 int nmethod_size,
326 int immutable_data_size,
327 int mutable_data_size,
328 int compile_id,
329 int entry_bci,
330 address immutable_data,
331 CodeOffsets* offsets,
332 int orig_pc_offset,
333 DebugInformationRecorder *recorder,
334 Dependencies* dependencies,
335 CodeBuffer *code_buffer,
336 int frame_size,
337 OopMapSet* oop_maps,
338 ExceptionHandlerTable* handler_table,
339 ImplicitExceptionTable* nul_chk_table,
340 AbstractCompiler* compiler,
341 CompLevel comp_level
342 , AOTCodeEntry* aot_code_entry
343 #if INCLUDE_JVMCI
344 , char* speculations = nullptr,
345 int speculations_len = 0,
346 JVMCINMethodData* jvmci_data = nullptr
347 #endif
348 );
349
350 // helper methods
351 void* operator new(size_t size, int nmethod_size, int comp_level) throw();
352
353 // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
354 // Attention: Only allow NonNMethod space for special nmethods which don't need to be
355 // findable by nmethod iterators! In particular, they must not contain oops!
356 void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();
357
358 const char* reloc_string_for(u_char* begin, u_char* end);
359
360 bool try_transition(signed char new_state);
361
362 // Returns true if this thread changed the state of the nmethod or false if another thread performed the transition.
465 // Attempt Unclaimed -> N|WR transition. Returns true if successful.
466 bool oops_do_try_claim_weak_request();
467
468 // Attempt Unclaimed -> N|SD transition. Returns the current link.
469 oops_do_mark_link* oops_do_try_claim_strong_done();
470 // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
471 nmethod* oops_do_try_add_to_list_as_weak_done();
472
473 // Attempt X|WD -> N|SR transition. Returns the current link.
474 oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
475 // Attempt X|WD -> X|SD transition. Returns true if successful.
476 bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);
477
478 // Do the N|SD -> X|SD transition.
479 void oops_do_add_to_list_as_strong_done();
480
481 // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
482 // transitions).
483 void oops_do_set_strong_done(nmethod* old_head);
484
485 void record_nmethod_dependency();
486
487 nmethod* restore(address code_cache_buffer,
488 const methodHandle& method,
489 int compile_id,
490 address reloc_data,
491 GrowableArray<Handle>& oop_list,
492 GrowableArray<Metadata*>& metadata_list,
493 ImmutableOopMapSet* oop_maps,
494 address immutable_data,
495 GrowableArray<Handle>& reloc_imm_oop_list,
496 GrowableArray<Metadata*>& reloc_imm_metadata_list,
497 AOTCodeReader* aot_code_reader);
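// Note (an editorial sketch of the relationship, based only on the
// declarations here): the public new_nmethod(archived_nm, ...) overload below
// allocates code-cache space and then uses restore() to rebuild a live nmethod
// from the archived image, re-linking the oop/metadata lists recorded at dump
// time.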
498
499 public:
500 // create nmethod using archived nmethod from AOT code cache
501 static nmethod* new_nmethod(nmethod* archived_nm,
502 const methodHandle& method,
503 AbstractCompiler* compiler,
504 int compile_id,
505 address reloc_data,
506 GrowableArray<Handle>& oop_list,
507 GrowableArray<Metadata*>& metadata_list,
508 ImmutableOopMapSet* oop_maps,
509 address immutable_data,
510 GrowableArray<Handle>& reloc_imm_oop_list,
511 GrowableArray<Metadata*>& reloc_imm_metadata_list,
512 AOTCodeReader* aot_code_reader);
513
514 // create nmethod with entry_bci
515 static nmethod* new_nmethod(const methodHandle& method,
516 int compile_id,
517 int entry_bci,
518 CodeOffsets* offsets,
519 int orig_pc_offset,
520 DebugInformationRecorder* recorder,
521 Dependencies* dependencies,
522 CodeBuffer *code_buffer,
523 int frame_size,
524 OopMapSet* oop_maps,
525 ExceptionHandlerTable* handler_table,
526 ImplicitExceptionTable* nul_chk_table,
527 AbstractCompiler* compiler,
528 CompLevel comp_level
529 , AOTCodeEntry* aot_code_entry
530 #if INCLUDE_JVMCI
531 , char* speculations = nullptr,
532 int speculations_len = 0,
533 JVMCINMethodData* jvmci_data = nullptr
534 #endif
535 );
536
537 static nmethod* new_native_nmethod(const methodHandle& method,
538 int compile_id,
539 CodeBuffer *code_buffer,
540 int vep_offset,
541 int frame_complete,
542 int frame_size,
543 ByteSize receiver_sp_offset,
544 ByteSize basic_lock_sp_offset,
545 OopMapSet* oop_maps,
546 int exception_handler = -1);
547
548 Method* method () const { return _method; }
549 uint16_t entry_bci () const { return _entry_bci; }
550 bool is_native_method() const { return _method != nullptr && _method->is_native(); }
551 bool is_java_method () const { return _method != nullptr && !_method->is_native(); }
552 bool is_osr_method () const { return _entry_bci != InvocationEntryBci; }
553
554 int orig_pc_offset() { return _orig_pc_offset; }
555
556 // Compiler task identification. Note that all OSR methods
557 // are numbered in an independent sequence if CICountOSR is true,
558 // and native method wrappers are also numbered independently if
559 // CICountNative is true.
560 int compile_id() const { return _compile_id; }
561 int comp_level() const { return _comp_level; }
562 const char* compile_kind() const;
563
564 inline bool is_compiled_by_c1 () const { return _compiler_type == compiler_c1; }
565 inline bool is_compiled_by_c2 () const { return _compiler_type == compiler_c2; }
566 inline bool is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
567 CompilerType compiler_type () const { return _compiler_type; }
568 const char* compiler_name () const;
569
570 // boundaries for different parts
571 address consts_begin () const { return content_begin(); }
572 address consts_end () const { return code_begin() ; }
573 address insts_begin () const { return code_begin() ; }
574 address insts_end () const { return header_begin() + _stub_offset ; }
575 address stub_begin () const { return header_begin() + _stub_offset ; }
576 address stub_end () const { return code_end() ; }
577 address exception_begin () const { return header_begin() + _exception_offset ; }
578 address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
579 address deopt_mh_handler_begin() const { return _deopt_mh_handler_offset != -1 ? (header_begin() + _deopt_mh_handler_offset) : nullptr; }
580 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
581 oop* oops_begin () const { return (oop*) data_begin(); }
582 oop* oops_end () const { return (oop*) data_end(); }
583
584 // mutable data
585 Metadata** metadata_begin () const { return (Metadata**) (mutable_data_begin() + _relocation_size); }
586 #if INCLUDE_JVMCI
587 Metadata** metadata_end () const { return (Metadata**) (mutable_data_begin() + _relocation_size + _metadata_size); }
588 address jvmci_data_begin () const { return mutable_data_begin() + _relocation_size + _metadata_size; }
589 address jvmci_data_end () const { return mutable_data_end(); }
590 #else
591 Metadata** metadata_end () const { return (Metadata**) mutable_data_end(); }
592 #endif
593
594 // immutable data
595 void set_immutable_data(address data) { _immutable_data = data; }
596 address immutable_data_begin () const { return _immutable_data; }
597 address immutable_data_end () const { return _immutable_data + _immutable_data_size ; }
598 address dependencies_begin () const { return _immutable_data; }
599 address dependencies_end () const { return _immutable_data + _nul_chk_table_offset; }
600 address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; }
601 address nul_chk_table_end () const { return _immutable_data + _handler_table_offset; }
602 address handler_table_begin () const { return _immutable_data + _handler_table_offset; }
603 address handler_table_end () const { return _immutable_data + _scopes_pcs_offset ; }
604 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset) ; }
605 PcDesc* scopes_pcs_end () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
606 address scopes_data_begin () const { return _immutable_data + _scopes_data_offset ; }
607
608 #if INCLUDE_JVMCI
609 address scopes_data_end () const { return _immutable_data + _speculations_offset ; }
610 address speculations_begin () const { return _immutable_data + _speculations_offset ; }
611 address speculations_end () const { return immutable_data_end(); }
612 #else
613 address scopes_data_end () const { return immutable_data_end(); }
614 #endif
615
655 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
656
657 enum : signed char { not_installed = -1, // under construction; only the owner doing the construction is
658 // allowed to advance the state
659 in_use = 0, // executable nmethod
660 not_entrant = 1 // marked for deoptimization but activations may still exist
661 };
662
663 // flag accessing and manipulation
664 bool is_not_installed() const { return _state == not_installed; }
665 bool is_in_use() const { return _state <= in_use; }
666 bool is_not_entrant() const { return _state == not_entrant; }
667 int get_state() const { return _state; }
668
669 void clear_unloading_state();
671 // Heuristically deduce whether this nmethod isn't worth keeping around
671 bool is_cold();
672 bool is_unloading();
673 void do_unloading(bool unloading_occurred);
674
675 void inc_method_profiling_count();
676 uint64_t method_profiling_count();
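// Sketch of the intended use (an assumption drawn from the counter's comment
// above): hot code paths bump the counter and a later AOT/CDS dump pass ranks
// nmethods by it:
//
//   nm->inc_method_profiling_count();           // e.g. from a profiling stub
//   uint64_t hotness = nm->method_profiling_count();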
677
678 bool make_in_use() {
679 return try_transition(in_use);
680 }
681 // Make the nmethod non-entrant. The nmethod will continue to be
682 // alive. It is used when an uncommon trap happens. Returns true
683 // if this thread changed the state of the nmethod or false if
684 // another thread performed the transition.
685 bool make_not_entrant(const char* reason, bool keep_aot_entry = false);
686 bool make_not_used() { return make_not_entrant("not used", true /* keep AOT entry */); }
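// Usage note (an inference from the default argument, not a quote from the
// implementation): keep_aot_entry = true leaves _aot_code_entry intact so the
// archived code can be re-adopted later, while plain deoptimization paths use
// the default and drop it.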
687
688 bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
689 bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
690 void set_deoptimized_done();
691
692 bool update_recompile_counts() const {
693 // Update recompile counts when either the update is explicitly requested (deoptimize)
694 // or the nmethod is not marked for deoptimization at all (not_marked).
695 // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
696 DeoptimizationStatus status = deoptimization_status();
697 return status != deoptimize_noupdate && status != deoptimize_done;
698 }
699
700 // tells whether frames described by this nmethod can be deoptimized
701 // note: native wrappers cannot be deoptimized.
702 bool can_be_deoptimized() const { return is_java_method(); }
703
704 bool has_dependencies() { return dependencies_size() != 0; }
705 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
706 void flush_dependencies();
708 template<typename T>
709 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
710 template<typename T>
711 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
712
713 bool has_unsafe_access() const { return _has_unsafe_access; }
714 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
715
716 bool has_monitors() const { return _has_monitors; }
717 void set_has_monitors(bool z) { _has_monitors = z; }
718
719 bool has_scoped_access() const { return _has_scoped_access; }
720 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
721
722 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
723 void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
724
725 bool has_wide_vectors() const { return _has_wide_vectors; }
726 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
727
728 bool has_clinit_barriers() const { return _has_clinit_barriers; }
729 void set_has_clinit_barriers(bool z) { _has_clinit_barriers = z; }
730
731 bool preloaded() const { return _preloaded; }
732 void set_preloaded(bool z) { _preloaded = z; }
733
734 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
735 void set_has_flushed_dependencies(bool z) {
736 assert(!has_flushed_dependencies(), "should only happen once");
737 _has_flushed_dependencies = z;
738 }
739
740 bool is_unlinked() const { return _is_unlinked; }
741 void set_is_unlinked() {
742 assert(!_is_unlinked, "already unlinked");
743 _is_unlinked = true;
744 }
745
746 bool used() const { return _used; }
747 void set_used() { _used = true; }
748
749 bool is_aot() const { return _aot_code_entry != nullptr; }
750 void set_aot_code_entry(AOTCodeEntry* entry) { _aot_code_entry = entry; }
751 AOTCodeEntry* aot_code_entry() const { return _aot_code_entry; }
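// Minimal sketch: code paths that special-case AOT-materialized methods are
// expected to guard on is_aot() before touching the entry, e.g.
//
//   if (nm->is_aot()) {
//     AOTCodeEntry* e = nm->aot_code_entry();
//     // ... consult archived data for this nmethod ...
//   }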
752
753 // Support for oops in scopes and relocs:
754 // Note: index 0 is reserved for null.
755 oop oop_at(int index) const;
756 oop oop_at_phantom(int index) const; // phantom reference
757 oop* oop_addr_at(int index) const { // for GC
758 // relocation indexes are biased by 1 (because 0 is reserved)
759 assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
760 return &oops_begin()[index - 1];
761 }
762
763 // Support for meta data in scopes and relocs:
764 // Note: index 0 is reserved for null.
765 Metadata* metadata_at(int index) const { return index == 0 ? nullptr: *metadata_addr_at(index); }
766 Metadata** metadata_addr_at(int index) const { // for GC
767 // relocation indexes are biased by 1 (because 0 is reserved)
768 assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
769 return &metadata_begin()[index - 1];
770 }
771
772 void copy_values(GrowableArray<Handle>* array);
773 void copy_values(GrowableArray<jobject>* oops);
774 void copy_values(GrowableArray<Metadata*>* metadata);
775 void copy_values(GrowableArray<address>* metadata) {} // Nothing to do
776
777 // Relocation support
778 private:
779 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
780 inline void initialize_immediate_oop(oop* dest, jobject handle);
781
782 protected:
783 address oops_reloc_begin() const;
784
785 public:
786 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
787 void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); }
788
789 void create_reloc_immediates_list(JavaThread* thread, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list);
790
791 bool is_at_poll_return(address pc);
792 bool is_at_poll_or_poll_return(address pc);
793
794 protected:
795 // Exception cache support
796 // Note: _exception_cache may be read and cleaned concurrently.
797 ExceptionCache* exception_cache() const { return _exception_cache; }
798 ExceptionCache* exception_cache_acquire() const;
799
800 public:
801 address handler_for_exception_and_pc(Handle exception, address pc);
802 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
803 void clean_exception_cache();
804
805 void add_exception_cache_entry(ExceptionCache* new_entry);
806 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
807
808
809 // MethodHandle
810 bool is_method_handle_return(address return_pc);
939 address* orig_pc_addr(const frame* fr);
940
941 // used by jvmti to track if the load event has been reported
942 bool load_reported() const { return _load_reported; }
943 void set_load_reported() { _load_reported = true; }
944
945 public:
946 // ScopeDesc retrieval operation
947 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
948 // pc_desc_near returns the first PcDesc at or after the given pc.
949 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
950
951 // ScopeDesc for an instruction
952 ScopeDesc* scope_desc_at(address pc);
953 ScopeDesc* scope_desc_near(address pc);
954
955 // copying of debugging information
956 void copy_scopes_pcs(PcDesc* pcs, int count);
957 void copy_scopes_data(address buffer, int size);
958
959 // Post successful compilation
960 void post_compiled_method(CompileTask* task);
961
962 // jvmti support:
963 void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);
964
965 // verify operations
966 void verify();
967 void verify_scopes();
968 void verify_interrupt_point(address interrupt_point, bool is_inline_cache);
969
970 // Disassemble this nmethod with additional debug information, e.g. information about blocks.
971 void decode2(outputStream* st) const;
972 void print_constant_pool(outputStream* st);
973
974 // Avoid hiding of parent's 'decode(outputStream*)' method.
975 void decode(outputStream* st) const { decode2(st); } // just delegate here.
976
977 // printing support
978 void print_on_impl(outputStream* st) const;
979 void print_code();
980 void print_value_on_impl(outputStream* st) const;
981
982 #if defined(SUPPORT_DATA_STRUCTS)
983 // print output in opt build for disassembler library
984 void print_relocations_on(outputStream* st) PRODUCT_RETURN;
985 void print_pcs_on(outputStream* st);
986 void print_scopes() { print_scopes_on(tty); }
987 void print_scopes_on(outputStream* st) PRODUCT_RETURN;
988 void print_handler_table();
989 void print_nul_chk_table();
990 void print_recorded_oop(int log_n, int index);
991 void print_recorded_oops();
992 void print_recorded_metadata();
993
994 void print_oops(outputStream* st); // oops from the underlying CodeBlob.
995 void print_metadata(outputStream* st); // metadata in metadata pool.
996 #else
997 void print_pcs_on(outputStream* st) { return; }
998 #endif
999
1000 void print_calls(outputStream* st) PRODUCT_RETURN;
1001 static void print_statistics() PRODUCT_RETURN;
1002
1003 void maybe_print_nmethod(const DirectiveSet* directive);
1004 void print_nmethod(bool print_code);
1032 ByteSize native_receiver_sp_offset() {
1033 assert(is_native_method(), "sanity");
1034 return _native_receiver_sp_offset;
1035 }
1036 ByteSize native_basic_lock_sp_offset() {
1037 assert(is_native_method(), "sanity");
1038 return _native_basic_lock_sp_offset;
1039 }
1040
1041 // support for code generation
1042 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1043 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1044
1045 void metadata_do(MetadataClosure* f);
1046
1047 address call_instruction_address(address pc) const;
1048
1049 void make_deoptimized();
1050 void finalize_relocations();
1051
1052 void prepare_for_archiving_impl();
1053
1054 class Vptr : public CodeBlob::Vptr {
1055 void print_on(const CodeBlob* instance, outputStream* st) const override {
1056 ttyLocker ttyl;
1057 instance->as_nmethod()->print_on_impl(st);
1058 }
1059 void print_value_on(const CodeBlob* instance, outputStream* st) const override {
1060 instance->as_nmethod()->print_value_on_impl(st);
1061 }
1062 void prepare_for_archiving(CodeBlob* instance) const override {
1063 ((nmethod*)instance)->prepare_for_archiving_impl();
1064 }
1065 };
1066
1067 static const Vptr _vpntr;
1068 };
1069
1070 #endif // SHARE_CODE_NMETHOD_HPP