28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32
33 class AbstractCompiler;
34 class CompiledDirectCall;
35 class CompiledIC;
36 class CompiledICData;
37 class CompileTask;
38 class DepChange;
39 class Dependencies;
40 class DirectiveSet;
41 class DebugInformationRecorder;
42 class ExceptionHandlerTable;
43 class ImplicitExceptionTable;
44 class JvmtiThreadState;
45 class MetadataClosure;
46 class NativeCallWrapper;
47 class OopIterateClosure;
48 class ScopeDesc;
49 class xmlStream;
50
51 // This class is used internally by nmethods to cache
52 // exception/pc/handler information.
53
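// Illustrative sketch (an assumption, not code from this file): each
// ExceptionCache node serves one exception type and caches up to cache_size
// (pc, handler) pairs, so a repeated throw of the same type from the same pc
// can skip the handler-table search. Conceptually, with hypothetical accessors
// 'exception_type()', 'count()' and 'handler_at()' over the fields below:
//
//   if (cache->exception_type() == exception->klass()) {
//     for (int i = 0; i < cache->count(); i++) {
//       if (cache->pc_at(i) == pc) return cache->handler_at(i);
//     }
//   }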
54 class ExceptionCache : public CHeapObj<mtCode> {
55 friend class VMStructs;
56 private:
57 enum { cache_size = 16 };
58 Klass* _exception_type;
59 address _pc[cache_size];
60 address _handler[cache_size];
61 volatile int _count;
62 ExceptionCache* volatile _next;
63 ExceptionCache* _purge_list_next;
64
65 inline address pc_at(int index);
66   void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
67
158
159 #if INCLUDE_JVMCI
160 class FailedSpeculation;
161 class JVMCINMethodData;
162 #endif
163
164 class nmethod : public CodeBlob {
165 friend class VMStructs;
166 friend class JVMCIVMStructs;
167 friend class CodeCache; // scavengable oops
168 friend class JVMCINMethodData;
169 friend class DeoptimizationScope;
170
171 private:
172
173   // Used to track the deoptimization handshake in which this method will be deoptimized.
174 uint64_t _deoptimization_generation;
175
176 uint64_t _gc_epoch;
177
178 Method* _method;
179
180   // To reduce header size, union fields whose usages do not overlap.
181 union {
182 // To support simple linked-list chaining of nmethods:
183 nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
184 struct {
185 // These are used for compiled synchronized native methods to
186 // locate the owner and stack slot for the BasicLock. They are
187 // needed because there is no debug information for compiled native
188 // wrappers and the oop maps are insufficient to allow
189 // frame::retrieve_receiver() to work. Currently they are expected
190 // to be byte offsets from the Java stack pointer for maximum code
191 // sharing between platforms. JVMTI's GetLocalInstance() uses these
192 // offsets to find the receiver for non-static native wrapper frames.
193 ByteSize _native_receiver_sp_offset;
194 ByteSize _native_basic_lock_sp_offset;
195 };
196 };
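  // Illustrative note (a sketch): the two union arms never overlap in practice.
  // _osr_link is only traversed for OSR nmethods chained from InstanceKlass,
  // while the ByteSize pair is only read for native wrappers, whose accessors
  // assert this, e.g.:
  //
  //   if (nm->is_native_method()) {
  //     ByteSize off = nm->native_receiver_sp_offset();  // asserts is_native_method()
  //   }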
197
241 #endif
242
243   // Offsets in the immutable data section
244 // _dependencies_offset == 0
245 uint16_t _nul_chk_table_offset;
246 uint16_t _handler_table_offset; // This table could be big in C1 code
247 int _scopes_pcs_offset;
248 int _scopes_data_offset;
249 #if INCLUDE_JVMCI
250 int _speculations_offset;
251 #endif
252
253   // Location in frame (offset from sp) where deopt can store the original
254   // pc during a deopt.
255 int _orig_pc_offset;
256
257 int _compile_id; // which compilation made this nmethod
258 CompLevel _comp_level; // compilation level (s1)
259 CompilerType _compiler_type; // which compiler made this nmethod (u1)
260
261 // Local state used to keep track of whether unloading is happening or not
262 volatile uint8_t _is_unloading_state;
263
264 // Protected by NMethodState_lock
265 volatile signed char _state; // {not_installed, in_use, not_entrant}
266
267 // set during construction
268 uint8_t _has_unsafe_access:1, // May fault due to unsafe access.
269           _has_method_handle_invokes:1,// Does this method have MethodHandle invokes?
270 _has_wide_vectors:1, // Preserve wide vectors at safepoints
271 _has_monitors:1, // Fastpath monitor detection for continuations
272           _has_scoped_access:1, // used for the shared scope closure (scopedMemoryAccess.cpp)
273 _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
274 _is_unlinked:1, // mark during class unloading
275 _load_reported:1; // used by jvmti to track if an event has been posted for this nmethod
276
277 enum DeoptimizationStatus : u1 {
278 not_marked,
279 deoptimize,
280 deoptimize_noupdate,
281 deoptimize_done
282 };
283
284 volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
285
286 DeoptimizationStatus deoptimization_status() const {
287 return Atomic::load(&_deoptimization_status);
288 }
289
290 // Initialize fields to their default values
291 void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets);
292
293 // Post initialization
294 void post_init();
295
309 // For normal JIT compiled code
310 nmethod(Method* method,
311 CompilerType type,
312 int nmethod_size,
313 int immutable_data_size,
314 int mutable_data_size,
315 int compile_id,
316 int entry_bci,
317 address immutable_data,
318 CodeOffsets* offsets,
319 int orig_pc_offset,
320 DebugInformationRecorder *recorder,
321 Dependencies* dependencies,
322 CodeBuffer *code_buffer,
323 int frame_size,
324 OopMapSet* oop_maps,
325 ExceptionHandlerTable* handler_table,
326 ImplicitExceptionTable* nul_chk_table,
327 AbstractCompiler* compiler,
328 CompLevel comp_level
329 #if INCLUDE_JVMCI
330 , char* speculations = nullptr,
331 int speculations_len = 0,
332 JVMCINMethodData* jvmci_data = nullptr
333 #endif
334 );
335
336 // helper methods
337 void* operator new(size_t size, int nmethod_size, int comp_level) throw();
338
339 // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
340 // Attention: Only allow NonNMethod space for special nmethods which don't need to be
341 // findable by nmethod iterators! In particular, they must not contain oops!
342 void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();
343
344 const char* reloc_string_for(u_char* begin, u_char* end);
345
346 bool try_transition(signed char new_state);
347
348 // Returns true if this thread changed the state of the nmethod or
451 // Attempt Unclaimed -> N|WR transition. Returns true if successful.
452 bool oops_do_try_claim_weak_request();
453
454 // Attempt Unclaimed -> N|SD transition. Returns the current link.
455 oops_do_mark_link* oops_do_try_claim_strong_done();
456 // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
457 nmethod* oops_do_try_add_to_list_as_weak_done();
458
459 // Attempt X|WD -> N|SR transition. Returns the current link.
460 oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
461 // Attempt X|WD -> X|SD transition. Returns true if successful.
462 bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);
463
464 // Do the N|SD -> X|SD transition.
465 void oops_do_add_to_list_as_strong_done();
466
467 // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
468 // transitions).
469 void oops_do_set_strong_done(nmethod* old_head);
470
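  // Illustrative walkthrough (a sketch, not part of this header): a concurrent
  // GC doing weak processing claims each nmethod once and chains it onto the
  // mark list; a later strong pass can upgrade the claim via the *_strong_*
  // functions above:
  //
  //   if (nm->oops_do_try_claim_weak_request()) {            // Unclaimed -> N|WR
  //     ... visit oops weakly ...
  //     if (nm->oops_do_try_add_to_list_as_weak_done() == nullptr) {
  //       // N|WR -> X|WD succeeded: nm is now on the mark list
  //     }
  //   }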
471 public:
472 // create nmethod with entry_bci
473 static nmethod* new_nmethod(const methodHandle& method,
474 int compile_id,
475 int entry_bci,
476 CodeOffsets* offsets,
477 int orig_pc_offset,
478 DebugInformationRecorder* recorder,
479 Dependencies* dependencies,
480 CodeBuffer *code_buffer,
481 int frame_size,
482 OopMapSet* oop_maps,
483 ExceptionHandlerTable* handler_table,
484 ImplicitExceptionTable* nul_chk_table,
485 AbstractCompiler* compiler,
486 CompLevel comp_level
487 #if INCLUDE_JVMCI
488 , char* speculations = nullptr,
489 int speculations_len = 0,
490 JVMCINMethodData* jvmci_data = nullptr
491 #endif
492 );
493
494 static nmethod* new_native_nmethod(const methodHandle& method,
495 int compile_id,
496 CodeBuffer *code_buffer,
497 int vep_offset,
498 int frame_complete,
499 int frame_size,
500 ByteSize receiver_sp_offset,
501 ByteSize basic_lock_sp_offset,
502 OopMapSet* oop_maps,
503 int exception_handler = -1);
504
505 Method* method () const { return _method; }
506 bool is_native_method() const { return _method != nullptr && _method->is_native(); }
507 bool is_java_method () const { return _method != nullptr && !_method->is_native(); }
508 bool is_osr_method () const { return _entry_bci != InvocationEntryBci; }
509
510 // Compiler task identification. Note that all OSR methods
511 // are numbered in an independent sequence if CICountOSR is true,
512 // and native method wrappers are also numbered independently if
513 // CICountNative is true.
514 int compile_id() const { return _compile_id; }
515 const char* compile_kind() const;
516
517 inline bool is_compiled_by_c1 () const { return _compiler_type == compiler_c1; }
518 inline bool is_compiled_by_c2 () const { return _compiler_type == compiler_c2; }
519 inline bool is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
520 CompilerType compiler_type () const { return _compiler_type; }
521 const char* compiler_name () const;
522
523 // boundaries for different parts
524 address consts_begin () const { return content_begin(); }
525 address consts_end () const { return code_begin() ; }
526 address insts_begin () const { return code_begin() ; }
527 address insts_end () const { return header_begin() + _stub_offset ; }
528 address stub_begin () const { return header_begin() + _stub_offset ; }
529 address stub_end () const { return code_end() ; }
530 address exception_begin () const { return header_begin() + _exception_offset ; }
531 address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
532 address deopt_mh_handler_begin() const { return header_begin() + _deopt_mh_handler_offset ; }
533 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
534 oop* oops_begin () const { return (oop*) data_begin(); }
535 oop* oops_end () const { return (oop*) data_end(); }
536
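  // Example (a sketch): the begin/end pairs above make section sizes plain
  // pointer arithmetic, e.g.:
  //
  //   int consts_size = (int)(nm->consts_end() - nm->consts_begin());
  //   int insts_size  = (int)(nm->insts_end()  - nm->insts_begin());
  //   int stub_size   = (int)(nm->stub_end()   - nm->stub_begin());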
537 // mutable data
538 Metadata** metadata_begin () const { return (Metadata**) (mutable_data_begin() + _relocation_size); }
539 #if INCLUDE_JVMCI
540 Metadata** metadata_end () const { return (Metadata**) (mutable_data_end() - _jvmci_data_size); }
541 address jvmci_data_begin () const { return mutable_data_end() - _jvmci_data_size; }
542 address jvmci_data_end () const { return mutable_data_end(); }
543 #else
544 Metadata** metadata_end () const { return (Metadata**) mutable_data_end(); }
545 #endif
546
547 // immutable data
548 address immutable_data_begin () const { return _immutable_data; }
549 address immutable_data_end () const { return _immutable_data + _immutable_data_size ; }
550 address dependencies_begin () const { return _immutable_data; }
551 address dependencies_end () const { return _immutable_data + _nul_chk_table_offset; }
552 address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; }
553 address nul_chk_table_end () const { return _immutable_data + _handler_table_offset; }
554 address handler_table_begin () const { return _immutable_data + _handler_table_offset; }
555 address handler_table_end () const { return _immutable_data + _scopes_pcs_offset ; }
556 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset) ; }
557 PcDesc* scopes_pcs_end () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
558 address scopes_data_begin () const { return _immutable_data + _scopes_data_offset ; }
559
560 #if INCLUDE_JVMCI
561 address scopes_data_end () const { return _immutable_data + _speculations_offset ; }
562 address speculations_begin () const { return _immutable_data + _speculations_offset ; }
563 address speculations_end () const { return immutable_data_end(); }
564 #else
565 address scopes_data_end () const { return immutable_data_end(); }
566 #endif
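  // Resulting layout of the immutable data section, as implied by the
  // accessors above (speculations only when INCLUDE_JVMCI):
  //
  //   immutable_data_begin()
  //   |  dependencies   [0,                      _nul_chk_table_offset)
  //   |  nul_chk_table  [_nul_chk_table_offset,  _handler_table_offset)
  //   |  handler_table  [_handler_table_offset,  _scopes_pcs_offset)
  //   |  scopes_pcs     [_scopes_pcs_offset,     _scopes_data_offset)
  //   |  scopes_data    [_scopes_data_offset,    _speculations_offset)
  //   |  speculations   [_speculations_offset,   _immutable_data_size)
  //   immutable_data_end()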
567
607 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
608
609 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
610 // allowed to advance state
611 in_use = 0, // executable nmethod
612 not_entrant = 1 // marked for deoptimization but activations may still exist
613 };
614
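  // Illustrative sketch (an assumption): the state only ever advances, e.g. an
  // owner installing an nmethod and a later invalidation:
  //
  //   nm->make_in_use();                       // not_installed -> in_use
  //   ...
  //   nm->make_not_entrant("uncommon trap");   // in_use -> not_entrant
  //
  // try_transition() returns false when another thread performed the change first.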
615 // flag accessing and manipulation
616 bool is_not_installed() const { return _state == not_installed; }
617 bool is_in_use() const { return _state <= in_use; }
618 bool is_not_entrant() const { return _state == not_entrant; }
619 int get_state() const { return _state; }
620
621 void clear_unloading_state();
622   // Heuristically deduce whether an nmethod is no longer worth keeping around
623 bool is_cold();
624 bool is_unloading();
625 void do_unloading(bool unloading_occurred);
626
627 bool make_in_use() {
628 return try_transition(in_use);
629 }
630   // Make the nmethod non-entrant. The nmethod will continue to be
631   // alive. It is used when an uncommon trap happens. Returns true
632   // if this thread changed the state of the nmethod or false if
633   // another thread performed the transition.
634 bool make_not_entrant(const char* reason);
635 bool make_not_used() { return make_not_entrant("not used"); }
636
637 bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
638 bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
639 void set_deoptimized_done();
640
641 bool update_recompile_counts() const {
642 // Update recompile counts when either the update is explicitly requested (deoptimize)
643 // or the nmethod is not marked for deoptimization at all (not_marked).
644     // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
645 DeoptimizationStatus status = deoptimization_status();
646 return status != deoptimize_noupdate && status != deoptimize_done;
647 }
648
649 // tells whether frames described by this nmethod can be deoptimized
650 // note: native wrappers cannot be deoptimized.
651 bool can_be_deoptimized() const { return is_java_method(); }
652
653 bool has_dependencies() { return dependencies_size() != 0; }
654 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
657 template<typename T>
658 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
659 template<typename T>
660 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
661
662 bool has_unsafe_access() const { return _has_unsafe_access; }
663 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
664
665 bool has_monitors() const { return _has_monitors; }
666 void set_has_monitors(bool z) { _has_monitors = z; }
667
668 bool has_scoped_access() const { return _has_scoped_access; }
669 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
670
671 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
672 void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
673
674 bool has_wide_vectors() const { return _has_wide_vectors; }
675 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
676
677 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
678 void set_has_flushed_dependencies(bool z) {
679 assert(!has_flushed_dependencies(), "should only happen once");
680 _has_flushed_dependencies = z;
681 }
682
683 bool is_unlinked() const { return _is_unlinked; }
684 void set_is_unlinked() {
685 assert(!_is_unlinked, "already unlinked");
686 _is_unlinked = true;
687 }
688
689 int comp_level() const { return _comp_level; }
690
691 // Support for oops in scopes and relocs:
692 // Note: index 0 is reserved for null.
693 oop oop_at(int index) const;
694 oop oop_at_phantom(int index) const; // phantom reference
695 oop* oop_addr_at(int index) const { // for GC
696 // relocation indexes are biased by 1 (because 0 is reserved)
697 assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
698 return &oops_begin()[index - 1];
699 }
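  // Example (a sketch): recorded-oop indexes are 1-biased, so index 1 names the
  // first slot and index 0 is reserved for null:
  //
  //   oop* slot = nm->oop_addr_at(1);   // == &oops_begin()[0]
  //   oop  o    = nm->oop_at(1);        // first recorded oop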
700
701 // Support for meta data in scopes and relocs:
702 // Note: index 0 is reserved for null.
703 Metadata* metadata_at(int index) const { return index == 0 ? nullptr: *metadata_addr_at(index); }
704 Metadata** metadata_addr_at(int index) const { // for GC
705 // relocation indexes are biased by 1 (because 0 is reserved)
706 assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
707 return &metadata_begin()[index - 1];
708 }
709
710 void copy_values(GrowableArray<jobject>* oops);
711 void copy_values(GrowableArray<Metadata*>* metadata);
712 void copy_values(GrowableArray<address>* metadata) {} // Nothing to do
713
714 // Relocation support
715 private:
716 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
717 inline void initialize_immediate_oop(oop* dest, jobject handle);
718
719 protected:
720 address oops_reloc_begin() const;
721
722 public:
723 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
724 void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); }
725
726 bool is_at_poll_return(address pc);
727 bool is_at_poll_or_poll_return(address pc);
728
729 protected:
730 // Exception cache support
731 // Note: _exception_cache may be read and cleaned concurrently.
732 ExceptionCache* exception_cache() const { return _exception_cache; }
733 ExceptionCache* exception_cache_acquire() const;
734
735 public:
736 address handler_for_exception_and_pc(Handle exception, address pc);
737 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
738 void clean_exception_cache();
739
740 void add_exception_cache_entry(ExceptionCache* new_entry);
741 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
742
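  // Illustrative flow (an assumption about typical use): exception dispatch
  // consults the cache first and falls back to the handler table, remembering
  // the result:
  //
  //   address h = nm->handler_for_exception_and_pc(exception, pc);
  //   if (h == nullptr) {
  //     h = /* look up via handler_table / scope information */;
  //     nm->add_handler_for_exception_and_pc(exception, pc, h);
  //   }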
743
744 // MethodHandle
745 bool is_method_handle_return(address return_pc);
876   // used by jvmti to track if the load event has been reported
877 bool load_reported() const { return _load_reported; }
878 void set_load_reported() { _load_reported = true; }
879
880 public:
881 // ScopeDesc retrieval operation
882 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
883 // pc_desc_near returns the first PcDesc at or after the given pc.
884 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
885
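  // Example (a sketch): pc_desc_at requires an exact pc match, while
  // pc_desc_near accepts the first PcDesc at or after the pc:
  //
  //   PcDesc* exact = nm->pc_desc_at(pc);    // nullptr unless pc matches exactly
  //   PcDesc* after = nm->pc_desc_near(pc);  // first PcDesc at or after pc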
886 // ScopeDesc for an instruction
887 ScopeDesc* scope_desc_at(address pc);
888 ScopeDesc* scope_desc_near(address pc);
889
890 // copying of debugging information
891 void copy_scopes_pcs(PcDesc* pcs, int count);
892 void copy_scopes_data(address buffer, int size);
893
894 int orig_pc_offset() { return _orig_pc_offset; }
895
896 // Post successful compilation
897 void post_compiled_method(CompileTask* task);
898
899 // jvmti support:
900 void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);
901
902 // verify operations
903 void verify();
904 void verify_scopes();
905 void verify_interrupt_point(address interrupt_point, bool is_inline_cache);
906
907 // Disassemble this nmethod with additional debug information, e.g. information about blocks.
908 void decode2(outputStream* st) const;
909 void print_constant_pool(outputStream* st);
910
911 // Avoid hiding of parent's 'decode(outputStream*)' method.
912 void decode(outputStream* st) const { decode2(st); } // just delegate here.
913
914 // printing support
915 void print_on_impl(outputStream* st) const;
916 void print_code();
917 void print_value_on_impl(outputStream* st) const;
918
919 #if defined(SUPPORT_DATA_STRUCTS)
920 // print output in opt build for disassembler library
921 void print_relocations() PRODUCT_RETURN;
922 void print_pcs_on(outputStream* st);
923 void print_scopes() { print_scopes_on(tty); }
924 void print_scopes_on(outputStream* st) PRODUCT_RETURN;
925 void print_handler_table();
926 void print_nul_chk_table();
927 void print_recorded_oop(int log_n, int index);
928 void print_recorded_oops();
929 void print_recorded_metadata();
930
931 void print_oops(outputStream* st); // oops from the underlying CodeBlob.
932 void print_metadata(outputStream* st); // metadata in metadata pool.
933 #else
934 void print_pcs_on(outputStream* st) { return; }
935 #endif
936
937 void print_calls(outputStream* st) PRODUCT_RETURN;
938 static void print_statistics() PRODUCT_RETURN;
939
940 void maybe_print_nmethod(const DirectiveSet* directive);
941 void print_nmethod(bool print_code);
969 ByteSize native_receiver_sp_offset() {
970 assert(is_native_method(), "sanity");
971 return _native_receiver_sp_offset;
972 }
973 ByteSize native_basic_lock_sp_offset() {
974 assert(is_native_method(), "sanity");
975 return _native_basic_lock_sp_offset;
976 }
977
978 // support for code generation
979 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
980 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
981
982 void metadata_do(MetadataClosure* f);
983
984 address call_instruction_address(address pc) const;
985
986 void make_deoptimized();
987 void finalize_relocations();
988
989 class Vptr : public CodeBlob::Vptr {
990 void print_on(const CodeBlob* instance, outputStream* st) const override {
991 ttyLocker ttyl;
992 instance->as_nmethod()->print_on_impl(st);
993 }
994 void print_value_on(const CodeBlob* instance, outputStream* st) const override {
995 instance->as_nmethod()->print_value_on_impl(st);
996 }
997 };
998
999 static const Vptr _vpntr;
1000 };
1001
1002 #endif // SHARE_CODE_NMETHOD_HPP
28 #include "code/codeBlob.hpp"
29 #include "code/pcDesc.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32
33 class AbstractCompiler;
34 class CompiledDirectCall;
35 class CompiledIC;
36 class CompiledICData;
37 class CompileTask;
38 class DepChange;
39 class Dependencies;
40 class DirectiveSet;
41 class DebugInformationRecorder;
42 class ExceptionHandlerTable;
43 class ImplicitExceptionTable;
44 class JvmtiThreadState;
45 class MetadataClosure;
46 class NativeCallWrapper;
47 class OopIterateClosure;
48 class SCCReader;
49 class SCCEntry;
50 class ScopeDesc;
51 class xmlStream;
52
53 // This class is used internally by nmethods to cache
54 // exception/pc/handler information.
55
56 class ExceptionCache : public CHeapObj<mtCode> {
57 friend class VMStructs;
58 private:
59 enum { cache_size = 16 };
60 Klass* _exception_type;
61 address _pc[cache_size];
62 address _handler[cache_size];
63 volatile int _count;
64 ExceptionCache* volatile _next;
65 ExceptionCache* _purge_list_next;
66
67 inline address pc_at(int index);
68   void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
69
160
161 #if INCLUDE_JVMCI
162 class FailedSpeculation;
163 class JVMCINMethodData;
164 #endif
165
166 class nmethod : public CodeBlob {
167 friend class VMStructs;
168 friend class JVMCIVMStructs;
169 friend class CodeCache; // scavengable oops
170 friend class JVMCINMethodData;
171 friend class DeoptimizationScope;
172
173 private:
174
175   // Used to track the deoptimization handshake in which this method will be deoptimized.
176 uint64_t _deoptimization_generation;
177
178 uint64_t _gc_epoch;
179
180   // Profiling counter used to identify the hottest nmethods to record into the CDS archive
181 volatile uint64_t _method_profiling_count;
182
183 Method* _method;
184
185   // To reduce header size, union fields whose usages do not overlap.
186 union {
187 // To support simple linked-list chaining of nmethods:
188 nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
189 struct {
190 // These are used for compiled synchronized native methods to
191 // locate the owner and stack slot for the BasicLock. They are
192 // needed because there is no debug information for compiled native
193 // wrappers and the oop maps are insufficient to allow
194 // frame::retrieve_receiver() to work. Currently they are expected
195 // to be byte offsets from the Java stack pointer for maximum code
196 // sharing between platforms. JVMTI's GetLocalInstance() uses these
197 // offsets to find the receiver for non-static native wrapper frames.
198 ByteSize _native_receiver_sp_offset;
199 ByteSize _native_basic_lock_sp_offset;
200 };
201 };
202
246 #endif
247
248   // Offsets in the immutable data section
249 // _dependencies_offset == 0
250 uint16_t _nul_chk_table_offset;
251 uint16_t _handler_table_offset; // This table could be big in C1 code
252 int _scopes_pcs_offset;
253 int _scopes_data_offset;
254 #if INCLUDE_JVMCI
255 int _speculations_offset;
256 #endif
257
258   // Location in frame (offset from sp) where deopt can store the original
259   // pc during a deopt.
260 int _orig_pc_offset;
261
262 int _compile_id; // which compilation made this nmethod
263 CompLevel _comp_level; // compilation level (s1)
264 CompilerType _compiler_type; // which compiler made this nmethod (u1)
265
266 SCCEntry* _scc_entry;
267
268 bool _used; // has this nmethod ever been invoked?
269
270 // Local state used to keep track of whether unloading is happening or not
271 volatile uint8_t _is_unloading_state;
272
273 // Protected by NMethodState_lock
274 volatile signed char _state; // {not_installed, in_use, not_entrant}
275
276 // set during construction
277 uint8_t _has_unsafe_access:1, // May fault due to unsafe access.
278           _has_method_handle_invokes:1,// Does this method have MethodHandle invokes?
279 _has_wide_vectors:1, // Preserve wide vectors at safepoints
280 _has_monitors:1, // Fastpath monitor detection for continuations
281           _has_scoped_access:1, // used for the shared scope closure (scopedMemoryAccess.cpp)
282 _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
283 _is_unlinked:1, // mark during class unloading
284 _load_reported:1, // used by jvmti to track if an event has been posted for this nmethod
285           _preloaded:1,                // (AOT, assumed meaning) preloaded at startup from the code cache
286           _has_clinit_barriers:1;      // (AOT, assumed meaning) code contains class-initialization barriers
287
288 enum DeoptimizationStatus : u1 {
289 not_marked,
290 deoptimize,
291 deoptimize_noupdate,
292 deoptimize_done
293 };
294
295 volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
296
297 DeoptimizationStatus deoptimization_status() const {
298 return Atomic::load(&_deoptimization_status);
299 }
300
301 // Initialize fields to their default values
302 void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets);
303
304 // Post initialization
305 void post_init();
306
320 // For normal JIT compiled code
321 nmethod(Method* method,
322 CompilerType type,
323 int nmethod_size,
324 int immutable_data_size,
325 int mutable_data_size,
326 int compile_id,
327 int entry_bci,
328 address immutable_data,
329 CodeOffsets* offsets,
330 int orig_pc_offset,
331 DebugInformationRecorder *recorder,
332 Dependencies* dependencies,
333 CodeBuffer *code_buffer,
334 int frame_size,
335 OopMapSet* oop_maps,
336 ExceptionHandlerTable* handler_table,
337 ImplicitExceptionTable* nul_chk_table,
338 AbstractCompiler* compiler,
339 CompLevel comp_level
340 , SCCEntry* scc_entry
341 #if INCLUDE_JVMCI
342 , char* speculations = nullptr,
343 int speculations_len = 0,
344 JVMCINMethodData* jvmci_data = nullptr
345 #endif
346 );
347
348 // helper methods
349 void* operator new(size_t size, int nmethod_size, int comp_level) throw();
350
351 // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
352 // Attention: Only allow NonNMethod space for special nmethods which don't need to be
353 // findable by nmethod iterators! In particular, they must not contain oops!
354 void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();
355
356 const char* reloc_string_for(u_char* begin, u_char* end);
357
358 bool try_transition(signed char new_state);
359
360 // Returns true if this thread changed the state of the nmethod or
463 // Attempt Unclaimed -> N|WR transition. Returns true if successful.
464 bool oops_do_try_claim_weak_request();
465
466 // Attempt Unclaimed -> N|SD transition. Returns the current link.
467 oops_do_mark_link* oops_do_try_claim_strong_done();
468 // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
469 nmethod* oops_do_try_add_to_list_as_weak_done();
470
471 // Attempt X|WD -> N|SR transition. Returns the current link.
472 oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
473 // Attempt X|WD -> X|SD transition. Returns true if successful.
474 bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);
475
476 // Do the N|SD -> X|SD transition.
477 void oops_do_add_to_list_as_strong_done();
478
479 // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
480 // transitions).
481 void oops_do_set_strong_done(nmethod* old_head);
482
483 void record_nmethod_dependency();
484
485 void restore_from_archive(nmethod* archived_nm,
486 const methodHandle& method,
487 int compile_id,
488 address reloc_data,
489 GrowableArray<Handle>& oop_list,
490 GrowableArray<Metadata*>& metadata_list,
491 ImmutableOopMapSet* oop_maps,
492 address immutable_data,
493 GrowableArray<Handle>& reloc_imm_oop_list,
494 GrowableArray<Metadata*>& reloc_imm_metadata_list,
495 #ifndef PRODUCT
496 AsmRemarks& asm_remarks,
497 DbgStrings& dbg_strings,
498 #endif /* PRODUCT */
499 SCCReader* scc_reader);
500
501 public:
502 // create nmethod using archived nmethod from AOT code cache
503 static nmethod* new_nmethod(nmethod* archived_nm,
504 const methodHandle& method,
505 AbstractCompiler* compiler,
506 int compile_id,
507 address reloc_data,
508 GrowableArray<Handle>& oop_list,
509 GrowableArray<Metadata*>& metadata_list,
510 ImmutableOopMapSet* oop_maps,
511 address immutable_data,
512 GrowableArray<Handle>& reloc_imm_oop_list,
513 GrowableArray<Metadata*>& reloc_imm_metadata_list,
514 #ifndef PRODUCT
515 AsmRemarks& asm_remarks,
516 DbgStrings& dbg_strings,
517 #endif /* PRODUCT */
518 SCCReader* scc_reader);
519
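  // Illustrative load path (a sketch based on the declaration above): an AOT
  // code cache reader materializes an archived nmethod roughly as:
  //
  //   nmethod* nm = nmethod::new_nmethod(archived_nm, method, compiler, compile_id,
  //                                      reloc_data, oop_list, metadata_list,
  //                                      oop_maps, immutable_data,
  //                                      reloc_imm_oop_list, reloc_imm_metadata_list,
  //                                      /* asm_remarks, dbg_strings (non-product) */
  //                                      scc_reader);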
520 // create nmethod with entry_bci
521 static nmethod* new_nmethod(const methodHandle& method,
522 int compile_id,
523 int entry_bci,
524 CodeOffsets* offsets,
525 int orig_pc_offset,
526 DebugInformationRecorder* recorder,
527 Dependencies* dependencies,
528 CodeBuffer *code_buffer,
529 int frame_size,
530 OopMapSet* oop_maps,
531 ExceptionHandlerTable* handler_table,
532 ImplicitExceptionTable* nul_chk_table,
533 AbstractCompiler* compiler,
534 CompLevel comp_level
535 , SCCEntry* scc_entry
536 #if INCLUDE_JVMCI
537 , char* speculations = nullptr,
538 int speculations_len = 0,
539 JVMCINMethodData* jvmci_data = nullptr
540 #endif
541 );
542
543 static nmethod* new_native_nmethod(const methodHandle& method,
544 int compile_id,
545 CodeBuffer *code_buffer,
546 int vep_offset,
547 int frame_complete,
548 int frame_size,
549 ByteSize receiver_sp_offset,
550 ByteSize basic_lock_sp_offset,
551 OopMapSet* oop_maps,
552 int exception_handler = -1);
553
554 void copy_to(address dest) {
555 memcpy(dest, this, size());
556 }
557
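  // Illustrative store path (an assumption): when recording into the AOT code
  // cache, the nmethod is first normalized and then copied bitwise:
  //
  //   nm->prepare_for_archiving();   // declared further below
  //   nm->copy_to(archive_buffer);   // memcpy of size() bytes, per copy_to() above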
558 Method* method () const { return _method; }
559 uint16_t entry_bci () const { return _entry_bci; }
560 bool is_native_method() const { return _method != nullptr && _method->is_native(); }
561 bool is_java_method () const { return _method != nullptr && !_method->is_native(); }
562 bool is_osr_method () const { return _entry_bci != InvocationEntryBci; }
563
564 // Compiler task identification. Note that all OSR methods
565 // are numbered in an independent sequence if CICountOSR is true,
566 // and native method wrappers are also numbered independently if
567 // CICountNative is true.
568 int compile_id() const { return _compile_id; }
569 const char* compile_kind() const;
570
571 inline bool is_compiled_by_c1 () const { return _compiler_type == compiler_c1; }
572 inline bool is_compiled_by_c2 () const { return _compiler_type == compiler_c2; }
573 inline bool is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
574 CompilerType compiler_type () const { return _compiler_type; }
575 const char* compiler_name () const;
576
577 // boundaries for different parts
578 address consts_begin () const { return content_begin(); }
579 address consts_end () const { return code_begin() ; }
580 address insts_begin () const { return code_begin() ; }
581 address insts_end () const { return header_begin() + _stub_offset ; }
582 address stub_begin () const { return header_begin() + _stub_offset ; }
583 address stub_end () const { return code_end() ; }
584 address exception_begin () const { return header_begin() + _exception_offset ; }
585 address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
586 address deopt_mh_handler_begin() const { return _deopt_mh_handler_offset != -1 ? (header_begin() + _deopt_mh_handler_offset) : nullptr; }
587 address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
588 oop* oops_begin () const { return (oop*) data_begin(); }
589 oop* oops_end () const { return (oop*) data_end(); }
590
591 // mutable data
592 Metadata** metadata_begin () const { return (Metadata**) (mutable_data_begin() + _relocation_size); }
593 #if INCLUDE_JVMCI
594 Metadata** metadata_end () const { return (Metadata**) (mutable_data_end() - _jvmci_data_size); }
595 address jvmci_data_begin () const { return mutable_data_end() - _jvmci_data_size; }
596 address jvmci_data_end () const { return mutable_data_end(); }
597 #else
598 Metadata** metadata_end () const { return (Metadata**) mutable_data_end(); }
599 #endif
600
601 // immutable data
602 void set_immutable_data(address data) { _immutable_data = data; }
603 address immutable_data_begin () const { return _immutable_data; }
604 address immutable_data_end () const { return _immutable_data + _immutable_data_size ; }
605 address dependencies_begin () const { return _immutable_data; }
606 address dependencies_end () const { return _immutable_data + _nul_chk_table_offset; }
607 address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; }
608 address nul_chk_table_end () const { return _immutable_data + _handler_table_offset; }
609 address handler_table_begin () const { return _immutable_data + _handler_table_offset; }
610 address handler_table_end () const { return _immutable_data + _scopes_pcs_offset ; }
611 PcDesc* scopes_pcs_begin () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset) ; }
612 PcDesc* scopes_pcs_end () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
613 address scopes_data_begin () const { return _immutable_data + _scopes_data_offset ; }
614
615 #if INCLUDE_JVMCI
616 address scopes_data_end () const { return _immutable_data + _speculations_offset ; }
617 address speculations_begin () const { return _immutable_data + _speculations_offset ; }
618 address speculations_end () const { return immutable_data_end(); }
619 #else
620 address scopes_data_end () const { return immutable_data_end(); }
621 #endif
622
662 address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
663
664 enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
665 // allowed to advance state
666 in_use = 0, // executable nmethod
667 not_entrant = 1 // marked for deoptimization but activations may still exist
668 };
669
670 // flag accessing and manipulation
671 bool is_not_installed() const { return _state == not_installed; }
672 bool is_in_use() const { return _state <= in_use; }
673 bool is_not_entrant() const { return _state == not_entrant; }
674 int get_state() const { return _state; }
675
676 void clear_unloading_state();
677   // Heuristically deduce whether an nmethod is no longer worth keeping around
678 bool is_cold();
679 bool is_unloading();
680 void do_unloading(bool unloading_occurred);
681
682 void inc_method_profiling_count();
683 uint64_t method_profiling_count();
684
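  // Illustrative use (an assumption): the profiling counter above feeds the
  // hotness decision when selecting nmethods to record, e.g.:
  //
  //   nm->inc_method_profiling_count();                  // on a profiling tick
  //   if (nm->method_profiling_count() >= threshold) {   // 'threshold' is hypothetical
  //     ... record nm into the AOT/CDS archive ...
  //   }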
685 bool make_in_use() {
686 return try_transition(in_use);
687 }
688   // Make the nmethod non-entrant. The nmethod will continue to be
689   // alive. It is used when an uncommon trap happens. Returns true
690   // if this thread changed the state of the nmethod or false if
691   // another thread performed the transition.
692 bool make_not_entrant(const char* reason, bool make_not_entrant = true);
693 bool make_not_used() { return make_not_entrant("not used"); }
694
695 bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
696 bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
697 void set_deoptimized_done();
698
699 bool update_recompile_counts() const {
700 // Update recompile counts when either the update is explicitly requested (deoptimize)
701 // or the nmethod is not marked for deoptimization at all (not_marked).
702     // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
703 DeoptimizationStatus status = deoptimization_status();
704 return status != deoptimize_noupdate && status != deoptimize_done;
705 }
706
707 // tells whether frames described by this nmethod can be deoptimized
708 // note: native wrappers cannot be deoptimized.
709 bool can_be_deoptimized() const { return is_java_method(); }
710
711 bool has_dependencies() { return dependencies_size() != 0; }
712 void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
715 template<typename T>
716 T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
717 template<typename T>
718 void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
719
720 bool has_unsafe_access() const { return _has_unsafe_access; }
721 void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
722
723 bool has_monitors() const { return _has_monitors; }
724 void set_has_monitors(bool z) { _has_monitors = z; }
725
726 bool has_scoped_access() const { return _has_scoped_access; }
727 void set_has_scoped_access(bool z) { _has_scoped_access = z; }
728
729 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
730 void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
731
732 bool has_wide_vectors() const { return _has_wide_vectors; }
733 void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
734
735 bool has_clinit_barriers() const { return _has_clinit_barriers; }
736 void set_has_clinit_barriers(bool z) { _has_clinit_barriers = z; }
737
738 bool preloaded() const { return _preloaded; }
739 void set_preloaded(bool z) { _preloaded = z; }
740
741 bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
742 void set_has_flushed_dependencies(bool z) {
743 assert(!has_flushed_dependencies(), "should only happen once");
744 _has_flushed_dependencies = z;
745 }
746
747 bool is_unlinked() const { return _is_unlinked; }
748 void set_is_unlinked() {
749 assert(!_is_unlinked, "already unlinked");
750 _is_unlinked = true;
751 }
752
753 int comp_level() const { return _comp_level; }
754
755 // Support for oops in scopes and relocs:
756 // Note: index 0 is reserved for null.
757 oop oop_at(int index) const;
758 oop oop_at_phantom(int index) const; // phantom reference
759 oop* oop_addr_at(int index) const { // for GC
760 // relocation indexes are biased by 1 (because 0 is reserved)
761 assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
762 return &oops_begin()[index - 1];
763 }
764
765 // Support for meta data in scopes and relocs:
766 // Note: index 0 is reserved for null.
767 Metadata* metadata_at(int index) const { return index == 0 ? nullptr: *metadata_addr_at(index); }
768 Metadata** metadata_addr_at(int index) const { // for GC
769 // relocation indexes are biased by 1 (because 0 is reserved)
770 assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
771 return &metadata_begin()[index - 1];
772 }
773
774 void copy_values(GrowableArray<Handle>* array);
775 void copy_values(GrowableArray<jobject>* oops);
776 void copy_values(GrowableArray<Metadata*>* metadata);
777 void copy_values(GrowableArray<address>* metadata) {} // Nothing to do
778
779 // Relocation support
780 private:
781 void fix_oop_relocations(address begin, address end, bool initialize_immediates);
782 inline void initialize_immediate_oop(oop* dest, jobject handle);
783
784 protected:
785 address oops_reloc_begin() const;
786
787 public:
788 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
789 void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); }
790
791 void create_reloc_immediates_list(JavaThread* thread, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list);
792
793 bool is_at_poll_return(address pc);
794 bool is_at_poll_or_poll_return(address pc);
795
796 protected:
797 // Exception cache support
798 // Note: _exception_cache may be read and cleaned concurrently.
799 ExceptionCache* exception_cache() const { return _exception_cache; }
800 ExceptionCache* exception_cache_acquire() const;
801
802 public:
803 address handler_for_exception_and_pc(Handle exception, address pc);
804 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
805 void clean_exception_cache();
806
807 void add_exception_cache_entry(ExceptionCache* new_entry);
808 ExceptionCache* exception_cache_entry_for_exception(Handle exception);
809
810
811 // MethodHandle
812 bool is_method_handle_return(address return_pc);
943   // used by jvmti to track if the load event has been reported
944 bool load_reported() const { return _load_reported; }
945 void set_load_reported() { _load_reported = true; }
946
947 public:
948 // ScopeDesc retrieval operation
949 PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
950 // pc_desc_near returns the first PcDesc at or after the given pc.
951 PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
952
953 // ScopeDesc for an instruction
954 ScopeDesc* scope_desc_at(address pc);
955 ScopeDesc* scope_desc_near(address pc);
956
957 // copying of debugging information
958 void copy_scopes_pcs(PcDesc* pcs, int count);
959 void copy_scopes_data(address buffer, int size);
960
961 int orig_pc_offset() { return _orig_pc_offset; }
962
963 SCCEntry* scc_entry() const { return _scc_entry; }
964 bool is_scc() const { return scc_entry() != nullptr; }
965 void set_scc_entry(SCCEntry* entry) { _scc_entry = entry; }
966
967 bool used() const { return _used; }
968 void set_used() { _used = true; }
969
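  // Illustrative check (an assumption): nmethods loaded from the AOT code cache
  // carry their SCCEntry, which policy code can test alongside the invocation
  // flag:
  //
  //   if (nm->is_scc() && !nm->used()) {
  //     ... // hypothetical: special handling for never-invoked AOT code
  //   }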
970 // Post successful compilation
971 void post_compiled_method(CompileTask* task);
972
973 // jvmti support:
974 void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);
975
976 // verify operations
977 void verify();
978 void verify_scopes();
979 void verify_interrupt_point(address interrupt_point, bool is_inline_cache);
980
981 // Disassemble this nmethod with additional debug information, e.g. information about blocks.
982 void decode2(outputStream* st) const;
983 void print_constant_pool(outputStream* st);
984
985 // Avoid hiding of parent's 'decode(outputStream*)' method.
986 void decode(outputStream* st) const { decode2(st); } // just delegate here.
987
988 // printing support
989 void print_on_impl(outputStream* st) const;
990 void print_code();
991 void print_value_on_impl(outputStream* st) const;
992
993 #if defined(SUPPORT_DATA_STRUCTS)
994 // print output in opt build for disassembler library
995 void print_relocations_on(outputStream* st) PRODUCT_RETURN;
996 void print_pcs_on(outputStream* st);
997 void print_scopes() { print_scopes_on(tty); }
998 void print_scopes_on(outputStream* st) PRODUCT_RETURN;
999 void print_handler_table();
1000 void print_nul_chk_table();
1001 void print_recorded_oop(int log_n, int index);
1002 void print_recorded_oops();
1003 void print_recorded_metadata();
1004
1005 void print_oops(outputStream* st); // oops from the underlying CodeBlob.
1006 void print_metadata(outputStream* st); // metadata in metadata pool.
1007 #else
1008 void print_pcs_on(outputStream* st) { return; }
1009 #endif
1010
1011 void print_calls(outputStream* st) PRODUCT_RETURN;
1012 static void print_statistics() PRODUCT_RETURN;
1013
1014 void maybe_print_nmethod(const DirectiveSet* directive);
1015 void print_nmethod(bool print_code);
1043 ByteSize native_receiver_sp_offset() {
1044 assert(is_native_method(), "sanity");
1045 return _native_receiver_sp_offset;
1046 }
1047 ByteSize native_basic_lock_sp_offset() {
1048 assert(is_native_method(), "sanity");
1049 return _native_basic_lock_sp_offset;
1050 }
1051
1052 // support for code generation
1053 static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
1054 static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
1055
1056 void metadata_do(MetadataClosure* f);
1057
1058 address call_instruction_address(address pc) const;
1059
1060 void make_deoptimized();
1061 void finalize_relocations();
1062
1063 void prepare_for_archiving();
1064
1065 class Vptr : public CodeBlob::Vptr {
1066 void print_on(const CodeBlob* instance, outputStream* st) const override {
1067 ttyLocker ttyl;
1068 instance->as_nmethod()->print_on_impl(st);
1069 }
1070 void print_value_on(const CodeBlob* instance, outputStream* st) const override {
1071 instance->as_nmethod()->print_value_on_impl(st);
1072 }
1073 };
1074
1075 static const Vptr _vpntr;
1076 };
1077
1078 #endif // SHARE_CODE_NMETHOD_HPP