#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
#include "oops/method.hpp"

class AbstractCompiler;
class CompiledDirectCall;
class CompiledIC;
class CompiledICData;
class CompileTask;
class DepChange;
class Dependencies;
class DirectiveSet;
class DebugInformationRecorder;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class JvmtiThreadState;
class MetadataClosure;
class NativeCallWrapper;
class OopIterateClosure;
class SCCEntry;
class ScopeDesc;
class xmlStream;

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* volatile _next;
  ExceptionCache* _purge_list_next;

  inline address pc_at(int index);
  void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
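
  // A minimal sketch of how this cache is meant to be probed (the real lookup
  // lives in nmethod.cpp; handler_at() and count() are assumed accessors
  // mirroring set_pc_at() above):
  //
  //   address test_address(address addr) {
  //     int limit = count();                // racy read is tolerated here
  //     for (int i = 0; i < limit; i++) {
  //       if (pc_at(i) == addr) {
  //         return handler_at(i);           // cached handler for this pc
  //       }
  //     }
  //     return nullptr;                     // miss: fall back to the slow path
  //   }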
  // ...

#if INCLUDE_JVMCI
class FailedSpeculation;
class JVMCINMethodData;
#endif

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class CodeCache;  // scavengable oops
  friend class JVMCINMethodData;
  friend class DeoptimizationScope;

 private:

  // Used to track in which deoptimize handshake this method will be deoptimized.
  uint64_t _deoptimization_generation;

  uint64_t _gc_epoch;

  // Profiling counter used to identify the hottest nmethods to record into CDS.
  volatile uint64_t _method_profiling_count;

  Method* _method;

  // To reduce header size, fields whose usages do not overlap share a union.
  union {
    // To support simple linked-list chaining of nmethods:
    nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
    struct {
      // These are used for compiled synchronized native methods to
      // locate the owner and stack slot for the BasicLock. They are
      // needed because there is no debug information for compiled native
      // wrappers and the oop maps are insufficient to allow
      // frame::retrieve_receiver() to work. Currently they are expected
      // to be byte offsets from the Java stack pointer for maximum code
      // sharing between platforms. JVMTI's GetLocalInstance() uses these
      // offsets to find the receiver for non-static native wrapper frames.
      ByteSize _native_receiver_sp_offset;
      ByteSize _native_basic_lock_sp_offset;
    };
  };
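
  // Illustrative sketch (not part of this header): given these offsets, the
  // receiver of a non-static native wrapper frame can be recovered directly
  // from the stack; get_native_receiver() below is a hypothetical helper:
  //
  //   oop get_native_receiver(const frame& fr, nmethod* nm) {
  //     ByteSize off = nm->native_receiver_sp_offset();
  //     oop* addr = (oop*)((address)fr.unextended_sp() + in_bytes(off));
  //     return *addr;   // the receiver, also used as the BasicLock owner
  //   }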

  // ...
#endif

  // Offset in immutable data section
  // _dependencies_offset == 0
  uint16_t _nul_chk_table_offset;
  uint16_t _handler_table_offset; // This table could be big in C1 code
  int      _scopes_pcs_offset;
  int      _scopes_data_offset;
#if INCLUDE_JVMCI
  int      _speculations_offset;
#endif

  // Location in frame (offset from sp) where deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int          _compile_id;    // which compilation made this nmethod
  CompLevel    _comp_level;    // compilation level (s1)
  CompilerType _compiler_type; // which compiler made this nmethod (u1)

  SCCEntry* _scc_entry;

  bool _used; // has this nmethod ever been invoked?

  // Local state used to keep track of whether unloading is happening or not
  volatile uint8_t _is_unloading_state;

  // Protected by NMethodState_lock
  volatile signed char _state; // {not_installed, in_use, not_entrant}

  // set during construction
  uint8_t _has_unsafe_access:1,         // May fault due to unsafe access.
          _has_method_handle_invokes:1, // Does this method have MethodHandle invokes?
          _has_wide_vectors:1,          // Preserve wide vectors at safepoints
          _has_monitors:1,              // Fastpath monitor detection for continuations
          _has_scoped_access:1,         // used by the shared scope closure (scopedMemoryAccess.cpp)
          _has_flushed_dependencies:1,  // Used for maintenance of dependencies (under CodeCache_lock)
          _is_unlinked:1,               // mark during class unloading
          _load_reported:1,             // used by jvmti to track if an event has been posted for this nmethod
          _preloaded:1,
          _has_clinit_barriers:1;

  enum DeoptimizationStatus : u1 {
    not_marked,
    deoptimize,
    deoptimize_noupdate,
    deoptimize_done
  };

  volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization

  DeoptimizationStatus deoptimization_status() const {
    return Atomic::load(&_deoptimization_status);
  }
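
  // The status is published with plain atomic stores; a sketch of the assumed
  // protocol (the real marking goes through DeoptimizationScope):
  //
  //   Atomic::store(&_deoptimization_status, deoptimize);      // request deopt
  //   ...                                                      // stacks are walked
  //   Atomic::store(&_deoptimization_status, deoptimize_done); // set_deoptimized_done()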

  // Initialize fields to their default values
  void init_defaults(CodeBuffer* code_buffer, CodeOffsets* offsets);

  // Post initialization
  void post_init();

  // ...

  // For normal JIT compiled code
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int immutable_data_size,
          int compile_id,
          int entry_bci,
          address immutable_data,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder* recorder,
          Dependencies* dependencies,
          CodeBuffer* code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          CompLevel comp_level
          , SCCEntry* scc_entry
#if INCLUDE_JVMCI
          , char* speculations = nullptr,
          int speculations_len = 0,
          JVMCINMethodData* jvmci_data = nullptr
#endif
          );

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();

  // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
  // Attention: Only allow NonNMethod space for special nmethods which don't need to be
  // findable by nmethod iterators! In particular, they must not contain oops!
  void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);

  bool try_transition(signed char new_state);

  // Returns true if this thread changed the state of the nmethod or
  // ...
  // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
  // transitions).
  void oops_do_set_strong_done(nmethod* old_head);

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer* code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              CompLevel comp_level
                              , SCCEntry* scc_entry
#if INCLUDE_JVMCI
                              , char* speculations = nullptr,
                              int speculations_len = 0,
                              JVMCINMethodData* jvmci_data = nullptr
#endif
                              );

  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps,
                                     int exception_handler = -1);

  Method* method() const { return _method; }
  bool is_native_method() const { return _method != nullptr && _method->is_native(); }

  // ...

  address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct

  enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
                                           // allowed to advance state
                       in_use        = 0,  // executable nmethod
                       not_entrant   = 1   // marked for deoptimization but activations may still exist
  };
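
  // States only advance; try_transition() refuses to move backwards. A minimal
  // sketch of the expected lifecycle, assuming a freshly constructed nmethod:
  //
  //   nmethod* nm = nmethod::new_nmethod(...); // state == not_installed
  //   nm->make_in_use();                       // not_installed -> in_use
  //   ...
  //   nm->make_not_entrant();                  // in_use -> not_entrant on deopt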

  // flag accessing and manipulation
  bool is_not_installed() const { return _state == not_installed; }
  bool is_in_use() const { return _state <= in_use; }
  bool is_not_entrant() const { return _state == not_entrant; }
  int get_state() const { return _state; }

  void clear_unloading_state();
  // Heuristically deduce whether an nmethod isn't worth keeping around
  bool is_cold();
  bool is_unloading();
  void do_unloading(bool unloading_occurred);

  void inc_method_profiling_count();
  uint64_t method_profiling_count();
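
  // Sketch of the intended use (assumed; HotMethodThreshold is hypothetical):
  // a recording pass bumps the counter on invocation and later ranks nmethods
  // by it when deciding what to store into the CDS archive:
  //
  //   nm->inc_method_profiling_count();
  //   if (nm->method_profiling_count() >= HotMethodThreshold) {
  //     // record nm as hot
  //   }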

  bool make_in_use() {
    return try_transition(in_use);
  }
  // Make the nmethod non-entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant(bool make_not_entrant = true);
  bool make_not_used() { return make_not_entrant(false); }

  bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
  bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
  void set_deoptimized_done();

  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
    DeoptimizationStatus status = deoptimization_status();
    return status != deoptimize_noupdate && status != deoptimize_done;
  }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  bool has_dependencies() { return dependencies_size() != 0; }
  void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
  void flush_dependencies();

  template<typename T>
  T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
  template<typename T>
  void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
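
  // Illustrative sketch: a collector hangs its per-nmethod bookkeeping off
  // these accessors (the struct below is hypothetical; concurrent collectors
  // attach their own data types this way):
  //
  //   struct MyNMethodGCData { int epoch; };
  //   nm->set_gc_data<MyNMethodGCData>(new MyNMethodGCData{0});
  //   MyNMethodGCData* d = nm->gc_data<MyNMethodGCData>();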

  bool has_unsafe_access() const { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }

  bool has_monitors() const { return _has_monitors; }
  void set_has_monitors(bool z) { _has_monitors = z; }

  bool has_scoped_access() const { return _has_scoped_access; }
  void set_has_scoped_access(bool z) { _has_scoped_access = z; }

  bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  bool has_wide_vectors() const { return _has_wide_vectors; }
  void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }

  bool has_clinit_barriers() const { return _has_clinit_barriers; }
  void set_has_clinit_barriers(bool z) { _has_clinit_barriers = z; }

  bool preloaded() const { return _preloaded; }
  void set_preloaded(bool z) { _preloaded = z; }

  bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies(bool z) {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = z;
  }

  bool is_unlinked() const { return _is_unlinked; }
  void set_is_unlinked() {
    assert(!_is_unlinked, "already unlinked");
    _is_unlinked = true;
  }

  int comp_level() const { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop oop_at(int index) const;
  oop oop_at_phantom(int index) const; // phantom reference
  oop* oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
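    // e.g. oop_addr_at(1) is the first recorded oop. A sketch of the assumed
    // biased lookup (the remainder of this body is elided below):
    //
    //   return &oops_begin()[index - 1];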
  // ...

  // used by jvmti to track if the load event has been reported
  bool load_reported() const { return _load_reported; }
  void set_load_reported() { _load_reported = true; }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);
  ScopeDesc* scope_desc_near(address pc);
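
  // Typical use (sketch): map a pc inside this nmethod back to the bytecode
  // state that produced it:
  //
  //   ScopeDesc* sd  = nm->scope_desc_at(pc); // requires debug info at pc
  //   Method*    m   = sd->method();          // innermost method at that pc
  //   int        bci = sd->bci();             // bytecode index within m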

  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  int orig_pc_offset() { return _orig_pc_offset; }

  SCCEntry* scc_entry() const { return _scc_entry; }
  bool is_scc() const { return scc_entry() != nullptr; }

  bool used() const { return _used; }
  void set_used() { _used = true; }

  // Post successful compilation
  void post_compiled_method(CompileTask* task);

  // jvmti support:
  void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);

  // verify operations
  void verify() override;
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point, bool is_inline_cache);

  // Disassemble this nmethod with additional debug information, e.g. information about blocks.
  void decode2(outputStream* st) const;
  void print_constant_pool(outputStream* st);

  // Avoid hiding of parent's 'decode(outputStream*)' method.
  void decode(outputStream* st) const { decode2(st); } // just delegate here.

  // printing support
  void print() const override;
  void print(outputStream* st) const;
  void print_code();

#if defined(SUPPORT_DATA_STRUCTS)
  // print output in opt build for disassembler library
  void print_relocations_on(outputStream* st) PRODUCT_RETURN;
  void print_pcs_on(outputStream* st);
  void print_scopes() { print_scopes_on(tty); }
  void print_scopes_on(outputStream* st) PRODUCT_RETURN;
  void print_value_on(outputStream* st) const override;
  void print_handler_table();
  void print_nul_chk_table();
  void print_recorded_oop(int log_n, int index);
  void print_recorded_oops();
  void print_recorded_metadata();

  void print_oops(outputStream* st);     // oops from the underlying CodeBlob.
  void print_metadata(outputStream* st); // metadata in metadata pool.
#else
  void print_pcs_on(outputStream* st) { return; }
#endif

  void print_calls(outputStream* st) PRODUCT_RETURN;
  static void print_statistics() PRODUCT_RETURN;

  void maybe_print_nmethod(const DirectiveSet* directive);