src/hotspot/share/code/nmethod.hpp
class ImplicitExceptionTable;
class JvmtiThreadState;
class MetadataClosure;
class NativeCallWrapper;
class OopIterateClosure;
+ class SCCReader;
+ class SCCEntry;
class ScopeDesc;
class xmlStream;
// This class is used internally by nmethods, to cache
// exception/pc/handler information.
// Used to track in which deoptimize handshake this method will be deoptimized.
uint64_t _deoptimization_generation;
uint64_t _gc_epoch;
+ // Profiling counter used to figure out the hottest nmethods to record into CDS
+ volatile uint64_t _method_profiling_count;
+
Method* _method;
// To reduce header size union fields which usages do not overlap.
union {
// To support simple linked-list chaining of nmethods:
int _compile_id; // which compilation made this nmethod
CompLevel _comp_level; // compilation level (s1)
CompilerType _compiler_type; // which compiler made this nmethod (u1)
+ SCCEntry* _scc_entry;
+
+ bool _used; // has this nmethod ever been invoked?
+
// Local state used to keep track of whether unloading is happening or not
volatile uint8_t _is_unloading_state;
// Protected by NMethodState_lock
volatile signed char _state; // {not_installed, in_use, not_entrant}
_has_wide_vectors:1, // Preserve wide vectors at safepoints
_has_monitors:1, // Fastpath monitor detection for continuations
_has_scoped_access:1, // used for shared scope closure (scopedMemoryAccess.cpp)
_has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
_is_unlinked:1, // mark during class unloading
! _load_reported:1; // used by jvmti to track if an event has been posted for this nmethod
enum DeoptimizationStatus : u1 {
not_marked,
deoptimize,
deoptimize_noupdate,
_has_wide_vectors:1, // Preserve wide vectors at safepoints
_has_monitors:1, // Fastpath monitor detection for continuations
_has_scoped_access:1, // used for shared scope closure (scopedMemoryAccess.cpp)
_has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
_is_unlinked:1, // mark during class unloading
! _load_reported:1, // used by jvmti to track if an event has been posted for this nmethod
+ _preloaded:1,
+ _has_clinit_barriers:1;
enum DeoptimizationStatus : u1 {
not_marked,
deoptimize,
deoptimize_noupdate,
OopMapSet* oop_maps,
ExceptionHandlerTable* handler_table,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
CompLevel comp_level
+ , SCCEntry* scc_entry
#if INCLUDE_JVMCI
, char* speculations = nullptr,
int speculations_len = 0,
JVMCINMethodData* jvmci_data = nullptr
#endif
// Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
// transitions).
void oops_do_set_strong_done(nmethod* old_head);
+ void record_nmethod_dependency();
+
+ void restore_from_archive(nmethod* archived_nm,
+ const methodHandle& method,
+ int compile_id,
+ address reloc_data,
+ GrowableArray<Handle>& oop_list,
+ GrowableArray<Metadata*>& metadata_list,
+ ImmutableOopMapSet* oop_maps,
+ address immutable_data,
+ GrowableArray<Handle>& reloc_imm_oop_list,
+ GrowableArray<Metadata*>& reloc_imm_metadata_list,
+ #ifndef PRODUCT
+ AsmRemarks& asm_remarks,
+ DbgStrings& dbg_strings,
+ #endif /* PRODUCT */
+ SCCReader* scc_reader);
+
public:
+ // create nmethod using archived nmethod from AOT code cache
+ static nmethod* new_nmethod(nmethod* archived_nm,
+ const methodHandle& method,
+ AbstractCompiler* compiler,
+ int compile_id,
+ address reloc_data,
+ GrowableArray<Handle>& oop_list,
+ GrowableArray<Metadata*>& metadata_list,
+ ImmutableOopMapSet* oop_maps,
+ address immutable_data,
+ GrowableArray<Handle>& reloc_imm_oop_list,
+ GrowableArray<Metadata*>& reloc_imm_metadata_list,
+ #ifndef PRODUCT
+ AsmRemarks& asm_remarks,
+ DbgStrings& dbg_strings,
+ #endif /* PRODUCT */
+ SCCReader* scc_reader);
+
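// Illustrative sketch (not part of this change): a cache loader that has read an
// archived nmethod entry and rebuilt its relocation/oop/metadata lists would be
// expected to call the factory above roughly like this; all local names here are
// hypothetical.
//
//   nmethod* nm = nmethod::new_nmethod(archived_nm, mh, compiler, compile_id,
//                                      reloc_data, oop_list, metadata_list,
//                                      oop_maps, immutable_data,
//                                      reloc_imm_oop_list, reloc_imm_metadata_list,
// #ifndef PRODUCT
//                                      asm_remarks, dbg_strings,
// #endif
//                                      scc_reader);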
// create nmethod with entry_bci
static nmethod* new_nmethod(const methodHandle& method,
int compile_id,
int entry_bci,
CodeOffsets* offsets,
OopMapSet* oop_maps,
ExceptionHandlerTable* handler_table,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
CompLevel comp_level
+ , SCCEntry* scc_entry
#if INCLUDE_JVMCI
, char* speculations = nullptr,
int speculations_len = 0,
JVMCINMethodData* jvmci_data = nullptr
#endif
ByteSize receiver_sp_offset,
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps,
int exception_handler = -1);
+ void copy_to(address dest) {
+ memcpy(dest, this, size());
+ }
+
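// Illustrative sketch (not part of this change): copy_to() above is a raw byte copy
// of the whole blob, so a writer serializing this nmethod presumably reserves
// size() bytes first; the destination buffer below is hypothetical.
//
//   address buf = (address)os::malloc(nm->size(), mtCode);  // at least size() bytes
//   nm->copy_to(buf);  // raw image only; oops/relocations are handled separately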
Method* method () const { return _method; }
+ uint16_t entry_bci () const { return _entry_bci; }
bool is_native_method() const { return _method != nullptr && _method->is_native(); }
bool is_java_method () const { return _method != nullptr && !_method->is_native(); }
bool is_osr_method () const { return _entry_bci != InvocationEntryBci; }
// Compiler task identification. Note that all OSR methods
address insts_end () const { return header_begin() + _stub_offset ; }
address stub_begin () const { return header_begin() + _stub_offset ; }
address stub_end () const { return code_end() ; }
address exception_begin () const { return header_begin() + _exception_offset ; }
address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
! address deopt_mh_handler_begin() const { return header_begin() + _deopt_mh_handler_offset ; }
address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
oop* oops_begin () const { return (oop*) data_begin(); }
oop* oops_end () const { return (oop*) data_end(); }
// mutable data
address insts_end () const { return header_begin() + _stub_offset ; }
address stub_begin () const { return header_begin() + _stub_offset ; }
address stub_end () const { return code_end() ; }
address exception_begin () const { return header_begin() + _exception_offset ; }
address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
! address deopt_mh_handler_begin() const { return _deopt_mh_handler_offset != -1 ? (header_begin() + _deopt_mh_handler_offset) : nullptr; }
address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
oop* oops_begin () const { return (oop*) data_begin(); }
oop* oops_end () const { return (oop*) data_end(); }
// mutable data
#else
Metadata** metadata_end () const { return (Metadata**) mutable_data_end(); }
#endif
// immutable data
+ void set_immutable_data(address data) { _immutable_data = data; }
address immutable_data_begin () const { return _immutable_data; }
address immutable_data_end () const { return _immutable_data + _immutable_data_size ; }
address dependencies_begin () const { return _immutable_data; }
address dependencies_end () const { return _immutable_data + _nul_chk_table_offset; }
address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; }
// Heuristically deduce an nmethod isn't worth keeping around
bool is_cold();
bool is_unloading();
void do_unloading(bool unloading_occurred);
bool make_in_use() {
return try_transition(in_use);
}
// Make the nmethod non entrant. The nmethod will continue to be
// alive. It is used when an uncommon trap happens. Returns true
// if this thread changed the state of the nmethod or false if
// another thread performed the transition.
! bool make_not_entrant(const char* reason);
bool make_not_used() { return make_not_entrant("not used"); }
bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
void set_deoptimized_done();
// Heuristically deduce an nmethod isn't worth keeping around
bool is_cold();
bool is_unloading();
void do_unloading(bool unloading_occurred);
+ void inc_method_profiling_count();
+ uint64_t method_profiling_count();
+
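// Illustrative sketch (not part of this change): the counter behind these accessors
// is presumably bumped as the method executes and consulted when choosing which
// nmethods are hot enough to record; the threshold name below is hypothetical.
//
//   nm->inc_method_profiling_count();
//   ...
//   if (nm->method_profiling_count() >= HypotheticalRecordingThreshold) {
//     // consider this nmethod for recording
//   }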
bool make_in_use() {
return try_transition(in_use);
}
// Make the nmethod non entrant. The nmethod will continue to be
// alive. It is used when an uncommon trap happens. Returns true
// if this thread changed the state of the nmethod or false if
// another thread performed the transition.
! bool make_not_entrant(const char* reason, bool make_not_entrant = true);
bool make_not_used() { return make_not_entrant("not used"); }
bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
void set_deoptimized_done();
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool has_wide_vectors() const { return _has_wide_vectors; }
void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
+ bool has_clinit_barriers() const { return _has_clinit_barriers; }
+ void set_has_clinit_barriers(bool z) { _has_clinit_barriers = z; }
+
+ bool preloaded() const { return _preloaded; }
+ void set_preloaded(bool z) { _preloaded = z; }
+
bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
void set_has_flushed_dependencies(bool z) {
assert(!has_flushed_dependencies(), "should only happen once");
_has_flushed_dependencies = z;
}
// relocation indexes are biased by 1 (because 0 is reserved)
assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
return &metadata_begin()[index - 1];
}
+ void copy_values(GrowableArray<Handle>* array);
void copy_values(GrowableArray<jobject>* oops);
void copy_values(GrowableArray<Metadata*>* metadata);
void copy_values(GrowableArray<address>* metadata) {} // Nothing to do
// Relocation support
public:
void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); }
+ void create_reloc_immediates_list(JavaThread* thread, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list);
+
bool is_at_poll_return(address pc);
bool is_at_poll_or_poll_return(address pc);
protected:
// Exception cache support
void copy_scopes_pcs(PcDesc* pcs, int count);
void copy_scopes_data(address buffer, int size);
int orig_pc_offset() { return _orig_pc_offset; }
+ SCCEntry* scc_entry() const { return _scc_entry; }
+ bool is_scc() const { return scc_entry() != nullptr; }
+ void set_scc_entry(SCCEntry* entry) { _scc_entry = entry; }
+
+ bool used() const { return _used; }
+ void set_used() { _used = true; }
+
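// Illustrative sketch (not part of this change): is_scc() identifies nmethods backed
// by a cache entry, and used() reports whether the nmethod was ever invoked, so a
// hypothetical policy could combine them like this:
//
//   if (nm->is_scc() && !nm->used()) {
//     // cached code that was never invoked; a policy might treat it as cold
//   }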
// Post successful compilation
void post_compiled_method(CompileTask* task);
// jvmti support:
void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);
void print_code();
void print_value_on_impl(outputStream* st) const;
#if defined(SUPPORT_DATA_STRUCTS)
// print output in opt build for disassembler library
! void print_relocations() PRODUCT_RETURN;
void print_pcs_on(outputStream* st);
void print_scopes() { print_scopes_on(tty); }
void print_scopes_on(outputStream* st) PRODUCT_RETURN;
void print_handler_table();
void print_nul_chk_table();
void print_code();
void print_value_on_impl(outputStream* st) const;
#if defined(SUPPORT_DATA_STRUCTS)
// print output in opt build for disassembler library
! void print_relocations_on(outputStream* st) PRODUCT_RETURN;
void print_pcs_on(outputStream* st);
void print_scopes() { print_scopes_on(tty); }
void print_scopes_on(outputStream* st) PRODUCT_RETURN;
void print_handler_table();
void print_nul_chk_table();
address call_instruction_address(address pc) const;
void make_deoptimized();
void finalize_relocations();
+ void prepare_for_archiving();
+
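// Illustrative sketch (not part of this change): when an nmethod is written out,
// prepare_for_archiving() presumably runs before the raw image is copied with
// copy_to() (declared earlier); archive_buffer below is hypothetical.
//
//   nm->prepare_for_archiving();
//   nm->copy_to(archive_buffer);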
class Vptr : public CodeBlob::Vptr {
void print_on(const CodeBlob* instance, outputStream* st) const override {
ttyLocker ttyl;
instance->as_nmethod()->print_on_impl(st);
}