/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_NMETHOD_HPP
#define SHARE_CODE_NMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilerDefinitions.hpp"
#include "oops/metadata.hpp"
#include "oops/method.hpp"
#include "runtime/mutexLocker.hpp"

class AbstractCompiler;
class CompiledDirectCall;
class CompiledIC;
class CompiledICData;
class CompileTask;
class DepChange;
class Dependencies;
class DirectiveSet;
class DebugInformationRecorder;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class JvmtiThreadState;
class MetadataClosure;
class NativeCallWrapper;
class OopIterateClosure;
class ScopeDesc;
class xmlStream;
// This class is used internally by nmethods to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* volatile _next;
  ExceptionCache* _purge_list_next;

  inline address pc_at(int index);
  void set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }

  inline address handler_at(int index);
  void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }

  inline int count();
  // increment_count is only called under lock, but there may be concurrent readers.
  void increment_count();

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*    exception_type()                { return _exception_type; }
  ExceptionCache* next();
  void      set_next(ExceptionCache *ec);
  ExceptionCache* purge_list_next()                 { return _purge_list_next; }
  void      set_purge_list_next(ExceptionCache *ec) { _purge_list_next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
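
// Illustrative sketch (assumptions, not actual VM code): a typical lookup
// first finds the cache node whose exception type matches, then asks it for
// a handler previously recorded for the faulting pc:
//
//   ExceptionCache* ec = nm->exception_cache_entry_for_exception(exception);
//   address handler = (ec != nullptr) ? ec->match(exception, pc) : nullptr;
//   // handler == nullptr means the slow path must compute (and cache) it.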

// cache pc descs found in earlier inquiries
class PcDescCache {
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently. Without volatile, a C++
  // compiler (notably xlC12) may duplicate field accesses, and
  // find_pc_desc_internal has returned wrong results as a consequence.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { DEBUG_ONLY(_pc_descs[0] = nullptr); }
  void    init_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};

class PcDescContainer : public CHeapObj<mtCode> {
private:
  PcDescCache _pc_desc_cache;
public:
  PcDescContainer(PcDesc* initial_pc_desc) { _pc_desc_cache.init_to(initial_pc_desc); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate, address code_begin,
                                PcDesc* lower, PcDesc* upper);

  PcDesc* find_pc_desc(address pc, bool approximate, address code_begin, PcDesc* lower, PcDesc* upper)
#ifdef PRODUCT
  {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    assert(desc != nullptr, "PcDesc cache should be initialized already");
    if (desc->pc_offset() == (pc - code_begin)) {
      // Cached value matched
      return desc;
    }
    return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
  }
#endif
  ;
};

// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - Header                 (the nmethod structure)
//  - Constant part          (doubles, longs and floats used in nmethod)
//  - Code part:
//    - Code body
//    - Exception handler
//    - Stub code
//    - OOP table
//
// As a CodeBlob, an nmethod references [mutable data] allocated on the C heap:
//  - CodeBlob relocation data
//  - Metainfo
//  - JVMCI data
//
// An nmethod references [immutable data] allocated on C heap:
//  - Dependency assertions data
//  - Implicit null table array
//  - Handler entry point array
//  - Debugging information:
//    - Scopes data array
//    - Scopes pcs array
//  - JVMCI speculations array
//  - Nmethod reference counter

#if INCLUDE_JVMCI
class FailedSpeculation;
class JVMCINMethodData;
#endif

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class CodeCache;  // scavengable oops
  friend class JVMCINMethodData;
  friend class DeoptimizationScope;

  #define ImmutableDataRefCountSize ((int)sizeof(int))

 private:

  // Used to track in which deoptimize handshake this method will be deoptimized.
  uint64_t  _deoptimization_generation;

  uint64_t  _gc_epoch;

  Method*   _method;

  // To reduce header size, fields whose usages do not overlap are placed in a union.
  union {
    // To support simple linked-list chaining of nmethods:
    nmethod*  _osr_link; // from InstanceKlass::osr_nmethods_head
    struct {
      // These are used for compiled synchronized native methods to
      // locate the owner and stack slot for the BasicLock. They are
      // needed because there is no debug information for compiled native
      // wrappers and the oop maps are insufficient to allow
      // frame::retrieve_receiver() to work. Currently they are expected
      // to be byte offsets from the Java stack pointer for maximum code
      // sharing between platforms. JVMTI's GetLocalInstance() uses these
      // offsets to find the receiver for non-static native wrapper frames.
      ByteSize _native_receiver_sp_offset;
      ByteSize _native_basic_lock_sp_offset;
    };
  };

  // nmethod's read-only data
  address _immutable_data;

  PcDescContainer* _pc_desc_container;
  ExceptionCache* volatile _exception_cache;

  void* _gc_data;

  struct oops_do_mark_link; // Opaque data type.
  static nmethod*    volatile _oops_do_mark_nmethods;
  oops_do_mark_link* volatile _oops_do_mark_link;

  CompiledICData* _compiled_ic_data;

  // offsets for entry points
  address  _osr_entry_point;       // entry point for on stack replacement
  uint16_t _entry_offset;          // entry point with class check
  uint16_t _verified_entry_offset; // entry point without class check
  uint16_t _inline_entry_offset;             // inline type entry point (unpack all inline type args) with class check
  uint16_t _verified_inline_entry_offset;    // inline type entry point (unpack all inline type args) without class check
  uint16_t _verified_inline_ro_entry_offset; // inline type entry point (unpack receiver only) without class check
  int      _entry_bci;             // != InvocationEntryBci if this nmethod is an on-stack replacement method
  int      _immutable_data_size;

  // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer

  int _skipped_instructions_size;

  int _stub_offset;

  // Offsets for different stubs section parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deopt_handler_entry_offset;
  // Offset (from insts_end) of the unwind handler if it exists
  int16_t  _unwind_handler_offset;
  // Number of arguments passed on the stack
  uint16_t _num_stack_arg_slots;

#if INCLUDE_JVMCI
  // _metadata_size is not specific to JVMCI. In the non-JVMCI case, it can be derived as:
  // _metadata_size = mutable_data_size - relocation_size
  int _metadata_size;
#endif

  // Offset in immutable data section
  // _dependencies_offset == 0
  uint16_t _nul_chk_table_offset;
  uint16_t _handler_table_offset; // This table could be big in C1 code
  int      _scopes_pcs_offset;
  int      _scopes_data_offset;
#if INCLUDE_JVMCI
  int      _speculations_offset;
#endif
  int      _immutable_data_ref_count_offset;

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int          _compile_id;            // which compilation made this nmethod
  CompLevel    _comp_level;            // compilation level (s1)
  CompilerType _compiler_type;         // which compiler made this nmethod (u1)

  // Local state used to keep track of whether unloading is happening or not
  volatile uint8_t _is_unloading_state;

  // Protected by NMethodState_lock
  volatile signed char _state;         // {not_installed, in_use, not_entrant}

  // set during construction
  uint8_t _has_unsafe_access:1,        // May fault due to unsafe access.
          _has_wide_vectors:1,         // Preserve wide vectors at safepoints
          _has_monitors:1,             // Fastpath monitor detection for continuations
          _has_scoped_access:1,        // used for the shared scope closure (scopedMemoryAccess.cpp)
          _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
          _is_unlinked:1,              // mark during class unloading
          _load_reported:1;            // used by jvmti to track if an event has been posted for this nmethod

  enum DeoptimizationStatus : u1 {
    not_marked,
    deoptimize,
    deoptimize_noupdate,
    deoptimize_done
  };

  volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization

  DeoptimizationStatus deoptimization_status() const {
    return AtomicAccess::load(&_deoptimization_status);
  }

  // Initialize fields to their default values
  void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets);

  // Post initialization
  void post_init();

  // For native wrappers
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps,
          int mutable_data_size);

  // For normal JIT compiled code
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int immutable_data_size,
          int mutable_data_size,
          int compile_id,
          int entry_bci,
          address immutable_data,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          CompLevel comp_level
#if INCLUDE_JVMCI
          , char* speculations = nullptr,
          int speculations_len = 0,
          JVMCINMethodData* jvmci_data = nullptr
#endif
          );

  nmethod(const nmethod &nm);

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();
  void* operator new(size_t size, int nmethod_size, CodeBlobType code_blob_type) throw();

  // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
  // Attention: Only allow NonNMethod space for special nmethods which don't need to be
  // findable by nmethod iterators! In particular, they must not contain oops!
  void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);

  bool try_transition(signed char new_state);

  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_entrant() { Unimplemented(); return false; }
  void inc_decompile_count();

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  PcDesc* find_pc_desc(address pc, bool approximate) {
    if (_pc_desc_container == nullptr) return nullptr; // native method
    return _pc_desc_container->find_pc_desc(pc, approximate, code_begin(), scopes_pcs_begin(), scopes_pcs_end());
  }

  // STW two-phase nmethod root processing helpers.
  //
  // When determining liveness of a given nmethod to do code cache unloading,
  // some collectors need to do different things depending on whether the nmethods
  // need to absolutely be kept alive during root processing; "strong"ly reachable
  // nmethods are known to be kept alive at root processing, but the liveness of
  // "weak"ly reachable ones is to be determined later.
  //
  // We want to allow strong and weak processing of nmethods by different threads
  // at the same time without heavy synchronization. Additional constraints are
  // to make sure that every nmethod is processed only a minimal number of times,
  // and that an nmethod is iterated by at most one thread at any given time.
  //
  // Note that strong processing work must be a superset of weak processing work
  // for this code to work.
  //
  // We store state and claim information in the _oops_do_mark_link member, using
  // the two LSBs for the state and the remaining upper bits for linking together
  // nmethods that were already visited.
  // The last element is self-looped, i.e. points to itself to avoid some special
  // "end-of-list" sentinel value.
  //
  // _oops_do_mark_link special values:
  //
  //   _oops_do_mark_link == nullptr: the nmethod has not been visited at all yet, i.e.
  //      is Unclaimed.
  //
  // For other values, its lowest two bits indicate the following states of the nmethod:
  //
  //   weak_request (WR): the nmethod has been claimed by a thread for weak processing
  //   weak_done (WD): weak processing has been completed for this nmethod.
  //   strong_request (SR): the nmethod has been found to need strong processing while
  //       being weak processed.
  //   strong_done (SD): strong processing has been completed for this nmethod.
  //
  // The following shows the _only_ possible progressions of the _oops_do_mark_link
  // pointer.
  //
  // Given
  //   N as the nmethod
  //   X the current next value of _oops_do_mark_link
  //
  // Unclaimed (C)-> N|WR (C)-> X|WD: the nmethod has been processed weakly by
  //   a single thread.
  // Unclaimed (C)-> N|WR (C)-> X|WD (O)-> X|SD: after weak processing has been
  //   completed (as above) another thread found that the nmethod needs strong
  //   processing after all.
  // Unclaimed (C)-> N|WR (O)-> N|SR (C)-> X|SD: during weak processing another
  //   thread finds that the nmethod needs strong processing, marks it as such and
  //   terminates. The original thread completes strong processing.
  // Unclaimed (C)-> N|SD (C)-> X|SD: the nmethod has been processed strongly from
  //   the beginning by a single thread.
  //
  // "|" describes the concatenation of bits in _oops_do_mark_link.
  //
  // The diagram also describes the threads responsible for changing the nmethod to
  // the next state by marking the _transition_ with (C) and (O), which mean "current"
  // and "other" thread respectively.
  //

  // States used for claiming nmethods during root processing.
  static const uint claim_weak_request_tag = 0;
  static const uint claim_weak_done_tag = 1;
  static const uint claim_strong_request_tag = 2;
  static const uint claim_strong_done_tag = 3;

  static oops_do_mark_link* mark_link(nmethod* nm, uint tag) {
    assert(tag <= claim_strong_done_tag, "invalid tag %u", tag);
    assert(is_aligned(nm, 4), "the two lower bits of the nmethod pointer must be zero");
    return (oops_do_mark_link*)(((uintptr_t)nm & ~0x3) | tag);
  }

  static uint extract_state(oops_do_mark_link* link) {
    return (uint)((uintptr_t)link & 0x3);
  }

  static nmethod* extract_nmethod(oops_do_mark_link* link) {
    return (nmethod*)((uintptr_t)link & ~0x3);
  }
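
  // Illustrative sketch (not the actual implementation): with the helpers
  // above, the Unclaimed -> N|WR transition performed by
  // oops_do_try_claim_weak_request() amounts to a single CAS on
  // _oops_do_mark_link:
  //
  //   oops_do_mark_link* expected = nullptr;                                 // Unclaimed
  //   oops_do_mark_link* desired  = mark_link(this, claim_weak_request_tag); // N|WR
  //   bool claimed =
  //     AtomicAccess::cmpxchg(&_oops_do_mark_link, expected, desired) == expected;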

  void oops_do_log_change(const char* state);

  static bool oops_do_has_weak_request(oops_do_mark_link* next) {
    return extract_state(next) == claim_weak_request_tag;
  }

  static bool oops_do_has_any_strong_state(oops_do_mark_link* next) {
    return extract_state(next) >= claim_strong_request_tag;
  }

  // Attempt Unclaimed -> N|WR transition. Returns true if successful.
  bool oops_do_try_claim_weak_request();

  // Attempt Unclaimed -> N|SD transition. Returns the current link.
  oops_do_mark_link* oops_do_try_claim_strong_done();
  // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
  nmethod* oops_do_try_add_to_list_as_weak_done();

  // Attempt X|WD -> N|SR transition. Returns the current link.
  oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
  // Attempt X|WD -> X|SD transition. Returns true if successful.
  bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);

  // Do the N|SD -> X|SD transition.
  void oops_do_add_to_list_as_strong_done();

  // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
  // transitions).
  void oops_do_set_strong_done(nmethod* old_head);

public:
  // If you change anything in this enum please patch
  // vmStructs_jvmci.cpp accordingly.
  enum class InvalidationReason : s1 {
    NOT_INVALIDATED = -1,
    C1_CODEPATCH,
    C1_DEOPTIMIZE,
    C1_DEOPTIMIZE_FOR_PATCHING,
    C1_PREDICATE_FAILED_TRAP,
    CI_REPLAY,
    UNLOADING,
    UNLOADING_COLD,
    JVMCI_INVALIDATE,
    JVMCI_MATERIALIZE_VIRTUAL_OBJECT,
    JVMCI_REPLACED_WITH_NEW_CODE,
    JVMCI_REPROFILE,
    MARKED_FOR_DEOPTIMIZATION,
    MISSING_EXCEPTION_HANDLER,
    NOT_USED,
    OSR_INVALIDATION_BACK_BRANCH,
    OSR_INVALIDATION_FOR_COMPILING_WITH_C1,
    OSR_INVALIDATION_OF_LOWER_LEVEL,
    SET_NATIVE_FUNCTION,
    UNCOMMON_TRAP,
    WHITEBOX_DEOPTIMIZATION,
    ZOMBIE,
    RELOCATED,
    INVALIDATION_REASONS_COUNT
  };

  static const char* invalidation_reason_to_string(InvalidationReason invalidation_reason) {
    switch (invalidation_reason) {
      case InvalidationReason::C1_CODEPATCH:
        return "C1 code patch";
      case InvalidationReason::C1_DEOPTIMIZE:
        return "C1 deoptimized";
      case InvalidationReason::C1_DEOPTIMIZE_FOR_PATCHING:
        return "C1 deoptimize for patching";
      case InvalidationReason::C1_PREDICATE_FAILED_TRAP:
        return "C1 predicate failed trap";
      case InvalidationReason::CI_REPLAY:
        return "CI replay";
      case InvalidationReason::UNLOADING:
        return "unloading";
      case InvalidationReason::UNLOADING_COLD:
        return "unloading cold";
      case InvalidationReason::JVMCI_INVALIDATE:
        return "JVMCI invalidate";
      case InvalidationReason::JVMCI_MATERIALIZE_VIRTUAL_OBJECT:
        return "JVMCI materialize virtual object";
      case InvalidationReason::JVMCI_REPLACED_WITH_NEW_CODE:
        return "JVMCI replaced with new code";
      case InvalidationReason::JVMCI_REPROFILE:
        return "JVMCI reprofile";
      case InvalidationReason::MARKED_FOR_DEOPTIMIZATION:
        return "marked for deoptimization";
      case InvalidationReason::MISSING_EXCEPTION_HANDLER:
        return "missing exception handler";
      case InvalidationReason::NOT_USED:
        return "not used";
      case InvalidationReason::OSR_INVALIDATION_BACK_BRANCH:
        return "OSR invalidation back branch";
      case InvalidationReason::OSR_INVALIDATION_FOR_COMPILING_WITH_C1:
        return "OSR invalidation for compiling with C1";
      case InvalidationReason::OSR_INVALIDATION_OF_LOWER_LEVEL:
        return "OSR invalidation of lower level";
      case InvalidationReason::SET_NATIVE_FUNCTION:
        return "set native function";
      case InvalidationReason::UNCOMMON_TRAP:
        return "uncommon trap";
      case InvalidationReason::WHITEBOX_DEOPTIMIZATION:
        return "whitebox deoptimization";
      case InvalidationReason::ZOMBIE:
        return "zombie";
      case InvalidationReason::RELOCATED:
        return "relocated";
      default: {
        assert(false, "Unhandled reason");
        return "Unknown";
      }
    }
  }

  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              CompLevel comp_level
#if INCLUDE_JVMCI
                              , char* speculations = nullptr,
                              int speculations_len = 0,
                              JVMCINMethodData* jvmci_data = nullptr
#endif
  );

  // Relocate the nmethod to the code heap identified by code_blob_type.
  // Returns nullptr if the code heap does not have enough space, the
  // nmethod is unrelocatable, or the nmethod is invalidated during relocation,
  // otherwise the relocated nmethod. The original nmethod will be marked not entrant.
  nmethod* relocate(CodeBlobType code_blob_type);

  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps,
                                     int exception_handler = -1);

  Method* method       () const { return _method; }
  bool is_native_method() const { return _method != nullptr && _method->is_native(); }
  bool is_java_method  () const { return _method != nullptr && !_method->is_native(); }
  bool is_osr_method   () const { return _entry_bci != InvocationEntryBci; }

  bool is_relocatable();

  // Compiler task identification.  Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  inline bool  is_compiled_by_c1   () const { return _compiler_type == compiler_c1; }
  inline bool  is_compiled_by_c2   () const { return _compiler_type == compiler_c2; }
  inline bool  is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
  CompilerType compiler_type       () const { return _compiler_type; }
  const char*  compiler_name       () const;

  // boundaries for different parts
  address consts_begin          () const { return           content_begin(); }
  address consts_end            () const { return           code_begin()   ; }
  address insts_begin           () const { return           code_begin()   ; }
  address insts_end             () const { return           header_begin() + _stub_offset             ; }
  address stub_begin            () const { return           header_begin() + _stub_offset             ; }
  address stub_end              () const { return           code_end()     ; }
  address exception_begin       () const { return           header_begin() + _exception_offset        ; }
  address deopt_handler_entry   () const { return           header_begin() + _deopt_handler_entry_offset    ; }
  address unwind_handler_begin  () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
  oop*    oops_begin            () const { return (oop*)    data_begin(); }
  oop*    oops_end              () const { return (oop*)    data_end(); }

  // mutable data
  Metadata** metadata_begin     () const { return (Metadata**) (mutable_data_begin() + _relocation_size); }
#if INCLUDE_JVMCI
  Metadata** metadata_end       () const { return (Metadata**) (mutable_data_begin() + _relocation_size + _metadata_size); }
  address jvmci_data_begin      () const { return               mutable_data_begin() + _relocation_size + _metadata_size; }
  address jvmci_data_end        () const { return               mutable_data_end(); }
#else
  Metadata** metadata_end       () const { return (Metadata**)  mutable_data_end(); }
#endif

  // immutable data
  address immutable_data_begin  () const { return           _immutable_data; }
  address immutable_data_end    () const { return           _immutable_data + _immutable_data_size ; }
  address dependencies_begin    () const { return           _immutable_data; }
  address dependencies_end      () const { return           _immutable_data + _nul_chk_table_offset; }
  address nul_chk_table_begin   () const { return           _immutable_data + _nul_chk_table_offset; }
  address nul_chk_table_end     () const { return           _immutable_data + _handler_table_offset; }
  address handler_table_begin   () const { return           _immutable_data + _handler_table_offset; }
  address handler_table_end     () const { return           _immutable_data + _scopes_pcs_offset   ; }
  PcDesc* scopes_pcs_begin      () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset)  ; }
  PcDesc* scopes_pcs_end        () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
  address scopes_data_begin     () const { return           _immutable_data + _scopes_data_offset  ; }

#if INCLUDE_JVMCI
  address scopes_data_end       () const { return           _immutable_data + _speculations_offset ; }
  address speculations_begin    () const { return           _immutable_data + _speculations_offset ; }
  address speculations_end      () const { return           _immutable_data + _immutable_data_ref_count_offset ; }
#else
  address scopes_data_end       () const { return           _immutable_data + _immutable_data_ref_count_offset ; }
#endif
  address immutable_data_ref_count_begin () const { return  _immutable_data + _immutable_data_ref_count_offset ; }
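
  // Layout sketch derived from the accessors above (offsets ascend within the
  // immutable data blob; the speculations section exists only with JVMCI):
  //
  //   [dependencies][nul_chk_table][handler_table][scopes_pcs][scopes_data]
  //       JVMCI_ONLY([speculations]) [ref_count]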

  // Sizes
  int immutable_data_size() const { return _immutable_data_size; }
  int consts_size        () const { return int(          consts_end       () -           consts_begin       ()); }
  int insts_size         () const { return int(          insts_end        () -           insts_begin        ()); }
  int stub_size          () const { return int(          stub_end         () -           stub_begin         ()); }
  int oops_size          () const { return int((address) oops_end         () - (address) oops_begin         ()); }
  int metadata_size      () const { return int((address) metadata_end     () - (address) metadata_begin     ()); }
  int scopes_data_size   () const { return int(          scopes_data_end  () -           scopes_data_begin  ()); }
  int scopes_pcs_size    () const { return int((intptr_t)scopes_pcs_end   () - (intptr_t)scopes_pcs_begin   ()); }
  int dependencies_size  () const { return int(          dependencies_end () -           dependencies_begin ()); }
  int handler_table_size () const { return int(          handler_table_end() -           handler_table_begin()); }
  int nul_chk_table_size () const { return int(          nul_chk_table_end() -           nul_chk_table_begin()); }
#if INCLUDE_JVMCI
  int speculations_size  () const { return int(          speculations_end () -           speculations_begin ()); }
  int jvmci_data_size    () const { return int(          jvmci_data_end   () -           jvmci_data_begin   ()); }
#endif

  int     oops_count() const { assert(oops_size() % oopSize == 0, "");  return (oops_size() / oopSize) + 1; }
  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }

  int skipped_instructions_size () const { return _skipped_instructions_size; }
  int total_size() const;

  // Containment
  bool consts_contains         (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  // Returns true if a given address is in the 'insts' section. The method
  // insts_contains_inclusive() is end-inclusive.
  bool insts_contains          (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
  bool insts_contains_inclusive(address addr) const { return insts_begin        () <= addr && addr <= insts_end       (); }
  bool stub_contains           (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool oops_contains           (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool metadata_contains       (Metadata** addr) const { return metadata_begin  () <= addr && addr < metadata_end     (); }
  bool scopes_data_contains    (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains     (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains  (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains  (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const          { return code_begin() + _entry_offset;          } // normal entry point
  address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
  address inline_entry_point() const              { return code_begin() + _inline_entry_offset; }             // inline type entry point (unpack all inline type args)
  address verified_inline_entry_point() const     { return code_begin() + _verified_inline_entry_offset; }    // inline type entry point (unpack all inline type args) without class check
  address verified_inline_ro_entry_point() const  { return code_begin() + _verified_inline_ro_entry_offset; } // inline type entry point (only unpack receiver) without class check

  enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
                                           // allowed to advance state
                       in_use        = 0,  // executable nmethod
                       not_entrant   = 1   // marked for deoptimization but activations may still exist
  };

  // flag accessing and manipulation
  bool is_not_installed() const        { return _state == not_installed; }
  bool is_in_use() const               { return _state <= in_use; }
  bool is_not_entrant() const          { return _state == not_entrant; }
  int  get_state() const               { return _state; }

  void clear_unloading_state();
  // Heuristically deduce whether this nmethod isn't worth keeping around
  bool is_cold();
  bool is_unloading();
  void do_unloading(bool unloading_occurred);

  bool make_in_use() {
    return try_transition(in_use);
  }
  // Make the nmethod non entrant. The nmethod will continue to be
  // alive.  It is used when an uncommon trap happens.  Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool  make_not_entrant(InvalidationReason invalidation_reason);
  bool  make_not_used() { return make_not_entrant(InvalidationReason::NOT_USED); }

  bool  is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
  bool  has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
  void  set_deoptimized_done();

  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
    DeoptimizationStatus status = deoptimization_status();
    return status != deoptimize_noupdate && status != deoptimize_done;
  }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  bool has_dependencies()                         { return dependencies_size() != 0; }
  void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
  void flush_dependencies();

  template<typename T>
  T* gc_data() const                              { return reinterpret_cast<T*>(_gc_data); }
  template<typename T>
  void set_gc_data(T* gc_data)                    { _gc_data = reinterpret_cast<void*>(gc_data); }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

  bool  has_monitors() const                      { return _has_monitors; }
  void  set_has_monitors(bool z)                  { _has_monitors = z; }

  bool  has_scoped_access() const                 { return _has_scoped_access; }
  void  set_has_scoped_access(bool z)             { _has_scoped_access = z; }

  bool  has_wide_vectors() const                  { return _has_wide_vectors; }
  void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }

  bool  needs_stack_repair() const {
    if (is_compiled_by_c1()) {
      return method()->c1_needs_stack_repair();
    } else if (is_compiled_by_c2()) {
      return method()->c2_needs_stack_repair();
    } else {
      return false;
    }
  }

  bool  has_flushed_dependencies() const          { return _has_flushed_dependencies; }
  void  set_has_flushed_dependencies(bool z)      {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = z;
  }

  bool  is_unlinked() const                       { return _is_unlinked; }
  void  set_is_unlinked()                         {
    assert(!_is_unlinked, "already unlinked");
    _is_unlinked = true;
  }

  int   comp_level() const                        { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop   oop_at(int index) const;
  oop   oop_at_phantom(int index) const; // phantom reference
  oop*  oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    return &oops_begin()[index - 1];
  }

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*   metadata_at(int index) const      { return index == 0 ? nullptr : *metadata_addr_at(index); }
  Metadata**  metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }
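
  // Example of the 1-biased indexing (derived from the accessors above):
  // oop_at(1) and metadata_at(1) read the first pool slot, i.e.
  // oops_begin()[0] and metadata_begin()[0]; index 0 always denotes null.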

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);
  void copy_values(GrowableArray<address>* metadata) {} // Nothing to do

  // Relocation support
private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

protected:
  address oops_reloc_begin() const;

public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(nullptr, nullptr, false); }

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

protected:
  // Exception cache support
  // Note: _exception_cache may be read and cleaned concurrently.
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  ExceptionCache* exception_cache_acquire() const;

public:
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache();

  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);


  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  inline bool is_deopt_pc(address pc);
  inline bool is_deopt_entry(address pc);

  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  const char* state() const;

  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && (addr < verified_entry_point() || addr < verified_inline_entry_point()));
  }

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);

  // implicit exceptions support
  address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
  address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }

  // Inline cache support for class unloading and nmethod unloading
 private:
  void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);

  address continuation_for_implicit_exception(address pc, bool for_div0_check);

 public:
  // Serial version used by whitebox test
  void cleanup_inline_caches_whitebox();

  void clear_inline_caches();

  // Execute nmethod barrier code, as if entering through nmethod call.
  void run_nmethod_entry_barrier();

  void verify_oop_relocations();

  bool has_evol_metadata();

  Method* attached_method(address call_pc);
  Method* attached_method_before_pc(address pc);

  // GC unloading support
  // Cleans unloaded klasses and unloaded nmethods in inline caches

  void unload_nmethod_caches(bool class_unloading_occurred);

  void unlink_from_method();

  // On-stack replacement support
  int      osr_entry_bci()    const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry()        const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  nmethod* osr_link()         const { return _osr_link; }
  void     set_osr_link(nmethod *n) { _osr_link = n; }
  void     invalidate_osr_method();

  int num_stack_arg_slots(bool rounded = true) const {
    return rounded ? align_up(_num_stack_arg_slots, 2) : _num_stack_arg_slots;
  }
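  // Rounding example (derived from align_up above): an odd slot count such as
  // 5 is reported as 6 when rounded, presumably to keep the stack pointer
  // aligned to a two-slot boundary; pass rounded = false for the raw count.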

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();

  // Unlink this nmethod from the system
  void unlink();

  // Deallocate this nmethod - called by the GC
  void purge(bool unregister_nmethod);

  // See comment at definition of _last_seen_on_stack
  void mark_as_maybe_on_stack();
  bool is_maybe_on_stack();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  // Gets the JVMCI name of this nmethod.
  const char* jvmci_name();

  // Records the pending failed speculation in the
  // JVMCI speculation log associated with this nmethod.
  void update_speculation(JavaThread* thread);

  // Gets the data specific to a JVMCI compiled method.
  // This returns a non-nullptr value iff this nmethod was
  // compiled by the JVMCI compiler.
  JVMCINMethodData* jvmci_nmethod_data() const {
    return jvmci_data_size() == 0 ? nullptr : (JVMCINMethodData*) jvmci_data_begin();
  }

  // Returns true if the runtime should NOT collect deoptimization profile for a JVMCI
  // compiled method
  bool jvmci_skip_profile_deopt() const;
#endif

  void oops_do(OopClosure* f);

  // All-in-one claiming of nmethods: returns true if the caller successfully claimed that
  // nmethod.
  bool oops_do_try_claim();

  // Loom support for following nmethods on the stack
  void follow_nmethod(OopIterateClosure* cl);

  // Class containing callbacks for the oops_do_process_weak/strong() methods
  // below.
  class OopsDoProcessor {
  public:
    // Process the oops of the given nmethod based on whether it has been called
    // in a weak or strong processing context, i.e. apply either weak or strong
    // work on it.
    virtual void do_regular_processing(nmethod* nm) = 0;
    // Assuming the given nmethod has already had weak processing applied to
    // its oops, apply the remaining strong processing part.
    virtual void do_remaining_strong_processing(nmethod* nm) = 0;
  };

  // The following two methods do the work corresponding to weak/strong nmethod
  // processing.
  void oops_do_process_weak(OopsDoProcessor* p);
  void oops_do_process_strong(OopsDoProcessor* p);

  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr);

  // used by jvmti to track if the load event has been reported
  bool  load_reported() const                     { return _load_reported; }
  void  set_load_reported()                       { _load_reported = true; }

  inline void init_immutable_data_ref_count() {
    assert(is_not_installed(), "should be called in nmethod constructor");
    *((int*)immutable_data_ref_count_begin()) = 1;
  }

  inline int inc_immutable_data_ref_count() {
    assert_lock_strong(CodeCache_lock);
    int* ref_count = (int*)immutable_data_ref_count_begin();
    assert(*ref_count > 0, "Must be positive");
    return ++(*ref_count);
  }

  inline int dec_immutable_data_ref_count() {
    assert_lock_strong(CodeCache_lock);
    int* ref_count = (int*)immutable_data_ref_count_begin();
    assert(*ref_count > 0, "Must be positive");
    return --(*ref_count);
  }
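
  // Lifecycle sketch (an assumption based on the helpers above, not a spec):
  // the counter starts at 1 in the constructor, is incremented when another
  // nmethod (e.g. a relocated copy) shares this immutable data, and the blob
  // may be freed once a decrement returns 0. All updates happen under
  // CodeCache_lock.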

  static void add_delayed_compiled_method_load_event(nmethod* nm) NOT_CDS_RETURN;

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);
  ScopeDesc* scope_desc_near(address pc);

  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  int orig_pc_offset() { return _orig_pc_offset; }

  // Post successful compilation
  void post_compiled_method(CompileTask* task);

  // jvmti support:
  void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point, bool is_inline_cache);

  // Disassemble this nmethod with additional debug information, e.g. information about blocks.
  void decode2(outputStream* st) const;
  void print_constant_pool(outputStream* st);

  // Avoid hiding of parent's 'decode(outputStream*)' method.
  void decode(outputStream* st) const { decode2(st); } // just delegate here.

  // AOT cache support
  static void post_delayed_compiled_method_load_events() NOT_CDS_RETURN;

  // printing support
  void print_on_impl(outputStream* st) const;
  void print_code();
  void print_value_on_impl(outputStream* st) const;
  void print_code_snippet(outputStream* st, address addr) const;

#if defined(SUPPORT_DATA_STRUCTS)
  // print output in opt build for disassembler library
  void print_relocations()                        PRODUCT_RETURN;
  void print_pcs_on(outputStream* st);
  void print_scopes() { print_scopes_on(tty); }
  void print_scopes_on(outputStream* st)          PRODUCT_RETURN;
  void print_handler_table();
  void print_nul_chk_table();
  void print_recorded_oop(int log_n, int index);
  void print_recorded_oops();
  void print_recorded_metadata();

  void print_oops(outputStream* st);     // oops from the underlying CodeBlob.
  void print_metadata(outputStream* st); // metadata in metadata pool.
#else
  void print_pcs_on(outputStream* st) { return; }
#endif

  void print_calls(outputStream* st)              PRODUCT_RETURN;
  static void print_statistics()                  PRODUCT_RETURN;

  void maybe_print_nmethod(const DirectiveSet* directive);
  void print_nmethod(bool print_code);

  void print_on_with_msg(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_relocated_nmethod(nmethod* original) const;
  void log_state_change(InvalidationReason invalidation_reason) const;

  // Prints block-level comments, including nmethod specific block labels:
  void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
  const char* nmethod_section_label(address pos) const;

  // returns whether this nmethod has code comments.
  bool has_code_comment(address begin, address end);
  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Tells if this compiled method is dependent on the given method.
  // Returns true if this nmethod corresponds to the given method as well.
  // It is used for fast breakpoint support and updating the calling convention
  // in case of mismatch.
  bool is_dependent_on_method(Method* dependee);

  // JVMTI's GetLocalInstance() support
  ByteSize native_receiver_sp_offset() {
    assert(is_native_method(), "sanity");
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    assert(is_native_method(), "sanity");
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
  static ByteSize state_offset()           { return byte_offset_of(nmethod, _state); }

  void metadata_do(MetadataClosure* f);

  address call_instruction_address(address pc) const;

  void make_deoptimized();
  void finalize_relocations();

  class Vptr : public CodeBlob::Vptr {
    void print_on(const CodeBlob* instance, outputStream* st) const override {
      ttyLocker ttyl;
      instance->as_nmethod()->print_on_impl(st);
    }
    void print_value_on(const CodeBlob* instance, outputStream* st) const override {
      instance->as_nmethod()->print_value_on_impl(st);
    }
  };

  static const Vptr _vpntr;
};

struct NMethodMarkingScope : StackObj {
  NMethodMarkingScope() {
    nmethod::oops_do_marking_prologue();
  }
  ~NMethodMarkingScope() {
    nmethod::oops_do_marking_epilogue();
  }
};
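
// Usage sketch (illustrative, not actual VM code): bracket an STW root
// processing phase so the per-nmethod claim state is set up and torn down
// exactly once, regardless of how the scope is exited:
//
//   {
//     NMethodMarkingScope marking_scope;
//     // ... visit roots; claim nmethods via oops_do_try_claim() and
//     //     apply oops_do() / the OopsDoProcessor callbacks ...
//   } // epilogue runs here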

#endif // SHARE_CODE_NMETHOD_HPP