/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_NMETHOD_HPP
#define SHARE_CODE_NMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
#include "oops/method.hpp"

class AbstractCompiler;
class CompiledDirectCall;
class CompiledIC;
class CompiledICData;
class CompileTask;
class DepChange;
class Dependencies;
class DirectiveSet;
class DebugInformationRecorder;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class JvmtiThreadState;
class MetadataClosure;
class NativeCallWrapper;
class OopIterateClosure;
class AOTCodeReader;
class AOTCodeEntry;
class ScopeDesc;
class xmlStream;

// This class is used internally by nmethods, to cache
// exception/pc/handler information.
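// Each entry caches up to cache_size pc/handler pairs for a single exception
// type; entries are chained through _next, and retired entries are linked
// through _purge_list_next so they can be freed later.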

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* volatile _next;
  ExceptionCache* _purge_list_next;

  inline address pc_at(int index);
  void set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }

  inline address handler_at(int index);
  void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }

  inline int count();
  // increment_count is only called under lock, but there may be concurrent readers.
  void increment_count();

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*    exception_type()                { return _exception_type; }
  ExceptionCache* next();
  void      set_next(ExceptionCache *ec);
  ExceptionCache* purge_list_next()                 { return _purge_list_next; }
  void      set_purge_list_next(ExceptionCache *ec) { _purge_list_next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};

// cache pc descs found in earlier inquiries
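// (find_pc_desc() consults these cached entries before a full search of the
// scopes-pcs table; add_pc_desc() installs the most recent result.)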
class PcDescCache {
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently. find_pc_desc_internal has
  // returned wrong results in the past when they were not: a C++
  // compiler (namely xlC12) may duplicate field accesses if the
  // elements are not volatile.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { DEBUG_ONLY(_pc_descs[0] = nullptr); }
  void    init_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};

class PcDescContainer : public CHeapObj<mtCode> {
private:
  PcDescCache _pc_desc_cache;
public:
  PcDescContainer(PcDesc* initial_pc_desc) { _pc_desc_cache.init_to(initial_pc_desc); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate, address code_begin,
                                PcDesc* lower, PcDesc* upper);

  PcDesc* find_pc_desc(address pc, bool approximate, address code_begin, PcDesc* lower, PcDesc* upper)
#ifdef PRODUCT
  {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    assert(desc != nullptr, "PcDesc cache should be initialized already");
    if (desc->pc_offset() == (pc - code_begin)) {
      // Cached value matched
      return desc;
    }
    return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
  }
#endif
  ;
};

// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - Header                 (the nmethod structure)
//  - Constant part          (doubles, longs and floats used in nmethod)
//  - Code part:
//    - Code body
//    - Exception handler
//    - Stub code
//    - OOP table
//
// As a CodeBlob, an nmethod references [mutable data] allocated on the C heap:
//  - CodeBlob relocation data
//  - Metainfo
//  - JVMCI data
//
// An nmethod references [immutable data] allocated on the C heap:
//  - Dependency assertions data
//  - Implicit null table array
//  - Handler entry point array
//  - Debugging information:
//    - Scopes data array
//    - Scopes pcs array
//  - JVMCI speculations array
//  - Immutable data reference counter

#if INCLUDE_JVMCI
class FailedSpeculation;
class JVMCINMethodData;
#endif

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class CodeCache;  // scavengable oops
  friend class JVMCINMethodData;
  friend class DeoptimizationScope;

  #define ImmutableDataReferencesCounterSize ((int)sizeof(int))

 private:

  // Used to track in which deoptimize handshake this method will be deoptimized.
  uint64_t  _deoptimization_generation;

  uint64_t  _gc_epoch;

  // Profiling counter used to figure out the hottest nmethods to record into CDS
  volatile uint64_t _method_profiling_count;

  Method*   _method;

  // To reduce header size, union fields whose usages do not overlap.
  union {
    // To support simple linked-list chaining of nmethods:
    nmethod*  _osr_link; // from InstanceKlass::osr_nmethods_head
    struct {
      // These are used for compiled synchronized native methods to
      // locate the owner and stack slot for the BasicLock. They are
      // needed because there is no debug information for compiled native
      // wrappers and the oop maps are insufficient to allow
      // frame::retrieve_receiver() to work. Currently they are expected
      // to be byte offsets from the Java stack pointer for maximum code
      // sharing between platforms. JVMTI's GetLocalInstance() uses these
      // offsets to find the receiver for non-static native wrapper frames.
      ByteSize _native_receiver_sp_offset;
      ByteSize _native_basic_lock_sp_offset;
    };
  };

  // nmethod's read-only data
  address _immutable_data;

  PcDescContainer* _pc_desc_container;
  ExceptionCache* volatile _exception_cache;

  void* _gc_data;

  struct oops_do_mark_link; // Opaque data type.
  static nmethod*    volatile _oops_do_mark_nmethods;
  oops_do_mark_link* volatile _oops_do_mark_link;

  CompiledICData* _compiled_ic_data;

  // offsets for entry points
  address  _osr_entry_point;       // entry point for on stack replacement
  uint16_t _entry_offset;          // entry point with class check
  uint16_t _verified_entry_offset; // entry point without class check
  int      _entry_bci;             // != InvocationEntryBci if this nmethod is an on-stack replacement method
  int      _immutable_data_size;

  // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer

  int _skipped_instructions_size;

  int _stub_offset;

  // Offsets for different stubs section parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deopt_handler_offset;
  // Offset (from insts_end) of the unwind handler if it exists
  int16_t  _unwind_handler_offset;
  // Number of arguments passed on the stack
  uint16_t _num_stack_arg_slots;

  uint16_t _oops_size;
#if INCLUDE_JVMCI
  // _metadata_size is not specific to JVMCI. In the non-JVMCI case, it can be derived as:
  // _metadata_size = mutable_data_size - relocation_size
  uint16_t _metadata_size;
#endif

  // Offset in immutable data section
  // _dependencies_offset == 0
  uint16_t _nul_chk_table_offset;
  uint16_t _handler_table_offset; // This table could be big in C1 code
  int      _scopes_pcs_offset;
  int      _scopes_data_offset;
#if INCLUDE_JVMCI
  int      _speculations_offset;
#endif

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int          _compile_id;            // which compilation made this nmethod
  CompLevel    _comp_level;            // compilation level (s1)
  CompilerType _compiler_type;         // which compiler made this nmethod (u1)

  AOTCodeEntry* _aot_code_entry;

  bool _used; // has this nmethod ever been invoked?

  // Local state used to keep track of whether unloading is happening or not
  volatile uint8_t _is_unloading_state;

  // Protected by NMethodState_lock
  volatile signed char _state;         // {not_installed, in_use, not_entrant}

  // set during construction
  uint8_t _has_unsafe_access:1,        // May fault due to unsafe access.
          _has_wide_vectors:1,         // Preserve wide vectors at safepoints
          _has_monitors:1,             // Fastpath monitor detection for continuations
          _has_scoped_access:1,        // used for shared scope closure (scopedMemoryAccess.cpp)
          _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
          _is_unlinked:1,              // mark during class unloading
          _load_reported:1,            // used by jvmti to track if an event has been posted for this nmethod
          _preloaded:1,
          _has_clinit_barriers:1;

  enum DeoptimizationStatus : u1 {
    not_marked,
    deoptimize,
    deoptimize_noupdate,
    deoptimize_done
  };

  volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization

  DeoptimizationStatus deoptimization_status() const {
    return AtomicAccess::load(&_deoptimization_status);
  }

  // Initialize fields to their default values
  void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets);

  // Post initialization
  void post_init();

  // For native wrappers
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps,
          int mutable_data_size);

  // For normal JIT compiled code
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int immutable_data_size,
          int mutable_data_size,
          int compile_id,
          int entry_bci,
          address immutable_data,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          CompLevel comp_level
#if INCLUDE_JVMCI
          , char* speculations = nullptr,
          int speculations_len = 0,
          JVMCINMethodData* jvmci_data = nullptr
#endif
          );

  nmethod(const nmethod &nm);

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();
  void* operator new(size_t size, int nmethod_size, CodeBlobType code_blob_type) throw();

  // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
  // Attention: Only allow NonNMethod space for special nmethods which don't need to be
  // findable by nmethod iterators! In particular, they must not contain oops!
  void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);

  bool try_transition(signed char new_state);

  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_entrant() { Unimplemented(); return false; }
  void inc_decompile_count();

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  PcDesc* find_pc_desc(address pc, bool approximate) {
    if (_pc_desc_container == nullptr) return nullptr; // native method
    return _pc_desc_container->find_pc_desc(pc, approximate, code_begin(), scopes_pcs_begin(), scopes_pcs_end());
  }

  // STW two-phase nmethod root processing helpers.
  //
  // When determining liveness of a given nmethod to do code cache unloading,
  // some collectors need to do different things depending on whether the nmethods
  // need to absolutely be kept alive during root processing; "strong"ly reachable
  // nmethods are known to be kept alive at root processing, but the liveness of
  // "weak"ly reachable ones is to be determined later.
  //
  // We want to allow strong and weak processing of nmethods by different threads
  // at the same time without heavy synchronization. Additional constraints are
  // to make sure that every nmethod is processed only a minimal number of
  // times, and that a given nmethod is iterated by at most one thread at any
  // particular time.
  //
  // Note that strong processing work must be a superset of weak processing work
  // for this code to work.
  //
  // We store state and claim information in the _oops_do_mark_link member, using
  // the two LSBs for the state and the remaining upper bits for linking together
  // nmethods that were already visited.
  // The last element is self-looped, i.e. points to itself to avoid some special
  // "end-of-list" sentinel value.
  //
  // _oops_do_mark_link special values:
  //
  //   _oops_do_mark_link == nullptr: the nmethod has not been visited at all yet, i.e.
  //      is Unclaimed.
  //
  // For other values, its lowest two bits indicate the following states of the nmethod:
  //
  //   weak_request (WR): the nmethod has been claimed by a thread for weak processing
  //   weak_done (WD): weak processing has been completed for this nmethod.
  //   strong_request (SR): the nmethod has been found to need strong processing while
  //       being weak processed.
  //   strong_done (SD): strong processing has been completed for this nmethod.
  //
  // The following shows the _only_ possible progressions of the _oops_do_mark_link
  // pointer.
  //
  // Given
  //   N as the nmethod
  //   X the current next value of _oops_do_mark_link
  //
  // Unclaimed (C)-> N|WR (C)-> X|WD: the nmethod has been processed weakly by
  //   a single thread.
  // Unclaimed (C)-> N|WR (C)-> X|WD (O)-> X|SD: after weak processing has been
  //   completed (as above) another thread found that the nmethod needs strong
  //   processing after all.
  // Unclaimed (C)-> N|WR (O)-> N|SR (C)-> X|SD: during weak processing another
  //   thread finds that the nmethod needs strong processing, marks it as such and
  //   terminates. The original thread completes strong processing.
  // Unclaimed (C)-> N|SD (C)-> X|SD: the nmethod has been processed strongly from
  //   the beginning by a single thread.
  //
  // "|" describes the concatenation of bits in _oops_do_mark_link.
  //
  // The diagram also describes the threads responsible for changing the nmethod to
  // the next state by marking the _transition_ with (C) and (O), which mean "current"
  // and "other" thread respectively.
  //

  // States used for claiming nmethods during root processing.
  static const uint claim_weak_request_tag = 0;
  static const uint claim_weak_done_tag = 1;
  static const uint claim_strong_request_tag = 2;
  static const uint claim_strong_done_tag = 3;

  static oops_do_mark_link* mark_link(nmethod* nm, uint tag) {
    assert(tag <= claim_strong_done_tag, "invalid tag %u", tag);
    assert(is_aligned(nm, 4), "nmethod pointer must have its two low bits clear");
    return (oops_do_mark_link*)(((uintptr_t)nm & ~0x3) | tag);
  }

  static uint extract_state(oops_do_mark_link* link) {
    return (uint)((uintptr_t)link & 0x3);
  }

  static nmethod* extract_nmethod(oops_do_mark_link* link) {
    return (nmethod*)((uintptr_t)link & ~0x3);
  }
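  // For example, mark_link(nm, claim_weak_done_tag) yields nm's address with
  // 0b01 in the two low bits; extract_nmethod() masks the tag off again and
  // extract_state() recovers it.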

  void oops_do_log_change(const char* state);

  static bool oops_do_has_weak_request(oops_do_mark_link* next) {
    return extract_state(next) == claim_weak_request_tag;
  }

  static bool oops_do_has_any_strong_state(oops_do_mark_link* next) {
    return extract_state(next) >= claim_strong_request_tag;
  }

  // Attempt Unclaimed -> N|WR transition. Returns true if successful.
  bool oops_do_try_claim_weak_request();

  // Attempt Unclaimed -> N|SD transition. Returns the current link.
  oops_do_mark_link* oops_do_try_claim_strong_done();
  // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
  nmethod* oops_do_try_add_to_list_as_weak_done();

  // Attempt X|WD -> N|SR transition. Returns the current link.
  oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
  // Attempt X|WD -> X|SD transition. Returns true if successful.
  bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);

  // Do the N|SD -> X|SD transition.
  void oops_do_add_to_list_as_strong_done();

  // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
  // transitions).
  void oops_do_set_strong_done(nmethod* old_head);

  void record_nmethod_dependency();

  nmethod* restore(address code_cache_buffer,
                   const methodHandle& method,
                   int compile_id,
                   address reloc_data,
                   GrowableArray<Handle>& oop_list,
                   GrowableArray<Metadata*>& metadata_list,
                   ImmutableOopMapSet* oop_maps,
                   address immutable_data,
                   GrowableArray<Handle>& reloc_imm_oop_list,
                   GrowableArray<Metadata*>& reloc_imm_metadata_list,
                   AOTCodeReader* aot_code_reader);

public:
  // create nmethod using archived nmethod from AOT code cache
  static nmethod* new_nmethod(nmethod* archived_nm,
                              const methodHandle& method,
                              AbstractCompiler* compiler,
                              int compile_id,
                              address reloc_data,
                              GrowableArray<Handle>& oop_list,
                              GrowableArray<Metadata*>& metadata_list,
                              ImmutableOopMapSet* oop_maps,
                              address immutable_data,
                              GrowableArray<Handle>& reloc_imm_oop_list,
                              GrowableArray<Metadata*>& reloc_imm_metadata_list,
                              AOTCodeReader* aot_code_reader);

  // If you change anything in this enum please patch
  // vmStructs_jvmci.cpp accordingly.
  enum class InvalidationReason : s1 {
    NOT_INVALIDATED = -1,
    C1_CODEPATCH,
    C1_DEOPTIMIZE,
    C1_DEOPTIMIZE_FOR_PATCHING,
    C1_PREDICATE_FAILED_TRAP,
    CI_REPLAY,
    UNLOADING,
    UNLOADING_COLD,
    JVMCI_INVALIDATE,
    JVMCI_MATERIALIZE_VIRTUAL_OBJECT,
    JVMCI_REPLACED_WITH_NEW_CODE,
    JVMCI_REPROFILE,
    MARKED_FOR_DEOPTIMIZATION,
    MISSING_EXCEPTION_HANDLER,
    NOT_USED,
    OSR_INVALIDATION_BACK_BRANCH,
    OSR_INVALIDATION_FOR_COMPILING_WITH_C1,
    OSR_INVALIDATION_OF_LOWER_LEVEL,
    SET_NATIVE_FUNCTION,
    UNCOMMON_TRAP,
    WHITEBOX_DEOPTIMIZATION,
    ZOMBIE,
    INVALIDATION_REASONS_COUNT
  };

  static const char* invalidation_reason_to_string(InvalidationReason invalidation_reason) {
    switch (invalidation_reason) {
      case InvalidationReason::C1_CODEPATCH:
        return "C1 code patch";
      case InvalidationReason::C1_DEOPTIMIZE:
        return "C1 deoptimized";
      case InvalidationReason::C1_DEOPTIMIZE_FOR_PATCHING:
        return "C1 deoptimize for patching";
      case InvalidationReason::C1_PREDICATE_FAILED_TRAP:
        return "C1 predicate failed trap";
      case InvalidationReason::CI_REPLAY:
        return "CI replay";
      case InvalidationReason::UNLOADING:
        return "unloading";
      case InvalidationReason::UNLOADING_COLD:
        return "unloading cold";
      case InvalidationReason::JVMCI_INVALIDATE:
        return "JVMCI invalidate";
      case InvalidationReason::JVMCI_MATERIALIZE_VIRTUAL_OBJECT:
        return "JVMCI materialize virtual object";
      case InvalidationReason::JVMCI_REPLACED_WITH_NEW_CODE:
        return "JVMCI replaced with new code";
      case InvalidationReason::JVMCI_REPROFILE:
        return "JVMCI reprofile";
      case InvalidationReason::MARKED_FOR_DEOPTIMIZATION:
        return "marked for deoptimization";
      case InvalidationReason::MISSING_EXCEPTION_HANDLER:
        return "missing exception handler";
      case InvalidationReason::NOT_USED:
        return "not used";
      case InvalidationReason::OSR_INVALIDATION_BACK_BRANCH:
        return "OSR invalidation back branch";
      case InvalidationReason::OSR_INVALIDATION_FOR_COMPILING_WITH_C1:
        return "OSR invalidation for compiling with C1";
      case InvalidationReason::OSR_INVALIDATION_OF_LOWER_LEVEL:
        return "OSR invalidation of lower level";
      case InvalidationReason::SET_NATIVE_FUNCTION:
        return "set native function";
      case InvalidationReason::UNCOMMON_TRAP:
        return "uncommon trap";
      case InvalidationReason::WHITEBOX_DEOPTIMIZATION:
        return "whitebox deoptimization";
      case InvalidationReason::ZOMBIE:
        return "zombie";
      default: {
        assert(false, "Unhandled reason");
        return "Unknown";
      }
    }
  }

  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              CompLevel comp_level
#if INCLUDE_JVMCI
                              , char* speculations = nullptr,
                              int speculations_len = 0,
                              JVMCINMethodData* jvmci_data = nullptr
#endif
  );

  // Relocate the nmethod to the code heap identified by code_blob_type.
  // Returns nullptr if the code heap does not have enough space, the
  // nmethod is unrelocatable, or the nmethod is invalidated during relocation;
  // otherwise the relocated nmethod. The original nmethod will be marked not entrant.
  nmethod* relocate(CodeBlobType code_blob_type);

  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps,
                                     int exception_handler = -1);

  Method* method       () const { return _method; }
  int entry_bci        () const { return _entry_bci; }
  bool is_native_method() const { return _method != nullptr && _method->is_native(); }
  bool is_java_method  () const { return _method != nullptr && !_method->is_native(); }
  bool is_osr_method   () const { return _entry_bci != InvocationEntryBci; }

  int  orig_pc_offset() { return _orig_pc_offset; }
  bool is_relocatable();

  // Compiler task identification.  Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int compile_id() const { return _compile_id; }
  int comp_level() const { return _comp_level; }
  const char* compile_kind() const;

  inline bool  is_compiled_by_c1   () const { return _compiler_type == compiler_c1; }
  inline bool  is_compiled_by_c2   () const { return _compiler_type == compiler_c2; }
  inline bool  is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
  CompilerType compiler_type       () const { return _compiler_type; }
  const char*  compiler_name       () const;

  // boundaries for different parts
  address consts_begin          () const { return           content_begin(); }
  address consts_end            () const { return           code_begin()   ; }
  address insts_begin           () const { return           code_begin()   ; }
  address insts_end             () const { return           header_begin() + _stub_offset             ; }
  address stub_begin            () const { return           header_begin() + _stub_offset             ; }
  address stub_end              () const { return           code_end()     ; }
  address exception_begin       () const { return           header_begin() + _exception_offset        ; }
  address deopt_handler_begin   () const { return           header_begin() + _deopt_handler_offset    ; }
  address unwind_handler_begin  () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
  oop*    oops_begin            () const { return (oop*)    data_begin(); }
  oop*    oops_end              () const { return (oop*)    data_end(); }

  // mutable data
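  // Layout of mutable data: relocation data, then metadata, then (JVMCI only)
  // the JVMCI data.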
  Metadata** metadata_begin     () const { return (Metadata**) (mutable_data_begin() + _relocation_size); }
#if INCLUDE_JVMCI
  Metadata** metadata_end       () const { return (Metadata**) (mutable_data_begin() + _relocation_size + _metadata_size); }
  address jvmci_data_begin      () const { return               mutable_data_begin() + _relocation_size + _metadata_size; }
  address jvmci_data_end        () const { return               mutable_data_end(); }
#else
  Metadata** metadata_end       () const { return (Metadata**)  mutable_data_end(); }
#endif

  // immutable data
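  // Layout of immutable data: dependencies, nul_chk table, handler table,
  // scopes pcs, scopes data, (JVMCI only) speculations, and finally the
  // immutable data references counter.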
  void set_immutable_data(address data) { _immutable_data = data; }
  address immutable_data_begin  () const { return           _immutable_data; }
  address immutable_data_end    () const { return           _immutable_data + _immutable_data_size ; }
  address dependencies_begin    () const { return           _immutable_data; }
  address dependencies_end      () const { return           _immutable_data + _nul_chk_table_offset; }
  address nul_chk_table_begin   () const { return           _immutable_data + _nul_chk_table_offset; }
  address nul_chk_table_end     () const { return           _immutable_data + _handler_table_offset; }
  address handler_table_begin   () const { return           _immutable_data + _handler_table_offset; }
  address handler_table_end     () const { return           _immutable_data + _scopes_pcs_offset   ; }
  PcDesc* scopes_pcs_begin      () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset)  ; }
  PcDesc* scopes_pcs_end        () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
  address scopes_data_begin     () const { return           _immutable_data + _scopes_data_offset  ; }

#if INCLUDE_JVMCI
  address scopes_data_end       () const { return           _immutable_data + _speculations_offset ; }
  address speculations_begin    () const { return           _immutable_data + _speculations_offset ; }
  address speculations_end      () const { return           immutable_data_end() - ImmutableDataReferencesCounterSize ; }
#else
  address scopes_data_end       () const { return           immutable_data_end() - ImmutableDataReferencesCounterSize ; }
#endif

  address immutable_data_references_counter_begin () const { return immutable_data_end() - ImmutableDataReferencesCounterSize ; }

  // Sizes
  int immutable_data_size() const { return _immutable_data_size; }
  int consts_size        () const { return int(          consts_end       () -           consts_begin       ()); }
  int insts_size         () const { return int(          insts_end        () -           insts_begin        ()); }
  int stub_size          () const { return int(          stub_end         () -           stub_begin         ()); }
  int oops_size          () const { return int((address) oops_end         () - (address) oops_begin         ()); }
  int metadata_size      () const { return int((address) metadata_end     () - (address) metadata_begin     ()); }
  int scopes_data_size   () const { return int(          scopes_data_end  () -           scopes_data_begin  ()); }
  int scopes_pcs_size    () const { return int((intptr_t)scopes_pcs_end   () - (intptr_t)scopes_pcs_begin   ()); }
  int dependencies_size  () const { return int(          dependencies_end () -           dependencies_begin ()); }
  int handler_table_size () const { return int(          handler_table_end() -           handler_table_begin()); }
  int nul_chk_table_size () const { return int(          nul_chk_table_end() -           nul_chk_table_begin()); }
#if INCLUDE_JVMCI
  int speculations_size  () const { return int(          speculations_end () -           speculations_begin ()); }
  int jvmci_data_size    () const { return int(          jvmci_data_end   () -           jvmci_data_begin   ()); }
#endif

  int     oops_count() const { assert(oops_size() % oopSize == 0, "");  return (oops_size() / oopSize) + 1; }
  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }

  int skipped_instructions_size () const { return _skipped_instructions_size; }
  int total_size() const;

  // Containment
  bool consts_contains         (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  // Returns true if a given address is in the 'insts' section. The method
  // insts_contains_inclusive() is end-inclusive.
  bool insts_contains          (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
  bool insts_contains_inclusive(address addr) const { return insts_begin        () <= addr && addr <= insts_end       (); }
  bool stub_contains           (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool oops_contains           (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool metadata_contains       (Metadata** addr) const { return metadata_begin  () <= addr && addr < metadata_end     (); }
  bool scopes_data_contains    (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains     (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains  (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains  (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const          { return code_begin() + _entry_offset;          } // normal entry point
  address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct

  enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
                                           // allowed to advance state
                       in_use        = 0,  // executable nmethod
                       not_entrant   = 1   // marked for deoptimization but activations may still exist
  };
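  // State transitions are monotonic (see try_transition()):
  // not_installed -> in_use -> not_entrant. Note that is_in_use() below also
  // reports true for a not yet installed nmethod (_state <= in_use).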

  // flag accessing and manipulation
  bool is_not_installed() const        { return _state == not_installed; }
  bool is_in_use() const               { return _state <= in_use; }
  bool is_not_entrant() const          { return _state == not_entrant; }
  int  get_state() const               { return _state; }

  void clear_unloading_state();
  // Heuristically deduce that an nmethod isn't worth keeping around
  bool is_cold();
  bool is_unloading();
  void do_unloading(bool unloading_occurred);

  void inc_method_profiling_count();
  uint64_t method_profiling_count();

  bool make_in_use() {
    return try_transition(in_use);
  }
  // Make the nmethod non-entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool  make_not_entrant(InvalidationReason invalidation_reason, bool keep_aot_entry = false);
  bool  make_not_used() { return make_not_entrant(InvalidationReason::NOT_USED, true /* keep AOT entry */); }

  bool  is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
  bool  has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
  void  set_deoptimized_done();

  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
    DeoptimizationStatus status = deoptimization_status();
    return status != deoptimize_noupdate && status != deoptimize_done;
  }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  bool has_dependencies()                         { return dependencies_size() != 0; }
  void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
  void flush_dependencies();

  template<typename T>
  T* gc_data() const                              { return reinterpret_cast<T*>(_gc_data); }
  template<typename T>
  void set_gc_data(T* gc_data)                    { _gc_data = reinterpret_cast<void*>(gc_data); }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

  bool  has_monitors() const                      { return _has_monitors; }
  void  set_has_monitors(bool z)                  { _has_monitors = z; }

  bool  has_scoped_access() const                 { return _has_scoped_access; }
  void  set_has_scoped_access(bool z)             { _has_scoped_access = z; }

  bool  has_wide_vectors() const                  { return _has_wide_vectors; }
  void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }

  bool  has_clinit_barriers() const               { return _has_clinit_barriers; }
  void  set_has_clinit_barriers(bool z)           { _has_clinit_barriers = z; }

  bool  preloaded() const                         { return _preloaded; }
  void  set_preloaded(bool z)                     { _preloaded = z; }

  bool  has_flushed_dependencies() const          { return _has_flushed_dependencies; }
  void  set_has_flushed_dependencies(bool z)      {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = z;
  }

  bool  is_unlinked() const                       { return _is_unlinked; }
  void  set_is_unlinked()                         {
    assert(!_is_unlinked, "already unlinked");
    _is_unlinked = true;
  }

  bool  used() const                              { return _used; }
  void  set_used()                                { _used = true; }

  bool is_aot() const                             { return _aot_code_entry != nullptr; }
  void set_aot_code_entry(AOTCodeEntry* entry)    { _aot_code_entry = entry; }
  AOTCodeEntry* aot_code_entry() const            { return _aot_code_entry; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop   oop_at(int index) const;
  oop   oop_at_phantom(int index) const; // phantom reference
  oop*  oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
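    // e.g. index 1 refers to oops_begin()[0]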
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    return &oops_begin()[index - 1];
  }

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*   metadata_at(int index) const      { return index == 0 ? nullptr: *metadata_addr_at(index); }
  Metadata**  metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }

  void copy_values(GrowableArray<Handle>* array);
  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);
  void copy_values(GrowableArray<address>* metadata) {} // Nothing to do

  // Relocation support
private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

protected:
  address oops_reloc_begin() const;

public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(nullptr, nullptr, false); }

  void create_reloc_immediates_list(JavaThread* thread, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list);

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

protected:
  // Exception cache support
  // Note: _exception_cache may be read and cleaned concurrently.
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  ExceptionCache* exception_cache_acquire() const;

public:
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache();

  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  inline bool is_deopt_pc(address pc);
  inline bool is_deopt_entry(address pc);

  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  const char* state() const;

  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);

  // implicit exceptions support
  address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
  address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }

  // Inline cache support for class unloading and nmethod unloading
 private:
  void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);

  address continuation_for_implicit_exception(address pc, bool for_div0_check);

 public:
  // Serial version used by whitebox test
  void cleanup_inline_caches_whitebox();

  void clear_inline_caches();

  // Execute nmethod barrier code, as if entering through nmethod call.
  void run_nmethod_entry_barrier();

  void verify_oop_relocations();

  bool has_evol_metadata();

  Method* attached_method(address call_pc);
  Method* attached_method_before_pc(address pc);

  // GC unloading support
  // Cleans unloaded klasses and unloaded nmethods in inline caches

  void unload_nmethod_caches(bool class_unloading_occurred);

  void unlink_from_method();

  // On-stack replacement support
  int      osr_entry_bci()    const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry()        const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  nmethod* osr_link()         const { return _osr_link; }
  void     set_osr_link(nmethod *n) { _osr_link = n; }
  void     invalidate_osr_method();

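  // The stack-argument slot count, padded to an even number of slots when
  // rounded (the default); rounded == false returns the raw count.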
  int num_stack_arg_slots(bool rounded = true) const {
    return rounded ? align_up(_num_stack_arg_slots, 2) : _num_stack_arg_slots;
  }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();

  // Unlink this nmethod from the system
  void unlink();

  // Deallocate this nmethod - called by the GC
  void purge(bool unregister_nmethod);

  // See comment at definition of _gc_epoch
  void mark_as_maybe_on_stack();
  bool is_maybe_on_stack();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  // Gets the JVMCI name of this nmethod.
  const char* jvmci_name();

  // Records the pending failed speculation in the
  // JVMCI speculation log associated with this nmethod.
  void update_speculation(JavaThread* thread);

  // Gets the data specific to a JVMCI compiled method.
  // This returns a non-nullptr value iff this nmethod was
  // compiled by the JVMCI compiler.
  JVMCINMethodData* jvmci_nmethod_data() const {
    return jvmci_data_size() == 0 ? nullptr : (JVMCINMethodData*) jvmci_data_begin();
  }

  // Returns true if the runtime should NOT collect deoptimization profile for a JVMCI
  // compiled method
  bool jvmci_skip_profile_deopt() const;
#endif

  void oops_do(OopClosure* f);

  // All-in-one claiming of nmethods: returns true if the caller successfully claimed that
  // nmethod.
  bool oops_do_try_claim();

  // Loom support for following nmethods on the stack
  void follow_nmethod(OopIterateClosure* cl);

  // Class containing callbacks for the oops_do_process_weak/strong() methods
  // below.
  class OopsDoProcessor {
  public:
    // Process the oops of the given nmethod based on whether it has been called
    // in a weak or strong processing context, i.e. apply either weak or strong
    // work on it.
    virtual void do_regular_processing(nmethod* nm) = 0;
    // Assuming the given nmethod has already had its weak processing
    // applied, apply the remaining strong processing part.
    virtual void do_remaining_strong_processing(nmethod* nm) = 0;
  };

  // The following two methods do the work corresponding to weak/strong nmethod
  // processing.
  void oops_do_process_weak(OopsDoProcessor* p);
  void oops_do_process_strong(OopsDoProcessor* p);

  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr);

  // used by jvmti to track if the load event has been reported
  bool  load_reported() const                     { return _load_reported; }
  void  set_load_reported()                       { _load_reported = true; }

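  // The reference counter is stored in the last ImmutableDataReferencesCounterSize
  // bytes of the immutable data blob (see immutable_data_references_counter_begin()).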
  inline int  get_immutable_data_references_counter()           { return *((int*)immutable_data_references_counter_begin());  }
  inline void set_immutable_data_references_counter(int count)  { *((int*)immutable_data_references_counter_begin()) = count; }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);
  ScopeDesc* scope_desc_near(address pc);

  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Post successful compilation
  void post_compiled_method(CompileTask* task);

  // jvmti support:
  void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point, bool is_inline_cache);

  // Disassemble this nmethod with additional debug information, e.g. information about blocks.
  void decode2(outputStream* st) const;
  void print_constant_pool(outputStream* st);

  // Avoid hiding of parent's 'decode(outputStream*)' method.
  void decode(outputStream* st) const { decode2(st); } // just delegate here.

  // printing support
  void print_on_impl(outputStream* st) const;
  void print_code();
  void print_value_on_impl(outputStream* st) const;

#if defined(SUPPORT_DATA_STRUCTS)
  // print output in opt build for disassembler library
  void print_relocations_on(outputStream* st)     PRODUCT_RETURN;
  void print_pcs_on(outputStream* st);
  void print_scopes() { print_scopes_on(tty); }
  void print_scopes_on(outputStream* st)          PRODUCT_RETURN;
  void print_handler_table();
  void print_nul_chk_table();
  void print_recorded_oop(int log_n, int index);
  void print_recorded_oops();
  void print_recorded_metadata();

  void print_oops(outputStream* st);     // oops from the underlying CodeBlob.
  void print_metadata(outputStream* st); // metadata in metadata pool.
#else
  void print_pcs_on(outputStream* st) { return; }
#endif

  void print_calls(outputStream* st)              PRODUCT_RETURN;
  static void print_statistics()                  PRODUCT_RETURN;

  void maybe_print_nmethod(const DirectiveSet* directive);
  void print_nmethod(bool print_code);

  void print_on_with_msg(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_relocated_nmethod(nmethod* original) const;
  void log_state_change(InvalidationReason invalidation_reason) const;

  // Prints block-level comments, including nmethod specific block labels:
  void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
  const char* nmethod_section_label(address pos) const;

  // returns whether this nmethod has code comments.
  bool has_code_comment(address begin, address end);
  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(Method* dependee);

  // JVMTI's GetLocalInstance() support
  ByteSize native_receiver_sp_offset() {
    assert(is_native_method(), "sanity");
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    assert(is_native_method(), "sanity");
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
  static ByteSize state_offset()           { return byte_offset_of(nmethod, _state); }

  void metadata_do(MetadataClosure* f);

  address call_instruction_address(address pc) const;

  void make_deoptimized();
  void finalize_relocations();

  void prepare_for_archiving_impl();

  class Vptr : public CodeBlob::Vptr {
    void print_on(const CodeBlob* instance, outputStream* st) const override {
      ttyLocker ttyl;
      instance->as_nmethod()->print_on_impl(st);
    }
    void print_value_on(const CodeBlob* instance, outputStream* st) const override {
      instance->as_nmethod()->print_value_on_impl(st);
    }
    void prepare_for_archiving(CodeBlob* instance) const override {
      ((nmethod*)instance)->prepare_for_archiving_impl();
    }
  };

  static const Vptr _vpntr;
};

struct NMethodMarkingScope : StackObj {
  NMethodMarkingScope() {
    nmethod::oops_do_marking_prologue();
  }
  ~NMethodMarkingScope() {
    nmethod::oops_do_marking_epilogue();
  }
};
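
// Usage sketch: bracket an oops_do marking cycle with a marking scope so the
// prologue and epilogue always pair up, e.g.
//
//   {
//     NMethodMarkingScope marking_scope;
//     // ... claim and process nmethods via oops_do_process_weak/strong() ...
//   }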

#endif // SHARE_CODE_NMETHOD_HPP