/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_NMETHOD_HPP
#define SHARE_CODE_NMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
#include "oops/method.hpp"

class AbstractCompiler;
class CompiledDirectCall;
class CompiledIC;
class CompiledICData;
class CompileTask;
class DepChange;
class Dependencies;
class DirectiveSet;
class DebugInformationRecorder;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class JvmtiThreadState;
class MetadataClosure;
class NativeCallWrapper;
class OopIterateClosure;
class ScopeDesc;
class xmlStream;

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* volatile _next;
  ExceptionCache* _purge_list_next;

  inline address pc_at(int index);
  void set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }

  inline address handler_at(int index);
  void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }

  inline int count();
  // increment_count is only called under lock, but there may be concurrent readers.
  void increment_count();

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*    exception_type()                { return _exception_type; }
  ExceptionCache* next();
  void      set_next(ExceptionCache* ec);
  ExceptionCache* purge_list_next()                 { return _purge_list_next; }
  void      set_purge_list_next(ExceptionCache* ec) { _purge_list_next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
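
// Illustrative sketch (not part of this header): the intended lookup/fill
// protocol around this cache, in terms of the nmethod entry points declared
// further below (handler_for_exception_and_pc / add_handler_for_exception_and_pc).
// The slow path shown as a placeholder stands in for the real exception
// handler table search.
//
//   address handler = nm->handler_for_exception_and_pc(exception, pc);
//   if (handler == nullptr) {
//     handler = /* slow path: search the exception handler table */;
//     nm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }
//
// Each ExceptionCache node covers one exception type and caches up to
// cache_size pc/handler pairs; nodes are chained through _next.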

// cache pc descs found in earlier inquiries
class PcDescCache {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently. Without volatile, a C++
  // compiler (namely xlC12) may duplicate field accesses, and
  // find_pc_desc_internal has been observed to return wrong results.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = nullptr); }
  void    init_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
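
// Illustrative sketch (not part of this header): because the slots are
// volatile and updated concurrently, a reader must load a slot exactly once
// into a local before examining it, e.g.
//
//   PcDesc* d = _pc_descs[i];  // single volatile load
//   if (d != nullptr && d->pc_offset() == pc_offset) return d;
//
// Re-reading _pc_descs[i] after the null check could observe a different
// PcDesc* written by a concurrent add_pc_desc() call.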

class PcDescContainer : public CHeapObj<mtCode> {
private:
  PcDescCache _pc_desc_cache;
public:
  PcDescContainer(PcDesc* initial_pc_desc) { _pc_desc_cache.init_to(initial_pc_desc); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate, address code_begin,
                                PcDesc* lower, PcDesc* upper);

  PcDesc* find_pc_desc(address pc, bool approximate, address code_begin, PcDesc* lower, PcDesc* upper)
#ifdef PRODUCT
  {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    assert(desc != nullptr, "PcDesc cache should be initialized already");
    if (desc->pc_offset() == (pc - code_begin)) {
      // Cached value matched
      return desc;
    }
    return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
  }
#endif
  ;
};

// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array
//  [Speculations]
//  - encoded speculations array
//  [JVMCINMethodData]
//  - meta data for JVMCI compiled nmethod

#if INCLUDE_JVMCI
class FailedSpeculation;
class JVMCINMethodData;
#endif

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class CodeCache;  // scavengable oops
  friend class JVMCINMethodData;
  friend class DeoptimizationScope;

 private:

  // Used to track in which deoptimize handshake this method will be deoptimized.
  uint64_t  _deoptimization_generation;

  uint64_t  _gc_epoch;

  Method*   _method;

  // To reduce header size, union fields whose usages do not overlap.
  union {
    // To support simple linked-list chaining of nmethods:
    nmethod*  _osr_link; // from InstanceKlass::osr_nmethods_head
    struct {
      // These are used for compiled synchronized native methods to
      // locate the owner and stack slot for the BasicLock. They are
      // needed because there is no debug information for compiled native
      // wrappers and the oop maps are insufficient to allow
      // frame::retrieve_receiver() to work. Currently they are expected
      // to be byte offsets from the Java stack pointer for maximum code
      // sharing between platforms. JVMTI's GetLocalInstance() uses these
      // offsets to find the receiver for non-static native wrapper frames.
      ByteSize _native_receiver_sp_offset;
      ByteSize _native_basic_lock_sp_offset;
    };
  };

  // nmethod's read-only data
  address _immutable_data;

  PcDescContainer* _pc_desc_container;
  ExceptionCache* volatile _exception_cache;

  void* _gc_data;

  struct oops_do_mark_link; // Opaque data type.
  static nmethod*    volatile _oops_do_mark_nmethods;
  oops_do_mark_link* volatile _oops_do_mark_link;

  CompiledICData* _compiled_ic_data;

  // offsets for entry points
  address  _osr_entry_point;       // entry point for on stack replacement
  uint16_t _entry_offset;          // entry point with class check
  uint16_t _verified_entry_offset; // entry point without class check
  int      _entry_bci;             // != InvocationEntryBci if this nmethod is an on-stack replacement method
  int      _immutable_data_size;

  // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer

  int _skipped_instructions_size;

  int _stub_offset;

  // Offsets for different stubs section parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deopt_handler_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
  int _deopt_mh_handler_offset;
  // Offset (from insts_end) of the unwind handler if it exists
  int16_t  _unwind_handler_offset;
  // Number of arguments passed on the stack
  uint16_t _num_stack_arg_slots;

  // Offsets in mutable data section
  // _oops_offset == _data_offset, offset where embedded oop table begins (inside data)
  uint16_t _metadata_offset; // embedded meta data table
#if INCLUDE_JVMCI
  uint16_t _jvmci_data_offset;
#endif

  // Offset in immutable data section
  // _dependencies_offset == 0
  uint16_t _nul_chk_table_offset;
  uint16_t _handler_table_offset; // This table could be big in C1 code
  int      _scopes_pcs_offset;
  int      _scopes_data_offset;
#if INCLUDE_JVMCI
  int      _speculations_offset;
#endif

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int          _compile_id;            // which compilation made this nmethod
  CompLevel    _comp_level;            // compilation level (s1)
  CompilerType _compiler_type;         // which compiler made this nmethod (u1)

  // Local state used to keep track of whether unloading is happening or not
  volatile uint8_t _is_unloading_state;

  // Protected by NMethodState_lock
  volatile signed char _state;         // {not_installed, in_use, not_entrant}

  // set during construction
  uint8_t _has_unsafe_access:1,        // May fault due to unsafe access.
          _has_method_handle_invokes:1,// Has this method MethodHandle invokes?
          _has_wide_vectors:1,         // Preserve wide vectors at safepoints
          _has_monitors:1,             // Fastpath monitor detection for continuations
          _has_scoped_access:1,        // used for the shared scope closure (scopedMemoryAccess.cpp)
          _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
          _is_unlinked:1,              // mark during class unloading
          _load_reported:1;            // used by jvmti to track if an event has been posted for this nmethod

  enum DeoptimizationStatus : u1 {
    not_marked,
    deoptimize,
    deoptimize_noupdate,
    deoptimize_done
  };

  volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization

  DeoptimizationStatus deoptimization_status() const {
    return Atomic::load(&_deoptimization_status);
  }

  // Initialize fields to their default values
  void init_defaults(CodeBuffer* code_buffer, CodeOffsets* offsets);

  // Post initialization
  void post_init();

  // For native wrappers
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

  // For normal JIT compiled code
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int immutable_data_size,
          int compile_id,
          int entry_bci,
          address immutable_data,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder* recorder,
          Dependencies* dependencies,
          CodeBuffer* code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          CompLevel comp_level
#if INCLUDE_JVMCI
          , char* speculations = nullptr,
          int speculations_len = 0,
          JVMCINMethodData* jvmci_data = nullptr
#endif
          );

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();

  // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
  // Attention: Only allow NonNMethod space for special nmethods which don't need to be
  // findable by nmethod iterators! In particular, they must not contain oops!
  void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);

  bool try_transition(signed char new_state);

  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_entrant() { Unimplemented(); return false; }
  void inc_decompile_count();

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  PcDesc* find_pc_desc(address pc, bool approximate) {
    if (_pc_desc_container == nullptr) return nullptr; // native method
    return _pc_desc_container->find_pc_desc(pc, approximate, code_begin(), scopes_pcs_begin(), scopes_pcs_end());
  }

  // STW two-phase nmethod root processing helpers.
  //
  // When determining liveness of a given nmethod to do code cache unloading,
  // some collectors need to do different things depending on whether the nmethods
  // need to absolutely be kept alive during root processing; "strong"ly reachable
  // nmethods are known to be kept alive at root processing, but the liveness of
  // "weak"ly reachable ones is to be determined later.
  //
  // We want to allow strong and weak processing of nmethods by different threads
  // at the same time without heavy synchronization. Additional constraints are
  // to make sure that every nmethod is processed only a minimal number of times,
  // and that a given nmethod is iterated by at most one thread at any given time.
  //
  // Note that strong processing work must be a superset of weak processing work
  // for this code to work.
  //
  // We store state and claim information in the _oops_do_mark_link member, using
  // the two LSBs for the state and the remaining upper bits for linking together
  // nmethods that were already visited.
  // The last element is self-looped, i.e. points to itself to avoid some special
  // "end-of-list" sentinel value.
  //
  // _oops_do_mark_link special values:
  //
  //   _oops_do_mark_link == nullptr: the nmethod has not been visited at all yet, i.e.
  //      is Unclaimed.
  //
  // For other values, the lowest two bits indicate the following states of the nmethod:
  //
  //   weak_request (WR): the nmethod has been claimed by a thread for weak processing
  //   weak_done (WD): weak processing has been completed for this nmethod.
  //   strong_request (SR): the nmethod has been found to need strong processing while
  //       being weak processed.
  //   strong_done (SD): strong processing has been completed for this nmethod.
  //
  // The following shows the _only_ possible progressions of the _oops_do_mark_link
  // pointer.
  //
  // Given
  //   N as the nmethod
  //   X the current next value of _oops_do_mark_link
  //
  // Unclaimed (C)-> N|WR (C)-> X|WD: the nmethod has been processed weakly by
  //   a single thread.
  // Unclaimed (C)-> N|WR (C)-> X|WD (O)-> X|SD: after weak processing has been
  //   completed (as above) another thread found that the nmethod needs strong
  //   processing after all.
  // Unclaimed (C)-> N|WR (O)-> N|SR (C)-> X|SD: during weak processing another
  //   thread finds that the nmethod needs strong processing, marks it as such and
  //   terminates. The original thread completes strong processing.
  // Unclaimed (C)-> N|SD (C)-> X|SD: the nmethod has been processed strongly from
  //   the beginning by a single thread.
  //
  // "|" describes the concatenation of bits in _oops_do_mark_link.
  //
  // The diagram also describes the threads responsible for changing the nmethod to
  // the next state by marking the _transition_ with (C) and (O), which mean "current"
  // and "other" thread respectively.
  //

  // States used for claiming nmethods during root processing.
  static const uint claim_weak_request_tag = 0;
  static const uint claim_weak_done_tag = 1;
  static const uint claim_strong_request_tag = 2;
  static const uint claim_strong_done_tag = 3;

  static oops_do_mark_link* mark_link(nmethod* nm, uint tag) {
    assert(tag <= claim_strong_done_tag, "invalid tag %u", tag);
    assert(is_aligned(nm, 4), "low two bits of nmethod pointer must be zero");
    return (oops_do_mark_link*)(((uintptr_t)nm & ~0x3) | tag);
  }

  static uint extract_state(oops_do_mark_link* link) {
    return (uint)((uintptr_t)link & 0x3);
  }

  static nmethod* extract_nmethod(oops_do_mark_link* link) {
    return (nmethod*)((uintptr_t)link & ~0x3);
  }

  void oops_do_log_change(const char* state);

  static bool oops_do_has_weak_request(oops_do_mark_link* next) {
    return extract_state(next) == claim_weak_request_tag;
  }

  static bool oops_do_has_any_strong_state(oops_do_mark_link* next) {
    return extract_state(next) >= claim_strong_request_tag;
  }

  // Attempt Unclaimed -> N|WR transition. Returns true if successful.
  bool oops_do_try_claim_weak_request();
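
  // Illustrative sketch (not part of this header, assuming HotSpot's
  // Atomic::cmpxchg) of how the Unclaimed -> N|WR transition above can be
  // implemented with a single CAS on _oops_do_mark_link:
  //
  //   bool nmethod::oops_do_try_claim_weak_request() {
  //     oops_do_mark_link* old_value =
  //       Atomic::cmpxchg(&_oops_do_mark_link,
  //                       (oops_do_mark_link*)nullptr,
  //                       mark_link(this, claim_weak_request_tag));
  //     return old_value == nullptr;  // claimed iff the link was still Unclaimed
  //   }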

  // Attempt Unclaimed -> N|SD transition. Returns the current link.
  oops_do_mark_link* oops_do_try_claim_strong_done();
  // Attempt N|WR -> X|WD transition. Returns nullptr if successful, X otherwise.
  nmethod* oops_do_try_add_to_list_as_weak_done();

  // Attempt X|WD -> N|SR transition. Returns the current link.
  oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
  // Attempt X|WD -> X|SD transition. Returns true if successful.
  bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);

  // Do the N|SD -> X|SD transition.
  void oops_do_add_to_list_as_strong_done();

  // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
  // transitions).
  void oops_do_set_strong_done(nmethod* old_head);

public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer* code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              CompLevel comp_level
#if INCLUDE_JVMCI
                              , char* speculations = nullptr,
                              int speculations_len = 0,
                              JVMCINMethodData* jvmci_data = nullptr
#endif
  );

  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps,
                                     int exception_handler = -1);

  Method* method       () const { return _method; }
  bool is_native_method() const { return _method != nullptr && _method->is_native(); }
  bool is_java_method  () const { return _method != nullptr && !_method->is_native(); }
  bool is_osr_method   () const { return _entry_bci != InvocationEntryBci; }

  // Compiler task identification.  Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  inline bool  is_compiled_by_c1   () const { return _compiler_type == compiler_c1; }
  inline bool  is_compiled_by_c2   () const { return _compiler_type == compiler_c2; }
  inline bool  is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
  CompilerType compiler_type       () const { return _compiler_type; }
  const char*  compiler_name       () const;

  // boundaries for different parts
  address consts_begin          () const { return           content_begin(); }
  address consts_end            () const { return           code_begin()   ; }
  address insts_begin           () const { return           code_begin()   ; }
  address insts_end             () const { return           header_begin() + _stub_offset             ; }
  address stub_begin            () const { return           header_begin() + _stub_offset             ; }
  address stub_end              () const { return           data_begin()   ; }
  address exception_begin       () const { return           header_begin() + _exception_offset        ; }
  address deopt_handler_begin   () const { return           header_begin() + _deopt_handler_offset    ; }
  address deopt_mh_handler_begin() const { return           header_begin() + _deopt_mh_handler_offset ; }
  address unwind_handler_begin  () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }

  // mutable data
  oop*    oops_begin            () const { return (oop*)        data_begin(); }
  oop*    oops_end              () const { return (oop*)       (data_begin() + _metadata_offset)      ; }
  Metadata** metadata_begin     () const { return (Metadata**) (data_begin() + _metadata_offset)      ; }
#if INCLUDE_JVMCI
  Metadata** metadata_end       () const { return (Metadata**) (data_begin() + _jvmci_data_offset)    ; }
  address jvmci_data_begin      () const { return               data_begin() + _jvmci_data_offset     ; }
  address jvmci_data_end        () const { return               data_end(); }
#else
  Metadata** metadata_end       () const { return (Metadata**)  data_end(); }
#endif

  // immutable data
  address immutable_data_begin  () const { return           _immutable_data; }
  address immutable_data_end    () const { return           _immutable_data + _immutable_data_size ; }
  address dependencies_begin    () const { return           _immutable_data; }
  address dependencies_end      () const { return           _immutable_data + _nul_chk_table_offset; }
  address nul_chk_table_begin   () const { return           _immutable_data + _nul_chk_table_offset; }
  address nul_chk_table_end     () const { return           _immutable_data + _handler_table_offset; }
  address handler_table_begin   () const { return           _immutable_data + _handler_table_offset; }
  address handler_table_end     () const { return           _immutable_data + _scopes_pcs_offset   ; }
  PcDesc* scopes_pcs_begin      () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset)  ; }
  PcDesc* scopes_pcs_end        () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
  address scopes_data_begin     () const { return           _immutable_data + _scopes_data_offset  ; }

#if INCLUDE_JVMCI
  address scopes_data_end       () const { return           _immutable_data + _speculations_offset ; }
  address speculations_begin    () const { return           _immutable_data + _speculations_offset ; }
  address speculations_end      () const { return            immutable_data_end(); }
#else
  address scopes_data_end       () const { return            immutable_data_end(); }
#endif
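
  // For reference, the immutable data parts appear in offset order (derived
  // from the accessors above; the speculations part exists only if JVMCI is
  // included):
  //
  //   _immutable_data + 0                       dependencies
  //   _immutable_data + _nul_chk_table_offset   implicit null check table
  //   _immutable_data + _handler_table_offset   exception handler table
  //   _immutable_data + _scopes_pcs_offset      PcDesc array
  //   _immutable_data + _scopes_data_offset     scopes data
  //   _immutable_data + _speculations_offset    speculations
  //   _immutable_data + _immutable_data_size    end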

  // Sizes
  int immutable_data_size() const { return _immutable_data_size; }
  int consts_size        () const { return int(          consts_end       () -           consts_begin       ()); }
  int insts_size         () const { return int(          insts_end        () -           insts_begin        ()); }
  int stub_size          () const { return int(          stub_end         () -           stub_begin         ()); }
  int oops_size          () const { return int((address) oops_end         () - (address) oops_begin         ()); }
  int metadata_size      () const { return int((address) metadata_end     () - (address) metadata_begin     ()); }
  int scopes_data_size   () const { return int(          scopes_data_end  () -           scopes_data_begin  ()); }
  int scopes_pcs_size    () const { return int((intptr_t)scopes_pcs_end   () - (intptr_t)scopes_pcs_begin   ()); }
  int dependencies_size  () const { return int(          dependencies_end () -           dependencies_begin ()); }
  int handler_table_size () const { return int(          handler_table_end() -           handler_table_begin()); }
  int nul_chk_table_size () const { return int(          nul_chk_table_end() -           nul_chk_table_begin()); }
#if INCLUDE_JVMCI
  int speculations_size  () const { return int(          speculations_end () -           speculations_begin ()); }
  int jvmci_data_size    () const { return int(          jvmci_data_end   () -           jvmci_data_begin   ()); }
#endif

  int     oops_count() const { assert(oops_size() % oopSize == 0, "");  return (oops_size() / oopSize) + 1; }
  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }

  int skipped_instructions_size () const { return _skipped_instructions_size; }
  int total_size() const;

  // Containment
  bool consts_contains         (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  // Returns true if a given address is in the 'insts' section. The method
  // insts_contains_inclusive() is end-inclusive.
  bool insts_contains          (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
  bool insts_contains_inclusive(address addr) const { return insts_begin        () <= addr && addr <= insts_end       (); }
  bool stub_contains           (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool oops_contains           (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool metadata_contains       (Metadata** addr) const { return metadata_begin  () <= addr && addr < metadata_end     (); }
  bool scopes_data_contains    (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains     (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains  (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains  (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const          { return code_begin() + _entry_offset;          } // normal entry point
  address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct

  enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
                                           // allowed to advance state
                       in_use        = 0,  // executable nmethod
                       not_entrant   = 1   // marked for deoptimization but activations may still exist
  };
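
  // For example (a sketch of the expected lifecycle; see try_transition in
  // nmethod.cpp for the exact rules), the state only advances:
  //
  //   not_installed --make_in_use()--> in_use --make_not_entrant()--> not_entrant
  //
  // and a not_entrant nmethod never becomes entrant again (make_entrant is
  // unimplemented).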

  // flag accessing and manipulation
  bool is_not_installed() const        { return _state == not_installed; }
  bool is_in_use() const               { return _state <= in_use; }
  bool is_not_entrant() const          { return _state == not_entrant; }
  int  get_state() const               { return _state; }

  void clear_unloading_state();
  // Heuristically deduce that this nmethod isn't worth keeping around
  bool is_cold();
  bool is_unloading();
  void do_unloading(bool unloading_occurred);

  bool make_in_use() {
    return try_transition(in_use);
  }
  // Make the nmethod non-entrant. The nmethod will continue to be
  // alive.  It is used when an uncommon trap happens.  Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool  make_not_entrant();
  bool  make_not_used()    { return make_not_entrant(); }

  bool  is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
  bool  has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
  void  set_deoptimized_done();

  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
    DeoptimizationStatus status = deoptimization_status();
    return status != deoptimize_noupdate && status != deoptimize_done;
  }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  bool has_dependencies()                         { return dependencies_size() != 0; }
  void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
  void flush_dependencies();

  template<typename T>
  T* gc_data() const                              { return reinterpret_cast<T*>(_gc_data); }
  template<typename T>
  void set_gc_data(T* gc_data)                    { _gc_data = reinterpret_cast<void*>(gc_data); }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

  bool  has_monitors() const                      { return _has_monitors; }
  void  set_has_monitors(bool z)                  { _has_monitors = z; }

  bool  has_scoped_access() const                 { return _has_scoped_access; }
  void  set_has_scoped_access(bool z)             { _has_scoped_access = z; }

  bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
  void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }

  bool  has_wide_vectors() const                  { return _has_wide_vectors; }
  void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }

  bool  has_flushed_dependencies() const          { return _has_flushed_dependencies; }
  void  set_has_flushed_dependencies(bool z)      {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = z;
  }

  bool  is_unlinked() const                       { return _is_unlinked; }
  void  set_is_unlinked()                         {
    assert(!_is_unlinked, "already unlinked");
    _is_unlinked = true;
  }

  int   comp_level() const                        { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop   oop_at(int index) const;
  oop   oop_at_phantom(int index) const; // phantom reference
  oop*  oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    return &oops_begin()[index - 1];
  }
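
  // For example, with the 1-biased indexing above (mirroring metadata_at below):
  //   oop_at(1)      // the first recorded oop, i.e. oops_begin()[0]
  //   oop_addr_at(1) // its address, &oops_begin()[0]
  //   index 0        // reserved, denotes null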

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*   metadata_at(int index) const      { return index == 0 ? nullptr : *metadata_addr_at(index); }
  Metadata**  metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);
  void copy_values(GrowableArray<address>* metadata) {} // Nothing to do

  // Relocation support
private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

protected:
  address oops_reloc_begin() const;

public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(nullptr, nullptr, false); }

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

protected:
  // Exception cache support
  // Note: _exception_cache may be read and cleaned concurrently.
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  ExceptionCache* exception_cache_acquire() const;

public:
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache();

  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);


  // MethodHandle
  bool is_method_handle_return(address return_pc);
  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  inline bool is_deopt_pc(address pc);
  inline bool is_deopt_mh_entry(address pc);
  inline bool is_deopt_entry(address pc);

  // Accessor/mutator for the original pc of a frame before it was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  const char* state() const;

  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f);

  // implicit exceptions support
  address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
  address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }

  // Inline cache support for class unloading and nmethod unloading
 private:
  void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);

  address continuation_for_implicit_exception(address pc, bool for_div0_check);

 public:
  // Serial version used by whitebox tests
  void cleanup_inline_caches_whitebox();

  void clear_inline_caches();

  // Execute nmethod barrier code, as if entering through nmethod call.
  void run_nmethod_entry_barrier();

  void verify_oop_relocations();

  bool has_evol_metadata();

  Method* attached_method(address call_pc);
  Method* attached_method_before_pc(address pc);

  // GC unloading support
  // Cleans unloaded klasses and unloaded nmethods in inline caches

  void unload_nmethod_caches(bool class_unloading_occurred);

  void unlink_from_method();

  // On-stack replacement support
  int      osr_entry_bci()    const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry()        const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  nmethod* osr_link()         const { return _osr_link; }
  void     set_osr_link(nmethod* n) { _osr_link = n; }
  void     invalidate_osr_method();

  int num_stack_arg_slots(bool rounded = true) const {
    return rounded ? align_up(_num_stack_arg_slots, 2) : _num_stack_arg_slots;
  }
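
  // For example, with _num_stack_arg_slots == 3:
  //   num_stack_arg_slots()      -> 4  (rounded up to an even slot count)
  //   num_stack_arg_slots(false) -> 3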

  // Verify that calls to dead methods have been cleaned.
  void verify_clean_inline_caches();

  // Unlink this nmethod from the system
  void unlink();

  // Deallocate this nmethod - called by the GC
  void purge(bool unregister_nmethod);

  // See comment at definition of _last_seen_on_stack
  void mark_as_maybe_on_stack();
  bool is_maybe_on_stack();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  // Gets the JVMCI name of this nmethod.
  const char* jvmci_name();

  // Records the pending failed speculation in the
  // JVMCI speculation log associated with this nmethod.
  void update_speculation(JavaThread* thread);

  // Gets the data specific to a JVMCI compiled method.
  // This returns a non-nullptr value iff this nmethod was
  // compiled by the JVMCI compiler.
  JVMCINMethodData* jvmci_nmethod_data() const {
    return jvmci_data_size() == 0 ? nullptr : (JVMCINMethodData*) jvmci_data_begin();
  }
#endif

  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_dead);

  // All-in-one claiming of nmethods: returns true if the caller successfully claimed that
  // nmethod.
  bool oops_do_try_claim();

  // Loom support for following nmethods on the stack
  void follow_nmethod(OopIterateClosure* cl);

  // Class containing callbacks for the oops_do_process_weak/strong() methods
  // below.
  class OopsDoProcessor {
  public:
    // Process the oops of the given nmethod based on whether it has been called
    // in a weak or strong processing context, i.e. apply either weak or strong
    // work on it.
    virtual void do_regular_processing(nmethod* nm) = 0;
    // Assuming that weak processing has already been applied to the oops of the
    // given nmethod, apply the remaining strong processing part.
    virtual void do_remaining_strong_processing(nmethod* nm) = 0;
  };

  // The following two methods do the work corresponding to weak/strong nmethod
  // processing.
  void oops_do_process_weak(OopsDoProcessor* p);
  void oops_do_process_strong(OopsDoProcessor* p);

  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr);

  // used by jvmti to track if the load event has been reported
  bool  load_reported() const                     { return _load_reported; }
  void  set_load_reported()                       { _load_reported = true; }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);
  ScopeDesc* scope_desc_near(address pc);

  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  int orig_pc_offset() { return _orig_pc_offset; }

  // Post successful compilation
  void post_compiled_method(CompileTask* task);

  // jvmti support:
  void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);

  // verify operations
  void verify() override;
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point, bool is_inline_cache);

  // Disassemble this nmethod with additional debug information, e.g. information about blocks.
  void decode2(outputStream* st) const;
  void print_constant_pool(outputStream* st);

  // Avoid hiding of parent's 'decode(outputStream*)' method.
  void decode(outputStream* st) const { decode2(st); } // just delegate here.

  // printing support
  void print()                 const override;
  void print(outputStream* st) const;
  void print_code();

#if defined(SUPPORT_DATA_STRUCTS)
  // print output in opt build for disassembler library
  void print_relocations()                        PRODUCT_RETURN;
  void print_pcs_on(outputStream* st);
  void print_scopes() { print_scopes_on(tty); }
  void print_scopes_on(outputStream* st)          PRODUCT_RETURN;
  void print_value_on(outputStream* st) const override;
  void print_handler_table();
  void print_nul_chk_table();
  void print_recorded_oop(int log_n, int index);
  void print_recorded_oops();
  void print_recorded_metadata();

  void print_oops(outputStream* st);     // oops from the underlying CodeBlob.
  void print_metadata(outputStream* st); // metadata in metadata pool.
#else
  void print_pcs_on(outputStream* st) { return; }
#endif

  void print_calls(outputStream* st)              PRODUCT_RETURN;
  static void print_statistics()                  PRODUCT_RETURN;

  void maybe_print_nmethod(const DirectiveSet* directive);
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  void print_on(outputStream* st) const override { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  void print_block_comment(outputStream* stream, address block_begin) const override {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
#endif
  }

  void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels = true) const;
  const char* nmethod_section_label(address pos) const;

  // returns whether this nmethod has code comments.
  bool has_code_comment(address begin, address end);
  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(Method* dependee);

  // JVMTI's GetLocalInstance() support
  ByteSize native_receiver_sp_offset() {
    assert(is_native_method(), "sanity");
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    assert(is_native_method(), "sanity");
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
  static ByteSize state_offset()           { return byte_offset_of(nmethod, _state); }

  void metadata_do(MetadataClosure* f);

  address call_instruction_address(address pc) const;

  void make_deoptimized();
  void finalize_relocations();
};

#endif // SHARE_CODE_NMETHOD_HPP