
src/hotspot/share/code/nmethod.hpp


 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_CODE_NMETHOD_HPP
 26 #define SHARE_CODE_NMETHOD_HPP
 27 
 28 #include "code/compiledMethod.hpp"
 29 
 30 class DepChange;
 31 class DirectiveSet;
 32 class DebugInformationRecorder;
 33 class JvmtiThreadState;

 34 
 35 // nmethods (native methods) are the compiled code versions of Java methods.
 36 //
 37 // An nmethod contains:
 38 //  - header                 (the nmethod structure)
 39 //  [Relocation]
 40 //  - relocation information
 41 //  - constant part          (doubles, longs and floats used in nmethod)
 42 //  - oop table
 43 //  [Code]
 44 //  - code body
 45 //  - exception handler
 46 //  - stub code
 47 //  [Debugging information]
 48 //  - oop array
 49 //  - data array
 50 //  - pcs
 51 //  [Exception handler table]
 52 //  - handler entry point array
 53 //  [Implicit Null Pointer exception table]

 56 //  - encoded speculations array
 57 //  [JVMCINMethodData]
 58 //  - meta data for JVMCI compiled nmethod
 59 
 60 #if INCLUDE_JVMCI
 61 class FailedSpeculation;
 62 class JVMCINMethodData;
 63 #endif
 64 
 65 class nmethod : public CompiledMethod {
 66   friend class VMStructs;
 67   friend class JVMCIVMStructs;
 68   friend class NMethodSweeper;
 69   friend class CodeCache;  // scavengable oops
 70   friend class JVMCINMethodData;
 71 
 72  private:
 73   // Shared fields for all nmethods
 74   int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
 75 


 76   // To support simple linked-list chaining of nmethods:
 77   nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head
 78 
 79   // STW two-phase nmethod root processing helpers.
 80   //
 81   // When determining liveness of a given nmethod to do code cache unloading,
 82   // some collectors need to do different things depending on whether the nmethods
 83   // need to absolutely be kept alive during root processing; "strong"ly reachable
 84   // nmethods are known to be kept alive at root processing, but the liveness of
 85   // "weak"ly reachable ones is to be determined later.
 86   //
 87   // We want to allow strong and weak processing of nmethods by different threads
 88   // at the same time without heavy synchronization. Additional constraints are
 89   // to make sure that every nmethod is processed only a minimal number of times, and
 90   // nmethods themselves are always iterated at most once at a particular time.
 91   //
 92   // Note that strong processing work must be a superset of weak processing work
 93   // for this code to work.
 94   //
 95   // We store state and claim information in the _oops_do_mark_link member, using

207   int _scopes_data_offset;
208   int _scopes_pcs_offset;
209   int _dependencies_offset;
210   int _native_invokers_offset;
211   int _handler_table_offset;
212   int _nul_chk_table_offset;
213 #if INCLUDE_JVMCI
214   int _speculations_offset;
215   int _jvmci_data_offset;
216 #endif
217   int _nmethod_end_offset;
218 
219   int code_offset() const { return (address) code_begin() - header_begin(); }
220 
221   // location in frame (offset from sp) where deopt can store the original
222   // pc during a deopt.
223   int _orig_pc_offset;
224 
225   int _compile_id;                           // which compilation made this nmethod
226   int _comp_level;                           // compilation level






227 
228   // protected by CodeCache_lock
229   bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
230 
231   // used by jvmti to track if an event has been posted for this nmethod.
232   bool _unload_reported;
233   bool _load_reported;
234 
235   // Protected by CompiledMethod_lock
236   volatile signed char _state;               // {not_installed, in_use, not_entrant, zombie, unloaded}
237 
238 #ifdef ASSERT
239   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
240 #endif
241 
242 #if INCLUDE_RTM_OPT
243   // RTM state at compile time. Used during deoptimization to decide
244   // whether to restart collecting RTM locking abort statistics.
245   RTMState _rtm_state;
246 #endif

368                               FailedSpeculation** failed_speculations = NULL
369 #endif
370   );
371 
372   // Only used for unit tests.
373   nmethod()
374     : CompiledMethod(),
375       _is_unloading_state(0),
376       _native_receiver_sp_offset(in_ByteSize(-1)),
377       _native_basic_lock_sp_offset(in_ByteSize(-1)) {}
378 
379 
380   static nmethod* new_native_nmethod(const methodHandle& method,
381                                      int compile_id,
382                                      CodeBuffer *code_buffer,
383                                      int vep_offset,
384                                      int frame_complete,
385                                      int frame_size,
386                                      ByteSize receiver_sp_offset,
387                                      ByteSize basic_lock_sp_offset,
388                                      OopMapSet* oop_maps);

389 
390   // type info
391   bool is_nmethod() const                         { return true; }
392   bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }
393 
394   // boundaries for different parts
395   address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
396   address consts_end            () const          { return           code_begin()                           ; }
397   address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
398   address stub_end              () const          { return           header_begin() + _oops_offset          ; }
399   address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
400   address unwind_handler_begin  () const          { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
401   oop*    oops_begin            () const          { return (oop*)   (header_begin() + _oops_offset)         ; }
402   oop*    oops_end              () const          { return (oop*)   (header_begin() + _metadata_offset)     ; }
403 
404   Metadata** metadata_begin   () const            { return (Metadata**)  (header_begin() + _metadata_offset)     ; }
405   Metadata** metadata_end     () const            { return (Metadata**)  _scopes_data_begin; }
406 
407   address scopes_data_end       () const          { return           header_begin() + _scopes_pcs_offset    ; }
408   PcDesc* scopes_pcs_begin      () const          { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }

531   // Relocation support
532 private:
533   void fix_oop_relocations(address begin, address end, bool initialize_immediates);
534   inline void initialize_immediate_oop(oop* dest, jobject handle);
535 
536 public:
537   void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
538   void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }
539 
540   // Sweeper support
541   long  stack_traversal_mark()                    { return _stack_traversal_mark; }
542   void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; }
543 
544   // On-stack replacement support
545   int   osr_entry_bci() const                     { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
546   address  osr_entry() const                      { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
547   void  invalidate_osr_method();
548   nmethod* osr_link() const                       { return _osr_link; }
549   void     set_osr_link(nmethod *n)               { _osr_link = n; }
550 


551   // Verify calls to dead methods have been cleaned.
552   void verify_clean_inline_caches();
553 
554   // unlink and deallocate this nmethod
555   // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
556   // expected to use any other private methods/data in this class.
557 
558  protected:
559   void flush();
560 
561  public:
562   // When true is returned, it is unsafe to remove this nmethod even if
563   // it is a zombie, since the VM or the ServiceThread might still be
564   // using it.
565   bool is_locked_by_vm() const                    { return _lock_count > 0; }
566 
567   // See comment at definition of _last_seen_on_stack
568   void mark_as_seen_on_stack();


569   bool can_convert_to_zombie();
570 
571   // Evolution support. We make old (discarded) compiled methods point to new Method*s.
572   void set_method(Method* method) { _method = method; }
573 
574 #if INCLUDE_JVMCI
575   // Gets the JVMCI name of this nmethod.
576   const char* jvmci_name();
577 
578   // Records the pending failed speculation in the
579   // JVMCI speculation log associated with this nmethod.
580   void update_speculation(JavaThread* thread);
581 
582   // Gets the data specific to a JVMCI compiled method.
583   // This returns a non-NULL value iff this nmethod was
584   // compiled by the JVMCI compiler.
585   JVMCINMethodData* jvmci_nmethod_data() const {
586     return jvmci_data_size() == 0 ? NULL : (JVMCINMethodData*) jvmci_data_begin();
587   }
588 #endif
589 
590  public:
591   void oops_do(OopClosure* f) { oops_do(f, false); }
592   void oops_do(OopClosure* f, bool allow_dead);
593 
594   // All-in-one claiming of nmethods: returns true if the caller successfully claimed that
595   // nmethod.
596   bool oops_do_try_claim();
597 



598   // Class containing callbacks for the oops_do_process_weak/strong() methods
599   // below.
600   class OopsDoProcessor {
601   public:
602     // Process the oops of the given nmethod based on whether it has been called
603     // in a weak or strong processing context, i.e. apply either weak or strong
604     // work on it.
605     virtual void do_regular_processing(nmethod* nm) = 0;
606     // Assuming that the oops of the given nmethod have already had their weak
607     // processing applied, apply the remaining strong processing part.
608     virtual void do_remaining_strong_processing(nmethod* nm) = 0;
609   };
610 
611   // The following two methods do the work corresponding to weak/strong nmethod
612   // processing.
613   void oops_do_process_weak(OopsDoProcessor* p);
614   void oops_do_process_strong(OopsDoProcessor* p);
615 
616   static void oops_do_marking_prologue();
617   static void oops_do_marking_epilogue();
618 
619  private:
620   ScopeDesc* scope_desc_in(address begin, address end);
621 
622   address* orig_pc_addr(const frame* fr);
623 
624   // used by jvmti to track if the load and unload events have been reported
625   bool  unload_reported() const                   { return _unload_reported; }
626   void  set_unload_reported()                     { _unload_reported = true; }
627   bool  load_reported() const                     { return _load_reported; }
628   void  set_load_reported()                       { _load_reported = true; }
629 
630  public:
631   // copying of debugging information
632   void copy_scopes_pcs(PcDesc* pcs, int count);
633   void copy_scopes_data(address buffer, int size);
634 
635   // Accessor/mutator for the original pc of a frame before a frame was deopted.
636   address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
637   void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
638 
639   // jvmti support:
640   void post_compiled_method_load_event(JvmtiThreadState* state = NULL);
641 
642   // verify operations
643   void verify();
644   void verify_scopes();
645   void verify_interrupt_point(address interrupt_point);
646 
647   // Disassemble this nmethod with additional debug information, e.g. information about blocks.
648   void decode2(outputStream* st) const;
649   void print_constant_pool(outputStream* st);
650 
651   // Avoid hiding of parent's 'decode(outputStream*)' method.
652   void decode(outputStream* st) const { decode2(st); } // just delegate here.
653 
654   // printing support
655   void print()                          const;
656   void print(outputStream* st)          const;
657   void print_code();

737     return _native_receiver_sp_offset;
738   }
739   ByteSize native_basic_lock_sp_offset() {
740     return _native_basic_lock_sp_offset;
741   }
742 
743   // support for code generation
744   static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
745   static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
746   static int state_offset()                       { return offset_of(nmethod, _state); }
747 
748   virtual void metadata_do(MetadataClosure* f);
749 
750   NativeCallWrapper* call_wrapper_at(address call) const;
751   NativeCallWrapper* call_wrapper_before(address return_pc) const;
752   address call_instruction_address(address pc) const;
753 
754   virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
755   virtual CompiledStaticCall* compiledStaticCall_at(address addr) const;
756   virtual CompiledStaticCall* compiledStaticCall_before(address addr) const;


757 };
758 
759 // Locks an nmethod so its code will not get removed and it will not
760 // be made into a zombie, even if it is a not_entrant method. After the
761 // nmethod becomes a zombie, if CompiledMethodUnload event processing
762 // needs to be done, then lock_nmethod() is used directly to keep the
763 // generated code from being reused too early.
764 class nmethodLocker : public StackObj {
765   CompiledMethod* _nm;
766 
767  public:
768 
769   // note: nm can be NULL
770   // Only JvmtiDeferredEvent::compiled_method_unload_event()
771   // should pass zombie_ok == true.
772   static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
773   static void unlock_nmethod(CompiledMethod* nm); // (ditto)
774 
775   nmethodLocker(address pc); // derive nm from pc
776   nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }

 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_CODE_NMETHOD_HPP
 26 #define SHARE_CODE_NMETHOD_HPP
 27 
 28 #include "code/compiledMethod.hpp"
 29 
 30 class DepChange;
 31 class DirectiveSet;
 32 class DebugInformationRecorder;
 33 class JvmtiThreadState;
 34 class OopIterateClosure;
 35 
 36 // nmethods (native methods) are the compiled code versions of Java methods.
 37 //
 38 // An nmethod contains:
 39 //  - header                 (the nmethod structure)
 40 //  [Relocation]
 41 //  - relocation information
 42 //  - constant part          (doubles, longs and floats used in nmethod)
 43 //  - oop table
 44 //  [Code]
 45 //  - code body
 46 //  - exception handler
 47 //  - stub code
 48 //  [Debugging information]
 49 //  - oop array
 50 //  - data array
 51 //  - pcs
 52 //  [Exception handler table]
 53 //  - handler entry point array
 54 //  [Implicit Null Pointer exception table]

 57 //  - encoded speculations array
 58 //  [JVMCINMethodData]
 59 //  - meta data for JVMCI compiled nmethod
 60 
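Every section in the layout above is located by an offset from header_begin(); the boundary accessors declared further down (consts_begin(), stub_begin(), oops_begin(), oops_end(), and so on) are all computed that way. The following is a minimal, hedged sketch of how HotSpot-internal code could walk the embedded oop table with those accessors; the function name visit_oop_table and the pointer nm are illustrative only and are not part of nmethod.hpp.

// Illustrative sketch only (not part of nmethod.hpp): walk the oop table of
// a live nmethod using the boundary accessors declared below.
static void visit_oop_table(nmethod* nm) {
  for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
    oop obj = *p;
    if (obj != NULL) {
      // hand obj to whatever processing the caller needs,
      // e.g. apply an OopClosure f via f->do_oop(p)
    }
  }
}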
 61 #if INCLUDE_JVMCI
 62 class FailedSpeculation;
 63 class JVMCINMethodData;
 64 #endif
 65 
 66 class nmethod : public CompiledMethod {
 67   friend class VMStructs;
 68   friend class JVMCIVMStructs;
 69   friend class NMethodSweeper;
 70   friend class CodeCache;  // scavengable oops
 71   friend class JVMCINMethodData;
 72 
 73  private:
 74   // Shared fields for all nmethods
 75   int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
 76 
 77   uint64_t  _marking_cycle;
 78 
 79   // To support simple linked-list chaining of nmethods:
 80   nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head
 81 
 82   // STW two-phase nmethod root processing helpers.
 83   //
 84   // When determining liveness of a given nmethod to do code cache unloading,
 85   // some collectors need to do different things depending on whether the nmethods
 86   // need to absolutely be kept alive during root processing; "strong"ly reachable
 87   // nmethods are known to be kept alive at root processing, but the liveness of
 88   // "weak"ly reachable ones is to be determined later.
 89   //
 90   // We want to allow strong and weak processing of nmethods by different threads
 91   // at the same time without heavy synchronization. Additional constraints are
 92   // to make sure that every nmethod is processed only a minimal number of times, and
 93   // nmethods themselves are always iterated at most once at a particular time.
 94   //
 95   // Note that strong processing work must be a superset of weak processing work
 96   // for this code to work.
 97   //
 98   // We store state and claim information in the _oops_do_mark_link member, using

210   int _scopes_data_offset;
211   int _scopes_pcs_offset;
212   int _dependencies_offset;
213   int _native_invokers_offset;
214   int _handler_table_offset;
215   int _nul_chk_table_offset;
216 #if INCLUDE_JVMCI
217   int _speculations_offset;
218   int _jvmci_data_offset;
219 #endif
220   int _nmethod_end_offset;
221 
222   int code_offset() const { return (address) code_begin() - header_begin(); }
223 
224   // location in frame (offset from sp) where deopt can store the original
225   // pc during a deopt.
226   int _orig_pc_offset;
227 
228   int _compile_id;                           // which compilation made this nmethod
229   int _comp_level;                           // compilation level
230   int _nr_oops;
231  public:
232   int nr_oops() const { return _nr_oops; }
233   void verify_nr_oops();
234   int count_oops();
235  private:
236 
237   // protected by CodeCache_lock
238   bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
239 
240   // used by jvmti to track if an event has been posted for this nmethod.
241   bool _unload_reported;
242   bool _load_reported;
243 
244   // Protected by CompiledMethod_lock
245   volatile signed char _state;               // {not_installed, in_use, not_entrant, zombie, unloaded}
246 
247 #ifdef ASSERT
248   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
249 #endif
250 
251 #if INCLUDE_RTM_OPT
252   // RTM state at compile time. Used during deoptimization to decide
253   // whether to restart collecting RTM locking abort statistics.
254   RTMState _rtm_state;
255 #endif

377                               FailedSpeculation** failed_speculations = NULL
378 #endif
379   );
380 
381   // Only used for unit tests.
382   nmethod()
383     : CompiledMethod(),
384       _is_unloading_state(0),
385       _native_receiver_sp_offset(in_ByteSize(-1)),
386       _native_basic_lock_sp_offset(in_ByteSize(-1)) {}
387 
388 
389   static nmethod* new_native_nmethod(const methodHandle& method,
390                                      int compile_id,
391                                      CodeBuffer *code_buffer,
392                                      int vep_offset,
393                                      int frame_complete,
394                                      int frame_size,
395                                      ByteSize receiver_sp_offset,
396                                      ByteSize basic_lock_sp_offset,
397                                      OopMapSet* oop_maps,
398                                      int exception_handler = -1);
399 
400   // type info
401   bool is_nmethod() const                         { return true; }
402   bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }
403 
404   // boundaries for different parts
405   address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
406   address consts_end            () const          { return           code_begin()                           ; }
407   address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
408   address stub_end              () const          { return           header_begin() + _oops_offset          ; }
409   address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
410   address unwind_handler_begin  () const          { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
411   oop*    oops_begin            () const          { return (oop*)   (header_begin() + _oops_offset)         ; }
412   oop*    oops_end              () const          { return (oop*)   (header_begin() + _metadata_offset)     ; }
413 
414   Metadata** metadata_begin   () const            { return (Metadata**)  (header_begin() + _metadata_offset)     ; }
415   Metadata** metadata_end     () const            { return (Metadata**)  _scopes_data_begin; }
416 
417   address scopes_data_end       () const          { return           header_begin() + _scopes_pcs_offset    ; }
418   PcDesc* scopes_pcs_begin      () const          { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }

541   // Relocation support
542 private:
543   void fix_oop_relocations(address begin, address end, bool initialize_immediates);
544   inline void initialize_immediate_oop(oop* dest, jobject handle);
545 
546 public:
547   void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
548   void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }
549 
550   // Sweeper support
551   long  stack_traversal_mark()                    { return _stack_traversal_mark; }
552   void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; }
553 
554   // On-stack replacement support
555   int   osr_entry_bci() const                     { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
556   address  osr_entry() const                      { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
557   void  invalidate_osr_method();
558   nmethod* osr_link() const                       { return _osr_link; }
559   void     set_osr_link(nmethod *n)               { _osr_link = n; }
560 
561   void set_immediate_oops_patched(int nr)         { _nr_oops += nr; }
562 
563   // Verify calls to dead methods have been cleaned.
564   void verify_clean_inline_caches();
565 
566   // unlink and deallocate this nmethod
567   // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
568   // expected to use any other private methods/data in this class.
569 
570  protected:
571   void flush();
572 
573  public:
574   // When true is returned, it is unsafe to remove this nmethod even if
575   // it is a zombie, since the VM or the ServiceThread might still be
576   // using it.
577   bool is_locked_by_vm() const                    { return _lock_count > 0; }
578 
579   // See comment at definition of _last_seen_on_stack
580   void mark_as_seen_on_stack();
581   void mark_as_maybe_on_continuation();
582   bool is_not_on_continuation_stack();
583   bool can_convert_to_zombie();
584 
585   // Evolution support. We make old (discarded) compiled methods point to new Method*s.
586   void set_method(Method* method) { _method = method; }
587 
588 #if INCLUDE_JVMCI
589   // Gets the JVMCI name of this nmethod.
590   const char* jvmci_name();
591 
592   // Records the pending failed speculation in the
593   // JVMCI speculation log associated with this nmethod.
594   void update_speculation(JavaThread* thread);
595 
596   // Gets the data specific to a JVMCI compiled method.
597   // This returns a non-NULL value iff this nmethod was
598   // compiled by the JVMCI compiler.
599   JVMCINMethodData* jvmci_nmethod_data() const {
600     return jvmci_data_size() == 0 ? NULL : (JVMCINMethodData*) jvmci_data_begin();
601   }
602 #endif
603 
604  public:
605   void oops_do(OopClosure* f) { oops_do(f, false, false); }
606   void oops_do(OopClosure* f, bool allow_dead, bool allow_null = false);
607 
608   // All-in-one claiming of nmethods: returns true if the caller successfully claimed that
609   // nmethod.
610   bool oops_do_try_claim();
611 
612   // Loom support for following nmethods on the stack
613   void follow_nmethod(OopIterateClosure* cl);
614 
615   // Class containing callbacks for the oops_do_process_weak/strong() methods
616   // below.
617   class OopsDoProcessor {
618   public:
619     // Process the oops of the given nmethod based on whether it has been called
620     // in a weak or strong processing context, i.e. apply either weak or strong
621     // work on it.
622     virtual void do_regular_processing(nmethod* nm) = 0;
623     // Assuming that the oops of the given nmethod have already had their weak
624     // processing applied, apply the remaining strong processing part.
625     virtual void do_remaining_strong_processing(nmethod* nm) = 0;
626   };
627 
628   // The following two methods do the work corresponding to weak/strong nmethod
629   // processing.
630   void oops_do_process_weak(OopsDoProcessor* p);
631   void oops_do_process_strong(OopsDoProcessor* p);
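As a concrete illustration of the callback contract described above, the sketch below shows what a collector-side OopsDoProcessor might look like; it simply applies a single OopClosure in both callbacks. The class name MyOopProcessor and the member _cl are hypothetical, and this is a sketch under the assumption that applying the same closure in both callbacks is acceptable for the collector in question, not a definitive implementation.

// Hedged sketch (not part of nmethod.hpp): a collector-side processor that
// applies one OopClosure to a claimed nmethod.
class MyOopProcessor : public nmethod::OopsDoProcessor {
  OopClosure* _cl;   // closure supplied by the owning collector
 public:
  MyOopProcessor(OopClosure* cl) : _cl(cl) {}

  // Invoked when this thread claims the nmethod for the current phase.
  virtual void do_regular_processing(nmethod* nm) {
    nm->oops_do(_cl);
  }

  // Invoked when weak processing already ran and only the additional strong
  // work is still outstanding; here that work is simply the same closure.
  virtual void do_remaining_strong_processing(nmethod* nm) {
    nm->oops_do(_cl);
  }
};

A GC worker would then call nm->oops_do_process_weak(&processor) during weak root processing, or nm->oops_do_process_strong(&processor) when the nmethod must be kept strongly alive.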
632 
633   static void oops_do_marking_prologue();
634   static void oops_do_marking_epilogue();
635 
636  private:
637   ScopeDesc* scope_desc_in(address begin, address end);
638 
639   address* orig_pc_addr(const frame* fr);
640 
641   // used by jvmti to track if the load and unload events have been reported
642   bool  unload_reported() const                   { return _unload_reported; }
643   void  set_unload_reported()                     { _unload_reported = true; }
644   bool  load_reported() const                     { return _load_reported; }
645   void  set_load_reported()                       { _load_reported = true; }
646 
647  public:
648   // copying of debugging information
649   void copy_scopes_pcs(PcDesc* pcs, int count);
650   void copy_scopes_data(address buffer, int size);
651 
652   int orig_pc_offset() { return _orig_pc_offset; }


653 
654   // jvmti support:
655   void post_compiled_method_load_event(JvmtiThreadState* state = NULL);
656 
657   // verify operations
658   void verify();
659   void verify_scopes();
660   void verify_interrupt_point(address interrupt_point);
661 
662   // Disassemble this nmethod with additional debug information, e.g. information about blocks.
663   void decode2(outputStream* st) const;
664   void print_constant_pool(outputStream* st);
665 
666   // Avoid hiding of parent's 'decode(outputStream*)' method.
667   void decode(outputStream* st) const { decode2(st); } // just delegate here.
668 
669   // printing support
670   void print()                          const;
671   void print(outputStream* st)          const;
672   void print_code();

752     return _native_receiver_sp_offset;
753   }
754   ByteSize native_basic_lock_sp_offset() {
755     return _native_basic_lock_sp_offset;
756   }
757 
758   // support for code generation
759   static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
760   static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
761   static int state_offset()                       { return offset_of(nmethod, _state); }
762 
763   virtual void metadata_do(MetadataClosure* f);
764 
765   NativeCallWrapper* call_wrapper_at(address call) const;
766   NativeCallWrapper* call_wrapper_before(address return_pc) const;
767   address call_instruction_address(address pc) const;
768 
769   virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
770   virtual CompiledStaticCall* compiledStaticCall_at(address addr) const;
771   virtual CompiledStaticCall* compiledStaticCall_before(address addr) const;
772 
773   virtual void  make_deoptimized();
774 };
775 
776 // Locks an nmethod so its code will not get removed and it will not
777 // be made into a zombie, even if it is a not_entrant method. After the
778 // nmethod becomes a zombie, if CompiledMethodUnload event processing
779 // needs to be done, then lock_nmethod() is used directly to keep the
780 // generated code from being reused too early.
781 class nmethodLocker : public StackObj {
782   CompiledMethod* _nm;
783 
784  public:
785 
786   // note: nm can be NULL
787   // Only JvmtiDeferredEvent::compiled_method_unload_event()
788   // should pass zombie_ok == true.
789   static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
790   static void unlock_nmethod(CompiledMethod* nm); // (ditto)
791 
792   nmethodLocker(address pc); // derive nm from pc
793   nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
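To make the locking discipline described above concrete, here is a hedged usage sketch: a stack-allocated nmethodLocker pins the nmethod for the duration of a scope. It assumes the usual RAII pairing, i.e. a destructor that calls unlock_nmethod(), which is not visible in this excerpt; inspect_compiled_code is a hypothetical caller, not an existing HotSpot function.

// Hedged usage sketch (not part of nmethod.hpp).
void inspect_compiled_code(nmethod* nm) {
  nmethodLocker nml(nm);   // constructor above calls lock_nmethod(nm)
  // While nml is in scope, nm's code will not be flushed and nm will not
  // be turned into a zombie, even if it becomes not_entrant.
}                          // assumed destructor calls unlock_nmethod(nm)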