src/hotspot/share/code/nmethod.cpp

--- old version ---

  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm.h"
  27 #include "asm/assembler.inline.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/compiledIC.hpp"
  30 #include "code/compiledMethod.inline.hpp"
  31 #include "code/dependencies.hpp"
  32 #include "code/nativeInst.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "compiler/abstractCompiler.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/compileLog.hpp"
  38 #include "compiler/compilerDirectives.hpp"
  39 #include "compiler/directivesParser.hpp"
  40 #include "compiler/disassembler.hpp"
  41 #include "interpreter/bytecode.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/resourceArea.hpp"
  46 #include "memory/universe.hpp"
  47 #include "oops/access.inline.hpp"
  48 #include "oops/method.inline.hpp"
  49 #include "oops/methodData.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "prims/jvmtiImpl.hpp"
  52 #include "runtime/atomic.hpp"
  53 #include "runtime/flags/flagSetting.hpp"
  54 #include "runtime/frame.inline.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/jniHandles.inline.hpp"
  57 #include "runtime/orderAccess.hpp"
  58 #include "runtime/os.hpp"
  59 #include "runtime/safepointVerifiers.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/sweeper.hpp"
  62 #include "runtime/vmThread.hpp"
  63 #include "utilities/align.hpp"
  64 #include "utilities/dtrace.hpp"
  65 #include "utilities/events.hpp"
  66 #include "utilities/resourceHash.hpp"
  67 #include "utilities/xmlstream.hpp"
  68 #if INCLUDE_JVMCI
  69 #include "jvmci/jvmciRuntime.hpp"
  70 #endif


 394   int nsize = align_up(pcs_size,   oopSize);
 395   if ((nsize % sizeof(PcDesc)) != 0) {
 396     nsize = pcs_size + sizeof(PcDesc);
 397   }
 398   assert((nsize % oopSize) == 0, "correct alignment");
 399   return nsize;
 400 }
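The adjustment above keeps the PcDesc array both word-aligned and a whole number of descriptors long: the size is first rounded up to oopSize, and if that rounding lands off a PcDesc boundary, a full extra PcDesc of padding is used instead. A minimal standalone sketch with assumed sizes (PcDescLike and the constants below are illustrative stand-ins, not HotSpot's definitions):

    #include <cassert>
    #include <cstdio>

    const int oopSize = 8;               // assumed word size
    struct PcDescLike { int a, b, c; };  // 12 bytes: deliberately not 8-byte aligned

    // Round up to a power-of-two alignment.
    static int align_up(int size, int alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    // Same shape as the adjustment above: round to a word boundary first; if
    // that breaks PcDesc granularity, pad with one whole extra PcDesc instead.
    static int adjust_pcs_size_sketch(int pcs_size) {
      int nsize = align_up(pcs_size, oopSize);
      if ((nsize % sizeof(PcDescLike)) != 0) {
        nsize = pcs_size + (int)sizeof(PcDescLike);
      }
      assert((nsize % oopSize) == 0 && "correct alignment");
      return nsize;
    }

    int main() {
      // Three 12-byte descriptors = 36 bytes; rounding up gives 40, which is
      // not a multiple of 12, so we pad to 36 + 12 = 48, a multiple of both.
      printf("%d\n", adjust_pcs_size_sketch(3 * (int)sizeof(PcDescLike)));  // 48
    }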
 401 
 402 
 403 int nmethod::total_size() const {
 404   return
 405     consts_size()        +
 406     insts_size()         +
 407     stub_size()          +
 408     scopes_data_size()   +
 409     scopes_pcs_size()    +
 410     handler_table_size() +
 411     nul_chk_table_size();
 412 }
 413 
 414 address* nmethod::orig_pc_addr(const frame* fr) {
 415   return (address*) ((address)fr->unextended_sp() + _orig_pc_offset);
 416 }
 417 
 418 const char* nmethod::compile_kind() const {
 419   if (is_osr_method())     return "osr";
 420   if (method() != NULL && is_native_method())  return "c2n";
 421   return NULL;
 422 }
 423 
 424 // Fill in default values for various flag fields
 425 void nmethod::init_defaults() {
 426   _state                      = not_installed;
 427   _has_flushed_dependencies   = 0;
 428   _lock_count                 = 0;
 429   _stack_traversal_mark       = 0;
 430   _unload_reported            = false; // jvmti state
 431   _is_far_code                = false; // nmethods are located in CodeCache
 432 
 433 #ifdef ASSERT
 434   _oops_are_stale             = false;
 435 #endif
 436 
 437   _oops_do_mark_link       = NULL;


 562           Klass* klass = deps.context_type();
 563           if (klass == NULL) {
 564             continue;  // ignore things like evol_method
 565           }
 566           // record this nmethod as dependent on this klass
 567           InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
 568         }
 569       }
 570       NOT_PRODUCT(if (nm != NULL)  note_java_nmethod(nm));
 571     }
 572   }
 573   // Do verification and logging outside CodeCache_lock.
 574   if (nm != NULL) {
 575     // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
 576     DEBUG_ONLY(nm->verify();)
 577     nm->log_new_nmethod();
 578   }
 579   return nm;
 580 }
 581 
 582 // For native wrappers
 583 nmethod::nmethod(
 584   Method* method,
 585   CompilerType type,
 586   int nmethod_size,
 587   int compile_id,
 588   CodeOffsets* offsets,
 589   CodeBuffer* code_buffer,
 590   int frame_size,
 591   ByteSize basic_lock_owner_sp_offset,
 592   ByteSize basic_lock_sp_offset,
 593   OopMapSet* oop_maps )
 594   : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
 595   _is_unloading_state(0),
 596   _native_receiver_sp_offset(basic_lock_owner_sp_offset),
 597   _native_basic_lock_sp_offset(basic_lock_sp_offset)
 598 {
 599   {
 600     int scopes_data_offset   = 0;
 601     int deoptimize_offset    = 0;
 602     int deoptimize_mh_offset = 0;
 603 
 604     debug_only(NoSafepointVerifier nsv;)
 605     assert_locked_or_safepoint(CodeCache_lock);
 606 
 607     init_defaults();
 608     _entry_bci               = InvocationEntryBci;
 609     // We have no exception handler or deopt handler; make the
 610     // values something that will never match a pc, like the nmethod vtable entry.
 611     _exception_offset        = 0;
 612     _orig_pc_offset          = 0;
 613 
 614     _consts_offset           = data_offset();


 704   int nmethod_size,
 705   int compile_id,
 706   int entry_bci,
 707   CodeOffsets* offsets,
 708   int orig_pc_offset,
 709   DebugInformationRecorder* debug_info,
 710   Dependencies* dependencies,
 711   CodeBuffer *code_buffer,
 712   int frame_size,
 713   OopMapSet* oop_maps,
 714   ExceptionHandlerTable* handler_table,
 715   ImplicitExceptionTable* nul_chk_table,
 716   AbstractCompiler* compiler,
 717   int comp_level
 718 #if INCLUDE_JVMCI
 719   , char* speculations,
 720   int speculations_len,
 721   int jvmci_data_size
 722 #endif
 723   )
 724   : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
 725   _is_unloading_state(0),
 726   _native_receiver_sp_offset(in_ByteSize(-1)),
 727   _native_basic_lock_sp_offset(in_ByteSize(-1))
 728 {
 729   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 730   {
 731     debug_only(NoSafepointVerifier nsv;)
 732     assert_locked_or_safepoint(CodeCache_lock);
 733 
 734     _deopt_handler_begin = (address) this;
 735     _deopt_mh_handler_begin = (address) this;
 736 
 737     init_defaults();
 738     _entry_bci               = entry_bci;
 739     _compile_id              = compile_id;
 740     _comp_level              = comp_level;
 741     _orig_pc_offset          = orig_pc_offset;
 742     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 743 
 744     // Section offsets


 819     debug_only(Universe::heap()->verify_nmethod(this));
 820 
 821     CodeCache::commit(this);
 822 
 823     // Copy contents of ExceptionHandlerTable to nmethod
 824     handler_table->copy_to(this);
 825     nul_chk_table->copy_to(this);
 826 
 827 #if INCLUDE_JVMCI
 828     // Copy speculations to nmethod
 829     if (speculations_size() != 0) {
 830       memcpy(speculations_begin(), speculations, speculations_len);
 831     }
 832 #endif
 833 
 834     // We use the entry point information to find out whether a method
 835     // is static or non-static.
 836     assert(compiler->is_c2() || compiler->is_jvmci() ||
 837            _method->is_static() == (entry_point() == _verified_entry_point),
 838            "entry points must be the same for static methods and vice versa");
 839   }
 840 }
 841 
 842 // Print a short set of xml attributes to identify this nmethod.  The
 843 // output should be embedded in some other element.
 844 void nmethod::log_identity(xmlStream* log) const {
 845   log->print(" compile_id='%d'", compile_id());
 846   const char* nm_kind = compile_kind();
 847   if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
 848   log->print(" compiler='%s'", compiler_name());
 849   if (TieredCompilation) {
 850     log->print(" level='%d'", comp_level());
 851   }
 852 #if INCLUDE_JVMCI
 853   if (jvmci_nmethod_data() != NULL) {
 854     const char* jvmci_name = jvmci_nmethod_data()->name();
 855     if (jvmci_name != NULL) {
 856       log->print(" jvmci_mirror_name='");
 857       log->text("%s", jvmci_name);
 858       log->print("'");
 859     }
 860   }
 861 #endif


1104   // cleaning passes before moving to zombie.
1105   set_stack_traversal_mark(NMethodSweeper::traversal_count());
1106 }
1107 
1108 // Tell if a non-entrant method can be converted to a zombie (i.e.,
1109 // there are no activations on the stack, and it is not in use by the
1110 // VM or the ServiceThread).
1111 bool nmethod::can_convert_to_zombie() {
1112   // Note that this is called when the sweeper has observed the nmethod to be
1113   // not_entrant. However, with concurrent code cache unloading, the state
1114   // might have moved on to unloaded if it is_unloading(), due to racing
1115   // concurrent GC threads.
1116   assert(is_not_entrant() || is_unloading(), "must be a non-entrant method");
1117 
1118   // Since the nmethod sweeper only does partial sweeps, the sweeper's traversal
1119   // count can be greater than the stack traversal count before it hits the
1120   // nmethod for the second time.
1121   // If an is_unloading() nmethod is still not_entrant, then it is not safe to
1122   // convert it to zombie due to GC unloading interactions. However, if it
1123   // has become unloaded, then it is okay to convert such nmethods to zombie.
1124   return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() &&
1125          !is_locked_by_vm() && (!is_unloading() || is_unloaded());
1126 }
1127 
1128 void nmethod::inc_decompile_count() {
1129   if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1130   // Could be gated by ProfileTraps, but do not bother...
1131   Method* m = method();
1132   if (m == NULL)  return;
1133   MethodData* mdo = m->method_data();
1134   if (mdo == NULL)  return;
1135   // There is a benign race here.  See comments in methodData.hpp.
1136   mdo->inc_decompile_count();
1137 }
1138 
1139 bool nmethod::try_transition(int new_state_int) {
1140   signed char new_state = new_state_int;
1141   for (;;) {
1142     signed char old_state = Atomic::load(&_state);
1143     if (old_state >= new_state) {
1144       // Ensure monotonicity of transitions.
1145       return false;
1146     }
1147     if (Atomic::cmpxchg(new_state, &_state, old_state) == old_state) {
1148       return true;
1149     }
1150   }
1151 }
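try_transition is a classic monotonic CAS loop: the state only ever moves strictly forward, and a failed compare-exchange simply retries against the freshly observed value. The same pattern in portable C++ (a standalone sketch using std::atomic, not HotSpot's Atomic API):

    #include <atomic>

    static std::atomic<signed char> g_state{0};  // stand-in for _state

    // Succeeds only for strictly increasing states; concurrent updaters race
    // through compare_exchange and the losers re-check monotonicity.
    bool try_transition_sketch(signed char new_state) {
      signed char old_state = g_state.load();
      for (;;) {
        if (old_state >= new_state) {
          return false;  // never move backwards (or sideways)
        }
        // On failure, compare_exchange_weak reloads old_state for the retry.
        if (g_state.compare_exchange_weak(old_state, new_state)) {
          return true;
        }
      }
    }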
1152 
1153 void nmethod::make_unloaded() {
1154   post_compiled_method_unload();
1155 
1156   // This nmethod is being unloaded; make sure that dependencies
1157   // recorded in instanceKlasses get flushed.
1158   // Since this work is being done during a GC, defer deleting dependencies from the
1159   // InstanceKlass.
1160   assert(Universe::heap()->is_gc_active() || Thread::current()->is_ConcurrentGC_thread(),
1161          "should only be called during gc");
1162   flush_dependencies(/*delete_immediately*/false);
1163 
1164   // Break cycle between nmethod & method
1165   LogTarget(Trace, class, unload, nmethod) lt;
1166   if (lt.is_enabled()) {
1167     LogStream ls(lt);
1168     ls.print("making nmethod " INTPTR_FORMAT
1169              " unloadable, Method*(" INTPTR_FORMAT
1170              ") ",
1171              p2i(this), p2i(_method));
1172     ls.cr();
1173   }


1786 }
1787 
1788 
1789 // This is called at the end of the strong tracing/marking phase of a
1790 // GC to unload an nmethod if it contains otherwise unreachable
1791 // oops.
1792 
1793 void nmethod::do_unloading(bool unloading_occurred) {
1794   // Make sure the oops are ready to receive visitors
1795   assert(!is_zombie() && !is_unloaded(),
1796          "should not call follow on zombie or unloaded nmethod");
1797 
1798   if (is_unloading()) {
1799     make_unloaded();
1800   } else {
1801     guarantee(unload_nmethod_caches(unloading_occurred),
1802               "Should not need transition stubs");
1803   }
1804 }
1805 
1806 void nmethod::oops_do(OopClosure* f, bool allow_dead) {
1807   // Make sure the oops are ready to receive visitors
1808   assert(allow_dead || is_alive(), "should not call follow on dead nmethod");
1809 
1810   // Prevent extra code cache walk for platforms that don't have immediate oops.
1811   if (relocInfo::mustIterateImmediateOopsInCode()) {
1812     RelocIterator iter(this, oops_reloc_begin());
1813 
1814     while (iter.next()) {
1815       if (iter.type() == relocInfo::oop_type ) {
1816         oop_Relocation* r = iter.oop_reloc();
1817         // In this loop, we must only follow those oops directly embedded in
1818         // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1819         assert(1 == (r->oop_is_immediate()) +
1820                (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1821                "oop must be found in exactly one place");
1822         if (r->oop_is_immediate() && r->oop_value() != NULL) {
1823           f->do_oop(r->oop_addr());
1824         }
1825       }
1826     }
1827   }
1828 
1829   // Scopes
1830   // This includes oop constants not inlined in the code stream.
1831   for (oop* p = oops_begin(); p < oops_end(); p++) {
1832     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1833     f->do_oop(p);
1834   }
1835 }
1836 
1837 #define NMETHOD_SENTINEL ((nmethod*)badAddress)
1838 
1839 nmethod* volatile nmethod::_oops_do_mark_nmethods;
1840 
1841 // An nmethod is "marked" if its _mark_link is set non-null.
1842 // Even if it is the end of the linked list, it will have a non-null link value,

--- new version ---

  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm.h"
  27 #include "asm/assembler.inline.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/compiledIC.hpp"
  30 #include "code/compiledMethod.inline.hpp"
  31 #include "code/dependencies.hpp"
  32 #include "code/nativeInst.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "compiler/abstractCompiler.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/compileLog.hpp"
  38 #include "compiler/compilerDirectives.hpp"
  39 #include "compiler/directivesParser.hpp"
  40 #include "compiler/disassembler.hpp"
  41 #include "compiler/oopMap.inline.hpp"
  42 #include "interpreter/bytecode.hpp"
  43 #include "logging/log.hpp"
  44 #include "logging/logStream.hpp"
  45 #include "memory/allocation.inline.hpp"
  46 #include "memory/resourceArea.hpp"
  47 #include "memory/universe.hpp"
  48 #include "oops/access.inline.hpp"
  49 #include "oops/method.inline.hpp"
  50 #include "oops/methodData.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "oops/weakHandle.inline.hpp"
  53 #include "prims/jvmtiImpl.hpp"
  54 #include "runtime/atomic.hpp"
  55 #include "runtime/flags/flagSetting.hpp"
  56 #include "runtime/frame.inline.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/jniHandles.inline.hpp"
  59 #include "runtime/orderAccess.hpp"
  60 #include "runtime/os.hpp"
  61 #include "runtime/safepointVerifiers.hpp"
  62 #include "runtime/sharedRuntime.hpp"
  63 #include "runtime/sweeper.hpp"
  64 #include "runtime/vmThread.hpp"
  65 #include "utilities/align.hpp"
  66 #include "utilities/dtrace.hpp"
  67 #include "utilities/events.hpp"
  68 #include "utilities/resourceHash.hpp"
  69 #include "utilities/xmlstream.hpp"
  70 #if INCLUDE_JVMCI
  71 #include "jvmci/jvmciRuntime.hpp"
  72 #endif


 396   int nsize = align_up(pcs_size,   oopSize);
 397   if ((nsize % sizeof(PcDesc)) != 0) {
 398     nsize = pcs_size + sizeof(PcDesc);
 399   }
 400   assert((nsize % oopSize) == 0, "correct alignment");
 401   return nsize;
 402 }
 403 
 404 
 405 int nmethod::total_size() const {
 406   return
 407     consts_size()        +
 408     insts_size()         +
 409     stub_size()          +
 410     scopes_data_size()   +
 411     scopes_pcs_size()    +
 412     handler_table_size() +
 413     nul_chk_table_size();
 414 }
 415 
 416 const char* nmethod::compile_kind() const {
 417   if (is_osr_method())     return "osr";
 418   if (method() != NULL && is_native_method())  return "c2n";
 419   return NULL;
 420 }
 421 
 422 // Fill in default values for various flag fields
 423 void nmethod::init_defaults() {
 424   _state                      = not_installed;
 425   _has_flushed_dependencies   = 0;
 426   _lock_count                 = 0;
 427   _stack_traversal_mark       = 0;
 428   _unload_reported            = false; // jvmti state
 429   _is_far_code                = false; // nmethods are located in CodeCache
 430 
 431 #ifdef ASSERT
 432   _oops_are_stale             = false;
 433 #endif
 434 
 435   _oops_do_mark_link       = NULL;


 560           Klass* klass = deps.context_type();
 561           if (klass == NULL) {
 562             continue;  // ignore things like evol_method
 563           }
 564           // record this nmethod as dependent on this klass
 565           InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
 566         }
 567       }
 568       NOT_PRODUCT(if (nm != NULL)  note_java_nmethod(nm));
 569     }
 570   }
 571   // Do verification and logging outside CodeCache_lock.
 572   if (nm != NULL) {
 573     // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
 574     DEBUG_ONLY(nm->verify();)
 575     nm->log_new_nmethod();
 576   }
 577   return nm;
 578 }
 579 
 580 class CountOops : public OopClosure {
 581 private:
 582   int _nr_oops;
 583 public:
 584   CountOops() : _nr_oops(0) {}
 585   int nr_oops() const { return _nr_oops; }
 586 
 587 
 588   virtual void do_oop(oop* o)       { _nr_oops++; }
 589   virtual void do_oop(narrowOop* o) { _nr_oops++; }
 590 };
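Driving the closure is a one-liner; this is essentially what the constructor below and count_oops() do (usage sketch, assuming a live nmethod* nm):

    // Walk all oops the nmethod reports and tally them.
    static int count_embedded_oops(nmethod* nm) {
      CountOops count;
      nm->oops_do(&count, /*allow_dead*/ false, /*allow_null*/ true);
      return count.nr_oops();
    }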
 591 
 592 // For native wrappers
 593 nmethod::nmethod(
 594   Method* method,
 595   CompilerType type,
 596   int nmethod_size,
 597   int compile_id,
 598   CodeOffsets* offsets,
 599   CodeBuffer* code_buffer,
 600   int frame_size,
 601   ByteSize basic_lock_owner_sp_offset,
 602   ByteSize basic_lock_sp_offset,
 603   OopMapSet* oop_maps )
 604   : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
 605   _is_unloading_state(0),
 606   _native_receiver_sp_offset(basic_lock_owner_sp_offset),
 607   _native_basic_lock_sp_offset(basic_lock_sp_offset)
 608 {
 609   {
 610     int scopes_data_offset   = 0;
 611     int deoptimize_offset    = 0;
 612     int deoptimize_mh_offset = 0;
 613 
 614     debug_only(NoSafepointVerifier nsv;)
 615     assert_locked_or_safepoint(CodeCache_lock);
 616 
 617     init_defaults();
 618     _entry_bci               = InvocationEntryBci;
 619     // We have no exception handler or deopt handler; make the
 620     // values something that will never match a pc, like the nmethod vtable entry.
 621     _exception_offset        = 0;
 622     _orig_pc_offset          = 0;
 623 
 624     _consts_offset           = data_offset();


 714   int nmethod_size,
 715   int compile_id,
 716   int entry_bci,
 717   CodeOffsets* offsets,
 718   int orig_pc_offset,
 719   DebugInformationRecorder* debug_info,
 720   Dependencies* dependencies,
 721   CodeBuffer *code_buffer,
 722   int frame_size,
 723   OopMapSet* oop_maps,
 724   ExceptionHandlerTable* handler_table,
 725   ImplicitExceptionTable* nul_chk_table,
 726   AbstractCompiler* compiler,
 727   int comp_level
 728 #if INCLUDE_JVMCI
 729   , char* speculations,
 730   int speculations_len,
 731   int jvmci_data_size
 732 #endif
 733   )
 734   : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
 735   _is_unloading_state(0),
 736   _native_receiver_sp_offset(in_ByteSize(-1)),
 737   _native_basic_lock_sp_offset(in_ByteSize(-1))
 738 {
 739   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 740   {
 741     debug_only(NoSafepointVerifier nsv;)
 742     assert_locked_or_safepoint(CodeCache_lock);
 743 
 744     _deopt_handler_begin = (address) this;
 745     _deopt_mh_handler_begin = (address) this;
 746 
 747     init_defaults();
 748     _entry_bci               = entry_bci;
 749     _compile_id              = compile_id;
 750     _comp_level              = comp_level;
 751     _orig_pc_offset          = orig_pc_offset;
 752     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 753 
 754     // Section offsets


 829     debug_only(Universe::heap()->verify_nmethod(this));
 830 
 831     CodeCache::commit(this);
 832 
 833     // Copy contents of ExceptionHandlerTable to nmethod
 834     handler_table->copy_to(this);
 835     nul_chk_table->copy_to(this);
 836 
 837 #if INCLUDE_JVMCI
 838     // Copy speculations to nmethod
 839     if (speculations_size() != 0) {
 840       memcpy(speculations_begin(), speculations, speculations_len);
 841     }
 842 #endif
 843 
 844     // We use the entry point information to find out whether a method
 845     // is static or non-static.
 846     assert(compiler->is_c2() || compiler->is_jvmci() ||
 847            _method->is_static() == (entry_point() == _verified_entry_point),
 848            "entry points must be the same for static methods and vice versa");
 849 
 850     {
 851       CountOops count;
 852       this->oops_do(&count, false, true);
 853       _nr_oops = count.nr_oops();
 854     }
 855   }
 856 }
 857 
 858 int nmethod::count_oops() {
 859   CountOops count;
 860   this->oops_do(&count, false, true);
 861   return count.nr_oops();
 862 }
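The constructor caches this count in _nr_oops, while count_oops() recomputes it on demand, so a debug-only cross-check between the two is a natural use. The helper below is hypothetical (it assumes an nr_oops() accessor for the cached field, which this patch does not show):

    #ifdef ASSERT
    // Hypothetical check: the count cached at construction should still match
    // a fresh walk of the embedded and scope oops.
    static void verify_oop_count(nmethod* nm) {
      assert(nm->count_oops() == nm->nr_oops(), "embedded oop count changed");
    }
    #endif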
 863 
 864 // Print a short set of xml attributes to identify this nmethod.  The
 865 // output should be embedded in some other element.
 866 void nmethod::log_identity(xmlStream* log) const {
 867   log->print(" compile_id='%d'", compile_id());
 868   const char* nm_kind = compile_kind();
 869   if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
 870   log->print(" compiler='%s'", compiler_name());
 871   if (TieredCompilation) {
 872     log->print(" level='%d'", comp_level());
 873   }
 874 #if INCLUDE_JVMCI
 875   if (jvmci_nmethod_data() != NULL) {
 876     const char* jvmci_name = jvmci_nmethod_data()->name();
 877     if (jvmci_name != NULL) {
 878       log->print(" jvmci_mirror_name='");
 879       log->text("%s", jvmci_name);
 880       log->print("'");
 881     }
 882   }
 883 #endif
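The attributes are emitted inline, ready to be embedded in a larger XML element; note each print starts with a leading space. For a tiered C2 compile the output looks roughly like this (values illustrative):

     compile_id='1234' compiler='c2' level='4'
     compile_id='1234' compile_kind='osr' compiler='c2' level='4'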


1126   // cleaning passes before moving to zombie.
1127   set_stack_traversal_mark(NMethodSweeper::traversal_count());
1128 }
1129 
1130 // Tell if a non-entrant method can be converted to a zombie (i.e.,
1131 // there are no activations on the stack, and it is not in use by the
1132 // VM or the ServiceThread).
1133 bool nmethod::can_convert_to_zombie() {
1134   // Note that this is called when the sweeper has observed the nmethod to be
1135   // not_entrant. However, with concurrent code cache unloading, the state
1136   // might have moved on to unloaded if it is_unloading(), due to racing
1137   // concurrent GC threads.
1138   assert(is_not_entrant() || is_unloading(), "must be a non-entrant method");
1139 
1140   // Since the nmethod sweeper only does partial sweeps, the sweeper's traversal
1141   // count can be greater than the stack traversal count before it hits the
1142   // nmethod for the second time.
1143   // If an is_unloading() nmethod is still not_entrant, then it is not safe to
1144   // convert it to zombie due to GC unloading interactions. However, if it
1145   // has become unloaded, then it is okay to convert such nmethods to zombie.
1146   return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() && !is_on_continuation_stack() &&
1147          !is_locked_by_vm() && (!is_unloading() || is_unloaded());
1148 }
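Spelling out the conjuncts of that return expression makes the safety argument easier to audit (the names and snapshot struct below are this sketch's, not HotSpot's):

    struct SweepSnapshot {        // illustrative snapshot of the flags consulted
      long stack_traversal_mark;
      long sweeper_traversals;
      bool on_continuation_stack, locked_by_vm, unloading, unloaded;
    };

    bool can_convert_sketch(const SweepSnapshot& s) {
      bool seen_by_two_sweeps   = s.stack_traversal_mark + 1 < s.sweeper_traversals;
      bool not_pinned_by_loom   = !s.on_continuation_stack;  // the condition added by this patch
      bool not_held_by_vm       = !s.locked_by_vm;
      bool unload_state_settled = !s.unloading || s.unloaded;
      return seen_by_two_sweeps && not_pinned_by_loom &&
             not_held_by_vm && unload_state_settled;
    }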
1149 
1150 void nmethod::inc_decompile_count() {
1151   if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1152   // Could be gated by ProfileTraps, but do not bother...
1153   Method* m = method();
1154   if (m == NULL)  return;
1155   MethodData* mdo = m->method_data();
1156   if (mdo == NULL)  return;
1157   // There is a benign race here.  See comments in methodData.hpp.
1158   mdo->inc_decompile_count();
1159 }
1160 
1161 bool nmethod::try_transition(int new_state_int) {
1162   signed char new_state = new_state_int;
1163   for (;;) {
1164     signed char old_state = Atomic::load(&_state);
1165     if (old_state >= new_state) {
1166       // Ensure monotonicity of transitions.
1167       return false;
1168     }
1169     if (Atomic::cmpxchg(new_state, &_state, old_state) == old_state) {
1170       return true;
1171     }
1172   }
1173 }
1174 
1175 void nmethod::make_unloaded() {
1176   assert(!is_on_continuation_stack(), "can't be on continuation stack");
1177 
1178   post_compiled_method_unload();
1179 
1180   // This nmethod is being unloaded; make sure that dependencies
1181   // recorded in instanceKlasses get flushed.
1182   // Since this work is being done during a GC, defer deleting dependencies from the
1183   // InstanceKlass.
1184   assert(Universe::heap()->is_gc_active() || Thread::current()->is_ConcurrentGC_thread(),
1185          "should only be called during gc");
1186   flush_dependencies(/*delete_immediately*/false);
1187 
1188   // Break cycle between nmethod & method
1189   LogTarget(Trace, class, unload, nmethod) lt;
1190   if (lt.is_enabled()) {
1191     LogStream ls(lt);
1192     ls.print("making nmethod " INTPTR_FORMAT
1193              " unloadable, Method*(" INTPTR_FORMAT
1194              ") ",
1195              p2i(this), p2i(_method));
1196     ls.cr();
1197   }


1810 }
1811 
1812 
1813 // This is called at the end of the strong tracing/marking phase of a
1814 // GC to unload an nmethod if it contains otherwise unreachable
1815 // oops.
1816 
1817 void nmethod::do_unloading(bool unloading_occurred) {
1818   // Make sure the oops are ready to receive visitors
1819   assert(!is_zombie() && !is_unloaded(),
1820          "should not call follow on zombie or unloaded nmethod");
1821 
1822   if (is_unloading()) {
1823     make_unloaded();
1824   } else {
1825     guarantee(unload_nmethod_caches(unloading_occurred),
1826               "Should not need transition stubs");
1827   }
1828 }
1829 
1830 void nmethod::oops_do(OopClosure* f, bool allow_dead, bool allow_null, bool keepalive_is_strong) {
1831   // Make sure the oops are ready to receive visitors
1832   assert(allow_dead || is_alive(), "should not call follow on dead nmethod");
1833 
1834   if (keepalive_is_strong) {
1835     if (_keepalive != NULL) {
1836       WeakHandle<vm_nmethod_keepalive_data> wh = WeakHandle<vm_nmethod_keepalive_data>::from_raw(_keepalive);
1837       if (wh.resolve() != NULL) {
1838         f->do_oop(_keepalive);
1839       }
1840     }
1841   }
1842 
1843   // Prevent extra code cache walk for platforms that don't have immediate oops.
1844   if (relocInfo::mustIterateImmediateOopsInCode()) {
1845     RelocIterator iter(this, oops_reloc_begin());
1846 
1847     while (iter.next()) {
1848       if (iter.type() == relocInfo::oop_type ) {
1849         oop_Relocation* r = iter.oop_reloc();
1850         // In this loop, we must only follow those oops directly embedded in
1851         // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1852         assert(1 == (r->oop_is_immediate()) +
1853                (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1854                "oop must be found in exactly one place");
1855         if (r->oop_is_immediate() && (r->oop_value() != NULL || allow_null)) {
1856           f->do_oop(r->oop_addr());
1857         }
1858       }
1859     }
1860   }
1861 
1862   // Scopes
1863   // This includes oop constants not inlined in the code stream.
1864   for (oop* p = oops_begin(); p < oops_end(); p++) {
1865     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1866     f->do_oop(p);
1867   }
1868 }
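The keepalive step resolves the weak handle before reporting the slot, so a referent the GC has already cleared is never strengthened. The shape of that pattern, with simplified stand-in types (the real code goes through WeakHandle<vm_nmethod_keepalive_data> and OopClosure):

    struct OopVisitor { virtual void do_oop(void** p) = 0; };

    // Treat a weak slot as a strong root only while its referent is live.
    inline void visit_keepalive_strongly(void** keepalive, OopVisitor* f) {
      if (keepalive == nullptr) return;  // no keepalive registered
      if (*keepalive != nullptr) {       // resolve: assumes GC nulls dead referents
        f->do_oop(keepalive);            // live, so report the slot strongly
      }
    }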
1869 
1870 #define NMETHOD_SENTINEL ((nmethod*)badAddress)
1871 
1872 nmethod* volatile nmethod::_oops_do_mark_nmethods;
1873 
1874 // An nmethod is "marked" if its _mark_link is set non-null.
1875 // Even if it is the end of the linked list, it will have a non-null link value,