
src/hotspot/share/code/nmethod.cpp


  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm.h"
  27 #include "asm/assembler.inline.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/compiledIC.hpp"
  30 #include "code/compiledMethod.inline.hpp"
  31 #include "code/dependencies.hpp"
  32 #include "code/nativeInst.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "compiler/abstractCompiler.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/compileLog.hpp"
  38 #include "compiler/compilerDirectives.hpp"
  39 #include "compiler/directivesParser.hpp"
  40 #include "compiler/disassembler.hpp"
  41 #include "compiler/oopMap.hpp"


  42 #include "gc/shared/collectedHeap.hpp"
  43 #include "interpreter/bytecode.hpp"
  44 #include "logging/log.hpp"
  45 #include "logging/logStream.hpp"
  46 #include "memory/allocation.inline.hpp"
  47 #include "memory/resourceArea.hpp"
  48 #include "memory/universe.hpp"
  49 #include "oops/access.inline.hpp"
  50 #include "oops/klass.inline.hpp"
  51 #include "oops/method.inline.hpp"
  52 #include "oops/methodData.hpp"
  53 #include "oops/oop.inline.hpp"

  54 #include "prims/jvmtiImpl.hpp"
  55 #include "prims/jvmtiThreadState.hpp"
  56 #include "prims/methodHandles.hpp"
  57 #include "runtime/atomic.hpp"
  58 #include "runtime/deoptimization.hpp"
  59 #include "runtime/flags/flagSetting.hpp"
  60 #include "runtime/frame.inline.hpp"
  61 #include "runtime/handles.inline.hpp"
  62 #include "runtime/jniHandles.inline.hpp"
  63 #include "runtime/orderAccess.hpp"
  64 #include "runtime/os.hpp"
  65 #include "runtime/safepointVerifiers.hpp"
  66 #include "runtime/serviceThread.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/signature.hpp"
  69 #include "runtime/sweeper.hpp"
  70 #include "runtime/threadWXSetters.inline.hpp"
  71 #include "runtime/vmThread.hpp"
  72 #include "utilities/align.hpp"
  73 #include "utilities/copy.hpp"

 405   int nsize = align_up(pcs_size,   oopSize);
 406   if ((nsize % sizeof(PcDesc)) != 0) {
 407     nsize = pcs_size + sizeof(PcDesc);
 408   }
 409   assert((nsize % oopSize) == 0, "correct alignment");
 410   return nsize;
 411 }
 412 
 413 
 414 int nmethod::total_size() const {
 415   return
 416     consts_size()        +
 417     insts_size()         +
 418     stub_size()          +
 419     scopes_data_size()   +
 420     scopes_pcs_size()    +
 421     handler_table_size() +
 422     nul_chk_table_size();
 423 }
 424 
 425 address* nmethod::orig_pc_addr(const frame* fr) {
 426   return (address*) ((address)fr->unextended_sp() + _orig_pc_offset);
 427 }
 428 
 429 const char* nmethod::compile_kind() const {
 430   if (is_osr_method())     return "osr";
 431   if (method() != NULL && is_native_method())  return "c2n";





 432   return NULL;
 433 }
 434 
 435 // Fill in default values for various flag fields
 436 void nmethod::init_defaults() {
 437   _state                      = not_installed;
 438   _has_flushed_dependencies   = 0;
 439   _lock_count                 = 0;
 440   _stack_traversal_mark       = 0;
 441   _load_reported              = false; // jvmti state
 442   _unload_reported            = false;
 443 
 444 #ifdef ASSERT
 445   _oops_are_stale             = false;
 446 #endif
 447 
 448   _oops_do_mark_link       = NULL;
 449   _osr_link                = NULL;
 450 #if INCLUDE_RTM_OPT
 451   _rtm_state               = NoRTM;
 452 #endif
 453 }
 454 
 455 nmethod* nmethod::new_native_nmethod(const methodHandle& method,
 456   int compile_id,
 457   CodeBuffer *code_buffer,
 458   int vep_offset,
 459   int frame_complete,
 460   int frame_size,
 461   ByteSize basic_lock_owner_sp_offset,
 462   ByteSize basic_lock_sp_offset,
 463   OopMapSet* oop_maps) {

 464   code_buffer->finalize_oop_references(method);
 465   // create nmethod
 466   nmethod* nm = NULL;
 467   {
 468     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 469     int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
 470 
 471     CodeOffsets offsets;
 472     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 473     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);



 474     nm = new (native_nmethod_size, CompLevel_none)
 475     nmethod(method(), compiler_none, native_nmethod_size,
 476             compile_id, &offsets,
 477             code_buffer, frame_size,
 478             basic_lock_owner_sp_offset,
 479             basic_lock_sp_offset,
 480             oop_maps);
 481     NOT_PRODUCT(if (nm != NULL)  native_nmethod_stats.note_native_nmethod(nm));
 482   }
 483 
 484   if (nm != NULL) {
 485     // verify nmethod
 486     debug_only(nm->verify();) // might block
 487 
 488     nm->log_new_nmethod();
 489   }
 490   return nm;
 491 }
 492 
 493 nmethod* nmethod::new_nmethod(const methodHandle& method,

 574           Klass* klass = deps.context_type();
 575           if (klass == NULL) {
 576             continue;  // ignore things like evol_method
 577           }
 578           // record this nmethod as dependent on this klass
 579           InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
 580         }
 581       }
 582       NOT_PRODUCT(if (nm != NULL)  note_java_nmethod(nm));
 583     }
 584   }
 585   // Do verification and logging outside CodeCache_lock.
 586   if (nm != NULL) {
 587     // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
 588     DEBUG_ONLY(nm->verify();)
 589     nm->log_new_nmethod();
 590   }
 591   return nm;
 592 }
 593 












 594 // For native wrappers
 595 nmethod::nmethod(
 596   Method* method,
 597   CompilerType type,
 598   int nmethod_size,
 599   int compile_id,
 600   CodeOffsets* offsets,
 601   CodeBuffer* code_buffer,
 602   int frame_size,
 603   ByteSize basic_lock_owner_sp_offset,
 604   ByteSize basic_lock_sp_offset,
 605   OopMapSet* oop_maps )
 606   : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
 607   _is_unloading_state(0),
 608   _native_receiver_sp_offset(basic_lock_owner_sp_offset),
 609   _native_basic_lock_sp_offset(basic_lock_sp_offset)
 610 {
 611   {
 612     int scopes_data_offset   = 0;
 613     int deoptimize_offset    = 0;
 614     int deoptimize_mh_offset = 0;
 615 
 616     debug_only(NoSafepointVerifier nsv;)
 617     assert_locked_or_safepoint(CodeCache_lock);
 618 
 619     init_defaults();
 620     _entry_bci               = InvocationEntryBci;
 621     // We have no exception handler or deopt handler; make the
 622     // values something that will never match a pc, like the nmethod vtable entry.
 623     _exception_offset        = 0;
 624     _orig_pc_offset          = 0;

 625 
 626     _consts_offset           = data_offset();
 627     _stub_offset             = data_offset();
 628     _oops_offset             = data_offset();
 629     _metadata_offset         = _oops_offset         + align_up(code_buffer->total_oop_size(), oopSize);
 630     scopes_data_offset       = _metadata_offset     + align_up(code_buffer->total_metadata_size(), wordSize);
 631     _scopes_pcs_offset       = scopes_data_offset;
 632     _dependencies_offset     = _scopes_pcs_offset;
 633     _native_invokers_offset  = _dependencies_offset;
 634     _handler_table_offset    = _native_invokers_offset;
 635     _nul_chk_table_offset    = _handler_table_offset;
 636 #if INCLUDE_JVMCI
 637     _speculations_offset     = _nul_chk_table_offset;
 638     _jvmci_data_offset       = _speculations_offset;
 639     _nmethod_end_offset      = _jvmci_data_offset;
 640 #else
 641     _nmethod_end_offset      = _nul_chk_table_offset;
 642 #endif
 643     _compile_id              = compile_id;
 644     _comp_level              = CompLevel_none;
 645     _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
 646     _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
 647     _osr_entry_point         = NULL;
 648     _exception_cache         = NULL;
 649     _pc_desc_container.reset_to(NULL);
 650     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 651 


 652     _scopes_data_begin = (address) this + scopes_data_offset;
 653     _deopt_handler_begin = (address) this + deoptimize_offset;
 654     _deopt_mh_handler_begin = (address) this + deoptimize_mh_offset;
 655 
 656     code_buffer->copy_code_and_locs_to(this);
 657     code_buffer->copy_values_to(this);
 658 
 659     clear_unloading_state();
 660 
 661     Universe::heap()->register_nmethod(this);
 662     debug_only(Universe::heap()->verify_nmethod(this));
 663 
 664     CodeCache::commit(this);
 665   }
 666 
 667   if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
 668     ttyLocker ttyl;  // keep the following output all in one block
 669     // This output goes directly to the tty, not the compiler log.
 670     // To enable tools to match it up with the compilation activity,
 671     // be sure to tag this tty output with the compile ID.

 718   int compile_id,
 719   int entry_bci,
 720   CodeOffsets* offsets,
 721   int orig_pc_offset,
 722   DebugInformationRecorder* debug_info,
 723   Dependencies* dependencies,
 724   CodeBuffer *code_buffer,
 725   int frame_size,
 726   OopMapSet* oop_maps,
 727   ExceptionHandlerTable* handler_table,
 728   ImplicitExceptionTable* nul_chk_table,
 729   AbstractCompiler* compiler,
 730   int comp_level,
 731   const GrowableArrayView<RuntimeStub*>& native_invokers
 732 #if INCLUDE_JVMCI
 733   , char* speculations,
 734   int speculations_len,
 735   int jvmci_data_size
 736 #endif
 737   )
 738   : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
 739   _is_unloading_state(0),
 740   _native_receiver_sp_offset(in_ByteSize(-1)),
 741   _native_basic_lock_sp_offset(in_ByteSize(-1))
 742 {
 743   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 744   {
 745     debug_only(NoSafepointVerifier nsv;)
 746     assert_locked_or_safepoint(CodeCache_lock);
 747 
 748     _deopt_handler_begin = (address) this;
 749     _deopt_mh_handler_begin = (address) this;
 750 
 751     init_defaults();
 752     _entry_bci               = entry_bci;
 753     _compile_id              = compile_id;
 754     _comp_level              = comp_level;
 755     _orig_pc_offset          = orig_pc_offset;
 756     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 757 
 758     // Section offsets

 838     debug_only(Universe::heap()->verify_nmethod(this));
 839 
 840     CodeCache::commit(this);
 841 
 842     // Copy contents of ExceptionHandlerTable to nmethod
 843     handler_table->copy_to(this);
 844     nul_chk_table->copy_to(this);
 845 
 846 #if INCLUDE_JVMCI
 847     // Copy speculations to nmethod
 848     if (speculations_size() != 0) {
 849       memcpy(speculations_begin(), speculations, speculations_len);
 850     }
 851 #endif
 852 
 853     // We use the entry point information to find out whether a method is
 854     // static or non-static.
 855     assert(compiler->is_c2() || compiler->is_jvmci() ||
 856            _method->is_static() == (entry_point() == _verified_entry_point),
 857            " entry points must be same for static methods and vice versa");






 858   }
 859 }
 860 






 861 // Print a short set of xml attributes to identify this nmethod.  The
 862 // output should be embedded in some other element.
 863 void nmethod::log_identity(xmlStream* log) const {
 864   log->print(" compile_id='%d'", compile_id());
 865   const char* nm_kind = compile_kind();
 866   if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
 867   log->print(" compiler='%s'", compiler_name());
 868   if (TieredCompilation) {
 869     log->print(" level='%d'", comp_level());
 870   }
 871 #if INCLUDE_JVMCI
 872   if (jvmci_nmethod_data() != NULL) {
 873     const char* jvmci_name = jvmci_nmethod_data()->name();
 874     if (jvmci_name != NULL) {
 875       log->print(" jvmci_mirror_name='");
 876       log->text("%s", jvmci_name);
 877       log->print("'");
 878     }
 879   }
 880 #endif

1069 void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
1070   // re-patch all oop-bearing instructions, just in case some oops moved
1071   RelocIterator iter(this, begin, end);
1072   while (iter.next()) {
1073     if (iter.type() == relocInfo::oop_type) {
1074       oop_Relocation* reloc = iter.oop_reloc();
1075       if (initialize_immediates && reloc->oop_is_immediate()) {
1076         oop* dest = reloc->oop_addr();
1077         initialize_immediate_oop(dest, cast_from_oop<jobject>(*dest));
1078       }
1079       // Refresh the oop-related bits of this instruction.
1080       reloc->fix_oop_relocation();
1081     } else if (iter.type() == relocInfo::metadata_type) {
1082       metadata_Relocation* reloc = iter.metadata_reloc();
1083       reloc->fix_metadata_relocation();
1084     }
1085   }
1086 }
1087 
1088 









































1089 void nmethod::verify_clean_inline_caches() {
1090   assert(CompiledICLocker::is_safe(this), "mt unsafe call");
1091 
1092   ResourceMark rm;
1093   RelocIterator iter(this, oops_reloc_begin());
1094   while(iter.next()) {
1095     switch(iter.type()) {
1096       case relocInfo::virtual_call_type:
1097       case relocInfo::opt_virtual_call_type: {
1098         CompiledIC *ic = CompiledIC_at(&iter);
1099         // It is OK to look up references to zombies here
1100         CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
1101         assert(cb != NULL, "destination not in CodeBlob?");
1102         nmethod* nm = cb->as_nmethod_or_null();
1103         if( nm != NULL ) {
1104           // Verify that inline caches pointing to both zombie and not_entrant methods are clean
1105           if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1106             assert(ic->is_clean(), "IC should be clean");
1107           }
1108         }

1118           if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1119             assert(csc->is_clean(), "IC should be clean");
1120           }
1121         }
1122         break;
1123       }
1124       default:
1125         break;
1126     }
1127   }
1128 }
1129 
1130 // This is a private interface with the sweeper.
1131 void nmethod::mark_as_seen_on_stack() {
1132   assert(is_alive(), "Must be an alive method");
1133   // Set the traversal mark to ensure that the sweeper does 2
1134   // cleaning passes before moving to zombie.
1135   set_stack_traversal_mark(NMethodSweeper::traversal_count());
1136 }
1137 
















1138 // Tell if a non-entrant method can be converted to a zombie (i.e.,
1139 // there are no activations on the stack and it is not in use by the VM
1140 // or by the ServiceThread).
1141 bool nmethod::can_convert_to_zombie() {
1142   // Note that this is called when the sweeper has observed the nmethod to be
1143   // not_entrant. However, with concurrent code cache unloading, the state
1144   // might have moved on to unloaded if it is_unloading(), due to racing
1145   // concurrent GC threads.
1146   assert(is_not_entrant() || is_unloading() ||
1147          !Thread::current()->is_Code_cache_sweeper_thread(),
1148          "must be a non-entrant method if called from sweeper");
1149 
1150   // Since the nmethod sweeper only does a partial sweep, the sweeper's traversal
1151   // count can be greater than the stack traversal count before it hits the
1152   // nmethod for the second time.
1153   // If an is_unloading() nmethod is still not_entrant, then it is not safe to
1154   // convert it to zombie due to GC unloading interactions. However, if it
1155   // has become unloaded, then it is okay to convert such nmethods to zombie.
1156   return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() &&
1157          !is_locked_by_vm() && (!is_unloading() || is_unloaded());
1158 }
1159 
1160 void nmethod::inc_decompile_count() {
1161   if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1162   // Could be gated by ProfileTraps, but do not bother...
1163   Method* m = method();
1164   if (m == NULL)  return;
1165   MethodData* mdo = m->method_data();
1166   if (mdo == NULL)  return;
1167   // There is a benign race here.  See comments in methodData.hpp.
1168   mdo->inc_decompile_count();
1169 }
1170 
1171 bool nmethod::try_transition(int new_state_int) {
1172   signed char new_state = new_state_int;
1173 #ifdef ASSERT
1174   if (new_state != unloaded) {
1175     assert_lock_strong(CompiledMethod_lock);
1176   }
1177 #endif

1829 void nmethod::clear_unloading_state() {
1830   uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
1831   RawAccess<MO_RELAXED>::store(&_is_unloading_state, state);
1832 }
1833 
1834 
1835 // This is called at the end of the strong tracing/marking phase of a
1836 // GC to unload an nmethod if it contains otherwise unreachable
1837 // oops.
1838 
1839 void nmethod::do_unloading(bool unloading_occurred) {
1840   // Make sure the oops are ready to receive visitors
1841   assert(!is_zombie() && !is_unloaded(),
1842          "should not call follow on zombie or unloaded nmethod");
1843 
1844   if (is_unloading()) {
1845     make_unloaded();
1846   } else {
1847     guarantee(unload_nmethod_caches(unloading_occurred),
1848               "Should not need transition stubs");




1849   }
1850 }
1851 
1852 void nmethod::oops_do(OopClosure* f, bool allow_dead) {
1853   // Make sure the oops are ready to receive visitors
1854   assert(allow_dead || is_alive(), "should not call follow on dead nmethod");
1855 
1856   // Prevent extra code cache walk for platforms that don't have immediate oops.
1857   if (relocInfo::mustIterateImmediateOopsInCode()) {
1858     RelocIterator iter(this, oops_reloc_begin());
1859 
1860     while (iter.next()) {
1861       if (iter.type() == relocInfo::oop_type ) {
1862         oop_Relocation* r = iter.oop_reloc();
1863         // In this loop, we must only follow those oops directly embedded in
1864         // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1865         assert(1 == (r->oop_is_immediate()) +
1866                (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1867                "oop must be found in exactly one place");
1868         if (r->oop_is_immediate() && r->oop_value() != NULL) {
1869           f->do_oop(r->oop_addr());
1870         }
1871       }
1872     }
1873   }
1874 
1875   // Scopes
1876   // This includes oop constants not inlined in the code stream.
1877   for (oop* p = oops_begin(); p < oops_end(); p++) {
1878     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1879     f->do_oop(p);
1880   }
1881 }
1882 







1883 nmethod* volatile nmethod::_oops_do_mark_nmethods;
1884 
1885 void nmethod::oops_do_log_change(const char* state) {
1886   LogTarget(Trace, gc, nmethod) lt;
1887   if (lt.is_enabled()) {
1888     LogStream ls(lt);
1889     CompileTask::print(&ls, this, state, true /* short_form */);
1890   }
1891 }
1892 
1893 bool nmethod::oops_do_try_claim() {
1894   if (oops_do_try_claim_weak_request()) {
1895     nmethod* result = oops_do_try_add_to_list_as_weak_done();
1896     assert(result == NULL, "adding to global list as weak done must always succeed.");
1897     return true;
1898   }
1899   return false;
1900 }
1901 
1902 bool nmethod::oops_do_try_claim_weak_request() {

  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm.h"
  27 #include "asm/assembler.inline.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/compiledIC.hpp"
  30 #include "code/compiledMethod.inline.hpp"
  31 #include "code/dependencies.hpp"
  32 #include "code/nativeInst.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "compiler/abstractCompiler.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/compileLog.hpp"
  38 #include "compiler/compilerDirectives.hpp"
  39 #include "compiler/directivesParser.hpp"
  40 #include "compiler/disassembler.hpp"
  41 #include "compiler/oopMap.hpp"
  42 #include "gc/shared/barrierSet.hpp"
  43 #include "gc/shared/barrierSetNMethod.hpp"
  44 #include "gc/shared/collectedHeap.hpp"
  45 #include "interpreter/bytecode.hpp"
  46 #include "logging/log.hpp"
  47 #include "logging/logStream.hpp"
  48 #include "memory/allocation.inline.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "memory/universe.hpp"
  51 #include "oops/access.inline.hpp"
  52 #include "oops/klass.inline.hpp"
  53 #include "oops/method.inline.hpp"
  54 #include "oops/methodData.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "oops/weakHandle.inline.hpp"
  57 #include "prims/jvmtiImpl.hpp"
  58 #include "prims/jvmtiThreadState.hpp"
  59 #include "prims/methodHandles.hpp"
  60 #include "runtime/atomic.hpp"
  61 #include "runtime/deoptimization.hpp"
  62 #include "runtime/flags/flagSetting.hpp"
  63 #include "runtime/frame.inline.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/jniHandles.inline.hpp"
  66 #include "runtime/orderAccess.hpp"
  67 #include "runtime/os.hpp"
  68 #include "runtime/safepointVerifiers.hpp"
  69 #include "runtime/serviceThread.hpp"
  70 #include "runtime/sharedRuntime.hpp"
  71 #include "runtime/signature.hpp"
  72 #include "runtime/sweeper.hpp"
  73 #include "runtime/threadWXSetters.inline.hpp"
  74 #include "runtime/vmThread.hpp"
  75 #include "utilities/align.hpp"
  76 #include "utilities/copy.hpp"

 408   int nsize = align_up(pcs_size,   oopSize);
 409   if ((nsize % sizeof(PcDesc)) != 0) {
 410     nsize = pcs_size + sizeof(PcDesc);
 411   }
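  // For example, on a 64-bit build where oopSize == 8 and sizeof(PcDesc) == 16,
  // pcs_size == 40 aligns up to 40, which is not a multiple of 16, so nsize becomes
  // 40 + 16 == 56: still oop-aligned, with room left for one extra (padding) PcDesc.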
 412   assert((nsize % oopSize) == 0, "correct alignment");
 413   return nsize;
 414 }
 415 
 416 
 417 int nmethod::total_size() const {
 418   return
 419     consts_size()        +
 420     insts_size()         +
 421     stub_size()          +
 422     scopes_data_size()   +
 423     scopes_pcs_size()    +
 424     handler_table_size() +
 425     nul_chk_table_size();
 426 }
 427 




 428 const char* nmethod::compile_kind() const {
 429   if (is_osr_method())     return "osr";
 430   if (method() != NULL && is_native_method()) {
 431     if (method()->is_continuation_enter_intrinsic()) {
 432       return "cnt";
 433     }
 434     return "c2n";
 435   }
 436   return NULL;
 437 }
 438 
 439 // Fill in default values for various flag fields
 440 void nmethod::init_defaults() {
 441   _state                      = not_installed;
 442   _has_flushed_dependencies   = 0;
 443   _lock_count                 = 0;
 444   _stack_traversal_mark       = 0;
 445   _load_reported              = false; // jvmti state
 446   _unload_reported            = false;
 447 
 448 #ifdef ASSERT
 449   _oops_are_stale             = false;
 450 #endif
 451 
 452   _oops_do_mark_link       = NULL;
 453   _osr_link                = NULL;
 454 #if INCLUDE_RTM_OPT
 455   _rtm_state               = NoRTM;
 456 #endif
 457 }
 458 
 459 nmethod* nmethod::new_native_nmethod(const methodHandle& method,
 460   int compile_id,
 461   CodeBuffer *code_buffer,
 462   int vep_offset,
 463   int frame_complete,
 464   int frame_size,
 465   ByteSize basic_lock_owner_sp_offset,
 466   ByteSize basic_lock_sp_offset,
 467   OopMapSet* oop_maps,
 468   int exception_handler) {
 469   code_buffer->finalize_oop_references(method);
 470   // create nmethod
 471   nmethod* nm = NULL;
 472   {
 473     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 474     int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
 475 
 476     CodeOffsets offsets;
 477     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 478     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 479     if (exception_handler != -1) {
 480       offsets.set_value(CodeOffsets::Exceptions, exception_handler);
 481     }
 482     nm = new (native_nmethod_size, CompLevel_none)
 483     nmethod(method(), compiler_none, native_nmethod_size,
 484             compile_id, &offsets,
 485             code_buffer, frame_size,
 486             basic_lock_owner_sp_offset,
 487             basic_lock_sp_offset,
 488             oop_maps);
 489     NOT_PRODUCT(if (nm != NULL)  native_nmethod_stats.note_native_nmethod(nm));
 490   }
 491 
 492   if (nm != NULL) {
 493     // verify nmethod
 494     debug_only(nm->verify();) // might block
 495 
 496     nm->log_new_nmethod();
 497   }
 498   return nm;
 499 }
 500 
 501 nmethod* nmethod::new_nmethod(const methodHandle& method,

 582           Klass* klass = deps.context_type();
 583           if (klass == NULL) {
 584             continue;  // ignore things like evol_method
 585           }
 586           // record this nmethod as dependent on this klass
 587           InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
 588         }
 589       }
 590       NOT_PRODUCT(if (nm != NULL)  note_java_nmethod(nm));
 591     }
 592   }
 593   // Do verification and logging outside CodeCache_lock.
 594   if (nm != NULL) {
 595     // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
 596     DEBUG_ONLY(nm->verify();)
 597     nm->log_new_nmethod();
 598   }
 599   return nm;
 600 }
 601 
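  // Small helper closure that counts every oop location it is asked to visit;
  // used below to tally how many oops an nmethod references.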
 602   class CountOops : public OopClosure {
 603   private:
 604     int _nr_oops;
 605   public:
 606     CountOops() : _nr_oops(0) {}
 607     int nr_oops() const { return _nr_oops; }
 608 
 609 
 610     virtual void do_oop(oop* o) { _nr_oops++; }
 611     virtual void do_oop(narrowOop* o) { _nr_oops++; }
 612   };
 613 
 614 // For native wrappers
 615 nmethod::nmethod(
 616   Method* method,
 617   CompilerType type,
 618   int nmethod_size,
 619   int compile_id,
 620   CodeOffsets* offsets,
 621   CodeBuffer* code_buffer,
 622   int frame_size,
 623   ByteSize basic_lock_owner_sp_offset,
 624   ByteSize basic_lock_sp_offset,
 625   OopMapSet* oop_maps )
 626   : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
 627   _is_unloading_state(0),
 628   _native_receiver_sp_offset(basic_lock_owner_sp_offset),
 629   _native_basic_lock_sp_offset(basic_lock_sp_offset)
 630 {
 631   {
 632     int scopes_data_offset   = 0;
 633     int deoptimize_offset    = 0;
 634     int deoptimize_mh_offset = 0;
 635 
 636     debug_only(NoSafepointVerifier nsv;)
 637     assert_locked_or_safepoint(CodeCache_lock);
 638 
 639     init_defaults();
 640     _entry_bci               = InvocationEntryBci;
 641     // We have no exception handler or deopt handler; make the
 642     // values something that will never match a pc, like the nmethod vtable entry.
 643     _exception_offset        = 0;
 644     _orig_pc_offset          = 0;
 645     _marking_cycle           = 0;
 646 
 647     _consts_offset           = data_offset();
 648     _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());
 649     _oops_offset             = data_offset();
 650     _metadata_offset         = _oops_offset         + align_up(code_buffer->total_oop_size(), oopSize);
 651     scopes_data_offset       = _metadata_offset     + align_up(code_buffer->total_metadata_size(), wordSize);
 652     _scopes_pcs_offset       = scopes_data_offset;
 653     _dependencies_offset     = _scopes_pcs_offset;
 654     _native_invokers_offset  = _dependencies_offset;
 655     _handler_table_offset    = _native_invokers_offset;
 656     _nul_chk_table_offset    = _handler_table_offset;
 657 #if INCLUDE_JVMCI
 658     _speculations_offset     = _nul_chk_table_offset;
 659     _jvmci_data_offset       = _speculations_offset;
 660     _nmethod_end_offset      = _jvmci_data_offset;
 661 #else
 662     _nmethod_end_offset      = _nul_chk_table_offset;
 663 #endif
 664     _compile_id              = compile_id;
 665     _comp_level              = CompLevel_none;
 666     _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
 667     _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
 668     _osr_entry_point         = NULL;
 669     _exception_cache         = NULL;
 670     _pc_desc_container.reset_to(NULL);
 671     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 672 
 673     _exception_offset        = code_offset()          + offsets->value(CodeOffsets::Exceptions);
 674 
 675     _scopes_data_begin = (address) this + scopes_data_offset;
 676     _deopt_handler_begin = (address) this + deoptimize_offset;
 677     _deopt_mh_handler_begin = (address) this + deoptimize_mh_offset;
 678 
 679     code_buffer->copy_code_and_locs_to(this);
 680     code_buffer->copy_values_to(this);
 681 
 682     clear_unloading_state();
 683 
 684     Universe::heap()->register_nmethod(this);
 685     debug_only(Universe::heap()->verify_nmethod(this));
 686 
 687     CodeCache::commit(this);
 688   }
 689 
 690   if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
 691     ttyLocker ttyl;  // keep the following output all in one block
 692     // This output goes directly to the tty, not the compiler log.
 693     // To enable tools to match it up with the compilation activity,
 694     // be sure to tag this tty output with the compile ID.

 741   int compile_id,
 742   int entry_bci,
 743   CodeOffsets* offsets,
 744   int orig_pc_offset,
 745   DebugInformationRecorder* debug_info,
 746   Dependencies* dependencies,
 747   CodeBuffer *code_buffer,
 748   int frame_size,
 749   OopMapSet* oop_maps,
 750   ExceptionHandlerTable* handler_table,
 751   ImplicitExceptionTable* nul_chk_table,
 752   AbstractCompiler* compiler,
 753   int comp_level,
 754   const GrowableArrayView<RuntimeStub*>& native_invokers
 755 #if INCLUDE_JVMCI
 756   , char* speculations,
 757   int speculations_len,
 758   int jvmci_data_size
 759 #endif
 760   )
 761   : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
 762   _is_unloading_state(0),
 763   _native_receiver_sp_offset(in_ByteSize(-1)),
 764   _native_basic_lock_sp_offset(in_ByteSize(-1))
 765 {
 766   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 767   {
 768     debug_only(NoSafepointVerifier nsv;)
 769     assert_locked_or_safepoint(CodeCache_lock);
 770 
 771     _deopt_handler_begin = (address) this;
 772     _deopt_mh_handler_begin = (address) this;
 773 
 774     init_defaults();
 775     _entry_bci               = entry_bci;
 776     _compile_id              = compile_id;
 777     _comp_level              = comp_level;
 778     _orig_pc_offset          = orig_pc_offset;
 779     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 780 
 781     // Section offsets

 861     debug_only(Universe::heap()->verify_nmethod(this));
 862 
 863     CodeCache::commit(this);
 864 
 865     // Copy contents of ExceptionHandlerTable to nmethod
 866     handler_table->copy_to(this);
 867     nul_chk_table->copy_to(this);
 868 
 869 #if INCLUDE_JVMCI
 870     // Copy speculations to nmethod
 871     if (speculations_size() != 0) {
 872       memcpy(speculations_begin(), speculations, speculations_len);
 873     }
 874 #endif
 875 
 876     // We use the entry point information to find out whether a method is
 877     // static or non-static.
 878     assert(compiler->is_c2() || compiler->is_jvmci() ||
 879            _method->is_static() == (entry_point() == _verified_entry_point),
 880            " entry points must be same for static methods and vice versa");
 881 
 882     {
 883       CountOops count;
 884       this->oops_do(&count, false, true);
 885       _nr_oops = count.nr_oops();
 886     }
 887   }
 888 }
 889 
 890 int nmethod::count_oops() {
 891   CountOops count;
 892   this->oops_do(&count, false, true);
 893   return count.nr_oops();
 894 }
 895 
 896 // Print a short set of xml attributes to identify this nmethod.  The
 897 // output should be embedded in some other element.
 898 void nmethod::log_identity(xmlStream* log) const {
 899   log->print(" compile_id='%d'", compile_id());
 900   const char* nm_kind = compile_kind();
 901   if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
 902   log->print(" compiler='%s'", compiler_name());
 903   if (TieredCompilation) {
 904     log->print(" level='%d'", comp_level());
 905   }
 906 #if INCLUDE_JVMCI
 907   if (jvmci_nmethod_data() != NULL) {
 908     const char* jvmci_name = jvmci_nmethod_data()->name();
 909     if (jvmci_name != NULL) {
 910       log->print(" jvmci_mirror_name='");
 911       log->text("%s", jvmci_name);
 912       log->print("'");
 913     }
 914   }
 915 #endif

1104 void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
1105   // re-patch all oop-bearing instructions, just in case some oops moved
1106   RelocIterator iter(this, begin, end);
1107   while (iter.next()) {
1108     if (iter.type() == relocInfo::oop_type) {
1109       oop_Relocation* reloc = iter.oop_reloc();
1110       if (initialize_immediates && reloc->oop_is_immediate()) {
1111         oop* dest = reloc->oop_addr();
1112         initialize_immediate_oop(dest, cast_from_oop<jobject>(*dest));
1113       }
1114       // Refresh the oop-related bits of this instruction.
1115       reloc->fix_oop_relocation();
1116     } else if (iter.type() == relocInfo::metadata_type) {
1117       metadata_Relocation* reloc = iter.metadata_reloc();
1118       reloc->fix_metadata_relocation();
1119     }
1120   }
1121 }
1122 
1123 
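// Rewrite this nmethod's call sites so that returning into it deoptimizes:
// the post-call NOP after each virtual, opt-virtual and static call is turned into
// a deopt instruction (calls into stubs or the runtime may legitimately lack such a NOP).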
1124 void nmethod::make_deoptimized() {
1125   assert (method() == NULL || can_be_deoptimized(), "");
1126 
1127   CompiledICLocker ml(this);
1128   assert(CompiledICLocker::is_safe(this), "mt unsafe call");
1129   ResourceMark rm;
1130   RelocIterator iter(this, oops_reloc_begin());
1131 
1132   while(iter.next()) {
1133 
1134     switch(iter.type()) {
1135       case relocInfo::virtual_call_type:
1136       case relocInfo::opt_virtual_call_type: {
1137         CompiledIC *ic = CompiledIC_at(&iter);
1138         address pc = ic->end_of_call();
1139         NativePostCallNop* nop = nativePostCallNop_at(pc);
1140         if (nop != NULL) {
1141           nop->make_deopt();
1142         }
1143         assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
1144         break;
1145       }
1146       case relocInfo::static_call_type: {
1147         CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
1148         address pc = csc->end_of_call();
1149         NativePostCallNop* nop = nativePostCallNop_at(pc);
1150         //tty->print_cr(" - static pc %p", pc);
1151         if (nop != NULL) {
1152           nop->make_deopt();
1153         }
1154         // We can't assert here; there are some calls to stubs / runtime
1155         // that have reloc data but don't have a post-call NOP.
1156         //assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
1157         break;
1158       }
1159       default:
1160         break;
1161     }
1162   }
1163 }
1164 
1165 void nmethod::verify_clean_inline_caches() {
1166   assert(CompiledICLocker::is_safe(this), "mt unsafe call");
1167 
1168   ResourceMark rm;
1169   RelocIterator iter(this, oops_reloc_begin());
1170   while(iter.next()) {
1171     switch(iter.type()) {
1172       case relocInfo::virtual_call_type:
1173       case relocInfo::opt_virtual_call_type: {
1174         CompiledIC *ic = CompiledIC_at(&iter);
1175         // It is OK to look up references to zombies here
1176         CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
1177         assert(cb != NULL, "destination not in CodeBlob?");
1178         nmethod* nm = cb->as_nmethod_or_null();
1179         if( nm != NULL ) {
1180           // Verify that inline caches pointing to both zombie and not_entrant methods are clean
1181           if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1182             assert(ic->is_clean(), "IC should be clean");
1183           }
1184         }

1194           if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1195             assert(csc->is_clean(), "IC should be clean");
1196           }
1197         }
1198         break;
1199       }
1200       default:
1201         break;
1202     }
1203   }
1204 }
1205 
1206 // This is a private interface with the sweeper.
1207 void nmethod::mark_as_seen_on_stack() {
1208   assert(is_alive(), "Must be an alive method");
1209   // Set the traversal mark to ensure that the sweeper does 2
1210   // cleaning passes before moving to zombie.
1211   set_stack_traversal_mark(NMethodSweeper::traversal_count());
1212 }
1213 
1214 void nmethod::mark_as_maybe_on_continuation() {
1215   assert(is_alive(), "Must be an alive method");
1216   _marking_cycle = CodeCache::marking_cycle();
1217 }
1218 
1219 bool nmethod::is_not_on_continuation_stack() {
1220   // Odd marking cycles are found during concurrent marking. Even numbers are found
1221   // in nmethods that are marked when GC is inactive (e.g. nmethod entry barriers during
1222   // normal execution). Therefore we align up by 2 so that nmethods encountered during
1223   // concurrent marking are treated as if they were encountered in the inactive phase
1224   // after that concurrent GC. Each GC increments the marking cycle twice - once when
1225   // it starts and once when it ends. So we can only be sure there are no new continuations
1226   // when they have not been encountered from before a GC to after a GC.
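  // For example, if _marking_cycle was recorded as 5 during concurrent marking,
  // align_up(5, 2) == 6, so this only returns true once marking_cycle() reaches 8,
  // i.e. after at least one further GC has both started and finished.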
1227   return CodeCache::marking_cycle() >= align_up(_marking_cycle, 2) + 2;
1228 }
1229 
1230 // Tell if a non-entrant method can be converted to a zombie (i.e.,
1231 // there are no activations on the stack and it is not in use by the VM
1232 // or by the ServiceThread).
1233 bool nmethod::can_convert_to_zombie() {
1234   // Note that this is called when the sweeper has observed the nmethod to be
1235   // not_entrant. However, with concurrent code cache unloading, the state
1236   // might have moved on to unloaded if it is_unloading(), due to racing
1237   // concurrent GC threads.
1238   assert(is_not_entrant() || is_unloading() ||
1239          !Thread::current()->is_Code_cache_sweeper_thread(),
1240          "must be a non-entrant method if called from sweeper");
1241 
1242   // Since the nmethod sweeper only does a partial sweep, the sweeper's traversal
1243   // count can be greater than the stack traversal count before it hits the
1244   // nmethod for the second time.
1245   // If an is_unloading() nmethod is still not_entrant, then it is not safe to
1246   // convert it to zombie due to GC unloading interactions. However, if it
1247   // has become unloaded, then it is okay to convert such nmethods to zombie.
1248   return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() && is_not_on_continuation_stack() &&
1249           !is_locked_by_vm() && (!is_unloading() || is_unloaded());
1250 }
1251 
1252 void nmethod::inc_decompile_count() {
1253   if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1254   // Could be gated by ProfileTraps, but do not bother...
1255   Method* m = method();
1256   if (m == NULL)  return;
1257   MethodData* mdo = m->method_data();
1258   if (mdo == NULL)  return;
1259   // There is a benign race here.  See comments in methodData.hpp.
1260   mdo->inc_decompile_count();
1261 }
1262 
1263 bool nmethod::try_transition(int new_state_int) {
1264   signed char new_state = new_state_int;
1265 #ifdef ASSERT
1266   if (new_state != unloaded) {
1267     assert_lock_strong(CompiledMethod_lock);
1268   }
1269 #endif

1921 void nmethod::clear_unloading_state() {
1922   uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
1923   RawAccess<MO_RELAXED>::store(&_is_unloading_state, state);
1924 }
1925 
1926 
1927 // This is called at the end of the strong tracing/marking phase of a
1928 // GC to unload an nmethod if it contains otherwise unreachable
1929 // oops.
1930 
1931 void nmethod::do_unloading(bool unloading_occurred) {
1932   // Make sure the oops are ready to receive visitors
1933   assert(!is_zombie() && !is_unloaded(),
1934          "should not call follow on zombie or unloaded nmethod");
1935 
1936   if (is_unloading()) {
1937     make_unloaded();
1938   } else {
1939     guarantee(unload_nmethod_caches(unloading_occurred),
1940               "Should not need transition stubs");
1941     BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
1942     if (bs_nm != NULL) {
1943       bs_nm->disarm(this);
1944     }
1945   }
1946 }
1947 
1948 void nmethod::oops_do(OopClosure* f, bool allow_dead, bool allow_null) {
1949   // Make sure the oops are ready to receive visitors
1950   assert(allow_dead || is_alive(), "should not call follow on dead nmethod");
1951 
1952   // Prevent extra code cache walk for platforms that don't have immediate oops.
1953   if (relocInfo::mustIterateImmediateOopsInCode()) {
1954     RelocIterator iter(this, oops_reloc_begin());
1955 
1956     while (iter.next()) {
1957       if (iter.type() == relocInfo::oop_type ) {
1958         oop_Relocation* r = iter.oop_reloc();
1959         // In this loop, we must only follow those oops directly embedded in
1960         // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1961         assert(1 == (r->oop_is_immediate()) +
1962                (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1963                "oop must be found in exactly one place");
1964         if (r->oop_is_immediate() && (r->oop_value() != NULL || allow_null)) {
1965           f->do_oop(r->oop_addr());
1966         }
1967       }
1968     }
1969   }
1970 
1971   // Scopes
1972   // This includes oop constants not inlined in the code stream.
1973   for (oop* p = oops_begin(); p < oops_end(); p++) {
1974     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1975     f->do_oop(p);
1976   }
1977 }
1978 
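// Visit this nmethod's oops, note that it may be referenced from a continuation
// (by recording the current marking cycle), and disarm its entry barrier.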
1979 void nmethod::follow_nmethod(OopIterateClosure* cl) {
1980   oops_do(cl);
1981   mark_as_maybe_on_continuation();
1982   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
1983   bs_nm->disarm(this);
1984 }
1985 
1986 nmethod* volatile nmethod::_oops_do_mark_nmethods;
1987 
1988 void nmethod::oops_do_log_change(const char* state) {
1989   LogTarget(Trace, gc, nmethod) lt;
1990   if (lt.is_enabled()) {
1991     LogStream ls(lt);
1992     CompileTask::print(&ls, this, state, true /* short_form */);
1993   }
1994 }
1995 
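// Try to claim this nmethod for oops_do processing: raise a weak request and, if that
// succeeds, immediately push it onto the global list in the "weak done" state.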
1996 bool nmethod::oops_do_try_claim() {
1997   if (oops_do_try_claim_weak_request()) {
1998     nmethod* result = oops_do_try_add_to_list_as_weak_done();
1999     assert(result == NULL, "adding to global list as weak done must always succeed.");
2000     return true;
2001   }
2002   return false;
2003 }
2004 
2005 bool nmethod::oops_do_try_claim_weak_request() {