< prev index next >

src/hotspot/share/code/nmethod.cpp

Print this page

  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/dependencies.hpp"
  30 #include "code/nativeInst.hpp"
  31 #include "code/nmethod.inline.hpp"
  32 #include "code/scopeDesc.hpp"

  33 #include "compiler/abstractCompiler.hpp"
  34 #include "compiler/compilationLog.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compileLog.hpp"
  37 #include "compiler/compileTask.hpp"
  38 #include "compiler/compilerDirectives.hpp"
  39 #include "compiler/compilerOracle.hpp"
  40 #include "compiler/directivesParser.hpp"
  41 #include "compiler/disassembler.hpp"
  42 #include "compiler/oopMap.inline.hpp"
  43 #include "gc/shared/barrierSet.hpp"
  44 #include "gc/shared/barrierSetNMethod.hpp"
  45 #include "gc/shared/classUnloadingContext.hpp"
  46 #include "gc/shared/collectedHeap.hpp"
  47 #include "interpreter/bytecode.inline.hpp"
  48 #include "jvm.h"
  49 #include "logging/log.hpp"
  50 #include "logging/logStream.hpp"
  51 #include "memory/allocation.inline.hpp"
  52 #include "memory/resourceArea.hpp"

 767 
 768 void nmethod::clear_inline_caches() {
 769   assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint");
 770   RelocIterator iter(this);
 771   while (iter.next()) {
 772     iter.reloc()->clear_inline_cache();
 773   }
 774 }
 775 
 776 #ifdef ASSERT
 777 // Check class_loader is alive for this bit of metadata.
 778 class CheckClass : public MetadataClosure {
 779   void do_metadata(Metadata* md) {
 780     Klass* klass = nullptr;
 781     if (md->is_klass()) {
 782       klass = ((Klass*)md);
 783     } else if (md->is_method()) {
 784       klass = ((Method*)md)->method_holder();
 785     } else if (md->is_methodData()) {
 786       klass = ((MethodData*)md)->method()->method_holder();


 787     } else {
 788       md->print();
 789       ShouldNotReachHere();
 790     }
 791     assert(klass->is_loader_alive(), "must be alive");
 792   }
 793 };
 794 #endif // ASSERT
 795 
 796 
// Helper: unconditionally clear the metadata cached in the given inline
// cache. Used while cleaning ICs whose referenced metadata may be dead.
 797 static void clean_ic_if_metadata_is_dead(CompiledIC *ic) {
 798   ic->clean_metadata();
 799 }
 800 
 801 // Clean references to unloaded nmethods at addr from this one, which is not unloaded.
 802 template <typename CallsiteT>
 803 static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, nmethod* from,
 804                                          bool clean_all) {
 805   CodeBlob* cb = CodeCache::find_blob(callsite->destination());
 806   if (!cb->is_nmethod()) {

1115     debug_only(nm->verify();) // might block
1116 
1117     nm->log_new_nmethod();
1118   }
1119   return nm;
1120 }
1121 
1122 nmethod* nmethod::new_nmethod(const methodHandle& method,
1123   int compile_id,
1124   int entry_bci,
1125   CodeOffsets* offsets,
1126   int orig_pc_offset,
1127   DebugInformationRecorder* debug_info,
1128   Dependencies* dependencies,
1129   CodeBuffer* code_buffer, int frame_size,
1130   OopMapSet* oop_maps,
1131   ExceptionHandlerTable* handler_table,
1132   ImplicitExceptionTable* nul_chk_table,
1133   AbstractCompiler* compiler,
1134   CompLevel comp_level

1135 #if INCLUDE_JVMCI
1136   , char* speculations,
1137   int speculations_len,
1138   JVMCINMethodData* jvmci_data
1139 #endif
1140 )
1141 {
1142   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1143   code_buffer->finalize_oop_references(method);
1144   // create nmethod
1145   nmethod* nm = nullptr;
1146   int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1147 #if INCLUDE_JVMCI
1148     if (compiler->is_jvmci()) {
1149       nmethod_size += align_up(jvmci_data->size(), oopSize);
1150     }
1151 #endif
1152 
1153   int immutable_data_size =
1154       adjust_pcs_size(debug_info->pcs_size())

1159     + align_up(speculations_len                  , oopSize)
1160 #endif
1161     + align_up(debug_info->data_size()           , oopSize);
1162 
1163   // First, allocate space for immutable data in C heap.
1164   address immutable_data = nullptr;
1165   if (immutable_data_size > 0) {
1166     immutable_data = (address)os::malloc(immutable_data_size, mtCode);
1167     if (immutable_data == nullptr) {
1168       vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data");
1169       return nullptr;
1170     }
1171   }
1172   {
1173     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1174 
1175     nm = new (nmethod_size, comp_level)
1176     nmethod(method(), compiler->type(), nmethod_size, immutable_data_size,
1177             compile_id, entry_bci, immutable_data, offsets, orig_pc_offset,
1178             debug_info, dependencies, code_buffer, frame_size, oop_maps,
1179             handler_table, nul_chk_table, compiler, comp_level
1180 #if INCLUDE_JVMCI
1181             , speculations,
1182             speculations_len,
1183             jvmci_data
1184 #endif
1185             );
1186 
1187     if (nm != nullptr) {
1188       // To make dependency checking during class loading fast, record
1189       // the nmethod dependencies in the classes it is dependent on.
1190       // This allows the dependency checking code to simply walk the
1191       // class hierarchy above the loaded class, checking only nmethods
1192       // which are dependent on those classes.  The slow way is to
1193       // check every nmethod for dependencies which makes it linear in
1194       // the number of methods compiled.  For applications with a lot
1195       // classes the slow way is too slow.
1196       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1197         if (deps.type() == Dependencies::call_site_target_value) {
1198           // CallSite dependencies are managed on per-CallSite instance basis.
1199           oop call_site = deps.argument_oop(0);
1200           MethodHandles::add_dependent_nmethod(call_site, nm);
1201         } else {
1202           InstanceKlass* ik = deps.context_type();
1203           if (ik == nullptr) {
1204             continue;  // ignore things like evol_method
1205           }
1206           // record this nmethod as dependent on this klass
1207           ik->add_dependent_nmethod(nm);
1208         }
1209       }
1210       NOT_PRODUCT(if (nm != nullptr)  note_java_nmethod(nm));
1211     }
1212   }
1213   // Do verification and logging outside CodeCache_lock.
1214   if (nm != nullptr) {












1215     // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
1216     DEBUG_ONLY(nm->verify();)
1217     nm->log_new_nmethod();
1218   }
1219   return nm;
1220 }
1221 
1222 // Fill in default values for various fields
1223 void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) {
1224   // avoid uninitialized fields, even for short time periods
1225   _exception_cache            = nullptr;
1226   _gc_data                    = nullptr;
1227   _oops_do_mark_link          = nullptr;
1228   _compiled_ic_data           = nullptr;
1229 
1230   _is_unloading_state         = 0;
1231   _state                      = not_installed;
1232 
     // Flag bits default to cleared; the compiler/installation code sets
     // them later as needed.
1233   _has_unsafe_access          = 0;
1234   _has_method_handle_invokes  = 0;
1235   _has_wide_vectors           = 0;
1236   _has_monitors               = 0;
1237   _has_scoped_access          = 0;
1238   _has_flushed_dependencies   = 0;
1239   _is_unlinked                = 0;
1240   _load_reported              = 0; // jvmti state


1241 

1242   _deoptimization_status      = not_marked;
1243 
1244   // SECT_CONSTS is first in code buffer so the offset should be 0.
1245   int consts_offset = code_buffer->total_offset_of(code_buffer->consts());
1246   assert(consts_offset == 0, "const_offset: %d", consts_offset);
1247 
     // Stub section offset: content start plus the stub section's offset
     // within the code buffer.
1248   _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
1249 
     // Entry offsets are stored narrowed to uint16_t; CHECKED_CAST
     // presumably asserts the values fit — TODO confirm macro semantics.
1250   CHECKED_CAST(_entry_offset,              uint16_t, (offsets->value(CodeOffsets::Entry)));
1251   CHECKED_CAST(_verified_entry_offset,     uint16_t, (offsets->value(CodeOffsets::Verified_Entry)));
1252 
1253   _skipped_instructions_size = code_buffer->total_skipped_instructions_size();
1254 }
1255 
1256 // Post initialization
1257 void nmethod::post_init() {
1258   clear_unloading_state();
1259 
1260   finalize_relocations();
1261 

1293 
1294     _osr_entry_point         = nullptr;
1295     _pc_desc_container       = nullptr;
1296     _entry_bci               = InvocationEntryBci;
1297     _compile_id              = compile_id;
1298     _comp_level              = CompLevel_none;
1299     _compiler_type           = type;
1300     _orig_pc_offset          = 0;
1301     _num_stack_arg_slots     = _method->constMethod()->num_stack_arg_slots();
1302 
1303     if (offsets->value(CodeOffsets::Exceptions) != -1) {
1304       // Continuation enter intrinsic
1305       _exception_offset      = code_offset() + offsets->value(CodeOffsets::Exceptions);
1306     } else {
1307       _exception_offset      = 0;
1308     }
1309     // Native wrappers do not have deopt handlers. Make the values
1310     // something that will never match a pc like the nmethod vtable entry
1311     _deopt_handler_offset    = 0;
1312     _deopt_mh_handler_offset = 0;


1313     _unwind_handler_offset   = 0;
1314 
1315     CHECKED_CAST(_metadata_offset, uint16_t, (align_up(code_buffer->total_oop_size(), oopSize)));
1316     int data_end_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
1317 #if INCLUDE_JVMCI
1318     // jvmci_data_size is 0 in native wrapper but we need to set offset
1319     // to correctly calculate metadata_end address
1320     CHECKED_CAST(_jvmci_data_offset, uint16_t, data_end_offset);
1321 #endif
1322     assert((data_offset() + data_end_offset) <= nmethod_size, "wrong nmethod's size: %d < %d", nmethod_size, (data_offset() + data_end_offset));
1323 
1324     // native wrapper does not have read-only data but we need unique not null address
1325     _immutable_data          = data_end();
1326     _immutable_data_size     = 0;
1327     _nul_chk_table_offset    = 0;
1328     _handler_table_offset    = 0;
1329     _scopes_pcs_offset       = 0;
1330     _scopes_data_offset      = 0;
1331 #if INCLUDE_JVMCI
1332     _speculations_offset     = 0;

1353     // This is both handled in decode2(), called via print_code() -> decode()
1354     if (PrintNativeNMethods) {
1355       tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------");
1356       print_code();
1357       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1358 #if defined(SUPPORT_DATA_STRUCTS)
1359       if (AbstractDisassembler::show_structs()) {
1360         if (oop_maps != nullptr) {
1361           tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning
1362           oop_maps->print_on(tty);
1363           tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1364         }
1365       }
1366 #endif
1367     } else {
1368       print(); // print the header part only.
1369     }
1370 #if defined(SUPPORT_DATA_STRUCTS)
1371     if (AbstractDisassembler::show_structs()) {
1372       if (PrintRelocations) {
1373         print_relocations();
1374         tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1375       }
1376     }
1377 #endif
1378     if (xtty != nullptr) {
1379       xtty->tail("print_native_nmethod");
1380     }
1381   }
1382 }
1383 
1384 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
1385   return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
1386 }
1387 
1388 void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () {
1389   // Try MethodNonProfiled and MethodProfiled.
1390   void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled);
1391   if (return_value != nullptr || !allow_NonNMethod_space) return return_value;
1392   // Try NonNMethod or give up.
1393   return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);

1396 // For normal JIT compiled code
1397 nmethod::nmethod(
1398   Method* method,
1399   CompilerType type,
1400   int nmethod_size,
1401   int immutable_data_size,
1402   int compile_id,
1403   int entry_bci,
1404   address immutable_data,
1405   CodeOffsets* offsets,
1406   int orig_pc_offset,
1407   DebugInformationRecorder* debug_info,
1408   Dependencies* dependencies,
1409   CodeBuffer *code_buffer,
1410   int frame_size,
1411   OopMapSet* oop_maps,
1412   ExceptionHandlerTable* handler_table,
1413   ImplicitExceptionTable* nul_chk_table,
1414   AbstractCompiler* compiler,
1415   CompLevel comp_level

1416 #if INCLUDE_JVMCI
1417   , char* speculations,
1418   int speculations_len,
1419   JVMCINMethodData* jvmci_data
1420 #endif
1421   )
1422   : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1423              offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
1424   _deoptimization_generation(0),
1425   _gc_epoch(CodeCache::gc_epoch()),
1426   _method(method),
1427   _osr_link(nullptr)
1428 {
1429   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1430   {
1431     debug_only(NoSafepointVerifier nsv;)
1432     assert_locked_or_safepoint(CodeCache_lock);
1433 
1434     init_defaults(code_buffer, offsets);


1435 
1436     _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
1437     _entry_bci       = entry_bci;
1438     _compile_id      = compile_id;
1439     _comp_level      = comp_level;
1440     _compiler_type   = type;
1441     _orig_pc_offset  = orig_pc_offset;
1442 
1443     _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 0 : _method->constMethod()->num_stack_arg_slots();
1444 
1445     set_ctable_begin(header_begin() + content_offset());
1446 
1447 #if INCLUDE_JVMCI
1448     if (compiler->is_jvmci()) {
1449       // JVMCI might not produce any stub sections
1450       if (offsets->value(CodeOffsets::Exceptions) != -1) {
1451         _exception_offset        = code_offset() + offsets->value(CodeOffsets::Exceptions);
1452       } else {
1453         _exception_offset        = -1;
1454       }

1546 #if INCLUDE_JVMCI
1547     // Copy speculations to nmethod
1548     if (speculations_size() != 0) {
1549       memcpy(speculations_begin(), speculations, speculations_len);
1550     }
1551 #endif
1552 
1553     post_init();
1554 
1555     // we use the information of entry points to find out if a method is
1556     // static or non static
1557     assert(compiler->is_c2() || compiler->is_jvmci() ||
1558            _method->is_static() == (entry_point() == verified_entry_point()),
1559            " entry points must be same for static methods and vice versa");
1560   }
1561 }
1562 
1563 // Print a short set of xml attributes to identify this nmethod.  The
1564 // output should be embedded in some other element.
1565 void nmethod::log_identity(xmlStream* log) const {
1566   log->print(" compile_id='%d'", compile_id());

     // compile_kind() may be null; only emit the attribute when present.
1567   const char* nm_kind = compile_kind();
1568   if (nm_kind != nullptr)  log->print(" compile_kind='%s'", nm_kind);
1569   log->print(" compiler='%s'", compiler_name());
1570   if (TieredCompilation) {
1571     log->print(" level='%d'", comp_level());
1572   }
1573 #if INCLUDE_JVMCI
     // If JVMCI produced this nmethod and gave it a mirror name, include it.
     // NOTE(review): the name goes through text() rather than print() —
     // presumably for XML escaping; confirm against xmlStream.
1574   if (jvmci_nmethod_data() != nullptr) {
1575     const char* jvmci_name = jvmci_nmethod_data()->name();
1576     if (jvmci_name != nullptr) {
1577       log->print(" jvmci_mirror_name='");
1578       log->text("%s", jvmci_name);
1579       log->print("'");
1580     }
1581   }
1582 #endif
1583 }
1584 
1585 
1586 #define LOG_OFFSET(log, name)                    \
1587   if (p2i(name##_end()) - p2i(name##_begin())) \
1588     log->print(" " XSTR(name) "_offset='" INTX_FORMAT "'"    , \
1589                p2i(name##_begin()) - p2i(this))
1590 
1591 

1672       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1673       if (oop_maps() != nullptr) {
1674         tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning
1675         oop_maps()->print_on(tty);
1676         tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1677       }
1678     }
1679 #endif
1680   } else {
1681     print(); // print the header part only.
1682   }
1683 
1684 #if defined(SUPPORT_DATA_STRUCTS)
1685   if (AbstractDisassembler::show_structs()) {
1686     methodHandle mh(Thread::current(), _method);
1687     if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDebugInfo)) {
1688       print_scopes();
1689       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1690     }
1691     if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommandEnum::PrintRelocations)) {
1692       print_relocations();
1693       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1694     }
1695     if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDependencies)) {
1696       print_dependencies_on(tty);
1697       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1698     }
1699     if (printmethod || PrintExceptionHandlers) {
1700       print_handler_table();
1701       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1702       print_nul_chk_table();
1703       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1704     }
1705 
1706     if (printmethod) {
1707       print_recorded_oops();
1708       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1709       print_recorded_metadata();
1710       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1711     }
1712   }

1920   Atomic::store(&_gc_epoch, CodeCache::gc_epoch());
1921 }
1922 
1923 bool nmethod::is_maybe_on_stack() {
1924   // If the condition below is true, it means that the nmethod was found to
1925   // be alive the previous completed marking cycle.
1926   return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
1927 }
1928 
1929 void nmethod::inc_decompile_count() {
1930   if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1931   // Could be gated by ProfileTraps, but do not bother...
1932   Method* m = method();
1933   if (m == nullptr)  return;
1934   MethodData* mdo = m->method_data();
1935   if (mdo == nullptr)  return;
1936   // There is a benign race here.  See comments in methodData.hpp.
1937   mdo->inc_decompile_count();
1938 }
1939 








// Attempt to advance _state to new_state. State transitions are strictly
// monotonic: returns false (and does nothing) if the nmethod is already at
// or past the requested state. Caller must hold NMethodState_lock
// (asserted); the store is atomic so lock-free readers observe a
// consistent value.
1940 bool nmethod::try_transition(signed char new_state_int) {
1941   signed char new_state = new_state_int;
1942   assert_lock_strong(NMethodState_lock);
1943   signed char old_state = _state;
1944   if (old_state >= new_state) {
1945     // Ensure monotonicity of transitions.
1946     return false;
1947   }
1948   Atomic::store(&_state, new_state);
1949   return true;
1950 }
1951 
1952 void nmethod::invalidate_osr_method() {
1953   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1954   // Remove from list of active nmethods
1955   if (method() != nullptr) {
1956     method()->method_holder()->remove_osr_nmethod(this);
1957   }
1958 }
1959 

1965                        os::current_thread_id());
1966       log_identity(xtty);
1967       xtty->stamp();
1968       xtty->end_elem();
1969     }
1970   }
1971 
1972   CompileTask::print_ul(this, "made not entrant");
1973   if (PrintCompilation) {
1974     print_on(tty, "made not entrant");
1975   }
1976 }
1977 
1978 void nmethod::unlink_from_method() {
1979   if (method() != nullptr) {
1980     method()->unlink_code(this);
1981   }
1982 }
1983 
1984 // Invalidate code
1985 bool nmethod::make_not_entrant() {
1986   // This can be called while the system is already at a safepoint which is ok
1987   NoSafepointVerifier nsv;
1988 
1989   if (is_unloading()) {
1990     // If the nmethod is unloading, then it is already not entrant through
1991     // the nmethod entry barriers. No need to do anything; GC will unload it.
1992     return false;
1993   }
1994 
1995   if (Atomic::load(&_state) == not_entrant) {
1996     // Avoid taking the lock if already in required state.
1997     // This is safe from races because the state is an end-state,
1998     // which the nmethod cannot back out of once entered.
1999     // No need for fencing either.
2000     return false;
2001   }
2002 
2003   {
2004     // Enter critical section.  Does not block for safepoint.
2005     ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);

2028     }
2029 
2030     BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2031     if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2032       // If nmethod entry barriers are not supported, we won't mark
2033       // nmethods as on-stack when they become on-stack. So we
2034       // degrade to a less accurate flushing strategy, for now.
2035       mark_as_maybe_on_stack();
2036     }
2037 
2038     // Change state
2039     bool success = try_transition(not_entrant);
2040     assert(success, "Transition can't fail");
2041 
2042     // Log the transition once
2043     log_state_change();
2044 
2045     // Remove nmethod from method.
2046     unlink_from_method();
2047 







2048   } // leave critical region under NMethodState_lock
2049 
2050 #if INCLUDE_JVMCI
2051   // Invalidate can't occur while holding the Patching lock
2052   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2053   if (nmethod_data != nullptr) {
2054     nmethod_data->invalidate_nmethod_mirror(this);
2055   }
2056 #endif
2057 
2058 #ifdef ASSERT
2059   if (is_osr_method() && method() != nullptr) {
2060     // Make sure osr nmethod is invalidated, i.e. not on the list
2061     bool found = method()->method_holder()->remove_osr_nmethod(this);
2062     assert(!found, "osr nmethod should have been invalidated");
2063   }
2064 #endif
2065 
2066   return true;
2067 }

2167         MethodHandles::clean_dependency_context(call_site);
2168       } else {
2169         InstanceKlass* ik = deps.context_type();
2170         if (ik == nullptr) {
2171           continue;  // ignore things like evol_method
2172         }
2173         // During GC liveness of dependee determines class that needs to be updated.
2174         // The GC may clean dependency contexts concurrently and in parallel.
2175         ik->clean_dependency_context();
2176       }
2177     }
2178   }
2179 }
2180 
// Record the finished compilation on its CompileTask (success flag and
// size statistics), then issue post-compile notifications: the JVMTI
// compiled-method-load event, the compilation log, and optional printing
// controlled by the task's directive.
2181 void nmethod::post_compiled_method(CompileTask* task) {
2182   task->mark_success();
2183   task->set_nm_content_size(content_size());
2184   task->set_nm_insts_size(insts_size());
2185   task->set_nm_total_size(total_size());
2186 






2187   // JVMTI -- compiled method notification (must be done outside lock)
2188   post_compiled_method_load_event();
2189 
2190   if (CompilationLog::log() != nullptr) {
2191     CompilationLog::log()->log_nmethod(JavaThread::current(), this);
2192   }
2193 
2194   const DirectiveSet* directive = task->directive();
2195   maybe_print_nmethod(directive);
2196 }
2197 
2198 // ------------------------------------------------------------------
2199 // post_compiled_method_load_event
2200 // new method for install_code() path
2201 // Transfer information from compilation to jvmti
2202 void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
2203   // This is a bad time for a safepoint.  We don't want
2204   // this nmethod to get unloaded while we're queueing the event.
2205   NoSafepointVerifier nsv;
2206 

3105                                              p2i(nul_chk_table_end()),
3106                                              nul_chk_table_size());
3107   if (handler_table_size() > 0) st->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3108                                              p2i(handler_table_begin()),
3109                                              p2i(handler_table_end()),
3110                                              handler_table_size());
3111   if (scopes_pcs_size   () > 0) st->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3112                                              p2i(scopes_pcs_begin()),
3113                                              p2i(scopes_pcs_end()),
3114                                              scopes_pcs_size());
3115   if (scopes_data_size  () > 0) st->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3116                                              p2i(scopes_data_begin()),
3117                                              p2i(scopes_data_end()),
3118                                              scopes_data_size());
3119 #if INCLUDE_JVMCI
3120   if (speculations_size () > 0) st->print_cr(" speculations   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3121                                              p2i(speculations_begin()),
3122                                              p2i(speculations_end()),
3123                                              speculations_size());
3124 #endif



3125 }
3126 
3127 void nmethod::print_code() {
3128   ResourceMark m;
3129   ttyLocker ttyl;
3130   // Call the specialized decode method of this class.
3131   decode(tty);
3132 }
3133 
3134 #ifndef PRODUCT  // called InstanceKlass methods are available only then. Declared as PRODUCT_RETURN
3135 
3136 void nmethod::print_dependencies_on(outputStream* out) {
3137   ResourceMark rm;
3138   stringStream st;
3139   st.print_cr("Dependencies:");
3140   for (Dependencies::DepStream deps(this); deps.next(); ) {
3141     deps.print_dependency(&st);
3142     InstanceKlass* ctxk = deps.context_type();
3143     if (ctxk != nullptr) {
3144       if (ctxk->is_dependent_nmethod(this)) {

3204   st->print("scopes:");
3205   if (scopes_pcs_begin() < scopes_pcs_end()) {
3206     st->cr();
3207     for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3208       if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
3209         continue;
3210 
3211       ScopeDesc* sd = scope_desc_at(p->real_pc(this));
3212       while (sd != nullptr) {
3213         sd->print_on(st, p);  // print output ends with a newline
3214         sd = sd->sender();
3215       }
3216     }
3217   } else {
3218     st->print_cr(" <list empty>");
3219   }
3220 }
3221 #endif
3222 
3223 #ifndef PRODUCT  // RelocIterator does support printing only then.
// Dump all relocation entries of this nmethod to tty. Only available in
// non-PRODUCT builds (see enclosing #ifndef PRODUCT), since RelocIterator
// supports printing only there.
3224 void nmethod::print_relocations() {
3225   ResourceMark m;       // in case methods get printed via the debugger
3226   tty->print_cr("relocations:");
3227   RelocIterator iter(this);
3228   iter.print();
3229 }
3230 #endif
3231 
3232 void nmethod::print_pcs_on(outputStream* st) {
3233   ResourceMark m;       // in case methods get printed via debugger
3234   st->print("pc-bytecode offsets:");
3235   if (scopes_pcs_begin() < scopes_pcs_end()) {
3236     st->cr();
3237     for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3238       p->print_on(st, this);  // print output ends with a newline
3239     }
3240   } else {
3241     st->print_cr(" <list empty>");
3242   }
3243 }
3244 
// Print this nmethod's exception handler table, passing code_begin() as
// the base address for resolving entries.
3245 void nmethod::print_handler_table() {
3246   ExceptionHandlerTable(this).print(code_begin());
3247 }
3248 

3563           else obj->print_value_on(&st);
3564           st.print(")");
3565           return st.as_string();
3566         }
3567         case relocInfo::metadata_type: {
3568           stringStream st;
3569           metadata_Relocation* r = iter.metadata_reloc();
3570           Metadata* obj = r->metadata_value();
3571           st.print("metadata(");
3572           if (obj == nullptr) st.print("nullptr");
3573           else obj->print_value_on(&st);
3574           st.print(")");
3575           return st.as_string();
3576         }
3577         case relocInfo::runtime_call_type:
3578         case relocInfo::runtime_call_w_cp_type: {
3579           stringStream st;
3580           st.print("runtime_call");
3581           CallRelocation* r = (CallRelocation*)iter.reloc();
3582           address dest = r->destination();










3583           CodeBlob* cb = CodeCache::find_blob(dest);
3584           if (cb != nullptr) {
3585             st.print(" %s", cb->name());
3586           } else {
3587             ResourceMark rm;
3588             const int buflen = 1024;
3589             char* buf = NEW_RESOURCE_ARRAY(char, buflen);
3590             int offset;
3591             if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) {
3592               st.print(" %s", buf);
3593               if (offset != 0) {
3594                 st.print("+%d", offset);
3595               }
3596             }
3597           }
3598           return st.as_string();
3599         }
3600         case relocInfo::virtual_call_type: {
3601           stringStream st;
3602           st.print_raw("virtual_call");

  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/dependencies.hpp"
  30 #include "code/nativeInst.hpp"
  31 #include "code/nmethod.inline.hpp"
  32 #include "code/scopeDesc.hpp"
  33 #include "code/SCCache.hpp"
  34 #include "compiler/abstractCompiler.hpp"
  35 #include "compiler/compilationLog.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/compileLog.hpp"
  38 #include "compiler/compileTask.hpp"
  39 #include "compiler/compilerDirectives.hpp"
  40 #include "compiler/compilerOracle.hpp"
  41 #include "compiler/directivesParser.hpp"
  42 #include "compiler/disassembler.hpp"
  43 #include "compiler/oopMap.inline.hpp"
  44 #include "gc/shared/barrierSet.hpp"
  45 #include "gc/shared/barrierSetNMethod.hpp"
  46 #include "gc/shared/classUnloadingContext.hpp"
  47 #include "gc/shared/collectedHeap.hpp"
  48 #include "interpreter/bytecode.inline.hpp"
  49 #include "jvm.h"
  50 #include "logging/log.hpp"
  51 #include "logging/logStream.hpp"
  52 #include "memory/allocation.inline.hpp"
  53 #include "memory/resourceArea.hpp"

 768 
// Walk all relocations of this nmethod and clear every inline cache.
// Patching ICs is only safe at a safepoint (asserted below).
 769 void nmethod::clear_inline_caches() {
 770   assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint");
 771   RelocIterator iter(this);
 772   while (iter.next()) {
 773     iter.reloc()->clear_inline_cache();
 774   }
 775 }
 776 
 777 #ifdef ASSERT
 778 // Check class_loader is alive for this bit of metadata.
 779 class CheckClass : public MetadataClosure {
 780   void do_metadata(Metadata* md) {
     // Resolve the metadata to the Klass whose class loader keeps it
     // alive: a Klass directly, or the holder of a Method, MethodData, or
     // MethodCounters.
 781     Klass* klass = nullptr;
 782     if (md->is_klass()) {
 783       klass = ((Klass*)md);
 784     } else if (md->is_method()) {
 785       klass = ((Method*)md)->method_holder();
 786     } else if (md->is_methodData()) {
 787       klass = ((MethodData*)md)->method()->method_holder();
 788     } else if (md->is_methodCounters()) {
 789       klass = ((MethodCounters*)md)->method()->method_holder();
 790     } else {
     // Unknown metadata kind — dump it and fail in debug builds.
 791       md->print();
 792       ShouldNotReachHere();
 793     }
 794     assert(klass->is_loader_alive(), "must be alive");
 795   }
 796 };
 797 #endif // ASSERT
 798 
 799 
// Unconditionally delegates to CompiledIC::clean_metadata(); despite the
// name, any check for dead metadata is performed by the callee.
static void clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  ic->clean_metadata();
}
 803 
 804 // Clean references to unloaded nmethods at addr from this one, which is not unloaded.
 805 template <typename CallsiteT>
 806 static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, nmethod* from,
 807                                          bool clean_all) {
 808   CodeBlob* cb = CodeCache::find_blob(callsite->destination());
 809   if (!cb->is_nmethod()) {

1118     debug_only(nm->verify();) // might block
1119 
1120     nm->log_new_nmethod();
1121   }
1122   return nm;
1123 }
1124 
1125 nmethod* nmethod::new_nmethod(const methodHandle& method,
1126   int compile_id,
1127   int entry_bci,
1128   CodeOffsets* offsets,
1129   int orig_pc_offset,
1130   DebugInformationRecorder* debug_info,
1131   Dependencies* dependencies,
1132   CodeBuffer* code_buffer, int frame_size,
1133   OopMapSet* oop_maps,
1134   ExceptionHandlerTable* handler_table,
1135   ImplicitExceptionTable* nul_chk_table,
1136   AbstractCompiler* compiler,
1137   CompLevel comp_level
1138   , SCCEntry* scc_entry
1139 #if INCLUDE_JVMCI
1140   , char* speculations,
1141   int speculations_len,
1142   JVMCINMethodData* jvmci_data
1143 #endif
1144 )
1145 {
1146   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1147   code_buffer->finalize_oop_references(method);
1148   // create nmethod
1149   nmethod* nm = nullptr;
1150   int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1151 #if INCLUDE_JVMCI
1152     if (compiler->is_jvmci()) {
1153       nmethod_size += align_up(jvmci_data->size(), oopSize);
1154     }
1155 #endif
1156 
1157   int immutable_data_size =
1158       adjust_pcs_size(debug_info->pcs_size())

1163     + align_up(speculations_len                  , oopSize)
1164 #endif
1165     + align_up(debug_info->data_size()           , oopSize);
1166 
1167   // First, allocate space for immutable data in C heap.
1168   address immutable_data = nullptr;
1169   if (immutable_data_size > 0) {
1170     immutable_data = (address)os::malloc(immutable_data_size, mtCode);
1171     if (immutable_data == nullptr) {
1172       vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data");
1173       return nullptr;
1174     }
1175   }
1176   {
1177     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1178 
1179     nm = new (nmethod_size, comp_level)
1180     nmethod(method(), compiler->type(), nmethod_size, immutable_data_size,
1181             compile_id, entry_bci, immutable_data, offsets, orig_pc_offset,
1182             debug_info, dependencies, code_buffer, frame_size, oop_maps,
1183             handler_table, nul_chk_table, compiler, comp_level, scc_entry
1184 #if INCLUDE_JVMCI
1185             , speculations,
1186             speculations_len,
1187             jvmci_data
1188 #endif
1189             );
1190 
1191     if (nm != nullptr) {
1192       // To make dependency checking during class loading fast, record
1193       // the nmethod dependencies in the classes it is dependent on.
1194       // This allows the dependency checking code to simply walk the
1195       // class hierarchy above the loaded class, checking only nmethods
1196       // which are dependent on those classes.  The slow way is to
1197       // check every nmethod for dependencies which makes it linear in
1198       // the number of methods compiled.  For applications with a lot
1199       // classes the slow way is too slow.
1200       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1201         if (deps.type() == Dependencies::call_site_target_value) {
1202           // CallSite dependencies are managed on per-CallSite instance basis.
1203           oop call_site = deps.argument_oop(0);
1204           MethodHandles::add_dependent_nmethod(call_site, nm);
1205         } else {
1206           InstanceKlass* ik = deps.context_type();
1207           if (ik == nullptr) {
1208             continue;  // ignore things like evol_method
1209           }
1210           // record this nmethod as dependent on this klass
1211           ik->add_dependent_nmethod(nm);
1212         }
1213       }
1214       NOT_PRODUCT(if (nm != nullptr)  note_java_nmethod(nm));
1215     }
1216   }
1217   // Do verification and logging outside CodeCache_lock.
1218   if (nm != nullptr) {
1219 
1220 #ifdef ASSERT
1221     LogTarget(Debug, scc, nmethod) log;
1222     if (log.is_enabled()) {
1223       LogStream out(log);
1224       out.print_cr("== new_nmethod 2");
1225       FlagSetting fs(PrintRelocations, true);
1226       nm->print(&out);
1227       nm->decode(&out);
1228     }
1229 #endif
1230 
1231     // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
1232     DEBUG_ONLY(nm->verify();)
1233     nm->log_new_nmethod();
1234   }
1235   return nm;
1236 }
1237 
// Fill in default values for various fields
void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) {
  // avoid uninitialized fields, even for short time periods
  _exception_cache            = nullptr;
  _gc_data                    = nullptr;
  _oops_do_mark_link          = nullptr;
  _compiled_ic_data           = nullptr;

  _is_unloading_state         = 0;
  _state                      = not_installed;

  // Flag bits all start cleared; they are set later as code properties
  // are discovered/recorded.
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _has_wide_vectors           = 0;
  _has_monitors               = 0;
  _has_scoped_access          = 0;
  _has_flushed_dependencies   = 0;
  _is_unlinked                = 0;
  _load_reported              = 0; // jvmti state
  _preloaded                  = 0;
  _has_clinit_barriers        = 0;

  _used                       = false;
  _deoptimization_status      = not_marked;

  // SECT_CONSTS is first in code buffer so the offset should be 0.
  int consts_offset = code_buffer->total_offset_of(code_buffer->consts());
  assert(consts_offset == 0, "const_offset: %d", consts_offset);

  // Stub section offset is relative to the blob's content start.
  _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());

  // Entry point offsets are stored narrowed to uint16_t; CHECKED_CAST
  // guards the narrowing.
  CHECKED_CAST(_entry_offset,              uint16_t, (offsets->value(CodeOffsets::Entry)));
  CHECKED_CAST(_verified_entry_offset,     uint16_t, (offsets->value(CodeOffsets::Verified_Entry)));

  _skipped_instructions_size = code_buffer->total_skipped_instructions_size();
}
1274 
1275 // Post initialization
1276 void nmethod::post_init() {
1277   clear_unloading_state();
1278 
1279   finalize_relocations();
1280 

1312 
1313     _osr_entry_point         = nullptr;
1314     _pc_desc_container       = nullptr;
1315     _entry_bci               = InvocationEntryBci;
1316     _compile_id              = compile_id;
1317     _comp_level              = CompLevel_none;
1318     _compiler_type           = type;
1319     _orig_pc_offset          = 0;
1320     _num_stack_arg_slots     = _method->constMethod()->num_stack_arg_slots();
1321 
1322     if (offsets->value(CodeOffsets::Exceptions) != -1) {
1323       // Continuation enter intrinsic
1324       _exception_offset      = code_offset() + offsets->value(CodeOffsets::Exceptions);
1325     } else {
1326       _exception_offset      = 0;
1327     }
1328     // Native wrappers do not have deopt handlers. Make the values
1329     // something that will never match a pc like the nmethod vtable entry
1330     _deopt_handler_offset    = 0;
1331     _deopt_mh_handler_offset = 0;
1332     _scc_entry               = nullptr;
1333     _method_profiling_count  = 0;
1334     _unwind_handler_offset   = 0;
1335 
1336     CHECKED_CAST(_metadata_offset, uint16_t, (align_up(code_buffer->total_oop_size(), oopSize)));
1337     int data_end_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
1338 #if INCLUDE_JVMCI
1339     // jvmci_data_size is 0 in native wrapper but we need to set offset
1340     // to correctly calculate metadata_end address
1341     CHECKED_CAST(_jvmci_data_offset, uint16_t, data_end_offset);
1342 #endif
1343     assert((data_offset() + data_end_offset) <= nmethod_size, "wrong nmethod's size: %d < %d", nmethod_size, (data_offset() + data_end_offset));
1344 
1345     // native wrapper does not have read-only data but we need unique not null address
1346     _immutable_data          = data_end();
1347     _immutable_data_size     = 0;
1348     _nul_chk_table_offset    = 0;
1349     _handler_table_offset    = 0;
1350     _scopes_pcs_offset       = 0;
1351     _scopes_data_offset      = 0;
1352 #if INCLUDE_JVMCI
1353     _speculations_offset     = 0;

1374     // This is both handled in decode2(), called via print_code() -> decode()
1375     if (PrintNativeNMethods) {
1376       tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------");
1377       print_code();
1378       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1379 #if defined(SUPPORT_DATA_STRUCTS)
1380       if (AbstractDisassembler::show_structs()) {
1381         if (oop_maps != nullptr) {
1382           tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning
1383           oop_maps->print_on(tty);
1384           tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1385         }
1386       }
1387 #endif
1388     } else {
1389       print(); // print the header part only.
1390     }
1391 #if defined(SUPPORT_DATA_STRUCTS)
1392     if (AbstractDisassembler::show_structs()) {
1393       if (PrintRelocations) {
1394         print_relocations_on(tty);
1395         tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1396       }
1397     }
1398 #endif
1399     if (xtty != nullptr) {
1400       xtty->tail("print_native_nmethod");
1401     }
1402   }
1403 }
1404 
// Allocate storage for an nmethod from the code cache; the code heap is
// selected from the compilation level (profiled vs. non-profiled).
// Returns nullptr on allocation failure (hence the throw() specification).
void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
}
1408 
1409 void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () {
1410   // Try MethodNonProfiled and MethodProfiled.
1411   void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled);
1412   if (return_value != nullptr || !allow_NonNMethod_space) return return_value;
1413   // Try NonNMethod or give up.
1414   return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);

1417 // For normal JIT compiled code
1418 nmethod::nmethod(
1419   Method* method,
1420   CompilerType type,
1421   int nmethod_size,
1422   int immutable_data_size,
1423   int compile_id,
1424   int entry_bci,
1425   address immutable_data,
1426   CodeOffsets* offsets,
1427   int orig_pc_offset,
1428   DebugInformationRecorder* debug_info,
1429   Dependencies* dependencies,
1430   CodeBuffer *code_buffer,
1431   int frame_size,
1432   OopMapSet* oop_maps,
1433   ExceptionHandlerTable* handler_table,
1434   ImplicitExceptionTable* nul_chk_table,
1435   AbstractCompiler* compiler,
1436   CompLevel comp_level
1437   , SCCEntry* scc_entry
1438 #if INCLUDE_JVMCI
1439   , char* speculations,
1440   int speculations_len,
1441   JVMCINMethodData* jvmci_data
1442 #endif
1443   )
1444   : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1445              offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
1446   _deoptimization_generation(0),
1447   _gc_epoch(CodeCache::gc_epoch()),
1448   _method(method),
1449   _osr_link(nullptr)
1450 {
1451   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1452   {
1453     debug_only(NoSafepointVerifier nsv;)
1454     assert_locked_or_safepoint(CodeCache_lock);
1455 
1456     init_defaults(code_buffer, offsets);
1457     _scc_entry      = scc_entry;
1458     _method_profiling_count  = 0;
1459 
1460     _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
1461     _entry_bci       = entry_bci;
1462     _compile_id      = compile_id;
1463     _comp_level      = comp_level;
1464     _compiler_type   = type;
1465     _orig_pc_offset  = orig_pc_offset;
1466 
1467     _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 0 : _method->constMethod()->num_stack_arg_slots();
1468 
1469     set_ctable_begin(header_begin() + content_offset());
1470 
1471 #if INCLUDE_JVMCI
1472     if (compiler->is_jvmci()) {
1473       // JVMCI might not produce any stub sections
1474       if (offsets->value(CodeOffsets::Exceptions) != -1) {
1475         _exception_offset        = code_offset() + offsets->value(CodeOffsets::Exceptions);
1476       } else {
1477         _exception_offset        = -1;
1478       }

1570 #if INCLUDE_JVMCI
1571     // Copy speculations to nmethod
1572     if (speculations_size() != 0) {
1573       memcpy(speculations_begin(), speculations, speculations_len);
1574     }
1575 #endif
1576 
1577     post_init();
1578 
1579     // we use the information of entry points to find out if a method is
1580     // static or non static
1581     assert(compiler->is_c2() || compiler->is_jvmci() ||
1582            _method->is_static() == (entry_point() == verified_entry_point()),
1583            " entry points must be same for static methods and vice versa");
1584   }
1585 }
1586 
1587 // Print a short set of xml attributes to identify this nmethod.  The
1588 // output should be embedded in some other element.
1589 void nmethod::log_identity(xmlStream* log) const {
1590   assert(log->inside_attrs_or_error(), "printing attributes");
1591   log->print(" code_compile_id='%d'", compile_id());
1592   const char* nm_kind = compile_kind();
1593   if (nm_kind != nullptr)  log->print(" code_compile_kind='%s'", nm_kind);
1594   log->print(" code_compiler='%s'", compiler_name());
1595   if (TieredCompilation) {
1596     log->print(" code_compile_level='%d'", comp_level());
1597   }
1598 #if INCLUDE_JVMCI
1599   if (jvmci_nmethod_data() != nullptr) {
1600     const char* jvmci_name = jvmci_nmethod_data()->name();
1601     if (jvmci_name != nullptr) {
1602       log->print(" jvmci_mirror_name='");
1603       log->text("%s", jvmci_name);
1604       log->print("'");
1605     }
1606   }
1607 #endif
1608 }
1609 
1610 
// Emit " <name>_offset='<n>'" on 'log' when the named section is non-empty
// (i.e. its begin() and end() differ).  NOTE: expands to a braceless 'if',
// so do not use it as the unbraced body of an outer if/else.
#define LOG_OFFSET(log, name)                    \
  if (p2i(name##_end()) - p2i(name##_begin())) \
    log->print(" " XSTR(name) "_offset='" INTX_FORMAT "'"    , \
               p2i(name##_begin()) - p2i(this))
1615 
1616 

1697       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1698       if (oop_maps() != nullptr) {
1699         tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning
1700         oop_maps()->print_on(tty);
1701         tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1702       }
1703     }
1704 #endif
1705   } else {
1706     print(); // print the header part only.
1707   }
1708 
1709 #if defined(SUPPORT_DATA_STRUCTS)
1710   if (AbstractDisassembler::show_structs()) {
1711     methodHandle mh(Thread::current(), _method);
1712     if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDebugInfo)) {
1713       print_scopes();
1714       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1715     }
1716     if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommandEnum::PrintRelocations)) {
1717       print_relocations_on(tty);
1718       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1719     }
1720     if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDependencies)) {
1721       print_dependencies_on(tty);
1722       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1723     }
1724     if (printmethod || PrintExceptionHandlers) {
1725       print_handler_table();
1726       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1727       print_nul_chk_table();
1728       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1729     }
1730 
1731     if (printmethod) {
1732       print_recorded_oops();
1733       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1734       print_recorded_metadata();
1735       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1736     }
1737   }

1945   Atomic::store(&_gc_epoch, CodeCache::gc_epoch());
1946 }
1947 
// Conservative query: returns true if this nmethod may still have
// activations on some stack, based on the GC epoch it was last observed in.
bool nmethod::is_maybe_on_stack() {
  // If the condition below is true, it means that the nmethod was found to
  // be alive the previous completed marking cycle.
  return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
}
1953 
1954 void nmethod::inc_decompile_count() {
1955   if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1956   // Could be gated by ProfileTraps, but do not bother...
1957   Method* m = method();
1958   if (m == nullptr)  return;
1959   MethodData* mdo = m->method_data();
1960   if (mdo == nullptr)  return;
1961   // There is a benign race here.  See comments in methodData.hpp.
1962   mdo->inc_decompile_count();
1963 }
1964 
// Atomically increment _method_profiling_count; safe to call concurrently
// from multiple threads.
void nmethod::inc_method_profiling_count() {
  Atomic::inc(&_method_profiling_count);
}
1968 
1969 uint64_t nmethod::method_profiling_count() {
1970   return _method_profiling_count;
1971 }
1972 
1973 bool nmethod::try_transition(signed char new_state_int) {
1974   signed char new_state = new_state_int;
1975   assert_lock_strong(NMethodState_lock);
1976   signed char old_state = _state;
1977   if (old_state >= new_state) {
1978     // Ensure monotonicity of transitions.
1979     return false;
1980   }
1981   Atomic::store(&_state, new_state);
1982   return true;
1983 }
1984 
// Invalidate an on-stack-replacement nmethod by unlinking it from the OSR
// list of its holder klass.  Must only be called on OSR nmethods
// (entry_bci != InvocationEntryBci).
void nmethod::invalidate_osr_method() {
  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
  // Remove from list of active nmethods
  if (method() != nullptr) {
    method()->method_holder()->remove_osr_nmethod(this);
  }
}
1992 

1998                        os::current_thread_id());
1999       log_identity(xtty);
2000       xtty->stamp();
2001       xtty->end_elem();
2002     }
2003   }
2004 
2005   CompileTask::print_ul(this, "made not entrant");
2006   if (PrintCompilation) {
2007     print_on(tty, "made not entrant");
2008   }
2009 }
2010 
// Break the link from the owning Method to this nmethod (if the method is
// still set), so the method no longer dispatches to this code.
void nmethod::unlink_from_method() {
  if (method() != nullptr) {
    method()->unlink_code(this);
  }
}
2016 
2017 // Invalidate code
2018 bool nmethod::make_not_entrant(bool make_not_entrant) {
2019   // This can be called while the system is already at a safepoint which is ok
2020   NoSafepointVerifier nsv;
2021 
2022   if (is_unloading()) {
2023     // If the nmethod is unloading, then it is already not entrant through
2024     // the nmethod entry barriers. No need to do anything; GC will unload it.
2025     return false;
2026   }
2027 
2028   if (Atomic::load(&_state) == not_entrant) {
2029     // Avoid taking the lock if already in required state.
2030     // This is safe from races because the state is an end-state,
2031     // which the nmethod cannot back out of once entered.
2032     // No need for fencing either.
2033     return false;
2034   }
2035 
2036   {
2037     // Enter critical section.  Does not block for safepoint.
2038     ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);

2061     }
2062 
2063     BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2064     if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2065       // If nmethod entry barriers are not supported, we won't mark
2066       // nmethods as on-stack when they become on-stack. So we
2067       // degrade to a less accurate flushing strategy, for now.
2068       mark_as_maybe_on_stack();
2069     }
2070 
2071     // Change state
2072     bool success = try_transition(not_entrant);
2073     assert(success, "Transition can't fail");
2074 
2075     // Log the transition once
2076     log_state_change();
2077 
2078     // Remove nmethod from method.
2079     unlink_from_method();
2080 
2081     if (make_not_entrant) {
2082       // Keep cached code if it was simply replaced
2083       // otherwise make it not entrant too.
2084       SCCache::invalidate(_scc_entry);
2085     }
2086 
2087     CompileBroker::log_not_entrant(this);
2088   } // leave critical region under NMethodState_lock
2089 
2090 #if INCLUDE_JVMCI
2091   // Invalidate can't occur while holding the Patching lock
2092   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2093   if (nmethod_data != nullptr) {
2094     nmethod_data->invalidate_nmethod_mirror(this);
2095   }
2096 #endif
2097 
2098 #ifdef ASSERT
2099   if (is_osr_method() && method() != nullptr) {
2100     // Make sure osr nmethod is invalidated, i.e. not on the list
2101     bool found = method()->method_holder()->remove_osr_nmethod(this);
2102     assert(!found, "osr nmethod should have been invalidated");
2103   }
2104 #endif
2105 
2106   return true;
2107 }

2207         MethodHandles::clean_dependency_context(call_site);
2208       } else {
2209         InstanceKlass* ik = deps.context_type();
2210         if (ik == nullptr) {
2211           continue;  // ignore things like evol_method
2212         }
2213         // During GC liveness of dependee determines class that needs to be updated.
2214         // The GC may clean dependency contexts concurrently and in parallel.
2215         ik->clean_dependency_context();
2216       }
2217     }
2218   }
2219 }
2220 
// Finish bookkeeping after a successful compilation: record the outcome and
// code sizes on the compile task, post the JVMTI load event, and emit any
// requested logging/printing.  Note the ordering constraints in the comments
// below (JVMTI notification must happen outside locks).
void nmethod::post_compiled_method(CompileTask* task) {
  // Record success and resulting code sizes on the task.
  task->mark_success();
  task->set_nm_content_size(content_size());
  task->set_nm_insts_size(insts_size());
  task->set_nm_total_size(total_size());

  // task->is_scc() is true only for loaded cached code.
  // nmethod::_scc_entry is set for loaded and stored cached code
  // to invalidate the entry when nmethod is deoptimized.
  // There is option to not store in archive cached code.
  guarantee((_scc_entry != nullptr) || !task->is_scc() || VerifyCachedCode, "sanity");

  // JVMTI -- compiled method notification (must be done outside lock)
  post_compiled_method_load_event();

  if (CompilationLog::log() != nullptr) {
    CompilationLog::log()->log_nmethod(JavaThread::current(), this);
  }

  // Honor compiler directives (e.g. PrintNMethods) for this compilation.
  const DirectiveSet* directive = task->directive();
  maybe_print_nmethod(directive);
}
2243 
2244 // ------------------------------------------------------------------
2245 // post_compiled_method_load_event
2246 // new method for install_code() path
2247 // Transfer information from compilation to jvmti
2248 void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
2249   // This is a bad time for a safepoint.  We don't want
2250   // this nmethod to get unloaded while we're queueing the event.
2251   NoSafepointVerifier nsv;
2252 

3151                                              p2i(nul_chk_table_end()),
3152                                              nul_chk_table_size());
3153   if (handler_table_size() > 0) st->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3154                                              p2i(handler_table_begin()),
3155                                              p2i(handler_table_end()),
3156                                              handler_table_size());
3157   if (scopes_pcs_size   () > 0) st->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3158                                              p2i(scopes_pcs_begin()),
3159                                              p2i(scopes_pcs_end()),
3160                                              scopes_pcs_size());
3161   if (scopes_data_size  () > 0) st->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3162                                              p2i(scopes_data_begin()),
3163                                              p2i(scopes_data_end()),
3164                                              scopes_data_size());
3165 #if INCLUDE_JVMCI
3166   if (speculations_size () > 0) st->print_cr(" speculations   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3167                                              p2i(speculations_begin()),
3168                                              p2i(speculations_end()),
3169                                              speculations_size());
3170 #endif
3171   if (SCCache::is_on() && _scc_entry != nullptr) {
3172     _scc_entry->print(st);
3173   }
3174 }
3175 
3176 void nmethod::print_code() {
3177   ResourceMark m;
3178   ttyLocker ttyl;
3179   // Call the specialized decode method of this class.
3180   decode(tty);
3181 }
3182 
3183 #ifndef PRODUCT  // called InstanceKlass methods are available only then. Declared as PRODUCT_RETURN
3184 
3185 void nmethod::print_dependencies_on(outputStream* out) {
3186   ResourceMark rm;
3187   stringStream st;
3188   st.print_cr("Dependencies:");
3189   for (Dependencies::DepStream deps(this); deps.next(); ) {
3190     deps.print_dependency(&st);
3191     InstanceKlass* ctxk = deps.context_type();
3192     if (ctxk != nullptr) {
3193       if (ctxk->is_dependent_nmethod(this)) {

3253   st->print("scopes:");
3254   if (scopes_pcs_begin() < scopes_pcs_end()) {
3255     st->cr();
3256     for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3257       if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
3258         continue;
3259 
3260       ScopeDesc* sd = scope_desc_at(p->real_pc(this));
3261       while (sd != nullptr) {
3262         sd->print_on(st, p);  // print output ends with a newline
3263         sd = sd->sender();
3264       }
3265     }
3266   } else {
3267     st->print_cr(" <list empty>");
3268   }
3269 }
3270 #endif
3271 
3272 #ifndef PRODUCT  // RelocIterator does support printing only then.
3273 void nmethod::print_relocations_on(outputStream* st) {
3274   ResourceMark m;       // in case methods get printed via the debugger
3275   st->print_cr("relocations:");
3276   RelocIterator iter(this);
3277   iter.print_on(st);
3278 }
3279 #endif
3280 
3281 void nmethod::print_pcs_on(outputStream* st) {
3282   ResourceMark m;       // in case methods get printed via debugger
3283   st->print("pc-bytecode offsets:");
3284   if (scopes_pcs_begin() < scopes_pcs_end()) {
3285     st->cr();
3286     for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3287       p->print_on(st, this);  // print output ends with a newline
3288     }
3289   } else {
3290     st->print_cr(" <list empty>");
3291   }
3292 }
3293 
// Decode this nmethod's exception handler table and print it to tty,
// resolving entries relative to code_begin().
void nmethod::print_handler_table() {
  ExceptionHandlerTable(this).print(code_begin());
}
3297 

3612           else obj->print_value_on(&st);
3613           st.print(")");
3614           return st.as_string();
3615         }
3616         case relocInfo::metadata_type: {
3617           stringStream st;
3618           metadata_Relocation* r = iter.metadata_reloc();
3619           Metadata* obj = r->metadata_value();
3620           st.print("metadata(");
3621           if (obj == nullptr) st.print("nullptr");
3622           else obj->print_value_on(&st);
3623           st.print(")");
3624           return st.as_string();
3625         }
3626         case relocInfo::runtime_call_type:
3627         case relocInfo::runtime_call_w_cp_type: {
3628           stringStream st;
3629           st.print("runtime_call");
3630           CallRelocation* r = (CallRelocation*)iter.reloc();
3631           address dest = r->destination();
3632           if (StubRoutines::contains(dest)) {
3633             StubCodeDesc* desc = StubCodeDesc::desc_for(dest);
3634             if (desc == nullptr) {
3635               desc = StubCodeDesc::desc_for(dest + frame::pc_return_offset);
3636             }
3637             if (desc != nullptr) {
3638               st.print(" Stub::%s", desc->name());
3639               return st.as_string();
3640             }
3641           }
3642           CodeBlob* cb = CodeCache::find_blob(dest);
3643           if (cb != nullptr) {
3644             st.print(" %s", cb->name());
3645           } else {
3646             ResourceMark rm;
3647             const int buflen = 1024;
3648             char* buf = NEW_RESOURCE_ARRAY(char, buflen);
3649             int offset;
3650             if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) {
3651               st.print(" %s", buf);
3652               if (offset != 0) {
3653                 st.print("+%d", offset);
3654               }
3655             }
3656           }
3657           return st.as_string();
3658         }
3659         case relocInfo::virtual_call_type: {
3660           stringStream st;
3661           st.print_raw("virtual_call");
< prev index next >