6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.inline.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/compiledIC.hpp"
28 #include "code/dependencies.hpp"
29 #include "code/nativeInst.hpp"
30 #include "code/nmethod.inline.hpp"
31 #include "code/relocInfo.hpp"
32 #include "code/scopeDesc.hpp"
33 #include "compiler/abstractCompiler.hpp"
34 #include "compiler/compilationLog.hpp"
35 #include "compiler/compileBroker.hpp"
36 #include "compiler/compileLog.hpp"
37 #include "compiler/compileTask.hpp"
38 #include "compiler/compilerDirectives.hpp"
39 #include "compiler/compilerOracle.hpp"
40 #include "compiler/directivesParser.hpp"
41 #include "compiler/disassembler.hpp"
42 #include "compiler/oopMap.inline.hpp"
43 #include "gc/shared/barrierSet.hpp"
44 #include "gc/shared/barrierSetNMethod.hpp"
45 #include "gc/shared/classUnloadingContext.hpp"
771
772 void nmethod::clear_inline_caches() {
773 assert(SafepointSynchronize::is_at_safepoint(), "clearing of ICs is only allowed at a safepoint");
774 RelocIterator iter(this);
775 while (iter.next()) {
776 iter.reloc()->clear_inline_cache();
777 }
778 }
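
// Illustrative sketch, not part of the original file: the same relocation
// walk can be narrowed to call-site relocations when only the inline
// caches themselves need resetting (assumes the relocInfo call types).
static void clear_call_site_inline_caches(nmethod* nm) {
  assert(SafepointSynchronize::is_at_safepoint(), "same safepoint rule as above");
  RelocIterator iter(nm);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::virtual_call_type ||
        t == relocInfo::opt_virtual_call_type ||
        t == relocInfo::static_call_type) {
      iter.reloc()->clear_inline_cache();
    }
  }
}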
779
780 #ifdef ASSERT
781 // Check that the class loader is alive for this bit of metadata.
782 class CheckClass : public MetadataClosure {
783 void do_metadata(Metadata* md) {
784 Klass* klass = nullptr;
785 if (md->is_klass()) {
786 klass = ((Klass*)md);
787 } else if (md->is_method()) {
788 klass = ((Method*)md)->method_holder();
789 } else if (md->is_methodData()) {
790 klass = ((MethodData*)md)->method()->method_holder();
791 } else {
792 md->print();
793 ShouldNotReachHere();
794 }
795 assert(klass->is_loader_alive(), "must be alive");
796 }
797 };
798 #endif // ASSERT
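
// Illustrative usage, not from the original file: a debug-only sweep that
// runs the CheckClass closure above over every Metadata* recorded in an
// nmethod; metadata_do() visits each embedded Metadata reference.
#ifdef ASSERT
static void assert_nmethod_metadata_alive(nmethod* nm) {
  CheckClass check_class;
  nm->metadata_do(&check_class);
}
#endif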
799
800
801 static void clean_ic_if_metadata_is_dead(CompiledIC *ic) {
802 ic->clean_metadata();
803 }
804
805 // Clean a call site in this nmethod (which is itself not unloaded) if it references an unloaded nmethod.
806 template <typename CallsiteT>
807 static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, nmethod* from,
808 bool clean_all) {
809 CodeBlob* cb = CodeCache::find_blob(callsite->destination());
810 if (!cb->is_nmethod()) {
1009 _method->method_holder()->external_name(),
1010 _method->name()->as_C_string(),
1011 _method->signature()->as_C_string(),
1012 compile_id());
1013 }
1014 return check_evol.has_evol_dependency();
1015 }
1016
1017 int nmethod::total_size() const {
1018 return
1019 consts_size() +
1020 insts_size() +
1021 stub_size() +
1022 scopes_data_size() +
1023 scopes_pcs_size() +
1024 handler_table_size() +
1025 nul_chk_table_size();
1026 }
1027
1028 const char* nmethod::compile_kind() const {
1029 if (is_osr_method()) return "osr";
1030 if (method() != nullptr && is_native_method()) {
1031 if (method()->is_continuation_native_intrinsic()) {
1032 return "cnt";
1033 }
1034 return "c2n";
1035 }
1036 return nullptr;
1037 }
1038
1039 const char* nmethod::compiler_name() const {
1040 return compilertype2name(_compiler_type);
1041 }
1042
1043 #ifdef ASSERT
1044 class CheckForOopsClosure : public OopClosure {
1045 bool _found_oop = false;
1046 public:
1047 virtual void do_oop(oop* o) { _found_oop = true; }
1048 virtual void do_oop(narrowOop* o) { _found_oop = true; }
1049 bool found_oop() { return _found_oop; }
1115 nm = new (native_nmethod_size, allow_NonNMethod_space)
1116 nmethod(method(), compiler_none, native_nmethod_size,
1117 compile_id, &offsets,
1118 code_buffer, frame_size,
1119 basic_lock_owner_sp_offset,
1120 basic_lock_sp_offset,
1121 oop_maps, mutable_data_size);
1122 DEBUG_ONLY( if (allow_NonNMethod_space) assert_no_oops_or_metadata(nm); )
1123 NOT_PRODUCT(if (nm != nullptr) native_nmethod_stats.note_native_nmethod(nm));
1124 }
1125
1126 if (nm != nullptr) {
1127 // verify nmethod
1128 DEBUG_ONLY(nm->verify();) // might block
1129
1130 nm->log_new_nmethod();
1131 }
1132 return nm;
1133 }
1134
1135 nmethod* nmethod::new_nmethod(const methodHandle& method,
1136 int compile_id,
1137 int entry_bci,
1138 CodeOffsets* offsets,
1139 int orig_pc_offset,
1140 DebugInformationRecorder* debug_info,
1141 Dependencies* dependencies,
1142 CodeBuffer* code_buffer, int frame_size,
1143 OopMapSet* oop_maps,
1144 ExceptionHandlerTable* handler_table,
1145 ImplicitExceptionTable* nul_chk_table,
1146 AbstractCompiler* compiler,
1147 CompLevel comp_level
1148 #if INCLUDE_JVMCI
1149 , char* speculations,
1150 int speculations_len,
1151 JVMCINMethodData* jvmci_data
1152 #endif
1153 )
1154 {
1155 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1156 code_buffer->finalize_oop_references(method);
1157 // create nmethod
1158 nmethod* nm = nullptr;
1159 int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1160
1161 int immutable_data_size =
1162 adjust_pcs_size(debug_info->pcs_size())
1163 + align_up((int)dependencies->size_in_bytes(), oopSize)
1164 + align_up(handler_table->size_in_bytes() , oopSize)
1165 + align_up(nul_chk_table->size_in_bytes() , oopSize)
1166 #if INCLUDE_JVMCI
1167 + align_up(speculations_len , oopSize)
1171 // First, allocate space for immutable data in C heap.
1172 address immutable_data = nullptr;
1173 if (immutable_data_size > 0) {
1174 immutable_data = (address)os::malloc(immutable_data_size, mtCode);
1175 if (immutable_data == nullptr) {
1176 vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data");
1177 return nullptr;
1178 }
1179 }
1180
1181 int mutable_data_size = required_mutable_data_size(code_buffer
1182 JVMCI_ONLY(COMMA (compiler->is_jvmci() ? jvmci_data->size() : 0)));
1183
1184 {
1185 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1186
1187 nm = new (nmethod_size, comp_level)
1188 nmethod(method(), compiler->type(), nmethod_size, immutable_data_size, mutable_data_size,
1189 compile_id, entry_bci, immutable_data, offsets, orig_pc_offset,
1190 debug_info, dependencies, code_buffer, frame_size, oop_maps,
1191 handler_table, nul_chk_table, compiler, comp_level
1192 #if INCLUDE_JVMCI
1193 , speculations,
1194 speculations_len,
1195 jvmci_data
1196 #endif
1197 );
1198
1199 if (nm != nullptr) {
1200 // To make dependency checking during class loading fast, record
1201 // the nmethod dependencies in the classes it is dependent on.
1202 // This allows the dependency checking code to simply walk the
1203 // class hierarchy above the loaded class, checking only nmethods
1204 // which are dependent on those classes. The slow way is to
1205 // check every nmethod for dependencies which makes it linear in
1206 // the number of methods compiled. For applications with a lot
1207 // of classes the slow way is too slow.
1208 for (Dependencies::DepStream deps(nm); deps.next(); ) {
1209 if (deps.type() == Dependencies::call_site_target_value) {
1210 // CallSite dependencies are managed on per-CallSite instance basis.
1211 oop call_site = deps.argument_oop(0);
1212 MethodHandles::add_dependent_nmethod(call_site, nm);
1213 } else {
1214 InstanceKlass* ik = deps.context_type();
1215 if (ik == nullptr) {
1216 continue; // ignore things like evol_method
1217 }
1218 // record this nmethod as dependent on this klass
1219 ik->add_dependent_nmethod(nm);
1220 }
1221 }
1222 NOT_PRODUCT(note_java_nmethod(nm)); // nm is known non-null here
1223 }
1224 }
1225 // Do verification and logging outside CodeCache_lock.
1226 if (nm != nullptr) {
1227 // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
1228 DEBUG_ONLY(nm->verify();)
1229 nm->log_new_nmethod();
1230 }
1231 return nm;
1232 }
1233
1234 // Fill in default values for various fields
1235 void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) {
1236 // avoid uninitialized fields, even for short time periods
1237 _exception_cache = nullptr;
1238 _gc_data = nullptr;
1239 _oops_do_mark_link = nullptr;
1240 _compiled_ic_data = nullptr;
1241
1242 _is_unloading_state = 0;
1243 _state = not_installed;
1244
1245 _has_unsafe_access = 0;
1246 _has_method_handle_invokes = 0;
1247 _has_wide_vectors = 0;
1248 _has_monitors = 0;
1249 _has_scoped_access = 0;
1250 _has_flushed_dependencies = 0;
1251 _is_unlinked = 0;
1252 _load_reported = 0; // jvmti state
1253
1254 _deoptimization_status = not_marked;
1255
1256 // SECT_CONSTS is first in code buffer so the offset should be 0.
1257 int consts_offset = code_buffer->total_offset_of(code_buffer->consts());
1258 assert(consts_offset == 0, "const_offset: %d", consts_offset);
1259
1260 _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
1261
1262 CHECKED_CAST(_entry_offset, uint16_t, (offsets->value(CodeOffsets::Entry)));
1263 CHECKED_CAST(_verified_entry_offset, uint16_t, (offsets->value(CodeOffsets::Verified_Entry)));
1264
1265 _skipped_instructions_size = code_buffer->total_skipped_instructions_size();
1266 }
1267
1268 // Post initialization
1269 void nmethod::post_init() {
1270 clear_unloading_state();
1271
1272 finalize_relocations();
1273
1306
1307 _osr_entry_point = nullptr;
1308 _pc_desc_container = nullptr;
1309 _entry_bci = InvocationEntryBci;
1310 _compile_id = compile_id;
1311 _comp_level = CompLevel_none;
1312 _compiler_type = type;
1313 _orig_pc_offset = 0;
1314 _num_stack_arg_slots = 0;
1315
1316 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1317 // Continuation enter intrinsic
1318 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1319 } else {
1320 _exception_offset = 0;
1321 }
1322 // Native wrappers do not have deopt handlers. Make the values
1323 // something that will never match a pc, like the nmethod vtable entry.
1324 _deopt_handler_offset = 0;
1325 _deopt_mh_handler_offset = 0;
1326 _unwind_handler_offset = 0;
1327
1328 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1329 uint16_t metadata_size;
1330 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1331 JVMCI_ONLY( _metadata_size = metadata_size; )
1332 assert(_mutable_data_size == _relocation_size + metadata_size,
1333 "wrong mutable data size: %d != %d + %d",
1334 _mutable_data_size, _relocation_size, metadata_size);
1335
1336 // Native wrappers do not have read-only data, but we need a unique, non-null address.
1337 _immutable_data = blob_end();
1338 _immutable_data_size = 0;
1339 _nul_chk_table_offset = 0;
1340 _handler_table_offset = 0;
1341 _scopes_pcs_offset = 0;
1342 _scopes_data_offset = 0;
1343 #if INCLUDE_JVMCI
1344 _speculations_offset = 0;
1345 #endif
1365 // Both cases are handled in decode2(), which is called via print_code() -> decode().
1366 if (PrintNativeNMethods) {
1367 tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------");
1368 print_code();
1369 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1370 #if defined(SUPPORT_DATA_STRUCTS)
1371 if (AbstractDisassembler::show_structs()) {
1372 if (oop_maps != nullptr) {
1373 tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning
1374 oop_maps->print_on(tty);
1375 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1376 }
1377 }
1378 #endif
1379 } else {
1380 print(); // print the header part only.
1381 }
1382 #if defined(SUPPORT_DATA_STRUCTS)
1383 if (AbstractDisassembler::show_structs()) {
1384 if (PrintRelocations) {
1385 print_relocations();
1386 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1387 }
1388 }
1389 #endif
1390 if (xtty != nullptr) {
1391 xtty->tail("print_native_nmethod");
1392 }
1393 }
1394 }
1395
1396 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
1397 return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
1398 }
1399
1400 void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () {
1401 // Try MethodNonProfiled and MethodProfiled.
1402 void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled);
1403 if (return_value != nullptr || !allow_NonNMethod_space) return return_value;
1404 // Try NonNMethod or give up.
1405 return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);
1409 nmethod::nmethod(
1410 Method* method,
1411 CompilerType type,
1412 int nmethod_size,
1413 int immutable_data_size,
1414 int mutable_data_size,
1415 int compile_id,
1416 int entry_bci,
1417 address immutable_data,
1418 CodeOffsets* offsets,
1419 int orig_pc_offset,
1420 DebugInformationRecorder* debug_info,
1421 Dependencies* dependencies,
1422 CodeBuffer *code_buffer,
1423 int frame_size,
1424 OopMapSet* oop_maps,
1425 ExceptionHandlerTable* handler_table,
1426 ImplicitExceptionTable* nul_chk_table,
1427 AbstractCompiler* compiler,
1428 CompLevel comp_level
1429 #if INCLUDE_JVMCI
1430 , char* speculations,
1431 int speculations_len,
1432 JVMCINMethodData* jvmci_data
1433 #endif
1434 )
1435 : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1436 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1437 _deoptimization_generation(0),
1438 _gc_epoch(CodeCache::gc_epoch()),
1439 _method(method),
1440 _osr_link(nullptr)
1441 {
1442 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1443 {
1444 DEBUG_ONLY(NoSafepointVerifier nsv;)
1445 assert_locked_or_safepoint(CodeCache_lock);
1446
1447 init_defaults(code_buffer, offsets);
1448
1449 _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
1450 _entry_bci = entry_bci;
1451 _compile_id = compile_id;
1452 _comp_level = comp_level;
1453 _compiler_type = type;
1454 _orig_pc_offset = orig_pc_offset;
1455
1456 _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 0 : _method->constMethod()->num_stack_arg_slots();
1457
1458 set_ctable_begin(header_begin() + content_offset());
1459
1460 #if INCLUDE_JVMCI
1461 if (compiler->is_jvmci()) {
1462 // JVMCI might not produce any stub sections
1463 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1464 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1465 } else {
1466 _exception_offset = -1;
1467 }
1558 #if INCLUDE_JVMCI
1559 // Copy speculations to nmethod
1560 if (speculations_size() != 0) {
1561 memcpy(speculations_begin(), speculations, speculations_len);
1562 }
1563 #endif
1564
1565 post_init();
1566
1567 // We use the entry point information to find out whether a method is
1568 // static or non-static.
1569 assert(compiler->is_c2() || compiler->is_jvmci() ||
1570 _method->is_static() == (entry_point() == verified_entry_point()),
1571 " entry points must be same for static methods and vice versa");
1572 }
1573 }
1574
1575 // Print a short set of xml attributes to identify this nmethod. The
1576 // output should be embedded in some other element.
1577 void nmethod::log_identity(xmlStream* log) const {
1578 log->print(" compile_id='%d'", compile_id());
1579 const char* nm_kind = compile_kind();
1580 if (nm_kind != nullptr) log->print(" compile_kind='%s'", nm_kind);
1581 log->print(" compiler='%s'", compiler_name());
1582 if (TieredCompilation) {
1583 log->print(" level='%d'", comp_level());
1584 }
1585 #if INCLUDE_JVMCI
1586 if (jvmci_nmethod_data() != nullptr) {
1587 const char* jvmci_name = jvmci_nmethod_data()->name();
1588 if (jvmci_name != nullptr) {
1589 log->print(" jvmci_mirror_name='");
1590 log->text("%s", jvmci_name);
1591 log->print("'");
1592 }
1593 }
1594 #endif
1595 }
1596
1597
1598 #define LOG_OFFSET(log, name) \
1599 if (p2i(name##_end()) - p2i(name##_begin())) \
1600 log->print(" " XSTR(name) "_offset='%zd'" , \
1601 p2i(name##_begin()) - p2i(this))
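
// Worked example (illustrative): LOG_OFFSET(log, consts) expands roughly to
//   if (p2i(consts_end()) - p2i(consts_begin()))
//     log->print(" consts_offset='%zd'", p2i(consts_begin()) - p2i(this));
// i.e. the attribute is emitted only when the named section is non-empty,
// and the offset is measured from the nmethod header.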
1602
1603
1688 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1689 if (oop_maps() != nullptr) {
1690 tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning
1691 oop_maps()->print_on(tty);
1692 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1693 }
1694 }
1695 #endif
1696 } else {
1697 print(); // print the header part only.
1698 }
1699
1700 #if defined(SUPPORT_DATA_STRUCTS)
1701 if (AbstractDisassembler::show_structs()) {
1702 methodHandle mh(Thread::current(), _method);
1703 if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDebugInfo)) {
1704 print_scopes();
1705 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1706 }
1707 if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommandEnum::PrintRelocations)) {
1708 print_relocations();
1709 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1710 }
1711 if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDependencies)) {
1712 print_dependencies_on(tty);
1713 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1714 }
1715 if (printmethod || PrintExceptionHandlers) {
1716 print_handler_table();
1717 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1718 print_nul_chk_table();
1719 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1720 }
1721
1722 if (printmethod) {
1723 print_recorded_oops();
1724 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1725 print_recorded_metadata();
1726 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1727 }
1728 }
1729 #endif
1730
1731 if (xtty != nullptr) {
1732 xtty->tail("print_nmethod");
1733 }
1734 }
1735
1736
1737 // Promote one word from an assembly-time handle to a live embedded oop.
1738 inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
1739 if (handle == nullptr ||
1740 // As a special case, IC oops are initialized to 1 or -1.
1741 handle == (jobject) Universe::non_oop_word()) {
1742 *(void**)dest = handle;
1743 } else {
1744 *dest = JNIHandles::resolve_non_null(handle);
1745 }
1746 }
1747
1748
1749 // Have to have the same name because it's called by a template
1750 void nmethod::copy_values(GrowableArray<jobject>* array) {
1751 int length = array->length();
1752 assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
1753 oop* dest = oops_begin();
1754 for (int index = 0 ; index < length; index++) {
1755 initialize_immediate_oop(&dest[index], array->at(index));
1756 }
1757
1758 // Now we can fix up all the oops in the code. We need to do this
1759 // in the code because the assembler uses jobjects as placeholders.
1760 // The code and relocations have already been initialized by the
1761 // CodeBlob constructor, so it is valid even at this early point to
1762 // iterate over relocations and patch the code.
1763 fix_oop_relocations(nullptr, nullptr, /*initialize_immediates=*/ true);
1764 }
1765
1766 void nmethod::copy_values(GrowableArray<Metadata*>* array) {
1767 int length = array->length();
1775 void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
1776 // re-patch all oop-bearing instructions, just in case some oops moved
1777 RelocIterator iter(this, begin, end);
1778 while (iter.next()) {
1779 if (iter.type() == relocInfo::oop_type) {
1780 oop_Relocation* reloc = iter.oop_reloc();
1781 if (initialize_immediates && reloc->oop_is_immediate()) {
1782 oop* dest = reloc->oop_addr();
1783 jobject obj = *reinterpret_cast<jobject*>(dest);
1784 initialize_immediate_oop(dest, obj);
1785 }
1786 // Refresh the oop-related bits of this instruction.
1787 reloc->fix_oop_relocation();
1788 } else if (iter.type() == relocInfo::metadata_type) {
1789 metadata_Relocation* reloc = iter.metadata_reloc();
1790 reloc->fix_metadata_relocation();
1791 }
1792 }
1793 }
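
// Illustrative call shape (assumed GC hook, not from this file): after a
// moving collection has updated the oops section, the copies embedded in
// the instruction stream are refreshed with the same walk, e.g.
//   nm->fix_oop_relocations(nullptr, nullptr, /*initialize_immediates=*/ false);
// where a null [begin, end) range means "visit all relocations".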
1794
1795 static void install_post_call_nop_displacement(nmethod* nm, address pc) {
1796 NativePostCallNop* nop = nativePostCallNop_at((address) pc);
1797 intptr_t cbaddr = (intptr_t) nm;
1798 intptr_t offset = ((intptr_t) pc) - cbaddr;
1799
1800 int oopmap_slot = nm->oop_maps()->find_slot_for_offset(int((intptr_t) pc - (intptr_t) nm->code_begin()));
1801 if (oopmap_slot < 0) { // this can happen during asynchronous (non-safepoint) stack walks
1802 log_debug(codecache)("failed to find oopmap for cb: " INTPTR_FORMAT " offset: %d", cbaddr, (int) offset);
1803 } else if (!nop->patch(oopmap_slot, offset)) {
1804 log_debug(codecache)("failed to encode %d %d", oopmap_slot, (int) offset);
1805 }
1806 }
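
// Illustrative reader side (assumed decode() counterpart to the patch()
// above): a stack walker at a return pc can recover the code blob and
// oopmap slot without a code-cache lookup:
//   NativePostCallNop* nop = nativePostCallNop_at(return_pc);
//   int32_t oopmap_slot, cb_offset;
//   if (nop != nullptr && nop->decode(oopmap_slot, cb_offset)) {
//     nmethod* nm = (nmethod*)(return_pc - cb_offset);
//     ...
//   }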
1807
1808 void nmethod::finalize_relocations() {
1809 NoSafepointVerifier nsv;
1810
1811 GrowableArray<NativeMovConstReg*> virtual_call_data;
1812
1813 // Make sure that post call nops fill in nmethod offsets eagerly so
1814 // we don't have to race with deoptimization
1936 Atomic::store(&_gc_epoch, CodeCache::gc_epoch());
1937 }
1938
1939 bool nmethod::is_maybe_on_stack() {
1940 // If the condition below is true, it means that the nmethod was found to
1941 // be alive in the previous completed marking cycle.
1942 return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
1943 }
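
// Illustrative timeline (assumed epoch numbering): if the last completed
// marking cycle is N, then _gc_epoch >= N means this nmethod was observed
// on some stack during or after cycle N, so it must be treated as
// possibly on-stack until a later completed cycle proves otherwise.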
1944
1945 void nmethod::inc_decompile_count() {
1946 if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1947 // Could be gated by ProfileTraps, but do not bother...
1948 Method* m = method();
1949 if (m == nullptr) return;
1950 MethodData* mdo = m->method_data();
1951 if (mdo == nullptr) return;
1952 // There is a benign race here. See comments in methodData.hpp.
1953 mdo->inc_decompile_count();
1954 }
1955
1956 bool nmethod::try_transition(signed char new_state_int) {
1957 signed char new_state = new_state_int;
1958 assert_lock_strong(NMethodState_lock);
1959 signed char old_state = _state;
1960 if (old_state >= new_state) {
1961 // Ensure monotonicity of transitions.
1962 return false;
1963 }
1964 Atomic::store(&_state, new_state);
1965 return true;
1966 }
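
// Illustrative sketch of the monotonicity contract (states only move
// forward; a repeated transition reports false):
//   MutexLocker ml(NMethodState_lock, Mutex::_no_safepoint_check_flag);
//   bool first = nm->try_transition(not_entrant); // true exactly once
//   assert(!nm->try_transition(not_entrant), "end state cannot be re-entered");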
1967
1968 void nmethod::invalidate_osr_method() {
1969 assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1970 // Remove from list of active nmethods
1971 if (method() != nullptr) {
1972 method()->method_holder()->remove_osr_nmethod(this);
1973 }
1974 }
1975
1987 }
1988 }
1989
1990 ResourceMark rm;
1991 stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
1992 ss.print("made not entrant: %s", reason);
1993
1994 CompileTask::print_ul(this, ss.freeze());
1995 if (PrintCompilation) {
1996 print_on_with_msg(tty, ss.freeze());
1997 }
1998 }
1999
2000 void nmethod::unlink_from_method() {
2001 if (method() != nullptr) {
2002 method()->unlink_code(this);
2003 }
2004 }
2005
2006 // Invalidate code
2007 bool nmethod::make_not_entrant(const char* reason) {
2008 assert(reason != nullptr, "Must provide a reason");
2009
2010 // This can be called while the system is already at a safepoint which is ok
2011 NoSafepointVerifier nsv;
2012
2013 if (is_unloading()) {
2014 // If the nmethod is unloading, then it is already not entrant through
2015 // the nmethod entry barriers. No need to do anything; GC will unload it.
2016 return false;
2017 }
2018
2019 if (Atomic::load(&_state) == not_entrant) {
2020 // Avoid taking the lock if already in required state.
2021 // This is safe from races because the state is an end-state,
2022 // which the nmethod cannot back out of once entered.
2023 // No need for fencing either.
2024 return false;
2025 }
2026
2027 {
2063 }
2064
2065 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2066 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2067 // If nmethod entry barriers are not supported, we won't mark
2068 // nmethods as on-stack when they become on-stack. So we
2069 // degrade to a less accurate flushing strategy, for now.
2070 mark_as_maybe_on_stack();
2071 }
2072
2073 // Change state
2074 bool success = try_transition(not_entrant);
2075 assert(success, "Transition can't fail");
2076
2077 // Log the transition once
2078 log_state_change(reason);
2079
2080 // Remove nmethod from method.
2081 unlink_from_method();
2082
2083 } // leave critical region under NMethodState_lock
2084
2085 #if INCLUDE_JVMCI
2086 // Invalidate can't occur while holding the NMethodState_lock
2087 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2088 if (nmethod_data != nullptr) {
2089 nmethod_data->invalidate_nmethod_mirror(this);
2090 }
2091 #endif
2092
2093 #ifdef ASSERT
2094 if (is_osr_method() && method() != nullptr) {
2095 // Make sure osr nmethod is invalidated, i.e. not on the list
2096 bool found = method()->method_holder()->remove_osr_nmethod(this);
2097 assert(!found, "osr nmethod should have been invalidated");
2098 }
2099 #endif
2100
2101 return true;
2102 }
2125 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2126 if (nmethod_data != nullptr) {
2127 nmethod_data->invalidate_nmethod_mirror(this);
2128 }
2129 #endif
2130
2131 // Post before flushing as jmethodID is being used
2132 post_compiled_method_unload();
2133
2134 // Register for flushing when it is safe. For concurrent class unloading,
2135 // that would be after the unloading handshake, and for STW class unloading
2136 // that would be when getting back to the VM thread.
2137 ClassUnloadingContext::context()->register_unlinked_nmethod(this);
2138 }
2139
2140 void nmethod::purge(bool unregister_nmethod) {
2141
2142 MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2143
2144 // completely deallocate this method
2145 Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, is_osr_method() ? "osr" : "", p2i(this));
2146 log_debug(codecache)("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
2147 "/Free CodeCache:%zuKb",
2148 is_osr_method() ? "osr" : "", _compile_id, p2i(this), CodeCache::blob_count(),
2149 CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
2150
2151 // We need to deallocate any ExceptionCache data.
2152 // Note that we do not need to grab the nmethod lock for this; it
2153 // had better be thread safe if we're disposing of it!
2154 ExceptionCache* ec = exception_cache();
2155 while (ec != nullptr) {
2156 ExceptionCache* next = ec->next();
2157 delete ec;
2158 ec = next;
2159 }
2160 if (_pc_desc_container != nullptr) {
2161 delete _pc_desc_container;
2162 }
2163 delete[] _compiled_ic_data;
2164
2165 if (_immutable_data != blob_end()) {
2166 os::free(_immutable_data);
2167 _immutable_data = blob_end(); // Valid not null address
2168 }
2169 if (unregister_nmethod) {
2170 Universe::heap()->unregister_nmethod(this);
2171 }
2172 CodeCache::unregister_old_nmethod(this);
2173
2174 CodeBlob::purge();
2175 }
2176
2177 oop nmethod::oop_at(int index) const {
2178 if (index == 0) {
2179 return nullptr;
2180 }
2181
2182 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2183 return bs_nm->oop_load_no_keepalive(this, index);
2184 }
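
// Illustrative note (assumed semantics of the call above): a no-keepalive
// load returns the oop without marking it live for a concurrent collector,
// which suits unloading and verification code; a path that hands the
// result to running Java code needs a keep-alive load instead.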
2185
2206 MethodHandles::clean_dependency_context(call_site);
2207 } else {
2208 InstanceKlass* ik = deps.context_type();
2209 if (ik == nullptr) {
2210 continue; // ignore things like evol_method
2211 }
2212 // During GC liveness of dependee determines class that needs to be updated.
2213 // The GC may clean dependency contexts concurrently and in parallel.
2214 ik->clean_dependency_context();
2215 }
2216 }
2217 }
2218 }
2219
2220 void nmethod::post_compiled_method(CompileTask* task) {
2221 task->mark_success();
2222 task->set_nm_content_size(content_size());
2223 task->set_nm_insts_size(insts_size());
2224 task->set_nm_total_size(total_size());
2225
2226 // JVMTI -- compiled method notification (must be done outside lock)
2227 post_compiled_method_load_event();
2228
2229 if (CompilationLog::log() != nullptr) {
2230 CompilationLog::log()->log_nmethod(JavaThread::current(), this);
2231 }
2232
2233 const DirectiveSet* directive = task->directive();
2234 maybe_print_nmethod(directive);
2235 }
2236
2237 // ------------------------------------------------------------------
2238 // post_compiled_method_load_event
2239 // new method for install_code() path
2240 // Transfer information from compilation to jvmti
2241 void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
2242 // This is a bad time for a safepoint. We don't want
2243 // this nmethod to get unloaded while we're queueing the event.
2244 NoSafepointVerifier nsv;
2245
2937
2938 // Make sure all the entry points are correctly aligned for patching.
2939 NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2940
2941 // assert(oopDesc::is_oop(method()), "must be valid");
2942
2943 ResourceMark rm;
2944
2945 if (!CodeCache::contains(this)) {
2946 fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this));
2947 }
2948
2949 if (is_native_method())
2950 return;
2951
2952 nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2953 if (nm != this) {
2954 fatal("find_nmethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this));
2955 }
2956
2957 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2958 if (!p->verify(this)) {
2959 tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
2960 }
2961 }
2962
2963 #ifdef ASSERT
2964 #if INCLUDE_JVMCI
2965 {
2966 // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap
2967 ImmutableOopMapSet* oms = oop_maps();
2968 ImplicitExceptionTable implicit_table(this);
2969 for (uint i = 0; i < implicit_table.len(); i++) {
2970 int exec_offset = (int) implicit_table.get_exec_offset(i);
2971 if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) {
2972 assert(pc_desc_at(code_begin() + exec_offset) != nullptr, "missing PcDesc");
2973 bool found = false;
2974 for (int j = 0, jmax = oms->count(); j < jmax; j++) {
2975 if (oms->pair_at(j)->pc_offset() == exec_offset) {
2976 found = true;
2977 break;
2978 }
2979 }
2980 assert(found, "missing oopmap");
2981 }
2982 }
2983 }
2984 #endif
2985 #endif
2986
2987 VerifyOopsClosure voc(this);
2988 oops_do(&voc);
2989 assert(voc.ok(), "embedded oops must be OK");
2990 Universe::heap()->verify_nmethod(this);
2991
2992 assert(_oops_do_mark_link == nullptr, "_oops_do_mark_link for %s should be nullptr but is " PTR_FORMAT,
2993 nm->method()->external_name(), p2i(_oops_do_mark_link));
2994 verify_scopes();
2995
2996 CompiledICLocker nm_verify(this);
2997 VerifyMetadataClosure vmc;
2998 metadata_do(&vmc);
2999 }
3000
3001
3002 void nmethod::verify_interrupt_point(address call_site, bool is_inline_cache) {
3003
3004 // Verify IC only when nmethod installation is finished.
3005 if (!is_not_installed()) {
3006 if (CompiledICLocker::is_safe(this)) {
3007 if (is_inline_cache) {
3008 CompiledIC_at(this, call_site);
3009 } else {
3010 CompiledDirectCall::at(call_site);
3011 }
3012 } else {
3013 CompiledICLocker ml_verify(this);
3014 if (is_inline_cache) {
3143 p2i(nul_chk_table_end()),
3144 nul_chk_table_size());
3145 if (handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3146 p2i(handler_table_begin()),
3147 p2i(handler_table_end()),
3148 handler_table_size());
3149 if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3150 p2i(scopes_pcs_begin()),
3151 p2i(scopes_pcs_end()),
3152 scopes_pcs_size());
3153 if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3154 p2i(scopes_data_begin()),
3155 p2i(scopes_data_end()),
3156 scopes_data_size());
3157 #if INCLUDE_JVMCI
3158 if (speculations_size () > 0) st->print_cr(" speculations [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3159 p2i(speculations_begin()),
3160 p2i(speculations_end()),
3161 speculations_size());
3162 #endif
3163 }
3164
3165 void nmethod::print_code() {
3166 ResourceMark m;
3167 ttyLocker ttyl;
3168 // Call the specialized decode method of this class.
3169 decode(tty);
3170 }
3171
3172 #ifndef PRODUCT // The InstanceKlass methods called here are available only in non-product builds. Declared as PRODUCT_RETURN.
3173
3174 void nmethod::print_dependencies_on(outputStream* out) {
3175 ResourceMark rm;
3176 stringStream st;
3177 st.print_cr("Dependencies:");
3178 for (Dependencies::DepStream deps(this); deps.next(); ) {
3179 deps.print_dependency(&st);
3180 InstanceKlass* ctxk = deps.context_type();
3181 if (ctxk != nullptr) {
3182 if (ctxk->is_dependent_nmethod(this)) {
3242 st->print("scopes:");
3243 if (scopes_pcs_begin() < scopes_pcs_end()) {
3244 st->cr();
3245 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3246 if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
3247 continue;
3248
3249 ScopeDesc* sd = scope_desc_at(p->real_pc(this));
3250 while (sd != nullptr) {
3251 sd->print_on(st, p); // print output ends with a newline
3252 sd = sd->sender();
3253 }
3254 }
3255 } else {
3256 st->print_cr(" <list empty>");
3257 }
3258 }
3259 #endif
3260
3261 #ifndef PRODUCT // RelocIterator supports printing only in non-product builds.
3262 void nmethod::print_relocations() {
3263 ResourceMark m; // in case methods get printed via the debugger
3264 tty->print_cr("relocations:");
3265 RelocIterator iter(this);
3266 iter.print_on(tty);
3267 }
3268 #endif
3269
3270 void nmethod::print_pcs_on(outputStream* st) {
3271 ResourceMark m; // in case methods get printed via debugger
3272 st->print("pc-bytecode offsets:");
3273 if (scopes_pcs_begin() < scopes_pcs_end()) {
3274 st->cr();
3275 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3276 p->print_on(st, this); // print output ends with a newline
3277 }
3278 } else {
3279 st->print_cr(" <list empty>");
3280 }
3281 }
3282
3283 void nmethod::print_handler_table() {
3284 ExceptionHandlerTable(this).print(code_begin());
3285 }
3286
4052
4053 #endif // !PRODUCT
4054
4055 #if INCLUDE_JVMCI
4056 void nmethod::update_speculation(JavaThread* thread) {
4057 jlong speculation = thread->pending_failed_speculation();
4058 if (speculation != 0) {
4059 guarantee(jvmci_nmethod_data() != nullptr, "failed speculation in nmethod without failed speculation list");
4060 jvmci_nmethod_data()->add_failed_speculation(this, speculation);
4061 thread->set_pending_failed_speculation(0);
4062 }
4063 }
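
// Illustrative flow (assumed, based on the guarantee above): the uncommon
// trap handler records the failed speculation id on the JavaThread; this
// hook then appends it to the nmethod's failed-speculation list so that a
// recompilation will not make the same speculation again.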
4064
4065 const char* nmethod::jvmci_name() {
4066 if (jvmci_nmethod_data() != nullptr) {
4067 return jvmci_nmethod_data()->name();
4068 }
4069 return nullptr;
4070 }
4071 #endif
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.inline.hpp"
26 #include "code/aotCodeCache.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/dependencies.hpp"
30 #include "code/nativeInst.hpp"
31 #include "code/nmethod.inline.hpp"
32 #include "code/relocInfo.hpp"
33 #include "code/scopeDesc.hpp"
34 #include "compiler/abstractCompiler.hpp"
35 #include "compiler/compilationLog.hpp"
36 #include "compiler/compileBroker.hpp"
37 #include "compiler/compileLog.hpp"
38 #include "compiler/compileTask.hpp"
39 #include "compiler/compilerDirectives.hpp"
40 #include "compiler/compilerOracle.hpp"
41 #include "compiler/directivesParser.hpp"
42 #include "compiler/disassembler.hpp"
43 #include "compiler/oopMap.inline.hpp"
44 #include "gc/shared/barrierSet.hpp"
45 #include "gc/shared/barrierSetNMethod.hpp"
46 #include "gc/shared/classUnloadingContext.hpp"
772
773 void nmethod::clear_inline_caches() {
774 assert(SafepointSynchronize::is_at_safepoint(), "clearing of ICs is only allowed at a safepoint");
775 RelocIterator iter(this);
776 while (iter.next()) {
777 iter.reloc()->clear_inline_cache();
778 }
779 }
780
781 #ifdef ASSERT
782 // Check that the class loader is alive for this bit of metadata.
783 class CheckClass : public MetadataClosure {
784 void do_metadata(Metadata* md) {
785 Klass* klass = nullptr;
786 if (md->is_klass()) {
787 klass = ((Klass*)md);
788 } else if (md->is_method()) {
789 klass = ((Method*)md)->method_holder();
790 } else if (md->is_methodData()) {
791 klass = ((MethodData*)md)->method()->method_holder();
792 } else if (md->is_methodCounters()) {
793 klass = ((MethodCounters*)md)->method()->method_holder();
794 } else {
795 md->print();
796 ShouldNotReachHere();
797 }
798 assert(klass->is_loader_alive(), "must be alive");
799 }
800 };
801 #endif // ASSERT
802
803
804 static void clean_ic_if_metadata_is_dead(CompiledIC *ic) {
805 ic->clean_metadata();
806 }
807
808 // Clean a call site in this nmethod (which is itself not unloaded) if it references an unloaded nmethod.
809 template <typename CallsiteT>
810 static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, nmethod* from,
811 bool clean_all) {
812 CodeBlob* cb = CodeCache::find_blob(callsite->destination());
813 if (!cb->is_nmethod()) {
1012 _method->method_holder()->external_name(),
1013 _method->name()->as_C_string(),
1014 _method->signature()->as_C_string(),
1015 compile_id());
1016 }
1017 return check_evol.has_evol_dependency();
1018 }
1019
1020 int nmethod::total_size() const {
1021 return
1022 consts_size() +
1023 insts_size() +
1024 stub_size() +
1025 scopes_data_size() +
1026 scopes_pcs_size() +
1027 handler_table_size() +
1028 nul_chk_table_size();
1029 }
1030
1031 const char* nmethod::compile_kind() const {
1032 if (is_osr_method()) return "osr";
1033 if (preloaded()) return "AP";
1034 if (is_aot()) return "A";
1035
1036 if (method() != nullptr && is_native_method()) {
1037 if (method()->is_continuation_native_intrinsic()) {
1038 return "cnt";
1039 }
1040 return "c2n";
1041 }
1042 return nullptr;
1043 }
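
// Illustrative note: "AP" (AOT preloaded) and "A" (AOT) surface wherever
// compile_kind() is logged, e.g. in the compile_kind='...' attribute that
// log_identity() emits below, alongside the existing "osr" and "c2n" kinds.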
1044
1045 const char* nmethod::compiler_name() const {
1046 return compilertype2name(_compiler_type);
1047 }
1048
1049 #ifdef ASSERT
1050 class CheckForOopsClosure : public OopClosure {
1051 bool _found_oop = false;
1052 public:
1053 virtual void do_oop(oop* o) { _found_oop = true; }
1054 virtual void do_oop(narrowOop* o) { _found_oop = true; }
1055 bool found_oop() { return _found_oop; }
1121 nm = new (native_nmethod_size, allow_NonNMethod_space)
1122 nmethod(method(), compiler_none, native_nmethod_size,
1123 compile_id, &offsets,
1124 code_buffer, frame_size,
1125 basic_lock_owner_sp_offset,
1126 basic_lock_sp_offset,
1127 oop_maps, mutable_data_size);
1128 DEBUG_ONLY( if (allow_NonNMethod_space) assert_no_oops_or_metadata(nm); )
1129 NOT_PRODUCT(if (nm != nullptr) native_nmethod_stats.note_native_nmethod(nm));
1130 }
1131
1132 if (nm != nullptr) {
1133 // verify nmethod
1134 DEBUG_ONLY(nm->verify();) // might block
1135
1136 nm->log_new_nmethod();
1137 }
1138 return nm;
1139 }
1140
1141 void nmethod::record_nmethod_dependency() {
1142 // To make dependency checking during class loading fast, record
1143 // the nmethod dependencies in the classes it is dependent on.
1144 // This allows the dependency checking code to simply walk the
1145 // class hierarchy above the loaded class, checking only nmethods
1146 // which are dependent on those classes. The slow way is to
1147 // check every nmethod for dependencies which makes it linear in
1148 // the number of methods compiled. For applications with a lot
1149 // of classes the slow way is too slow.
1150 for (Dependencies::DepStream deps(this); deps.next(); ) {
1151 if (deps.type() == Dependencies::call_site_target_value) {
1152 // CallSite dependencies are managed on per-CallSite instance basis.
1153 oop call_site = deps.argument_oop(0);
1154 MethodHandles::add_dependent_nmethod(call_site, this);
1155 } else {
1156 InstanceKlass* ik = deps.context_type();
1157 if (ik == nullptr) {
1158 continue; // ignore things like evol_method
1159 }
1160 // record this nmethod as dependent on this klass
1161 ik->add_dependent_nmethod(this);
1162 }
1163 }
1164 }
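
// Illustrative payoff (assumed caller, simplified): because dependencies
// are recorded per context class, invalidation after a class is loaded
// only needs to walk the affected part of the hierarchy, e.g.
//   for (InstanceKlass* ik = new_type; ik != nullptr; ik = ik->java_super()) {
//     ik->mark_dependent_nmethods(deopt_scope, changes);
//   }
// rather than scanning every nmethod in the code cache.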
1165
1166 nmethod* nmethod::new_nmethod(const methodHandle& method,
1167 int compile_id,
1168 int entry_bci,
1169 CodeOffsets* offsets,
1170 int orig_pc_offset,
1171 DebugInformationRecorder* debug_info,
1172 Dependencies* dependencies,
1173 CodeBuffer* code_buffer, int frame_size,
1174 OopMapSet* oop_maps,
1175 ExceptionHandlerTable* handler_table,
1176 ImplicitExceptionTable* nul_chk_table,
1177 AbstractCompiler* compiler,
1178 CompLevel comp_level
1179 , AOTCodeEntry* aot_code_entry
1180 #if INCLUDE_JVMCI
1181 , char* speculations,
1182 int speculations_len,
1183 JVMCINMethodData* jvmci_data
1184 #endif
1185 )
1186 {
1187 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1188 code_buffer->finalize_oop_references(method);
1189 // create nmethod
1190 nmethod* nm = nullptr;
1191 int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1192
1193 int immutable_data_size =
1194 adjust_pcs_size(debug_info->pcs_size())
1195 + align_up((int)dependencies->size_in_bytes(), oopSize)
1196 + align_up(handler_table->size_in_bytes() , oopSize)
1197 + align_up(nul_chk_table->size_in_bytes() , oopSize)
1198 #if INCLUDE_JVMCI
1199 + align_up(speculations_len , oopSize)
1203 // First, allocate space for immutable data in C heap.
1204 address immutable_data = nullptr;
1205 if (immutable_data_size > 0) {
1206 immutable_data = (address)os::malloc(immutable_data_size, mtCode);
1207 if (immutable_data == nullptr) {
1208 vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data");
1209 return nullptr;
1210 }
1211 }
1212
1213 int mutable_data_size = required_mutable_data_size(code_buffer
1214 JVMCI_ONLY(COMMA (compiler->is_jvmci() ? jvmci_data->size() : 0)));
1215
1216 {
1217 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1218
1219 nm = new (nmethod_size, comp_level)
1220 nmethod(method(), compiler->type(), nmethod_size, immutable_data_size, mutable_data_size,
1221 compile_id, entry_bci, immutable_data, offsets, orig_pc_offset,
1222 debug_info, dependencies, code_buffer, frame_size, oop_maps,
1223 handler_table, nul_chk_table, compiler, comp_level, aot_code_entry
1224 #if INCLUDE_JVMCI
1225 , speculations,
1226 speculations_len,
1227 jvmci_data
1228 #endif
1229 );
1230
1231 if (nm != nullptr) {
1232 nm->record_nmethod_dependency();
1233 NOT_PRODUCT(note_java_nmethod(nm));
1234 }
1235 }
1236 // Do verification and logging outside CodeCache_lock.
1237 if (nm != nullptr) {
1238
1239 #ifdef ASSERT
1240 LogTarget(Debug, aot, codecache, nmethod) log;
1241 if (log.is_enabled()) {
1242 LogStream out(log);
1243 out.print_cr("== new_nmethod 2");
1244 FlagSetting fs(PrintRelocations, true);
1245 nm->print_on_impl(&out);
1246 nm->decode(&out);
1247 }
1248 #endif
1249
1250 // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
1251 DEBUG_ONLY(nm->verify();)
1252 nm->log_new_nmethod();
1253 }
1254 return nm;
1255 }
1256
1257 nmethod* nmethod::restore(address code_cache_buffer,
1258 const methodHandle& method,
1259 int compile_id,
1260 address reloc_data,
1261 GrowableArray<Handle>& oop_list,
1262 GrowableArray<Metadata*>& metadata_list,
1263 ImmutableOopMapSet* oop_maps,
1264 address immutable_data,
1265 GrowableArray<Handle>& reloc_imm_oop_list,
1266 GrowableArray<Metadata*>& reloc_imm_metadata_list,
1267 AOTCodeReader* aot_code_reader)
1268 {
1269 CodeBlob::restore(code_cache_buffer, "nmethod", reloc_data, oop_maps);
1270 nmethod* nm = (nmethod*)code_cache_buffer;
1271 nm->set_method(method());
1272 nm->_compile_id = compile_id;
1273 nm->set_immutable_data(immutable_data);
1274 nm->copy_values(&oop_list);
1275 nm->copy_values(&metadata_list);
1276
1277 aot_code_reader->fix_relocations(nm, &reloc_imm_oop_list, &reloc_imm_metadata_list);
1278
1279 #ifndef PRODUCT
1280 nm->asm_remarks().init();
1281 aot_code_reader->read_asm_remarks(nm->asm_remarks(), /* use_string_table */ false);
1282 nm->dbg_strings().init();
1283 aot_code_reader->read_dbg_strings(nm->dbg_strings(), /* use_string_table */ false);
1284 #endif
1285
1286 // Flush the code block
1287 ICache::invalidate_range(nm->code_begin(), nm->code_size());
1288
1289 // Create the PcDesc cache after the PcDesc data has been copied; that data is used to initialize the cache.
1290 nm->_pc_desc_container = new PcDescContainer(nm->scopes_pcs_begin());
1291
1292 nm->set_aot_code_entry(aot_code_reader->aot_code_entry());
1293
1294 nm->post_init();
1295 return nm;
1296 }
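
// Illustrative call shape (assumed; mirrored by new_nmethod() below): the
// caller allocates a code-cache buffer of archived_nm->size() bytes and
// then materializes the archived nmethod into it via restore().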
1297
1298 nmethod* nmethod::new_nmethod(nmethod* archived_nm,
1299 const methodHandle& method,
1300 AbstractCompiler* compiler,
1301 int compile_id,
1302 address reloc_data,
1303 GrowableArray<Handle>& oop_list,
1304 GrowableArray<Metadata*>& metadata_list,
1305 ImmutableOopMapSet* oop_maps,
1306 address immutable_data,
1307 GrowableArray<Handle>& reloc_imm_oop_list,
1308 GrowableArray<Metadata*>& reloc_imm_metadata_list,
1309 AOTCodeReader* aot_code_reader)
1310 {
1311 nmethod* nm = nullptr;
1312 int nmethod_size = archived_nm->size();
1313 // create nmethod
1314 {
1315 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1316 address code_cache_buffer = (address)CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(archived_nm->comp_level()));
1317 if (code_cache_buffer != nullptr) {
1318 nm = archived_nm->restore(code_cache_buffer,
1319 method,
1320 compile_id,
1321 reloc_data,
1322 oop_list,
1323 metadata_list,
1324 oop_maps,
1325 immutable_data,
1326 reloc_imm_oop_list,
1327 reloc_imm_metadata_list,
1328 aot_code_reader);
1329 nm->record_nmethod_dependency();
1330 NOT_PRODUCT(note_java_nmethod(nm));
1331 }
1332 }
1333 // Do verification and logging outside CodeCache_lock.
1334 if (nm != nullptr) {
1335 #ifdef ASSERT
1336 LogTarget(Debug, aot, codecache, nmethod) log;
1337 if (log.is_enabled()) {
1338 LogStream out(log);
1339 out.print_cr("== new_nmethod 2");
1340 FlagSetting fs(PrintRelocations, true);
1341 nm->print_on_impl(&out);
1342 nm->decode(&out);
1343 }
1344 #endif
1345 // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
1346 DEBUG_ONLY(nm->verify();)
1347 nm->log_new_nmethod();
1348 }
1349 return nm;
1350 }
1351
1352 // Fill in default values for various fields
1353 void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) {
1354 // avoid uninitialized fields, even for short time periods
1355 _exception_cache = nullptr;
1356 _gc_data = nullptr;
1357 _oops_do_mark_link = nullptr;
1358 _compiled_ic_data = nullptr;
1359
1360 _is_unloading_state = 0;
1361 _state = not_installed;
1362
1363 _has_unsafe_access = 0;
1364 _has_method_handle_invokes = 0;
1365 _has_wide_vectors = 0;
1366 _has_monitors = 0;
1367 _has_scoped_access = 0;
1368 _has_flushed_dependencies = 0;
1369 _is_unlinked = 0;
1370 _load_reported = 0; // jvmti state
1371 _preloaded = 0;
1372 _has_clinit_barriers = 0;
1373
1374 _used = false;
1375 _deoptimization_status = not_marked;
1376
1377 // SECT_CONSTS is first in code buffer so the offset should be 0.
1378 int consts_offset = code_buffer->total_offset_of(code_buffer->consts());
1379 assert(consts_offset == 0, "const_offset: %d", consts_offset);
1380
1381 _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
1382
1383 CHECKED_CAST(_entry_offset, uint16_t, (offsets->value(CodeOffsets::Entry)));
1384 CHECKED_CAST(_verified_entry_offset, uint16_t, (offsets->value(CodeOffsets::Verified_Entry)));
1385
1386 _skipped_instructions_size = code_buffer->total_skipped_instructions_size();
1387 }
1388
1389 // Post initialization
1390 void nmethod::post_init() {
1391 clear_unloading_state();
1392
1393 finalize_relocations();
1394
1427
1428 _osr_entry_point = nullptr;
1429 _pc_desc_container = nullptr;
1430 _entry_bci = InvocationEntryBci;
1431 _compile_id = compile_id;
1432 _comp_level = CompLevel_none;
1433 _compiler_type = type;
1434 _orig_pc_offset = 0;
1435 _num_stack_arg_slots = 0;
1436
1437 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1438 // Continuation enter intrinsic
1439 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1440 } else {
1441 _exception_offset = 0;
1442 }
1443 // Native wrappers do not have deopt handlers. Make the values
1444 // something that will never match a pc, like the nmethod vtable entry.
1445 _deopt_handler_offset = 0;
1446 _deopt_mh_handler_offset = 0;
1447 _aot_code_entry = nullptr;
1448 _method_profiling_count = 0;
1449 _unwind_handler_offset = 0;
1450
1451 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1452 uint16_t metadata_size;
1453 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1454 JVMCI_ONLY( _metadata_size = metadata_size; )
1455 assert(_mutable_data_size == _relocation_size + metadata_size,
1456 "wrong mutable data size: %d != %d + %d",
1457 _mutable_data_size, _relocation_size, metadata_size);
1458
1459 // Native wrappers do not have read-only data, but we need a unique, non-null address.
1460 _immutable_data = blob_end();
1461 _immutable_data_size = 0;
1462 _nul_chk_table_offset = 0;
1463 _handler_table_offset = 0;
1464 _scopes_pcs_offset = 0;
1465 _scopes_data_offset = 0;
1466 #if INCLUDE_JVMCI
1467 _speculations_offset = 0;
1468 #endif
1488 // Both cases are handled in decode2(), which is called via print_code() -> decode().
1489 if (PrintNativeNMethods) {
1490 tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------");
1491 print_code();
1492 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1493 #if defined(SUPPORT_DATA_STRUCTS)
1494 if (AbstractDisassembler::show_structs()) {
1495 if (oop_maps != nullptr) {
1496 tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning
1497 oop_maps->print_on(tty);
1498 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1499 }
1500 }
1501 #endif
1502 } else {
1503 print(); // print the header part only.
1504 }
1505 #if defined(SUPPORT_DATA_STRUCTS)
1506 if (AbstractDisassembler::show_structs()) {
1507 if (PrintRelocations) {
1508 print_relocations_on(tty);
1509 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1510 }
1511 }
1512 #endif
1513 if (xtty != nullptr) {
1514 xtty->tail("print_native_nmethod");
1515 }
1516 }
1517 }
1518
1519 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
1520 return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
1521 }
1522
1523 void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () {
1524 // Try MethodNonProfiled and MethodProfiled.
1525 void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled);
1526 if (return_value != nullptr || !allow_NonNMethod_space) return return_value;
1527 // Try NonNMethod or give up.
1528 return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);
1532 nmethod::nmethod(
1533 Method* method,
1534 CompilerType type,
1535 int nmethod_size,
1536 int immutable_data_size,
1537 int mutable_data_size,
1538 int compile_id,
1539 int entry_bci,
1540 address immutable_data,
1541 CodeOffsets* offsets,
1542 int orig_pc_offset,
1543 DebugInformationRecorder* debug_info,
1544 Dependencies* dependencies,
1545 CodeBuffer *code_buffer,
1546 int frame_size,
1547 OopMapSet* oop_maps,
1548 ExceptionHandlerTable* handler_table,
1549 ImplicitExceptionTable* nul_chk_table,
1550 AbstractCompiler* compiler,
1551 CompLevel comp_level
1552 , AOTCodeEntry* aot_code_entry
1553 #if INCLUDE_JVMCI
1554 , char* speculations,
1555 int speculations_len,
1556 JVMCINMethodData* jvmci_data
1557 #endif
1558 )
1559 : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1560 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1561 _deoptimization_generation(0),
1562 _gc_epoch(CodeCache::gc_epoch()),
1563 _method(method),
1564 _osr_link(nullptr)
1565 {
1566 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1567 {
1568 DEBUG_ONLY(NoSafepointVerifier nsv;)
1569 assert_locked_or_safepoint(CodeCache_lock);
1570
1571 init_defaults(code_buffer, offsets);
1572 _aot_code_entry = aot_code_entry;
1573 _method_profiling_count = 0;
1574
1575 _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
1576 _entry_bci = entry_bci;
1577 _compile_id = compile_id;
1578 _comp_level = comp_level;
1579 _compiler_type = type;
1580 _orig_pc_offset = orig_pc_offset;
1581
1582 _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 0 : _method->constMethod()->num_stack_arg_slots();
1583
1584 set_ctable_begin(header_begin() + content_offset());
1585
1586 #if INCLUDE_JVMCI
1587 if (compiler->is_jvmci()) {
1588 // JVMCI might not produce any stub sections
1589 if (offsets->value(CodeOffsets::Exceptions) != -1) {
1590 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
1591 } else {
1592 _exception_offset = -1;
1593 }
1684 #if INCLUDE_JVMCI
1685 // Copy speculations to nmethod
1686 if (speculations_size() != 0) {
1687 memcpy(speculations_begin(), speculations, speculations_len);
1688 }
1689 #endif
1690
1691 post_init();
1692
1693 // We use the entry point information to find out whether a method is
1694 // static or non-static.
1695 assert(compiler->is_c2() || compiler->is_jvmci() ||
1696 _method->is_static() == (entry_point() == verified_entry_point()),
1697 " entry points must be same for static methods and vice versa");
1698 }
1699 }
1700
1701 // Print a short set of xml attributes to identify this nmethod. The
1702 // output should be embedded in some other element.
1703 void nmethod::log_identity(xmlStream* log) const {
1704 assert(log->inside_attrs_or_error(), "printing attributes");
1705 log->print(" compile_id='%d'", compile_id());
1706 const char* nm_kind = compile_kind();
1707 if (nm_kind != nullptr) log->print(" compile_kind='%s'", nm_kind);
1708 log->print(" compiler='%s'", compiler_name());
1709 if (TieredCompilation) {
1710 log->print(" compile_level='%d'", comp_level());
1711 }
1712 #if INCLUDE_JVMCI
1713 if (jvmci_nmethod_data() != nullptr) {
1714 const char* jvmci_name = jvmci_nmethod_data()->name();
1715 if (jvmci_name != nullptr) {
1716 log->print(" jvmci_mirror_name='");
1717 log->text("%s", jvmci_name);
1718 log->print("'");
1719 }
1720 }
1721 #endif
1722 }
1723
1724
1725 #define LOG_OFFSET(log, name) \
1726 if (p2i(name##_end()) - p2i(name##_begin())) \
1727 log->print(" " XSTR(name) "_offset='%zd'" , \
1728 p2i(name##_begin()) - p2i(this))
1729
1730
1815 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1816 if (oop_maps() != nullptr) {
1817 tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning
1818 oop_maps()->print_on(tty);
1819 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1820 }
1821 }
1822 #endif
1823 } else {
1824 print(); // print the header part only.
1825 }
1826
1827 #if defined(SUPPORT_DATA_STRUCTS)
1828 if (AbstractDisassembler::show_structs()) {
1829 methodHandle mh(Thread::current(), _method);
1830 if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDebugInfo)) {
1831 print_scopes();
1832 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1833 }
1834 if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommandEnum::PrintRelocations)) {
1835 print_relocations_on(tty);
1836 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1837 }
1838 if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDependencies)) {
1839 print_dependencies_on(tty);
1840 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1841 }
1842 if (printmethod || PrintExceptionHandlers) {
1843 print_handler_table();
1844 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1845 print_nul_chk_table();
1846 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1847 }
1848
1849 if (printmethod) {
1850 print_recorded_oops();
1851 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1852 print_recorded_metadata();
1853 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1854 }
1855 }
1856 #endif
1857
1858 if (xtty != nullptr) {
1859 xtty->tail("print_nmethod");
1860 }
1861 }
1862
1863
1864 // Promote one word from an assembly-time handle to a live embedded oop.
1865 inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
1866 if (handle == nullptr ||
1867 // As a special case, IC oops are initialized to 1 or -1.
1868 handle == (jobject) Universe::non_oop_word()) {
1869 *(void**)dest = handle;
1870 } else {
1871 *dest = JNIHandles::resolve_non_null(handle);
1872 }
1873 }
1874
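// A minimal sketch of the three cases handled above (illustrative only;
// `slot` and `live_handle` are hypothetical names for an oop location in
// this nmethod and a live JNI handle):
//
//   initialize_immediate_oop(slot, nullptr);                             // stays null
//   initialize_immediate_oop(slot, (jobject) Universe::non_oop_word());  // IC sentinel kept raw
//   initialize_immediate_oop(slot, live_handle);                         // resolved via JNIHandles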
1875 void nmethod::copy_values(GrowableArray<Handle>* array) {
1876 int length = array->length();
1877 assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
1878 oop* dest = oops_begin();
1879 for (int index = 0 ; index < length; index++) {
1880 dest[index] = array->at(index)();
1881 }
1882 }
1883
1884 // Must have the same name as the Handle overload because it is called from a template
1885 void nmethod::copy_values(GrowableArray<jobject>* array) {
1886 int length = array->length();
1887 assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
1888 oop* dest = oops_begin();
1889 for (int index = 0 ; index < length; index++) {
1890 initialize_immediate_oop(&dest[index], array->at(index));
1891 }
1892
1893 // Now we can fix up all the oops in the code. We need to do this
1894 // in the code because the assembler uses jobjects as placeholders.
1895 // The code and relocations have already been initialized by the
1896 // CodeBlob constructor, so it is valid even at this early point to
1897 // iterate over relocations and patch the code.
1898 fix_oop_relocations(nullptr, nullptr, /*initialize_immediates=*/ true);
1899 }
1900
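// Usage sketch (illustrative): during installation, the jobject placeholders
// collected at assembly time are transferred in one pass. Assuming `handles`
// is the GrowableArray<jobject> produced by the shared oop recorder:
//
//   nm->copy_values(handles);  // resolves each handle and patches oop relocations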
1901 void nmethod::copy_values(GrowableArray<Metadata*>* array) {
1902 int length = array->length();
1910 void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
1911 // re-patch all oop-bearing instructions, just in case some oops moved
1912 RelocIterator iter(this, begin, end);
1913 while (iter.next()) {
1914 if (iter.type() == relocInfo::oop_type) {
1915 oop_Relocation* reloc = iter.oop_reloc();
1916 if (initialize_immediates && reloc->oop_is_immediate()) {
1917 oop* dest = reloc->oop_addr();
1918 jobject obj = *reinterpret_cast<jobject*>(dest);
1919 initialize_immediate_oop(dest, obj);
1920 }
1921 // Refresh the oop-related bits of this instruction.
1922 reloc->fix_oop_relocation();
1923 } else if (iter.type() == relocInfo::metadata_type) {
1924 metadata_Relocation* reloc = iter.metadata_reloc();
1925 reloc->fix_metadata_relocation();
1926 }
1927 }
1928 }
1929
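// Callers can re-patch either a sub-range of the code or, by passing null
// bounds, every oop-bearing instruction. Illustrative sketch:
//
//   nm->fix_oop_relocations(nullptr, nullptr, /*initialize_immediates=*/ false);
//
// as done above by copy_values() with initialize_immediates set to true.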
1930 void nmethod::create_reloc_immediates_list(JavaThread* thread, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) {
1931 RelocIterator iter(this);
1932 while (iter.next()) {
1933 if (iter.type() == relocInfo::oop_type) {
1934 oop_Relocation* reloc = iter.oop_reloc();
1935 if (reloc->oop_is_immediate()) {
1936 oop dest = reloc->oop_value();
1937 Handle h(thread, dest);
1938 oop_list.append(h);
1939 }
1940 } else if (iter.type() == relocInfo::metadata_type) {
1941 metadata_Relocation* reloc = iter.metadata_reloc();
1942 if (reloc->metadata_is_immediate()) {
1943 Metadata* m = reloc->metadata_value();
1944 metadata_list.append(m);
1945 }
1946 }
1947 }
1948 }
1949
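// Usage sketch (illustrative, assuming a JavaThread* `thread` and an
// nmethod* `nm` are in scope):
//
//   GrowableArray<Handle>    oop_list;
//   GrowableArray<Metadata*> metadata_list;
//   nm->create_reloc_immediates_list(thread, oop_list, metadata_list);
//   // Both lists now hold the oops and metadata embedded directly in the
//   // instruction stream.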
1950 static void install_post_call_nop_displacement(nmethod* nm, address pc) {
1951 NativePostCallNop* nop = nativePostCallNop_at((address) pc);
1952 intptr_t cbaddr = (intptr_t) nm;
1953 intptr_t offset = ((intptr_t) pc) - cbaddr;
1954
1955 int oopmap_slot = nm->oop_maps()->find_slot_for_offset(int((intptr_t) pc - (intptr_t) nm->code_begin()));
1956 if (oopmap_slot < 0) { // this can happen during asynchronous (non-safepoint) stack walks
1957 log_debug(codecache)("failed to find oopmap for cb: " INTPTR_FORMAT " offset: %d", cbaddr, (int) offset);
1958 } else if (!nop->patch(oopmap_slot, offset)) {
1959 log_debug(codecache)("failed to encode %d %d", oopmap_slot, (int) offset);
1960 }
1961 }
1962
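// Conceptually, the patched post-call nop carries two small values (the
// exact bit layout is platform-specific, see NativePostCallNop::patch):
//
//   [ oopmap_slot | offset of pc within the nmethod ]
//
// so a stack walker landing on this return pc can recover the owning
// nmethod and its oop map without a code cache lookup.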
1963 void nmethod::finalize_relocations() {
1964 NoSafepointVerifier nsv;
1965
1966 GrowableArray<NativeMovConstReg*> virtual_call_data;
1967
1968 // Make sure that post-call nops fill in nmethod offsets eagerly, so
1969 // that we do not have to race with deoptimization.
2091 Atomic::store(&_gc_epoch, CodeCache::gc_epoch());
2092 }
2093
2094 bool nmethod::is_maybe_on_stack() {
2095 // If the condition below is true, it means that the nmethod was found to
2096 // be alive during the previous completed marking cycle.
2097 return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
2098 }
2099
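// Illustrative reading of the comparison above: _gc_epoch is refreshed when
// the nmethod is observed during marking, so
//
//   Atomic::load(&_gc_epoch) >= previous_completed_gc_marking_cycle()
//
// means the nmethod may still have activations on some stack, while a
// smaller epoch means it was not seen in the last completed cycle.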
2100 void nmethod::inc_decompile_count() {
2101 if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
2102 // Could be gated by ProfileTraps, but do not bother...
2103 Method* m = method();
2104 if (m == nullptr) return;
2105 MethodData* mdo = m->method_data();
2106 if (mdo == nullptr) return;
2107 // There is a benign race here. See comments in methodData.hpp.
2108 mdo->inc_decompile_count();
2109 }
2110
2111 void nmethod::inc_method_profiling_count() {
2112 Atomic::inc(&_method_profiling_count);
2113 }
2114
2115 uint64_t nmethod::method_profiling_count() {
2116 return _method_profiling_count;
2117 }
2118
2119 bool nmethod::try_transition(signed char new_state_int) {
2120 signed char new_state = new_state_int;
2121 assert_lock_strong(NMethodState_lock);
2122 signed char old_state = _state;
2123 if (old_state >= new_state) {
2124 // Ensure monotonicity of transitions.
2125 return false;
2126 }
2127 Atomic::store(&_state, new_state);
2128 return true;
2129 }
2130
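// Monotonicity sketch (illustrative; assumes not_installed orders before
// not_entrant in the state enum declared in nmethod.hpp):
//
//   nm->try_transition(not_entrant);    // true if the state moves forward
//   nm->try_transition(not_installed);  // false: would move the state backward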
2131 void nmethod::invalidate_osr_method() {
2132 assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
2133 // Remove from list of active nmethods
2134 if (method() != nullptr) {
2135 method()->method_holder()->remove_osr_nmethod(this);
2136 }
2137 }
2138
2150 }
2151 }
2152
2153 ResourceMark rm;
2154 stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
2155 ss.print("made not entrant: %s", reason);
2156
2157 CompileTask::print_ul(this, ss.freeze());
2158 if (PrintCompilation) {
2159 print_on_with_msg(tty, ss.freeze());
2160 }
2161 }
2162
2163 void nmethod::unlink_from_method() {
2164 if (method() != nullptr) {
2165 method()->unlink_code(this);
2166 }
2167 }
2168
2169 // Invalidate code
2170 bool nmethod::make_not_entrant(const char* reason, bool keep_aot_entry) {
2171 assert(reason != nullptr, "Must provide a reason");
2172
2173 // This can be called while the system is already at a safepoint, which is OK.
2174 NoSafepointVerifier nsv;
2175
2176 if (is_unloading()) {
2177 // If the nmethod is unloading, then it is already not entrant through
2178 // the nmethod entry barriers. No need to do anything; GC will unload it.
2179 return false;
2180 }
2181
2182 if (Atomic::load(&_state) == not_entrant) {
2183 // Avoid taking the lock if already in required state.
2184 // This is safe from races because the state is an end-state,
2185 // which the nmethod cannot back out of once entered.
2186 // No need for fencing either.
2187 return false;
2188 }
2189
2190 {
2226 }
2227
2228 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2229 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2230 // If nmethod entry barriers are not supported, we won't mark
2231 // nmethods as on-stack when they become on-stack. So we
2232 // degrade to a less accurate flushing strategy, for now.
2233 mark_as_maybe_on_stack();
2234 }
2235
2236 // Change state
2237 bool success = try_transition(not_entrant);
2238 assert(success, "Transition can't fail");
2239
2240 // Log the transition once
2241 log_state_change(reason);
2242
2243 // Remove nmethod from method.
2244 unlink_from_method();
2245
2246 if (!keep_aot_entry) {
2247 // The AOT code is kept only when this nmethod was simply replaced;
2248 // otherwise the AOT entry is made not entrant as well.
2249 AOTCodeCache::invalidate(_aot_code_entry);
2250 }
2251
2252 CompileBroker::log_not_entrant(this);
2253 } // leave critical region under NMethodState_lock
2254
2255 #if INCLUDE_JVMCI
2256 // Invalidate can't occur while holding the NMethodState_lock
2257 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2258 if (nmethod_data != nullptr) {
2259 nmethod_data->invalidate_nmethod_mirror(this);
2260 }
2261 #endif
2262
2263 #ifdef ASSERT
2264 if (is_osr_method() && method() != nullptr) {
2265 // Make sure osr nmethod is invalidated, i.e. not on the list
2266 bool found = method()->method_holder()->remove_osr_nmethod(this);
2267 assert(!found, "osr nmethod should have been invalidated");
2268 }
2269 #endif
2270
2271 return true;
2272 }
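
// Caller sketch (illustrative; the reason string is hypothetical and is
// used only for logging):
//
//   if (nm->make_not_entrant("dependencies invalidated", /*keep_aot_entry=*/ false)) {
//     // Entry points are now invalidated; activations already running this
//     // code may continue until they return or are deoptimized.
//   }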
2295 JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2296 if (nmethod_data != nullptr) {
2297 nmethod_data->invalidate_nmethod_mirror(this);
2298 }
2299 #endif
2300
2301 // Post the unload event before flushing, as the jmethodID is still in use
2302 post_compiled_method_unload();
2303
2304 // Register for flushing when it is safe. For concurrent class unloading,
2305 // that would be after the unloading handshake, and for STW class unloading
2306 // that would be when getting back to the VM thread.
2307 ClassUnloadingContext::context()->register_unlinked_nmethod(this);
2308 }
2309
2310 void nmethod::purge(bool unregister_nmethod) {
2311
2312 MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2313
2314 // completely deallocate this method
2315 Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, compile_kind(), p2i(this));
2316 log_debug(codecache)("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
2317 "/Free CodeCache:%zuKb",
2318 compile_kind(), _compile_id, p2i(this), CodeCache::blob_count(),
2319 CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
2320
2321 // We need to deallocate any ExceptionCache data.
2322 // Note that we do not need to grab the nmethod lock for this; it had
2323 // better be thread-safe if we are disposing of it!
2324 ExceptionCache* ec = exception_cache();
2325 while (ec != nullptr) {
2326 ExceptionCache* next = ec->next();
2327 delete ec;
2328 ec = next;
2329 }
2330 if (_pc_desc_container != nullptr) {
2331 delete _pc_desc_container;
2332 }
2333 if (_compiled_ic_data != nullptr) {
2334 delete[] _compiled_ic_data;
2335 }
2336
2337 if (_immutable_data != data_end() && !AOTCodeCache::is_address_in_aot_cache((address)_oop_maps)) {
2338 os::free(_immutable_data);
2339 _immutable_data = blob_end(); // a valid, non-null address
2340 }
2341 if (unregister_nmethod) {
2342 Universe::heap()->unregister_nmethod(this);
2343 }
2344 CodeCache::unregister_old_nmethod(this);
2345
2346 CodeBlob::purge();
2347 }
2348
2349 oop nmethod::oop_at(int index) const {
2350 if (index == 0) {
2351 return nullptr;
2352 }
2353
2354 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2355 return bs_nm->oop_load_no_keepalive(this, index);
2356 }
2357
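// Index semantics sketch (illustrative): recorded oops are addressed with
// 1-based indices, and index 0 is reserved for null (see the early return
// above):
//
//   oop a = nm->oop_at(0);  // always nullptr
//   oop b = nm->oop_at(1);  // first recorded oop, loaded via the nmethod barrier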
2378 MethodHandles::clean_dependency_context(call_site);
2379 } else {
2380 InstanceKlass* ik = deps.context_type();
2381 if (ik == nullptr) {
2382 continue; // ignore things like evol_method
2383 }
2384 // During GC, the liveness of the dependee determines which class needs to be updated.
2385 // The GC may clean dependency contexts concurrently and in parallel.
2386 ik->clean_dependency_context();
2387 }
2388 }
2389 }
2390 }
2391
2392 void nmethod::post_compiled_method(CompileTask* task) {
2393 task->mark_success();
2394 task->set_nm_content_size(content_size());
2395 task->set_nm_insts_size(insts_size());
2396 task->set_nm_total_size(total_size());
2397
2398 // task->is_aot_load() is true only for loaded AOT code.
2399 // nmethod::_aot_code_entry is set for both loaded and stored AOT code,
2400 // so that the entry can be invalidated when the nmethod is deoptimized.
2401 // VerifyAOTCode is an option that prevents AOT code from being stored in the archive.
2402 guarantee((_aot_code_entry != nullptr) || !task->is_aot_load() || VerifyAOTCode, "sanity");
2403
2404 // JVMTI -- compiled method notification (must be done outside lock)
2405 post_compiled_method_load_event();
2406
2407 if (CompilationLog::log() != nullptr) {
2408 CompilationLog::log()->log_nmethod(JavaThread::current(), this);
2409 }
2410
2411 const DirectiveSet* directive = task->directive();
2412 maybe_print_nmethod(directive);
2413 }
2414
2415 // ------------------------------------------------------------------
2416 // post_compiled_method_load_event
2417 // New method for the install_code() path.
2418 // Transfers information from the compilation to JVMTI.
2419 void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
2420 // This is a bad time for a safepoint. We don't want
2421 // this nmethod to get unloaded while we're queueing the event.
2422 NoSafepointVerifier nsv;
2423
3115
3116 // Make sure all the entry points are correctly aligned for patching.
3117 NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
3118
3119 // assert(oopDesc::is_oop(method()), "must be valid");
3120
3121 ResourceMark rm;
3122
3123 if (!CodeCache::contains(this)) {
3124 fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this));
3125 }
3126
3127 if (is_native_method()) return;
3129
3130 nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
3131 if (nm != this) {
3132 fatal("find_nmethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this));
3133 }
3134
3135 // Verification can be triggered during shutdown after the AOTCodeCache is closed.
3136 // If the scopes data is in the AOT code cache, we should avoid verification during shutdown.
3137 if (!is_aot() || AOTCodeCache::is_on()) {
3138 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3139 if (!p->verify(this)) {
3140 tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
3141 }
3142 }
3143
3144 #ifdef ASSERT
3145 #if INCLUDE_JVMCI
3146 {
3147 // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap
3148 ImmutableOopMapSet* oms = oop_maps();
3149 ImplicitExceptionTable implicit_table(this);
3150 for (uint i = 0; i < implicit_table.len(); i++) {
3151 int exec_offset = (int) implicit_table.get_exec_offset(i);
3152 if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) {
3153 assert(pc_desc_at(code_begin() + exec_offset) != nullptr, "missing PcDesc");
3154 bool found = false;
3155 for (int j = 0, jmax = oms->count(); j < jmax; j++) { // j avoids shadowing the outer i
3156 if (oms->pair_at(j)->pc_offset() == exec_offset) {
3157 found = true;
3158 break;
3159 }
3160 }
3161 assert(found, "missing oopmap");
3162 }
3163 }
3164 }
3165 #endif
3166 #endif
3167 }
3168
3169 VerifyOopsClosure voc(this);
3170 oops_do(&voc);
3171 assert(voc.ok(), "embedded oops must be OK");
3172 Universe::heap()->verify_nmethod(this);
3173
3174 assert(_oops_do_mark_link == nullptr, "_oops_do_mark_link for %s should be nullptr but is " PTR_FORMAT,
3175 nm->method()->external_name(), p2i(_oops_do_mark_link));
3176 if (!is_aot() || AOTCodeCache::is_on()) {
3177 verify_scopes();
3178 }
3179
3180 CompiledICLocker nm_verify(this);
3181 VerifyMetadataClosure vmc;
3182 metadata_do(&vmc);
3183 }
3184
3185
3186 void nmethod::verify_interrupt_point(address call_site, bool is_inline_cache) {
3187
3188 // Verify IC only when nmethod installation is finished.
3189 if (!is_not_installed()) {
3190 if (CompiledICLocker::is_safe(this)) {
3191 if (is_inline_cache) {
3192 CompiledIC_at(this, call_site);
3193 } else {
3194 CompiledDirectCall::at(call_site);
3195 }
3196 } else {
3197 CompiledICLocker ml_verify(this);
3198 if (is_inline_cache) {
3327 p2i(nul_chk_table_end()),
3328 nul_chk_table_size());
3329 if (handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3330 p2i(handler_table_begin()),
3331 p2i(handler_table_end()),
3332 handler_table_size());
3333 if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3334 p2i(scopes_pcs_begin()),
3335 p2i(scopes_pcs_end()),
3336 scopes_pcs_size());
3337 if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3338 p2i(scopes_data_begin()),
3339 p2i(scopes_data_end()),
3340 scopes_data_size());
3341 #if INCLUDE_JVMCI
3342 if (speculations_size () > 0) st->print_cr(" speculations [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3343 p2i(speculations_begin()),
3344 p2i(speculations_end()),
3345 speculations_size());
3346 #endif
3347 if (AOTCodeCache::is_on() && _aot_code_entry != nullptr) {
3348 _aot_code_entry->print(st);
3349 }
3350 }
3351
3352 void nmethod::print_code() {
3353 ResourceMark m;
3354 ttyLocker ttyl;
3355 // Call the specialized decode method of this class.
3356 decode(tty);
3357 }
3358
3359 #ifndef PRODUCT // the InstanceKlass methods called below are available only then; declared as PRODUCT_RETURN
3360
3361 void nmethod::print_dependencies_on(outputStream* out) {
3362 ResourceMark rm;
3363 stringStream st;
3364 st.print_cr("Dependencies:");
3365 for (Dependencies::DepStream deps(this); deps.next(); ) {
3366 deps.print_dependency(&st);
3367 InstanceKlass* ctxk = deps.context_type();
3368 if (ctxk != nullptr) {
3369 if (ctxk->is_dependent_nmethod(this)) {
3429 st->print("scopes:");
3430 if (scopes_pcs_begin() < scopes_pcs_end()) {
3431 st->cr();
3432 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3433 if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
3434 continue;
3435
3436 ScopeDesc* sd = scope_desc_at(p->real_pc(this));
3437 while (sd != nullptr) {
3438 sd->print_on(st, p); // print output ends with a newline
3439 sd = sd->sender();
3440 }
3441 }
3442 } else {
3443 st->print_cr(" <list empty>");
3444 }
3445 }
3446 #endif
3447
3448 #ifndef PRODUCT // RelocIterator supports printing only then.
3449 void nmethod::print_relocations_on(outputStream* st) {
3450 ResourceMark m; // in case methods get printed via the debugger
3451 st->print_cr("relocations:");
3452 RelocIterator iter(this);
3453 iter.print_on(st);
3454 }
3455 #endif
3456
3457 void nmethod::print_pcs_on(outputStream* st) {
3458 ResourceMark m; // in case methods get printed via debugger
3459 st->print("pc-bytecode offsets:");
3460 if (scopes_pcs_begin() < scopes_pcs_end()) {
3461 st->cr();
3462 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3463 p->print_on(st, this); // print output ends with a newline
3464 }
3465 } else {
3466 st->print_cr(" <list empty>");
3467 }
3468 }
3469
3470 void nmethod::print_handler_table() {
3471 ExceptionHandlerTable(this).print(code_begin());
3472 }
3473
4239
4240 #endif // !PRODUCT
4241
4242 #if INCLUDE_JVMCI
4243 void nmethod::update_speculation(JavaThread* thread) {
4244 jlong speculation = thread->pending_failed_speculation();
4245 if (speculation != 0) {
4246 guarantee(jvmci_nmethod_data() != nullptr, "failed speculation in nmethod without failed speculation list");
4247 jvmci_nmethod_data()->add_failed_speculation(this, speculation);
4248 thread->set_pending_failed_speculation(0);
4249 }
4250 }
4251
4252 const char* nmethod::jvmci_name() {
4253 if (jvmci_nmethod_data() != nullptr) {
4254 return jvmci_nmethod_data()->name();
4255 }
4256 return nullptr;
4257 }
4258 #endif
4259
4260 void nmethod::prepare_for_archiving_impl() {
4261 CodeBlob::prepare_for_archiving_impl();
4262 _deoptimization_generation = 0;
4263 _gc_epoch = 0;
4264 _method_profiling_count = 0;
4265 _osr_link = nullptr;
4266 _method = nullptr;
4267 _immutable_data = nullptr;
4268 _pc_desc_container = nullptr;
4269 _exception_cache = nullptr;
4270 _gc_data = nullptr;
4271 _oops_do_mark_link = nullptr;
4272 _compiled_ic_data = nullptr;
4273 _osr_entry_point = nullptr;
4274 _compile_id = -1;
4275 _deoptimization_status = not_marked;
4276 _is_unloading_state = 0;
4277 _state = not_installed;
4278 }