1 /* 2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "asm/assembler.inline.hpp" 26 #include "code/aotCodeCache.hpp" 27 #include "code/codeCache.hpp" 28 #include "code/compiledIC.hpp" 29 #include "code/dependencies.hpp" 30 #include "code/nativeInst.hpp" 31 #include "code/nmethod.inline.hpp" 32 #include "code/scopeDesc.hpp" 33 #include "compiler/abstractCompiler.hpp" 34 #include "compiler/compilationLog.hpp" 35 #include "compiler/compileBroker.hpp" 36 #include "compiler/compileLog.hpp" 37 #include "compiler/compilerDirectives.hpp" 38 #include "compiler/compilerOracle.hpp" 39 #include "compiler/compileTask.hpp" 40 #include "compiler/directivesParser.hpp" 41 #include "compiler/disassembler.hpp" 42 #include "compiler/oopMap.inline.hpp" 43 #include "gc/shared/barrierSet.hpp" 44 #include "gc/shared/barrierSetNMethod.hpp" 45 #include "gc/shared/classUnloadingContext.hpp" 46 #include "gc/shared/collectedHeap.hpp" 47 #include "interpreter/bytecode.inline.hpp" 48 #include "jvm.h" 49 #include "logging/log.hpp" 50 #include "logging/logStream.hpp" 51 #include "memory/allocation.inline.hpp" 52 #include "memory/resourceArea.hpp" 53 #include "memory/universe.hpp" 54 #include "oops/access.inline.hpp" 55 #include "oops/klass.inline.hpp" 56 #include "oops/method.inline.hpp" 57 #include "oops/methodData.hpp" 58 #include "oops/oop.inline.hpp" 59 #include "oops/weakHandle.inline.hpp" 60 #include "prims/jvmtiImpl.hpp" 61 #include "prims/jvmtiThreadState.hpp" 62 #include "prims/methodHandles.hpp" 63 #include "runtime/atomic.hpp" 64 #include "runtime/continuation.hpp" 65 #include "runtime/deoptimization.hpp" 66 #include "runtime/flags/flagSetting.hpp" 67 #include "runtime/frame.inline.hpp" 68 #include "runtime/handles.inline.hpp" 69 #include "runtime/jniHandles.inline.hpp" 70 #include "runtime/orderAccess.hpp" 71 #include "runtime/os.hpp" 72 #include "runtime/safepointVerifiers.hpp" 73 #include "runtime/serviceThread.hpp" 74 #include "runtime/sharedRuntime.hpp" 75 #include "runtime/signature.hpp" 76 #include "runtime/threadWXSetters.inline.hpp" 77 #include "runtime/vmThread.hpp" 78 #include "utilities/align.hpp" 79 #include "utilities/copy.hpp" 80 #include "utilities/dtrace.hpp" 81 #include "utilities/events.hpp" 82 #include "utilities/globalDefinitions.hpp" 83 #include "utilities/hashTable.hpp" 84 #include "utilities/xmlstream.hpp" 85 #if INCLUDE_JVMCI 86 #include "jvmci/jvmciRuntime.hpp" 87 #endif 88 89 #ifdef DTRACE_ENABLED 90 91 // Only bother with this argument setup if dtrace is available 92 93 #define DTRACE_METHOD_UNLOAD_PROBE(method) \ 94 { \ 95 Method* m = (method); \ 96 if (m != nullptr) { \ 97 Symbol* klass_name = m->klass_name(); \ 98 Symbol* name = m->name(); \ 99 Symbol* signature = m->signature(); \ 100 HOTSPOT_COMPILED_METHOD_UNLOAD( \ 101 (char *) klass_name->bytes(), klass_name->utf8_length(), \ 102 (char *) name->bytes(), name->utf8_length(), \ 103 (char *) signature->bytes(), signature->utf8_length()); \ 104 } \ 105 } 106 107 #else // ndef DTRACE_ENABLED 108 109 #define DTRACE_METHOD_UNLOAD_PROBE(method) 110 111 #endif 112 113 // Cast from int value to narrow type 114 #define CHECKED_CAST(result, T, thing) \ 115 result = static_cast<T>(thing); \ 116 guarantee(static_cast<int>(result) == thing, "failed: %d != %d", static_cast<int>(result), thing); 117 118 //--------------------------------------------------------------------------------- 119 // NMethod statistics 120 // They are printed under various flags, including: 121 // PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation. 122 // (In the latter two cases, they like other stats are printed to the log only.) 123 124 #ifndef PRODUCT 125 // These variables are put into one block to reduce relocations 126 // and make it simpler to print from the debugger. 127 struct java_nmethod_stats_struct { 128 uint nmethod_count; 129 uint total_nm_size; 130 uint total_immut_size; 131 uint total_mut_size; 132 uint relocation_size; 133 uint consts_size; 134 uint insts_size; 135 uint stub_size; 136 uint oops_size; 137 uint metadata_size; 138 uint dependencies_size; 139 uint nul_chk_table_size; 140 uint handler_table_size; 141 uint scopes_pcs_size; 142 uint scopes_data_size; 143 #if INCLUDE_JVMCI 144 uint speculations_size; 145 uint jvmci_data_size; 146 #endif 147 148 void note_nmethod(nmethod* nm) { 149 nmethod_count += 1; 150 total_nm_size += nm->size(); 151 total_immut_size += nm->immutable_data_size(); 152 total_mut_size += nm->mutable_data_size(); 153 relocation_size += nm->relocation_size(); 154 consts_size += nm->consts_size(); 155 insts_size += nm->insts_size(); 156 stub_size += nm->stub_size(); 157 oops_size += nm->oops_size(); 158 metadata_size += nm->metadata_size(); 159 scopes_data_size += nm->scopes_data_size(); 160 scopes_pcs_size += nm->scopes_pcs_size(); 161 dependencies_size += nm->dependencies_size(); 162 handler_table_size += nm->handler_table_size(); 163 nul_chk_table_size += nm->nul_chk_table_size(); 164 #if INCLUDE_JVMCI 165 speculations_size += nm->speculations_size(); 166 jvmci_data_size += nm->jvmci_data_size(); 167 #endif 168 } 169 void print_nmethod_stats(const char* name) { 170 if (nmethod_count == 0) return; 171 tty->print_cr("Statistics for %u bytecoded nmethods for %s:", nmethod_count, name); 172 uint total_size = total_nm_size + total_immut_size + total_mut_size; 173 if (total_nm_size != 0) { 174 tty->print_cr(" total size = %u (100%%)", total_size); 175 tty->print_cr(" in CodeCache = %u (%f%%)", total_nm_size, (total_nm_size * 100.0f)/total_size); 176 } 177 uint header_size = (uint)(nmethod_count * sizeof(nmethod)); 178 if (nmethod_count != 0) { 179 tty->print_cr(" header = %u (%f%%)", header_size, (header_size * 100.0f)/total_nm_size); 180 } 181 if (consts_size != 0) { 182 tty->print_cr(" constants = %u (%f%%)", consts_size, (consts_size * 100.0f)/total_nm_size); 183 } 184 if (insts_size != 0) { 185 tty->print_cr(" main code = %u (%f%%)", insts_size, (insts_size * 100.0f)/total_nm_size); 186 } 187 if (stub_size != 0) { 188 tty->print_cr(" stub code = %u (%f%%)", stub_size, (stub_size * 100.0f)/total_nm_size); 189 } 190 if (oops_size != 0) { 191 tty->print_cr(" oops = %u (%f%%)", oops_size, (oops_size * 100.0f)/total_nm_size); 192 } 193 if (total_mut_size != 0) { 194 tty->print_cr(" mutable data = %u (%f%%)", total_mut_size, (total_mut_size * 100.0f)/total_size); 195 } 196 if (relocation_size != 0) { 197 tty->print_cr(" relocation = %u (%f%%)", relocation_size, (relocation_size * 100.0f)/total_mut_size); 198 } 199 if (metadata_size != 0) { 200 tty->print_cr(" metadata = %u (%f%%)", metadata_size, (metadata_size * 100.0f)/total_mut_size); 201 } 202 #if INCLUDE_JVMCI 203 if (jvmci_data_size != 0) { 204 tty->print_cr(" JVMCI data = %u (%f%%)", jvmci_data_size, (jvmci_data_size * 100.0f)/total_mut_size); 205 } 206 #endif 207 if (total_immut_size != 0) { 208 tty->print_cr(" immutable data = %u (%f%%)", total_immut_size, (total_immut_size * 100.0f)/total_size); 209 } 210 if (dependencies_size != 0) { 211 tty->print_cr(" dependencies = %u (%f%%)", dependencies_size, (dependencies_size * 100.0f)/total_immut_size); 212 } 213 if (nul_chk_table_size != 0) { 214 tty->print_cr(" nul chk table = %u (%f%%)", nul_chk_table_size, (nul_chk_table_size * 100.0f)/total_immut_size); 215 } 216 if (handler_table_size != 0) { 217 tty->print_cr(" handler table = %u (%f%%)", handler_table_size, (handler_table_size * 100.0f)/total_immut_size); 218 } 219 if (scopes_pcs_size != 0) { 220 tty->print_cr(" scopes pcs = %u (%f%%)", scopes_pcs_size, (scopes_pcs_size * 100.0f)/total_immut_size); 221 } 222 if (scopes_data_size != 0) { 223 tty->print_cr(" scopes data = %u (%f%%)", scopes_data_size, (scopes_data_size * 100.0f)/total_immut_size); 224 } 225 #if INCLUDE_JVMCI 226 if (speculations_size != 0) { 227 tty->print_cr(" speculations = %u (%f%%)", speculations_size, (speculations_size * 100.0f)/total_immut_size); 228 } 229 #endif 230 } 231 }; 232 233 struct native_nmethod_stats_struct { 234 uint native_nmethod_count; 235 uint native_total_size; 236 uint native_relocation_size; 237 uint native_insts_size; 238 uint native_oops_size; 239 uint native_metadata_size; 240 void note_native_nmethod(nmethod* nm) { 241 native_nmethod_count += 1; 242 native_total_size += nm->size(); 243 native_relocation_size += nm->relocation_size(); 244 native_insts_size += nm->insts_size(); 245 native_oops_size += nm->oops_size(); 246 native_metadata_size += nm->metadata_size(); 247 } 248 void print_native_nmethod_stats() { 249 if (native_nmethod_count == 0) return; 250 tty->print_cr("Statistics for %u native nmethods:", native_nmethod_count); 251 if (native_total_size != 0) tty->print_cr(" N. total size = %u", native_total_size); 252 if (native_relocation_size != 0) tty->print_cr(" N. relocation = %u", native_relocation_size); 253 if (native_insts_size != 0) tty->print_cr(" N. main code = %u", native_insts_size); 254 if (native_oops_size != 0) tty->print_cr(" N. oops = %u", native_oops_size); 255 if (native_metadata_size != 0) tty->print_cr(" N. metadata = %u", native_metadata_size); 256 } 257 }; 258 259 struct pc_nmethod_stats_struct { 260 uint pc_desc_init; // number of initialization of cache (= number of caches) 261 uint pc_desc_queries; // queries to nmethod::find_pc_desc 262 uint pc_desc_approx; // number of those which have approximate true 263 uint pc_desc_repeats; // number of _pc_descs[0] hits 264 uint pc_desc_hits; // number of LRU cache hits 265 uint pc_desc_tests; // total number of PcDesc examinations 266 uint pc_desc_searches; // total number of quasi-binary search steps 267 uint pc_desc_adds; // number of LUR cache insertions 268 269 void print_pc_stats() { 270 tty->print_cr("PcDesc Statistics: %u queries, %.2f comparisons per query", 271 pc_desc_queries, 272 (double)(pc_desc_tests + pc_desc_searches) 273 / pc_desc_queries); 274 tty->print_cr(" caches=%d queries=%u/%u, hits=%u+%u, tests=%u+%u, adds=%u", 275 pc_desc_init, 276 pc_desc_queries, pc_desc_approx, 277 pc_desc_repeats, pc_desc_hits, 278 pc_desc_tests, pc_desc_searches, pc_desc_adds); 279 } 280 }; 281 282 #ifdef COMPILER1 283 static java_nmethod_stats_struct c1_java_nmethod_stats; 284 #endif 285 #ifdef COMPILER2 286 static java_nmethod_stats_struct c2_java_nmethod_stats; 287 #endif 288 #if INCLUDE_JVMCI 289 static java_nmethod_stats_struct jvmci_java_nmethod_stats; 290 #endif 291 static java_nmethod_stats_struct unknown_java_nmethod_stats; 292 293 static native_nmethod_stats_struct native_nmethod_stats; 294 static pc_nmethod_stats_struct pc_nmethod_stats; 295 296 static void note_java_nmethod(nmethod* nm) { 297 #ifdef COMPILER1 298 if (nm->is_compiled_by_c1()) { 299 c1_java_nmethod_stats.note_nmethod(nm); 300 } else 301 #endif 302 #ifdef COMPILER2 303 if (nm->is_compiled_by_c2()) { 304 c2_java_nmethod_stats.note_nmethod(nm); 305 } else 306 #endif 307 #if INCLUDE_JVMCI 308 if (nm->is_compiled_by_jvmci()) { 309 jvmci_java_nmethod_stats.note_nmethod(nm); 310 } else 311 #endif 312 { 313 unknown_java_nmethod_stats.note_nmethod(nm); 314 } 315 } 316 #endif // !PRODUCT 317 318 //--------------------------------------------------------------------------------- 319 320 321 ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) { 322 assert(pc != nullptr, "Must be non null"); 323 assert(exception.not_null(), "Must be non null"); 324 assert(handler != nullptr, "Must be non null"); 325 326 _count = 0; 327 _exception_type = exception->klass(); 328 _next = nullptr; 329 _purge_list_next = nullptr; 330 331 add_address_and_handler(pc,handler); 332 } 333 334 335 address ExceptionCache::match(Handle exception, address pc) { 336 assert(pc != nullptr,"Must be non null"); 337 assert(exception.not_null(),"Must be non null"); 338 if (exception->klass() == exception_type()) { 339 return (test_address(pc)); 340 } 341 342 return nullptr; 343 } 344 345 346 bool ExceptionCache::match_exception_with_space(Handle exception) { 347 assert(exception.not_null(),"Must be non null"); 348 if (exception->klass() == exception_type() && count() < cache_size) { 349 return true; 350 } 351 return false; 352 } 353 354 355 address ExceptionCache::test_address(address addr) { 356 int limit = count(); 357 for (int i = 0; i < limit; i++) { 358 if (pc_at(i) == addr) { 359 return handler_at(i); 360 } 361 } 362 return nullptr; 363 } 364 365 366 bool ExceptionCache::add_address_and_handler(address addr, address handler) { 367 if (test_address(addr) == handler) return true; 368 369 int index = count(); 370 if (index < cache_size) { 371 set_pc_at(index, addr); 372 set_handler_at(index, handler); 373 increment_count(); 374 return true; 375 } 376 return false; 377 } 378 379 ExceptionCache* ExceptionCache::next() { 380 return Atomic::load(&_next); 381 } 382 383 void ExceptionCache::set_next(ExceptionCache *ec) { 384 Atomic::store(&_next, ec); 385 } 386 387 //----------------------------------------------------------------------------- 388 389 390 // Helper used by both find_pc_desc methods. 391 static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) { 392 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests); 393 if (!approximate) { 394 return pc->pc_offset() == pc_offset; 395 } else { 396 // Do not look before the sentinel 397 assert(pc_offset > PcDesc::lower_offset_limit, "illegal pc_offset"); 398 return pc_offset <= pc->pc_offset() && (pc-1)->pc_offset() < pc_offset; 399 } 400 } 401 402 void PcDescCache::init_to(PcDesc* initial_pc_desc) { 403 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_init); 404 // initialize the cache by filling it with benign (non-null) values 405 assert(initial_pc_desc != nullptr && initial_pc_desc->pc_offset() == PcDesc::lower_offset_limit, 406 "must start with a sentinel"); 407 for (int i = 0; i < cache_size; i++) { 408 _pc_descs[i] = initial_pc_desc; 409 } 410 } 411 412 PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) { 413 // Note: one might think that caching the most recently 414 // read value separately would be a win, but one would be 415 // wrong. When many threads are updating it, the cache 416 // line it's in would bounce between caches, negating 417 // any benefit. 418 419 // In order to prevent race conditions do not load cache elements 420 // repeatedly, but use a local copy: 421 PcDesc* res; 422 423 // Step one: Check the most recently added value. 424 res = _pc_descs[0]; 425 assert(res != nullptr, "PcDesc cache should be initialized already"); 426 427 // Approximate only here since PcDescContainer::find_pc_desc() checked for exact case. 428 if (approximate && match_desc(res, pc_offset, approximate)) { 429 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats); 430 return res; 431 } 432 433 // Step two: Check the rest of the LRU cache. 434 for (int i = 1; i < cache_size; ++i) { 435 res = _pc_descs[i]; 436 if (res->pc_offset() < 0) break; // optimization: skip empty cache 437 if (match_desc(res, pc_offset, approximate)) { 438 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits); 439 return res; 440 } 441 } 442 443 // Report failure. 444 return nullptr; 445 } 446 447 void PcDescCache::add_pc_desc(PcDesc* pc_desc) { 448 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds); 449 // Update the LRU cache by shifting pc_desc forward. 450 for (int i = 0; i < cache_size; i++) { 451 PcDesc* next = _pc_descs[i]; 452 _pc_descs[i] = pc_desc; 453 pc_desc = next; 454 } 455 } 456 457 // adjust pcs_size so that it is a multiple of both oopSize and 458 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple 459 // of oopSize, then 2*sizeof(PcDesc) is) 460 static int adjust_pcs_size(int pcs_size) { 461 int nsize = align_up(pcs_size, oopSize); 462 if ((nsize % sizeof(PcDesc)) != 0) { 463 nsize = pcs_size + sizeof(PcDesc); 464 } 465 assert((nsize % oopSize) == 0, "correct alignment"); 466 return nsize; 467 } 468 469 bool nmethod::is_method_handle_return(address return_pc) { 470 if (!has_method_handle_invokes()) return false; 471 PcDesc* pd = pc_desc_at(return_pc); 472 if (pd == nullptr) 473 return false; 474 return pd->is_method_handle_invoke(); 475 } 476 477 // Returns a string version of the method state. 478 const char* nmethod::state() const { 479 int state = get_state(); 480 switch (state) { 481 case not_installed: 482 return "not installed"; 483 case in_use: 484 return "in use"; 485 case not_entrant: 486 return "not_entrant"; 487 default: 488 fatal("unexpected method state: %d", state); 489 return nullptr; 490 } 491 } 492 493 void nmethod::set_deoptimized_done() { 494 ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); 495 if (_deoptimization_status != deoptimize_done) { // can't go backwards 496 Atomic::store(&_deoptimization_status, deoptimize_done); 497 } 498 } 499 500 ExceptionCache* nmethod::exception_cache_acquire() const { 501 return Atomic::load_acquire(&_exception_cache); 502 } 503 504 void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) { 505 assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock"); 506 assert(new_entry != nullptr,"Must be non null"); 507 assert(new_entry->next() == nullptr, "Must be null"); 508 509 for (;;) { 510 ExceptionCache *ec = exception_cache(); 511 if (ec != nullptr) { 512 Klass* ex_klass = ec->exception_type(); 513 if (!ex_klass->is_loader_alive()) { 514 // We must guarantee that entries are not inserted with new next pointer 515 // edges to ExceptionCache entries with dead klasses, due to bad interactions 516 // with concurrent ExceptionCache cleanup. Therefore, the inserts roll 517 // the head pointer forward to the first live ExceptionCache, so that the new 518 // next pointers always point at live ExceptionCaches, that are not removed due 519 // to concurrent ExceptionCache cleanup. 520 ExceptionCache* next = ec->next(); 521 if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) { 522 CodeCache::release_exception_cache(ec); 523 } 524 continue; 525 } 526 ec = exception_cache(); 527 if (ec != nullptr) { 528 new_entry->set_next(ec); 529 } 530 } 531 if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) { 532 return; 533 } 534 } 535 } 536 537 void nmethod::clean_exception_cache() { 538 // For each nmethod, only a single thread may call this cleanup function 539 // at the same time, whether called in STW cleanup or concurrent cleanup. 540 // Note that if the GC is processing exception cache cleaning in a concurrent phase, 541 // then a single writer may contend with cleaning up the head pointer to the 542 // first ExceptionCache node that has a Klass* that is alive. That is fine, 543 // as long as there is no concurrent cleanup of next pointers from concurrent writers. 544 // And the concurrent writers do not clean up next pointers, only the head. 545 // Also note that concurrent readers will walk through Klass* pointers that are not 546 // alive. That does not cause ABA problems, because Klass* is deleted after 547 // a handshake with all threads, after all stale ExceptionCaches have been 548 // unlinked. That is also when the CodeCache::exception_cache_purge_list() 549 // is deleted, with all ExceptionCache entries that were cleaned concurrently. 550 // That similarly implies that CAS operations on ExceptionCache entries do not 551 // suffer from ABA problems as unlinking and deletion is separated by a global 552 // handshake operation. 553 ExceptionCache* prev = nullptr; 554 ExceptionCache* curr = exception_cache_acquire(); 555 556 while (curr != nullptr) { 557 ExceptionCache* next = curr->next(); 558 559 if (!curr->exception_type()->is_loader_alive()) { 560 if (prev == nullptr) { 561 // Try to clean head; this is contended by concurrent inserts, that 562 // both lazily clean the head, and insert entries at the head. If 563 // the CAS fails, the operation is restarted. 564 if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) { 565 prev = nullptr; 566 curr = exception_cache_acquire(); 567 continue; 568 } 569 } else { 570 // It is impossible to during cleanup connect the next pointer to 571 // an ExceptionCache that has not been published before a safepoint 572 // prior to the cleanup. Therefore, release is not required. 573 prev->set_next(next); 574 } 575 // prev stays the same. 576 577 CodeCache::release_exception_cache(curr); 578 } else { 579 prev = curr; 580 } 581 582 curr = next; 583 } 584 } 585 586 // public method for accessing the exception cache 587 // These are the public access methods. 588 address nmethod::handler_for_exception_and_pc(Handle exception, address pc) { 589 // We never grab a lock to read the exception cache, so we may 590 // have false negatives. This is okay, as it can only happen during 591 // the first few exception lookups for a given nmethod. 592 ExceptionCache* ec = exception_cache_acquire(); 593 while (ec != nullptr) { 594 address ret_val; 595 if ((ret_val = ec->match(exception,pc)) != nullptr) { 596 return ret_val; 597 } 598 ec = ec->next(); 599 } 600 return nullptr; 601 } 602 603 void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) { 604 // There are potential race conditions during exception cache updates, so we 605 // must own the ExceptionCache_lock before doing ANY modifications. Because 606 // we don't lock during reads, it is possible to have several threads attempt 607 // to update the cache with the same data. We need to check for already inserted 608 // copies of the current data before adding it. 609 610 MutexLocker ml(ExceptionCache_lock); 611 ExceptionCache* target_entry = exception_cache_entry_for_exception(exception); 612 613 if (target_entry == nullptr || !target_entry->add_address_and_handler(pc,handler)) { 614 target_entry = new ExceptionCache(exception,pc,handler); 615 add_exception_cache_entry(target_entry); 616 } 617 } 618 619 // private method for handling exception cache 620 // These methods are private, and used to manipulate the exception cache 621 // directly. 622 ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) { 623 ExceptionCache* ec = exception_cache_acquire(); 624 while (ec != nullptr) { 625 if (ec->match_exception_with_space(exception)) { 626 return ec; 627 } 628 ec = ec->next(); 629 } 630 return nullptr; 631 } 632 633 bool nmethod::is_at_poll_return(address pc) { 634 RelocIterator iter(this, pc, pc+1); 635 while (iter.next()) { 636 if (iter.type() == relocInfo::poll_return_type) 637 return true; 638 } 639 return false; 640 } 641 642 643 bool nmethod::is_at_poll_or_poll_return(address pc) { 644 RelocIterator iter(this, pc, pc+1); 645 while (iter.next()) { 646 relocInfo::relocType t = iter.type(); 647 if (t == relocInfo::poll_return_type || t == relocInfo::poll_type) 648 return true; 649 } 650 return false; 651 } 652 653 void nmethod::verify_oop_relocations() { 654 // Ensure sure that the code matches the current oop values 655 RelocIterator iter(this, nullptr, nullptr); 656 while (iter.next()) { 657 if (iter.type() == relocInfo::oop_type) { 658 oop_Relocation* reloc = iter.oop_reloc(); 659 if (!reloc->oop_is_immediate()) { 660 reloc->verify_oop_relocation(); 661 } 662 } 663 } 664 } 665 666 667 ScopeDesc* nmethod::scope_desc_at(address pc) { 668 PcDesc* pd = pc_desc_at(pc); 669 guarantee(pd != nullptr, "scope must be present"); 670 return new ScopeDesc(this, pd); 671 } 672 673 ScopeDesc* nmethod::scope_desc_near(address pc) { 674 PcDesc* pd = pc_desc_near(pc); 675 guarantee(pd != nullptr, "scope must be present"); 676 return new ScopeDesc(this, pd); 677 } 678 679 address nmethod::oops_reloc_begin() const { 680 // If the method is not entrant then a JMP is plastered over the 681 // first few bytes. If an oop in the old code was there, that oop 682 // should not get GC'd. Skip the first few bytes of oops on 683 // not-entrant methods. 684 if (frame_complete_offset() != CodeOffsets::frame_never_safe && 685 code_begin() + frame_complete_offset() > 686 verified_entry_point() + NativeJump::instruction_size) 687 { 688 // If we have a frame_complete_offset after the native jump, then there 689 // is no point trying to look for oops before that. This is a requirement 690 // for being allowed to scan oops concurrently. 691 return code_begin() + frame_complete_offset(); 692 } 693 694 address low_boundary = verified_entry_point(); 695 return low_boundary; 696 } 697 698 // Method that knows how to preserve outgoing arguments at call. This method must be 699 // called with a frame corresponding to a Java invoke 700 void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { 701 if (method() == nullptr) { 702 return; 703 } 704 705 // handle the case of an anchor explicitly set in continuation code that doesn't have a callee 706 JavaThread* thread = reg_map->thread(); 707 if ((thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp()) 708 JVMTI_ONLY(|| (method()->is_continuation_enter_intrinsic() && thread->on_monitor_waited_event()))) { 709 return; 710 } 711 712 if (!method()->is_native()) { 713 address pc = fr.pc(); 714 bool has_receiver, has_appendix; 715 Symbol* signature; 716 717 // The method attached by JIT-compilers should be used, if present. 718 // Bytecode can be inaccurate in such case. 719 Method* callee = attached_method_before_pc(pc); 720 if (callee != nullptr) { 721 has_receiver = !(callee->access_flags().is_static()); 722 has_appendix = false; 723 signature = callee->signature(); 724 } else { 725 SimpleScopeDesc ssd(this, pc); 726 727 Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci()); 728 has_receiver = call.has_receiver(); 729 has_appendix = call.has_appendix(); 730 signature = call.signature(); 731 } 732 733 fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f); 734 } else if (method()->is_continuation_enter_intrinsic()) { 735 // This method only calls Continuation.enter() 736 Symbol* signature = vmSymbols::continuationEnter_signature(); 737 fr.oops_compiled_arguments_do(signature, false, false, reg_map, f); 738 } 739 } 740 741 Method* nmethod::attached_method(address call_instr) { 742 assert(code_contains(call_instr), "not part of the nmethod"); 743 RelocIterator iter(this, call_instr, call_instr + 1); 744 while (iter.next()) { 745 if (iter.addr() == call_instr) { 746 switch(iter.type()) { 747 case relocInfo::static_call_type: return iter.static_call_reloc()->method_value(); 748 case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value(); 749 case relocInfo::virtual_call_type: return iter.virtual_call_reloc()->method_value(); 750 default: break; 751 } 752 } 753 } 754 return nullptr; // not found 755 } 756 757 Method* nmethod::attached_method_before_pc(address pc) { 758 if (NativeCall::is_call_before(pc)) { 759 NativeCall* ncall = nativeCall_before(pc); 760 return attached_method(ncall->instruction_address()); 761 } 762 return nullptr; // not a call 763 } 764 765 void nmethod::clear_inline_caches() { 766 assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint"); 767 RelocIterator iter(this); 768 while (iter.next()) { 769 iter.reloc()->clear_inline_cache(); 770 } 771 } 772 773 #ifdef ASSERT 774 // Check class_loader is alive for this bit of metadata. 775 class CheckClass : public MetadataClosure { 776 void do_metadata(Metadata* md) { 777 Klass* klass = nullptr; 778 if (md->is_klass()) { 779 klass = ((Klass*)md); 780 } else if (md->is_method()) { 781 klass = ((Method*)md)->method_holder(); 782 } else if (md->is_methodData()) { 783 klass = ((MethodData*)md)->method()->method_holder(); 784 } else if (md->is_methodCounters()) { 785 klass = ((MethodCounters*)md)->method()->method_holder(); 786 } else { 787 md->print(); 788 ShouldNotReachHere(); 789 } 790 assert(klass->is_loader_alive(), "must be alive"); 791 } 792 }; 793 #endif // ASSERT 794 795 796 static void clean_ic_if_metadata_is_dead(CompiledIC *ic) { 797 ic->clean_metadata(); 798 } 799 800 // Clean references to unloaded nmethods at addr from this one, which is not unloaded. 801 template <typename CallsiteT> 802 static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, nmethod* from, 803 bool clean_all) { 804 CodeBlob* cb = CodeCache::find_blob(callsite->destination()); 805 if (!cb->is_nmethod()) { 806 return; 807 } 808 nmethod* nm = cb->as_nmethod(); 809 if (clean_all || !nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) { 810 callsite->set_to_clean(); 811 } 812 } 813 814 // Cleans caches in nmethods that point to either classes that are unloaded 815 // or nmethods that are unloaded. 816 // 817 // Can be called either in parallel by G1 currently or after all 818 // nmethods are unloaded. Return postponed=true in the parallel case for 819 // inline caches found that point to nmethods that are not yet visited during 820 // the do_unloading walk. 821 void nmethod::unload_nmethod_caches(bool unloading_occurred) { 822 ResourceMark rm; 823 824 // Exception cache only needs to be called if unloading occurred 825 if (unloading_occurred) { 826 clean_exception_cache(); 827 } 828 829 cleanup_inline_caches_impl(unloading_occurred, false); 830 831 #ifdef ASSERT 832 // Check that the metadata embedded in the nmethod is alive 833 CheckClass check_class; 834 metadata_do(&check_class); 835 #endif 836 } 837 838 void nmethod::run_nmethod_entry_barrier() { 839 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); 840 if (bs_nm != nullptr) { 841 // We want to keep an invariant that nmethods found through iterations of a Thread's 842 // nmethods found in safepoints have gone through an entry barrier and are not armed. 843 // By calling this nmethod entry barrier, it plays along and acts 844 // like any other nmethod found on the stack of a thread (fewer surprises). 845 nmethod* nm = this; 846 bool alive = bs_nm->nmethod_entry_barrier(nm); 847 assert(alive, "should be alive"); 848 } 849 } 850 851 // Only called by whitebox test 852 void nmethod::cleanup_inline_caches_whitebox() { 853 assert_locked_or_safepoint(CodeCache_lock); 854 CompiledICLocker ic_locker(this); 855 cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */); 856 } 857 858 address* nmethod::orig_pc_addr(const frame* fr) { 859 return (address*) ((address)fr->unextended_sp() + orig_pc_offset()); 860 } 861 862 // Called to clean up after class unloading for live nmethods 863 void nmethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) { 864 assert(CompiledICLocker::is_safe(this), "mt unsafe call"); 865 ResourceMark rm; 866 867 // Find all calls in an nmethod and clear the ones that point to bad nmethods. 868 RelocIterator iter(this, oops_reloc_begin()); 869 bool is_in_static_stub = false; 870 while(iter.next()) { 871 872 switch (iter.type()) { 873 874 case relocInfo::virtual_call_type: 875 if (unloading_occurred) { 876 // If class unloading occurred we first clear ICs where the cached metadata 877 // is referring to an unloaded klass or method. 878 clean_ic_if_metadata_is_dead(CompiledIC_at(&iter)); 879 } 880 881 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all); 882 break; 883 884 case relocInfo::opt_virtual_call_type: 885 case relocInfo::static_call_type: 886 clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), this, clean_all); 887 break; 888 889 case relocInfo::static_stub_type: { 890 is_in_static_stub = true; 891 break; 892 } 893 894 case relocInfo::metadata_type: { 895 // Only the metadata relocations contained in static/opt virtual call stubs 896 // contains the Method* passed to c2i adapters. It is the only metadata 897 // relocation that needs to be walked, as it is the one metadata relocation 898 // that violates the invariant that all metadata relocations have an oop 899 // in the compiled method (due to deferred resolution and code patching). 900 901 // This causes dead metadata to remain in compiled methods that are not 902 // unloading. Unless these slippery metadata relocations of the static 903 // stubs are at least cleared, subsequent class redefinition operations 904 // will access potentially free memory, and JavaThread execution 905 // concurrent to class unloading may call c2i adapters with dead methods. 906 if (!is_in_static_stub) { 907 // The first metadata relocation after a static stub relocation is the 908 // metadata relocation of the static stub used to pass the Method* to 909 // c2i adapters. 910 continue; 911 } 912 is_in_static_stub = false; 913 if (is_unloading()) { 914 // If the nmethod itself is dying, then it may point at dead metadata. 915 // Nobody should follow that metadata; it is strictly unsafe. 916 continue; 917 } 918 metadata_Relocation* r = iter.metadata_reloc(); 919 Metadata* md = r->metadata_value(); 920 if (md != nullptr && md->is_method()) { 921 Method* method = static_cast<Method*>(md); 922 if (!method->method_holder()->is_loader_alive()) { 923 Atomic::store(r->metadata_addr(), (Method*)nullptr); 924 925 if (!r->metadata_is_immediate()) { 926 r->fix_metadata_relocation(); 927 } 928 } 929 } 930 break; 931 } 932 933 default: 934 break; 935 } 936 } 937 } 938 939 address nmethod::continuation_for_implicit_exception(address pc, bool for_div0_check) { 940 // Exception happened outside inline-cache check code => we are inside 941 // an active nmethod => use cpc to determine a return address 942 int exception_offset = int(pc - code_begin()); 943 int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset ); 944 #ifdef ASSERT 945 if (cont_offset == 0) { 946 Thread* thread = Thread::current(); 947 ResourceMark rm(thread); 948 CodeBlob* cb = CodeCache::find_blob(pc); 949 assert(cb != nullptr && cb == this, ""); 950 951 // Keep tty output consistent. To avoid ttyLocker, we buffer in stream, and print all at once. 952 stringStream ss; 953 ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc)); 954 print_on(&ss); 955 method()->print_codes_on(&ss); 956 print_code_on(&ss); 957 print_pcs_on(&ss); 958 tty->print("%s", ss.as_string()); // print all at once 959 } 960 #endif 961 if (cont_offset == 0) { 962 // Let the normal error handling report the exception 963 return nullptr; 964 } 965 if (cont_offset == exception_offset) { 966 #if INCLUDE_JVMCI 967 Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check; 968 JavaThread *thread = JavaThread::current(); 969 thread->set_jvmci_implicit_exception_pc(pc); 970 thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason, 971 Deoptimization::Action_reinterpret)); 972 return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap()); 973 #else 974 ShouldNotReachHere(); 975 #endif 976 } 977 return code_begin() + cont_offset; 978 } 979 980 class HasEvolDependency : public MetadataClosure { 981 bool _has_evol_dependency; 982 public: 983 HasEvolDependency() : _has_evol_dependency(false) {} 984 void do_metadata(Metadata* md) { 985 if (md->is_method()) { 986 Method* method = (Method*)md; 987 if (method->is_old()) { 988 _has_evol_dependency = true; 989 } 990 } 991 } 992 bool has_evol_dependency() const { return _has_evol_dependency; } 993 }; 994 995 bool nmethod::has_evol_metadata() { 996 // Check the metadata in relocIter and CompiledIC and also deoptimize 997 // any nmethod that has reference to old methods. 998 HasEvolDependency check_evol; 999 metadata_do(&check_evol); 1000 if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) { 1001 ResourceMark rm; 1002 log_debug(redefine, class, nmethod) 1003 ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on in nmethod metadata", 1004 _method->method_holder()->external_name(), 1005 _method->name()->as_C_string(), 1006 _method->signature()->as_C_string(), 1007 compile_id()); 1008 } 1009 return check_evol.has_evol_dependency(); 1010 } 1011 1012 int nmethod::total_size() const { 1013 return 1014 consts_size() + 1015 insts_size() + 1016 stub_size() + 1017 scopes_data_size() + 1018 scopes_pcs_size() + 1019 handler_table_size() + 1020 nul_chk_table_size(); 1021 } 1022 1023 const char* nmethod::compile_kind() const { 1024 if (is_osr_method()) return "osr"; 1025 if (preloaded()) return "AP"; 1026 if (is_aot()) return "A"; 1027 1028 if (method() != nullptr && is_native_method()) { 1029 if (method()->is_continuation_native_intrinsic()) { 1030 return "cnt"; 1031 } 1032 return "c2n"; 1033 } 1034 return nullptr; 1035 } 1036 1037 const char* nmethod::compiler_name() const { 1038 return compilertype2name(_compiler_type); 1039 } 1040 1041 #ifdef ASSERT 1042 class CheckForOopsClosure : public OopClosure { 1043 bool _found_oop = false; 1044 public: 1045 virtual void do_oop(oop* o) { _found_oop = true; } 1046 virtual void do_oop(narrowOop* o) { _found_oop = true; } 1047 bool found_oop() { return _found_oop; } 1048 }; 1049 class CheckForMetadataClosure : public MetadataClosure { 1050 bool _found_metadata = false; 1051 Metadata* _ignore = nullptr; 1052 public: 1053 CheckForMetadataClosure(Metadata* ignore) : _ignore(ignore) {} 1054 virtual void do_metadata(Metadata* md) { if (md != _ignore) _found_metadata = true; } 1055 bool found_metadata() { return _found_metadata; } 1056 }; 1057 1058 static void assert_no_oops_or_metadata(nmethod* nm) { 1059 if (nm == nullptr) return; 1060 assert(nm->oop_maps() == nullptr, "expectation"); 1061 1062 CheckForOopsClosure cfo; 1063 nm->oops_do(&cfo); 1064 assert(!cfo.found_oop(), "no oops allowed"); 1065 1066 // We allow an exception for the own Method, but require its class to be permanent. 1067 Method* own_method = nm->method(); 1068 CheckForMetadataClosure cfm(/* ignore reference to own Method */ own_method); 1069 nm->metadata_do(&cfm); 1070 assert(!cfm.found_metadata(), "no metadata allowed"); 1071 1072 assert(own_method->method_holder()->class_loader_data()->is_permanent_class_loader_data(), 1073 "Method's class needs to be permanent"); 1074 } 1075 #endif 1076 1077 static int required_mutable_data_size(CodeBuffer* code_buffer, 1078 int jvmci_data_size = 0) { 1079 return align_up(code_buffer->total_relocation_size(), oopSize) + 1080 align_up(code_buffer->total_metadata_size(), oopSize) + 1081 align_up(jvmci_data_size, oopSize); 1082 } 1083 1084 nmethod* nmethod::new_native_nmethod(const methodHandle& method, 1085 int compile_id, 1086 CodeBuffer *code_buffer, 1087 int vep_offset, 1088 int frame_complete, 1089 int frame_size, 1090 ByteSize basic_lock_owner_sp_offset, 1091 ByteSize basic_lock_sp_offset, 1092 OopMapSet* oop_maps, 1093 int exception_handler) { 1094 code_buffer->finalize_oop_references(method); 1095 // create nmethod 1096 nmethod* nm = nullptr; 1097 int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod)); 1098 { 1099 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1100 1101 CodeOffsets offsets; 1102 offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); 1103 offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); 1104 if (exception_handler != -1) { 1105 offsets.set_value(CodeOffsets::Exceptions, exception_handler); 1106 } 1107 1108 int mutable_data_size = required_mutable_data_size(code_buffer); 1109 1110 // MH intrinsics are dispatch stubs which are compatible with NonNMethod space. 1111 // IsUnloadingBehaviour::is_unloading needs to handle them separately. 1112 bool allow_NonNMethod_space = method->can_be_allocated_in_NonNMethod_space(); 1113 nm = new (native_nmethod_size, allow_NonNMethod_space) 1114 nmethod(method(), compiler_none, native_nmethod_size, 1115 compile_id, &offsets, 1116 code_buffer, frame_size, 1117 basic_lock_owner_sp_offset, 1118 basic_lock_sp_offset, 1119 oop_maps, mutable_data_size); 1120 DEBUG_ONLY( if (allow_NonNMethod_space) assert_no_oops_or_metadata(nm); ) 1121 NOT_PRODUCT(if (nm != nullptr) native_nmethod_stats.note_native_nmethod(nm)); 1122 } 1123 1124 if (nm != nullptr) { 1125 // verify nmethod 1126 DEBUG_ONLY(nm->verify();) // might block 1127 1128 nm->log_new_nmethod(); 1129 } 1130 return nm; 1131 } 1132 1133 void nmethod::record_nmethod_dependency() { 1134 // To make dependency checking during class loading fast, record 1135 // the nmethod dependencies in the classes it is dependent on. 1136 // This allows the dependency checking code to simply walk the 1137 // class hierarchy above the loaded class, checking only nmethods 1138 // which are dependent on those classes. The slow way is to 1139 // check every nmethod for dependencies which makes it linear in 1140 // the number of methods compiled. For applications with a lot 1141 // classes the slow way is too slow. 1142 for (Dependencies::DepStream deps(this); deps.next(); ) { 1143 if (deps.type() == Dependencies::call_site_target_value) { 1144 // CallSite dependencies are managed on per-CallSite instance basis. 1145 oop call_site = deps.argument_oop(0); 1146 MethodHandles::add_dependent_nmethod(call_site, this); 1147 } else { 1148 InstanceKlass* ik = deps.context_type(); 1149 if (ik == nullptr) { 1150 continue; // ignore things like evol_method 1151 } 1152 // record this nmethod as dependent on this klass 1153 ik->add_dependent_nmethod(this); 1154 } 1155 } 1156 } 1157 1158 nmethod* nmethod::new_nmethod(const methodHandle& method, 1159 int compile_id, 1160 int entry_bci, 1161 CodeOffsets* offsets, 1162 int orig_pc_offset, 1163 DebugInformationRecorder* debug_info, 1164 Dependencies* dependencies, 1165 CodeBuffer* code_buffer, int frame_size, 1166 OopMapSet* oop_maps, 1167 ExceptionHandlerTable* handler_table, 1168 ImplicitExceptionTable* nul_chk_table, 1169 AbstractCompiler* compiler, 1170 CompLevel comp_level 1171 , AOTCodeEntry* aot_code_entry 1172 #if INCLUDE_JVMCI 1173 , char* speculations, 1174 int speculations_len, 1175 JVMCINMethodData* jvmci_data 1176 #endif 1177 ) 1178 { 1179 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR"); 1180 code_buffer->finalize_oop_references(method); 1181 // create nmethod 1182 nmethod* nm = nullptr; 1183 int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod)); 1184 1185 int immutable_data_size = 1186 adjust_pcs_size(debug_info->pcs_size()) 1187 + align_up((int)dependencies->size_in_bytes(), oopSize) 1188 + align_up(handler_table->size_in_bytes() , oopSize) 1189 + align_up(nul_chk_table->size_in_bytes() , oopSize) 1190 #if INCLUDE_JVMCI 1191 + align_up(speculations_len , oopSize) 1192 #endif 1193 + align_up(debug_info->data_size() , oopSize); 1194 1195 // First, allocate space for immutable data in C heap. 1196 address immutable_data = nullptr; 1197 if (immutable_data_size > 0) { 1198 immutable_data = (address)os::malloc(immutable_data_size, mtCode); 1199 if (immutable_data == nullptr) { 1200 vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data"); 1201 return nullptr; 1202 } 1203 } 1204 1205 int mutable_data_size = required_mutable_data_size(code_buffer 1206 JVMCI_ONLY(COMMA (compiler->is_jvmci() ? jvmci_data->size() : 0))); 1207 1208 { 1209 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1210 1211 nm = new (nmethod_size, comp_level) 1212 nmethod(method(), compiler->type(), nmethod_size, immutable_data_size, mutable_data_size, 1213 compile_id, entry_bci, immutable_data, offsets, orig_pc_offset, 1214 debug_info, dependencies, code_buffer, frame_size, oop_maps, 1215 handler_table, nul_chk_table, compiler, comp_level, aot_code_entry 1216 #if INCLUDE_JVMCI 1217 , speculations, 1218 speculations_len, 1219 jvmci_data 1220 #endif 1221 ); 1222 1223 if (nm != nullptr) { 1224 nm->record_nmethod_dependency(); 1225 NOT_PRODUCT(note_java_nmethod(nm)); 1226 } 1227 } 1228 // Do verification and logging outside CodeCache_lock. 1229 if (nm != nullptr) { 1230 1231 #ifdef ASSERT 1232 LogTarget(Debug, aot, codecache, nmethod) log; 1233 if (log.is_enabled()) { 1234 LogStream out(log); 1235 out.print_cr("== new_nmethod 2"); 1236 FlagSetting fs(PrintRelocations, true); 1237 nm->print_on_impl(&out); 1238 nm->decode(&out); 1239 } 1240 #endif 1241 1242 // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet. 1243 DEBUG_ONLY(nm->verify();) 1244 nm->log_new_nmethod(); 1245 } 1246 return nm; 1247 } 1248 1249 nmethod* nmethod::restore(address code_cache_buffer, 1250 const methodHandle& method, 1251 int compile_id, 1252 address reloc_data, 1253 GrowableArray<Handle>& oop_list, 1254 GrowableArray<Metadata*>& metadata_list, 1255 ImmutableOopMapSet* oop_maps, 1256 address immutable_data, 1257 GrowableArray<Handle>& reloc_imm_oop_list, 1258 GrowableArray<Metadata*>& reloc_imm_metadata_list, 1259 AOTCodeReader* aot_code_reader) 1260 { 1261 CodeBlob::restore(code_cache_buffer, "nmethod", reloc_data, oop_maps); 1262 nmethod* nm = (nmethod*)code_cache_buffer; 1263 nm->set_method(method()); 1264 nm->_compile_id = compile_id; 1265 nm->set_immutable_data(immutable_data); 1266 nm->copy_values(&oop_list); 1267 nm->copy_values(&metadata_list); 1268 1269 aot_code_reader->fix_relocations(nm, &reloc_imm_oop_list, &reloc_imm_metadata_list); 1270 1271 #ifndef PRODUCT 1272 nm->asm_remarks().init(); 1273 aot_code_reader->read_asm_remarks(nm->asm_remarks(), /* use_string_table */ false); 1274 nm->dbg_strings().init(); 1275 aot_code_reader->read_dbg_strings(nm->dbg_strings(), /* use_string_table */ false); 1276 #endif 1277 1278 // Flush the code block 1279 ICache::invalidate_range(nm->code_begin(), nm->code_size()); 1280 1281 // Create cache after PcDesc data is copied - it will be used to initialize cache 1282 nm->_pc_desc_container = new PcDescContainer(nm->scopes_pcs_begin()); 1283 1284 nm->set_aot_code_entry(aot_code_reader->aot_code_entry()); 1285 1286 nm->post_init(); 1287 return nm; 1288 } 1289 1290 nmethod* nmethod::new_nmethod(nmethod* archived_nm, 1291 const methodHandle& method, 1292 AbstractCompiler* compiler, 1293 int compile_id, 1294 address reloc_data, 1295 GrowableArray<Handle>& oop_list, 1296 GrowableArray<Metadata*>& metadata_list, 1297 ImmutableOopMapSet* oop_maps, 1298 address immutable_data, 1299 GrowableArray<Handle>& reloc_imm_oop_list, 1300 GrowableArray<Metadata*>& reloc_imm_metadata_list, 1301 AOTCodeReader* aot_code_reader) 1302 { 1303 nmethod* nm = nullptr; 1304 int nmethod_size = archived_nm->size(); 1305 // create nmethod 1306 { 1307 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1308 address code_cache_buffer = (address)CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(archived_nm->comp_level())); 1309 if (code_cache_buffer != nullptr) { 1310 nm = archived_nm->restore(code_cache_buffer, 1311 method, 1312 compile_id, 1313 reloc_data, 1314 oop_list, 1315 metadata_list, 1316 oop_maps, 1317 immutable_data, 1318 reloc_imm_oop_list, 1319 reloc_imm_metadata_list, 1320 aot_code_reader); 1321 nm->record_nmethod_dependency(); 1322 NOT_PRODUCT(note_java_nmethod(nm)); 1323 } 1324 } 1325 // Do verification and logging outside CodeCache_lock. 1326 if (nm != nullptr) { 1327 #ifdef ASSERT 1328 LogTarget(Debug, aot, codecache, nmethod) log; 1329 if (log.is_enabled()) { 1330 LogStream out(log); 1331 out.print_cr("== new_nmethod 2"); 1332 FlagSetting fs(PrintRelocations, true); 1333 nm->print_on_impl(&out); 1334 nm->decode(&out); 1335 } 1336 #endif 1337 // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet. 1338 DEBUG_ONLY(nm->verify();) 1339 nm->log_new_nmethod(); 1340 } 1341 return nm; 1342 } 1343 1344 // Fill in default values for various fields 1345 void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) { 1346 // avoid uninitialized fields, even for short time periods 1347 _exception_cache = nullptr; 1348 _gc_data = nullptr; 1349 _oops_do_mark_link = nullptr; 1350 _compiled_ic_data = nullptr; 1351 1352 _is_unloading_state = 0; 1353 _state = not_installed; 1354 1355 _has_unsafe_access = 0; 1356 _has_method_handle_invokes = 0; 1357 _has_wide_vectors = 0; 1358 _has_monitors = 0; 1359 _has_scoped_access = 0; 1360 _has_flushed_dependencies = 0; 1361 _is_unlinked = 0; 1362 _load_reported = 0; // jvmti state 1363 _preloaded = 0; 1364 _has_clinit_barriers = 0; 1365 1366 _used = false; 1367 _deoptimization_status = not_marked; 1368 1369 // SECT_CONSTS is first in code buffer so the offset should be 0. 1370 int consts_offset = code_buffer->total_offset_of(code_buffer->consts()); 1371 assert(consts_offset == 0, "const_offset: %d", consts_offset); 1372 1373 _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs()); 1374 1375 CHECKED_CAST(_entry_offset, uint16_t, (offsets->value(CodeOffsets::Entry))); 1376 CHECKED_CAST(_verified_entry_offset, uint16_t, (offsets->value(CodeOffsets::Verified_Entry))); 1377 1378 _skipped_instructions_size = code_buffer->total_skipped_instructions_size(); 1379 } 1380 1381 // Post initialization 1382 void nmethod::post_init() { 1383 clear_unloading_state(); 1384 1385 finalize_relocations(); 1386 1387 Universe::heap()->register_nmethod(this); 1388 DEBUG_ONLY(Universe::heap()->verify_nmethod(this)); 1389 1390 CodeCache::commit(this); 1391 } 1392 1393 // For native wrappers 1394 nmethod::nmethod( 1395 Method* method, 1396 CompilerType type, 1397 int nmethod_size, 1398 int compile_id, 1399 CodeOffsets* offsets, 1400 CodeBuffer* code_buffer, 1401 int frame_size, 1402 ByteSize basic_lock_owner_sp_offset, 1403 ByteSize basic_lock_sp_offset, 1404 OopMapSet* oop_maps, 1405 int mutable_data_size) 1406 : CodeBlob("native nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod), 1407 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size), 1408 _deoptimization_generation(0), 1409 _gc_epoch(CodeCache::gc_epoch()), 1410 _method(method), 1411 _native_receiver_sp_offset(basic_lock_owner_sp_offset), 1412 _native_basic_lock_sp_offset(basic_lock_sp_offset) 1413 { 1414 { 1415 DEBUG_ONLY(NoSafepointVerifier nsv;) 1416 assert_locked_or_safepoint(CodeCache_lock); 1417 1418 init_defaults(code_buffer, offsets); 1419 1420 _osr_entry_point = nullptr; 1421 _pc_desc_container = nullptr; 1422 _entry_bci = InvocationEntryBci; 1423 _compile_id = compile_id; 1424 _comp_level = CompLevel_none; 1425 _compiler_type = type; 1426 _orig_pc_offset = 0; 1427 _num_stack_arg_slots = 0; 1428 1429 if (offsets->value(CodeOffsets::Exceptions) != -1) { 1430 // Continuation enter intrinsic 1431 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions); 1432 } else { 1433 _exception_offset = 0; 1434 } 1435 // Native wrappers do not have deopt handlers. Make the values 1436 // something that will never match a pc like the nmethod vtable entry 1437 _deopt_handler_offset = 0; 1438 _deopt_mh_handler_offset = 0; 1439 _aot_code_entry = nullptr; 1440 _method_profiling_count = 0; 1441 _unwind_handler_offset = 0; 1442 1443 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize)); 1444 uint16_t metadata_size; 1445 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize)); 1446 JVMCI_ONLY( _metadata_size = metadata_size; ) 1447 assert(_mutable_data_size == _relocation_size + metadata_size, 1448 "wrong mutable data size: %d != %d + %d", 1449 _mutable_data_size, _relocation_size, metadata_size); 1450 1451 // native wrapper does not have read-only data but we need unique not null address 1452 _immutable_data = blob_end(); 1453 _immutable_data_size = 0; 1454 _nul_chk_table_offset = 0; 1455 _handler_table_offset = 0; 1456 _scopes_pcs_offset = 0; 1457 _scopes_data_offset = 0; 1458 #if INCLUDE_JVMCI 1459 _speculations_offset = 0; 1460 #endif 1461 1462 code_buffer->copy_code_and_locs_to(this); 1463 code_buffer->copy_values_to(this); 1464 1465 post_init(); 1466 } 1467 1468 if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) { 1469 ttyLocker ttyl; // keep the following output all in one block 1470 // This output goes directly to the tty, not the compiler log. 1471 // To enable tools to match it up with the compilation activity, 1472 // be sure to tag this tty output with the compile ID. 1473 if (xtty != nullptr) { 1474 xtty->begin_head("print_native_nmethod"); 1475 xtty->method(_method); 1476 xtty->stamp(); 1477 xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this); 1478 } 1479 // Print the header part, then print the requested information. 1480 // This is both handled in decode2(), called via print_code() -> decode() 1481 if (PrintNativeNMethods) { 1482 tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------"); 1483 print_code(); 1484 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1485 #if defined(SUPPORT_DATA_STRUCTS) 1486 if (AbstractDisassembler::show_structs()) { 1487 if (oop_maps != nullptr) { 1488 tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning 1489 oop_maps->print_on(tty); 1490 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1491 } 1492 } 1493 #endif 1494 } else { 1495 print(); // print the header part only. 1496 } 1497 #if defined(SUPPORT_DATA_STRUCTS) 1498 if (AbstractDisassembler::show_structs()) { 1499 if (PrintRelocations) { 1500 print_relocations_on(tty); 1501 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1502 } 1503 } 1504 #endif 1505 if (xtty != nullptr) { 1506 xtty->tail("print_native_nmethod"); 1507 } 1508 } 1509 } 1510 1511 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () { 1512 return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level)); 1513 } 1514 1515 void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () { 1516 // Try MethodNonProfiled and MethodProfiled. 1517 void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled); 1518 if (return_value != nullptr || !allow_NonNMethod_space) return return_value; 1519 // Try NonNMethod or give up. 1520 return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod); 1521 } 1522 1523 // For normal JIT compiled code 1524 nmethod::nmethod( 1525 Method* method, 1526 CompilerType type, 1527 int nmethod_size, 1528 int immutable_data_size, 1529 int mutable_data_size, 1530 int compile_id, 1531 int entry_bci, 1532 address immutable_data, 1533 CodeOffsets* offsets, 1534 int orig_pc_offset, 1535 DebugInformationRecorder* debug_info, 1536 Dependencies* dependencies, 1537 CodeBuffer *code_buffer, 1538 int frame_size, 1539 OopMapSet* oop_maps, 1540 ExceptionHandlerTable* handler_table, 1541 ImplicitExceptionTable* nul_chk_table, 1542 AbstractCompiler* compiler, 1543 CompLevel comp_level 1544 , AOTCodeEntry* aot_code_entry 1545 #if INCLUDE_JVMCI 1546 , char* speculations, 1547 int speculations_len, 1548 JVMCINMethodData* jvmci_data 1549 #endif 1550 ) 1551 : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod), 1552 offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size), 1553 _deoptimization_generation(0), 1554 _gc_epoch(CodeCache::gc_epoch()), 1555 _method(method), 1556 _osr_link(nullptr) 1557 { 1558 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR"); 1559 { 1560 DEBUG_ONLY(NoSafepointVerifier nsv;) 1561 assert_locked_or_safepoint(CodeCache_lock); 1562 1563 init_defaults(code_buffer, offsets); 1564 _aot_code_entry = aot_code_entry; 1565 _method_profiling_count = 0; 1566 1567 _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry); 1568 _entry_bci = entry_bci; 1569 _compile_id = compile_id; 1570 _comp_level = comp_level; 1571 _compiler_type = type; 1572 _orig_pc_offset = orig_pc_offset; 1573 1574 _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 0 : _method->constMethod()->num_stack_arg_slots(); 1575 1576 set_ctable_begin(header_begin() + content_offset()); 1577 1578 #if INCLUDE_JVMCI 1579 if (compiler->is_jvmci()) { 1580 // JVMCI might not produce any stub sections 1581 if (offsets->value(CodeOffsets::Exceptions) != -1) { 1582 _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions); 1583 } else { 1584 _exception_offset = -1; 1585 } 1586 if (offsets->value(CodeOffsets::Deopt) != -1) { 1587 _deopt_handler_offset = code_offset() + offsets->value(CodeOffsets::Deopt); 1588 } else { 1589 _deopt_handler_offset = -1; 1590 } 1591 if (offsets->value(CodeOffsets::DeoptMH) != -1) { 1592 _deopt_mh_handler_offset = code_offset() + offsets->value(CodeOffsets::DeoptMH); 1593 } else { 1594 _deopt_mh_handler_offset = -1; 1595 } 1596 } else 1597 #endif 1598 { 1599 // Exception handler and deopt handler are in the stub section 1600 assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set"); 1601 assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set"); 1602 1603 _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions); 1604 _deopt_handler_offset = _stub_offset + offsets->value(CodeOffsets::Deopt); 1605 if (offsets->value(CodeOffsets::DeoptMH) != -1) { 1606 _deopt_mh_handler_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH); 1607 } else { 1608 _deopt_mh_handler_offset = -1; 1609 } 1610 } 1611 if (offsets->value(CodeOffsets::UnwindHandler) != -1) { 1612 // C1 generates UnwindHandler at the end of instructions section. 1613 // Calculate positive offset as distance between the start of stubs section 1614 // (which is also the end of instructions section) and the start of the handler. 1615 int unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler); 1616 CHECKED_CAST(_unwind_handler_offset, int16_t, (_stub_offset - unwind_handler_offset)); 1617 } else { 1618 _unwind_handler_offset = -1; 1619 } 1620 1621 CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize)); 1622 uint16_t metadata_size; 1623 CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize)); 1624 JVMCI_ONLY( _metadata_size = metadata_size; ) 1625 int jvmci_data_size = 0 JVMCI_ONLY( + align_up(compiler->is_jvmci() ? jvmci_data->size() : 0, oopSize)); 1626 assert(_mutable_data_size == _relocation_size + metadata_size + jvmci_data_size, 1627 "wrong mutable data size: %d != %d + %d + %d", 1628 _mutable_data_size, _relocation_size, metadata_size, jvmci_data_size); 1629 assert(nmethod_size == data_end() - header_begin(), "wrong nmethod size: %d != %d", 1630 nmethod_size, (int)(code_end() - header_begin())); 1631 1632 _immutable_data_size = immutable_data_size; 1633 if (immutable_data_size > 0) { 1634 assert(immutable_data != nullptr, "required"); 1635 _immutable_data = immutable_data; 1636 } else { 1637 // We need unique not null address 1638 _immutable_data = blob_end(); 1639 } 1640 CHECKED_CAST(_nul_chk_table_offset, uint16_t, (align_up((int)dependencies->size_in_bytes(), oopSize))); 1641 CHECKED_CAST(_handler_table_offset, uint16_t, (_nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize))); 1642 _scopes_pcs_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize); 1643 _scopes_data_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size()); 1644 1645 #if INCLUDE_JVMCI 1646 _speculations_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize); 1647 DEBUG_ONLY( int immutable_data_end_offset = _speculations_offset + align_up(speculations_len, oopSize); ) 1648 #else 1649 DEBUG_ONLY( int immutable_data_end_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize); ) 1650 #endif 1651 assert(immutable_data_end_offset <= immutable_data_size, "wrong read-only data size: %d > %d", 1652 immutable_data_end_offset, immutable_data_size); 1653 1654 // Copy code and relocation info 1655 code_buffer->copy_code_and_locs_to(this); 1656 // Copy oops and metadata 1657 code_buffer->copy_values_to(this); 1658 dependencies->copy_to(this); 1659 // Copy PcDesc and ScopeDesc data 1660 debug_info->copy_to(this); 1661 1662 // Create cache after PcDesc data is copied - it will be used to initialize cache 1663 _pc_desc_container = new PcDescContainer(scopes_pcs_begin()); 1664 1665 #if INCLUDE_JVMCI 1666 if (compiler->is_jvmci()) { 1667 // Initialize the JVMCINMethodData object inlined into nm 1668 jvmci_nmethod_data()->copy(jvmci_data); 1669 } 1670 #endif 1671 1672 // Copy contents of ExceptionHandlerTable to nmethod 1673 handler_table->copy_to(this); 1674 nul_chk_table->copy_to(this); 1675 1676 #if INCLUDE_JVMCI 1677 // Copy speculations to nmethod 1678 if (speculations_size() != 0) { 1679 memcpy(speculations_begin(), speculations, speculations_len); 1680 } 1681 #endif 1682 1683 post_init(); 1684 1685 // we use the information of entry points to find out if a method is 1686 // static or non static 1687 assert(compiler->is_c2() || compiler->is_jvmci() || 1688 _method->is_static() == (entry_point() == verified_entry_point()), 1689 " entry points must be same for static methods and vice versa"); 1690 } 1691 } 1692 1693 // Print a short set of xml attributes to identify this nmethod. The 1694 // output should be embedded in some other element. 1695 void nmethod::log_identity(xmlStream* log) const { 1696 assert(log->inside_attrs_or_error(), "printing attributes"); 1697 log->print(" compile_id='%d'", compile_id()); 1698 const char* nm_kind = compile_kind(); 1699 if (nm_kind != nullptr) log->print(" compile_kind='%s'", nm_kind); 1700 log->print(" compiler='%s'", compiler_name()); 1701 if (TieredCompilation) { 1702 log->print(" compile_level='%d'", comp_level()); 1703 } 1704 #if INCLUDE_JVMCI 1705 if (jvmci_nmethod_data() != nullptr) { 1706 const char* jvmci_name = jvmci_nmethod_data()->name(); 1707 if (jvmci_name != nullptr) { 1708 log->print(" jvmci_mirror_name='"); 1709 log->text("%s", jvmci_name); 1710 log->print("'"); 1711 } 1712 } 1713 #endif 1714 } 1715 1716 1717 #define LOG_OFFSET(log, name) \ 1718 if (p2i(name##_end()) - p2i(name##_begin())) \ 1719 log->print(" " XSTR(name) "_offset='%zd'" , \ 1720 p2i(name##_begin()) - p2i(this)) 1721 1722 1723 void nmethod::log_new_nmethod() const { 1724 if (LogCompilation && xtty != nullptr) { 1725 ttyLocker ttyl; 1726 xtty->begin_elem("nmethod"); 1727 log_identity(xtty); 1728 xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size()); 1729 xtty->print(" address='" INTPTR_FORMAT "'", p2i(this)); 1730 1731 LOG_OFFSET(xtty, relocation); 1732 LOG_OFFSET(xtty, consts); 1733 LOG_OFFSET(xtty, insts); 1734 LOG_OFFSET(xtty, stub); 1735 LOG_OFFSET(xtty, scopes_data); 1736 LOG_OFFSET(xtty, scopes_pcs); 1737 LOG_OFFSET(xtty, dependencies); 1738 LOG_OFFSET(xtty, handler_table); 1739 LOG_OFFSET(xtty, nul_chk_table); 1740 LOG_OFFSET(xtty, oops); 1741 LOG_OFFSET(xtty, metadata); 1742 1743 xtty->method(method()); 1744 xtty->stamp(); 1745 xtty->end_elem(); 1746 } 1747 } 1748 1749 #undef LOG_OFFSET 1750 1751 1752 // Print out more verbose output usually for a newly created nmethod. 1753 void nmethod::print_on_with_msg(outputStream* st, const char* msg) const { 1754 if (st != nullptr) { 1755 ttyLocker ttyl; 1756 if (WizardMode) { 1757 CompileTask::print(st, this, msg, /*short_form:*/ true); 1758 st->print_cr(" (" INTPTR_FORMAT ")", p2i(this)); 1759 } else { 1760 CompileTask::print(st, this, msg, /*short_form:*/ false); 1761 } 1762 } 1763 } 1764 1765 void nmethod::maybe_print_nmethod(const DirectiveSet* directive) { 1766 bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption; 1767 if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) { 1768 print_nmethod(printnmethods); 1769 } 1770 } 1771 1772 void nmethod::print_nmethod(bool printmethod) { 1773 ttyLocker ttyl; // keep the following output all in one block 1774 if (xtty != nullptr) { 1775 xtty->begin_head("print_nmethod"); 1776 log_identity(xtty); 1777 xtty->stamp(); 1778 xtty->end_head(); 1779 } 1780 // Print the header part, then print the requested information. 1781 // This is both handled in decode2(). 1782 if (printmethod) { 1783 ResourceMark m; 1784 if (is_compiled_by_c1()) { 1785 tty->cr(); 1786 tty->print_cr("============================= C1-compiled nmethod =============================="); 1787 } 1788 if (is_compiled_by_jvmci()) { 1789 tty->cr(); 1790 tty->print_cr("=========================== JVMCI-compiled nmethod ============================="); 1791 } 1792 tty->print_cr("----------------------------------- Assembly -----------------------------------"); 1793 decode2(tty); 1794 #if defined(SUPPORT_DATA_STRUCTS) 1795 if (AbstractDisassembler::show_structs()) { 1796 // Print the oops from the underlying CodeBlob as well. 1797 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1798 print_oops(tty); 1799 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1800 print_metadata(tty); 1801 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1802 print_pcs_on(tty); 1803 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1804 if (oop_maps() != nullptr) { 1805 tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning 1806 oop_maps()->print_on(tty); 1807 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1808 } 1809 } 1810 #endif 1811 } else { 1812 print(); // print the header part only. 1813 } 1814 1815 #if defined(SUPPORT_DATA_STRUCTS) 1816 if (AbstractDisassembler::show_structs()) { 1817 methodHandle mh(Thread::current(), _method); 1818 if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDebugInfo)) { 1819 print_scopes(); 1820 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1821 } 1822 if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommandEnum::PrintRelocations)) { 1823 print_relocations_on(tty); 1824 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1825 } 1826 if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDependencies)) { 1827 print_dependencies_on(tty); 1828 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1829 } 1830 if (printmethod || PrintExceptionHandlers) { 1831 print_handler_table(); 1832 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1833 print_nul_chk_table(); 1834 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1835 } 1836 1837 if (printmethod) { 1838 print_recorded_oops(); 1839 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1840 print_recorded_metadata(); 1841 tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "); 1842 } 1843 } 1844 #endif 1845 1846 if (xtty != nullptr) { 1847 xtty->tail("print_nmethod"); 1848 } 1849 } 1850 1851 1852 // Promote one word from an assembly-time handle to a live embedded oop. 1853 inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) { 1854 if (handle == nullptr || 1855 // As a special case, IC oops are initialized to 1 or -1. 1856 handle == (jobject) Universe::non_oop_word()) { 1857 *(void**)dest = handle; 1858 } else { 1859 *dest = JNIHandles::resolve_non_null(handle); 1860 } 1861 } 1862 1863 void nmethod::copy_values(GrowableArray<Handle>* array) { 1864 int length = array->length(); 1865 assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough"); 1866 oop* dest = oops_begin(); 1867 for (int index = 0 ; index < length; index++) { 1868 dest[index] = array->at(index)(); 1869 } 1870 } 1871 1872 // Have to have the same name because it's called by a template 1873 void nmethod::copy_values(GrowableArray<jobject>* array) { 1874 int length = array->length(); 1875 assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough"); 1876 oop* dest = oops_begin(); 1877 for (int index = 0 ; index < length; index++) { 1878 initialize_immediate_oop(&dest[index], array->at(index)); 1879 } 1880 1881 // Now we can fix up all the oops in the code. We need to do this 1882 // in the code because the assembler uses jobjects as placeholders. 1883 // The code and relocations have already been initialized by the 1884 // CodeBlob constructor, so it is valid even at this early point to 1885 // iterate over relocations and patch the code. 1886 fix_oop_relocations(nullptr, nullptr, /*initialize_immediates=*/ true); 1887 } 1888 1889 void nmethod::copy_values(GrowableArray<Metadata*>* array) { 1890 int length = array->length(); 1891 assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough"); 1892 Metadata** dest = metadata_begin(); 1893 for (int index = 0 ; index < length; index++) { 1894 dest[index] = array->at(index); 1895 } 1896 } 1897 1898 void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) { 1899 // re-patch all oop-bearing instructions, just in case some oops moved 1900 RelocIterator iter(this, begin, end); 1901 while (iter.next()) { 1902 if (iter.type() == relocInfo::oop_type) { 1903 oop_Relocation* reloc = iter.oop_reloc(); 1904 if (initialize_immediates && reloc->oop_is_immediate()) { 1905 oop* dest = reloc->oop_addr(); 1906 jobject obj = *reinterpret_cast<jobject*>(dest); 1907 initialize_immediate_oop(dest, obj); 1908 } 1909 // Refresh the oop-related bits of this instruction. 1910 reloc->fix_oop_relocation(); 1911 } else if (iter.type() == relocInfo::metadata_type) { 1912 metadata_Relocation* reloc = iter.metadata_reloc(); 1913 reloc->fix_metadata_relocation(); 1914 } 1915 } 1916 } 1917 1918 void nmethod::create_reloc_immediates_list(JavaThread* thread, GrowableArray<Handle>& oop_list, GrowableArray<Metadata*>& metadata_list) { 1919 RelocIterator iter(this); 1920 while (iter.next()) { 1921 if (iter.type() == relocInfo::oop_type) { 1922 oop_Relocation* reloc = iter.oop_reloc(); 1923 if (reloc->oop_is_immediate()) { 1924 oop dest = reloc->oop_value(); 1925 Handle h(thread, dest); 1926 oop_list.append(h); 1927 } 1928 } else if (iter.type() == relocInfo::metadata_type) { 1929 metadata_Relocation* reloc = iter.metadata_reloc(); 1930 if (reloc->metadata_is_immediate()) { 1931 Metadata* m = reloc->metadata_value(); 1932 metadata_list.append(m); 1933 } 1934 } 1935 } 1936 } 1937 1938 static void install_post_call_nop_displacement(nmethod* nm, address pc) { 1939 NativePostCallNop* nop = nativePostCallNop_at((address) pc); 1940 intptr_t cbaddr = (intptr_t) nm; 1941 intptr_t offset = ((intptr_t) pc) - cbaddr; 1942 1943 int oopmap_slot = nm->oop_maps()->find_slot_for_offset(int((intptr_t) pc - (intptr_t) nm->code_begin())); 1944 if (oopmap_slot < 0) { // this can happen at asynchronous (non-safepoint) stackwalks 1945 log_debug(codecache)("failed to find oopmap for cb: " INTPTR_FORMAT " offset: %d", cbaddr, (int) offset); 1946 } else if (!nop->patch(oopmap_slot, offset)) { 1947 log_debug(codecache)("failed to encode %d %d", oopmap_slot, (int) offset); 1948 } 1949 } 1950 1951 void nmethod::finalize_relocations() { 1952 NoSafepointVerifier nsv; 1953 1954 GrowableArray<NativeMovConstReg*> virtual_call_data; 1955 1956 // Make sure that post call nops fill in nmethod offsets eagerly so 1957 // we don't have to race with deoptimization 1958 RelocIterator iter(this); 1959 while (iter.next()) { 1960 if (iter.type() == relocInfo::virtual_call_type) { 1961 virtual_call_Relocation* r = iter.virtual_call_reloc(); 1962 NativeMovConstReg* value = nativeMovConstReg_at(r->cached_value()); 1963 virtual_call_data.append(value); 1964 } else if (iter.type() == relocInfo::post_call_nop_type) { 1965 post_call_nop_Relocation* const reloc = iter.post_call_nop_reloc(); 1966 address pc = reloc->addr(); 1967 install_post_call_nop_displacement(this, pc); 1968 } 1969 } 1970 1971 if (virtual_call_data.length() > 0) { 1972 // We allocate a block of CompiledICData per nmethod so the GC can purge this faster. 1973 _compiled_ic_data = new CompiledICData[virtual_call_data.length()]; 1974 CompiledICData* next_data = _compiled_ic_data; 1975 1976 for (NativeMovConstReg* value : virtual_call_data) { 1977 value->set_data((intptr_t)next_data); 1978 next_data++; 1979 } 1980 } 1981 } 1982 1983 void nmethod::make_deoptimized() { 1984 if (!Continuations::enabled()) { 1985 // Don't deopt this again. 1986 set_deoptimized_done(); 1987 return; 1988 } 1989 1990 assert(method() == nullptr || can_be_deoptimized(), ""); 1991 1992 CompiledICLocker ml(this); 1993 assert(CompiledICLocker::is_safe(this), "mt unsafe call"); 1994 1995 // If post call nops have been already patched, we can just bail-out. 1996 if (has_been_deoptimized()) { 1997 return; 1998 } 1999 2000 ResourceMark rm; 2001 RelocIterator iter(this, oops_reloc_begin()); 2002 2003 while (iter.next()) { 2004 2005 switch (iter.type()) { 2006 case relocInfo::virtual_call_type: { 2007 CompiledIC *ic = CompiledIC_at(&iter); 2008 address pc = ic->end_of_call(); 2009 NativePostCallNop* nop = nativePostCallNop_at(pc); 2010 if (nop != nullptr) { 2011 nop->make_deopt(); 2012 } 2013 assert(NativeDeoptInstruction::is_deopt_at(pc), "check"); 2014 break; 2015 } 2016 case relocInfo::static_call_type: 2017 case relocInfo::opt_virtual_call_type: { 2018 CompiledDirectCall *csc = CompiledDirectCall::at(iter.reloc()); 2019 address pc = csc->end_of_call(); 2020 NativePostCallNop* nop = nativePostCallNop_at(pc); 2021 //tty->print_cr(" - static pc %p", pc); 2022 if (nop != nullptr) { 2023 nop->make_deopt(); 2024 } 2025 // We can't assert here, there are some calls to stubs / runtime 2026 // that have reloc data and doesn't have a post call NOP. 2027 //assert(NativeDeoptInstruction::is_deopt_at(pc), "check"); 2028 break; 2029 } 2030 default: 2031 break; 2032 } 2033 } 2034 // Don't deopt this again. 2035 set_deoptimized_done(); 2036 } 2037 2038 void nmethod::verify_clean_inline_caches() { 2039 assert(CompiledICLocker::is_safe(this), "mt unsafe call"); 2040 2041 ResourceMark rm; 2042 RelocIterator iter(this, oops_reloc_begin()); 2043 while(iter.next()) { 2044 switch(iter.type()) { 2045 case relocInfo::virtual_call_type: { 2046 CompiledIC *ic = CompiledIC_at(&iter); 2047 CodeBlob *cb = CodeCache::find_blob(ic->destination()); 2048 assert(cb != nullptr, "destination not in CodeBlob?"); 2049 nmethod* nm = cb->as_nmethod_or_null(); 2050 if (nm != nullptr) { 2051 // Verify that inline caches pointing to bad nmethods are clean 2052 if (!nm->is_in_use() || nm->is_unloading()) { 2053 assert(ic->is_clean(), "IC should be clean"); 2054 } 2055 } 2056 break; 2057 } 2058 case relocInfo::static_call_type: 2059 case relocInfo::opt_virtual_call_type: { 2060 CompiledDirectCall *cdc = CompiledDirectCall::at(iter.reloc()); 2061 CodeBlob *cb = CodeCache::find_blob(cdc->destination()); 2062 assert(cb != nullptr, "destination not in CodeBlob?"); 2063 nmethod* nm = cb->as_nmethod_or_null(); 2064 if (nm != nullptr) { 2065 // Verify that inline caches pointing to bad nmethods are clean 2066 if (!nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) { 2067 assert(cdc->is_clean(), "IC should be clean"); 2068 } 2069 } 2070 break; 2071 } 2072 default: 2073 break; 2074 } 2075 } 2076 } 2077 2078 void nmethod::mark_as_maybe_on_stack() { 2079 Atomic::store(&_gc_epoch, CodeCache::gc_epoch()); 2080 } 2081 2082 bool nmethod::is_maybe_on_stack() { 2083 // If the condition below is true, it means that the nmethod was found to 2084 // be alive the previous completed marking cycle. 2085 return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle(); 2086 } 2087 2088 void nmethod::inc_decompile_count() { 2089 if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return; 2090 // Could be gated by ProfileTraps, but do not bother... 2091 #if INCLUDE_JVMCI 2092 if (jvmci_skip_profile_deopt()) { 2093 return; 2094 } 2095 #endif 2096 Method* m = method(); 2097 if (m == nullptr) return; 2098 MethodData* mdo = m->method_data(); 2099 if (mdo == nullptr) return; 2100 // There is a benign race here. See comments in methodData.hpp. 2101 mdo->inc_decompile_count(); 2102 } 2103 2104 void nmethod::inc_method_profiling_count() { 2105 Atomic::inc(&_method_profiling_count); 2106 } 2107 2108 uint64_t nmethod::method_profiling_count() { 2109 return _method_profiling_count; 2110 } 2111 2112 bool nmethod::try_transition(signed char new_state_int) { 2113 signed char new_state = new_state_int; 2114 assert_lock_strong(NMethodState_lock); 2115 signed char old_state = _state; 2116 if (old_state >= new_state) { 2117 // Ensure monotonicity of transitions. 2118 return false; 2119 } 2120 Atomic::store(&_state, new_state); 2121 return true; 2122 } 2123 2124 void nmethod::invalidate_osr_method() { 2125 assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); 2126 // Remove from list of active nmethods 2127 if (method() != nullptr) { 2128 method()->method_holder()->remove_osr_nmethod(this); 2129 } 2130 } 2131 2132 void nmethod::log_state_change(InvalidationReason invalidation_reason) const { 2133 if (LogCompilation) { 2134 if (xtty != nullptr) { 2135 ttyLocker ttyl; // keep the following output all in one block 2136 xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'", 2137 os::current_thread_id(), invalidation_reason_to_string(invalidation_reason)); 2138 log_identity(xtty); 2139 xtty->stamp(); 2140 xtty->end_elem(); 2141 } 2142 } 2143 2144 ResourceMark rm; 2145 stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256); 2146 ss.print("made not entrant: %s", invalidation_reason_to_string(invalidation_reason)); 2147 2148 CompileTask::print_ul(this, ss.freeze()); 2149 if (PrintCompilation) { 2150 print_on_with_msg(tty, ss.freeze()); 2151 } 2152 } 2153 2154 void nmethod::unlink_from_method() { 2155 if (method() != nullptr) { 2156 method()->unlink_code(this); 2157 } 2158 } 2159 2160 // Invalidate code 2161 bool nmethod::make_not_entrant(InvalidationReason invalidation_reason, bool keep_aot_entry) { 2162 // This can be called while the system is already at a safepoint which is ok 2163 NoSafepointVerifier nsv; 2164 2165 if (is_unloading()) { 2166 // If the nmethod is unloading, then it is already not entrant through 2167 // the nmethod entry barriers. No need to do anything; GC will unload it. 2168 return false; 2169 } 2170 2171 if (Atomic::load(&_state) == not_entrant) { 2172 // Avoid taking the lock if already in required state. 2173 // This is safe from races because the state is an end-state, 2174 // which the nmethod cannot back out of once entered. 2175 // No need for fencing either. 2176 return false; 2177 } 2178 2179 { 2180 // Enter critical section. Does not block for safepoint. 2181 ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); 2182 2183 if (Atomic::load(&_state) == not_entrant) { 2184 // another thread already performed this transition so nothing 2185 // to do, but return false to indicate this. 2186 return false; 2187 } 2188 2189 if (is_osr_method()) { 2190 // This logic is equivalent to the logic below for patching the 2191 // verified entry point of regular methods. 2192 // this effectively makes the osr nmethod not entrant 2193 invalidate_osr_method(); 2194 } else { 2195 // The caller can be calling the method statically or through an inline 2196 // cache call. 2197 BarrierSet::barrier_set()->barrier_set_nmethod()->make_not_entrant(this); 2198 } 2199 2200 if (update_recompile_counts()) { 2201 // Mark the method as decompiled. 2202 inc_decompile_count(); 2203 } 2204 2205 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); 2206 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) { 2207 // If nmethod entry barriers are not supported, we won't mark 2208 // nmethods as on-stack when they become on-stack. So we 2209 // degrade to a less accurate flushing strategy, for now. 2210 mark_as_maybe_on_stack(); 2211 } 2212 2213 // Change state 2214 bool success = try_transition(not_entrant); 2215 assert(success, "Transition can't fail"); 2216 2217 // Log the transition once 2218 log_state_change(invalidation_reason); 2219 2220 // Remove nmethod from method. 2221 unlink_from_method(); 2222 2223 if (!keep_aot_entry) { 2224 // Keep AOT code if it was simply replaced 2225 // otherwise make it not entrant too. 2226 AOTCodeCache::invalidate(_aot_code_entry); 2227 } 2228 2229 CompileBroker::log_not_entrant(this); 2230 } // leave critical region under NMethodState_lock 2231 2232 #if INCLUDE_JVMCI 2233 // Invalidate can't occur while holding the NMethodState_lock 2234 JVMCINMethodData* nmethod_data = jvmci_nmethod_data(); 2235 if (nmethod_data != nullptr) { 2236 nmethod_data->invalidate_nmethod_mirror(this, invalidation_reason); 2237 } 2238 #endif 2239 2240 #ifdef ASSERT 2241 if (is_osr_method() && method() != nullptr) { 2242 // Make sure osr nmethod is invalidated, i.e. not on the list 2243 bool found = method()->method_holder()->remove_osr_nmethod(this); 2244 assert(!found, "osr nmethod should have been invalidated"); 2245 } 2246 #endif 2247 2248 return true; 2249 } 2250 2251 // For concurrent GCs, there must be a handshake between unlink and flush 2252 void nmethod::unlink() { 2253 if (is_unlinked()) { 2254 // Already unlinked. 2255 return; 2256 } 2257 2258 flush_dependencies(); 2259 2260 // unlink_from_method will take the NMethodState_lock. 2261 // In this case we don't strictly need it when unlinking nmethods from 2262 // the Method, because it is only concurrently unlinked by 2263 // the entry barrier, which acquires the per nmethod lock. 2264 unlink_from_method(); 2265 2266 if (is_osr_method()) { 2267 invalidate_osr_method(); 2268 } 2269 2270 #if INCLUDE_JVMCI 2271 // Clear the link between this nmethod and a HotSpotNmethod mirror 2272 JVMCINMethodData* nmethod_data = jvmci_nmethod_data(); 2273 if (nmethod_data != nullptr) { 2274 nmethod_data->invalidate_nmethod_mirror(this, is_cold() ? 2275 nmethod::InvalidationReason::UNLOADING_COLD : 2276 nmethod::InvalidationReason::UNLOADING); 2277 } 2278 #endif 2279 2280 // Post before flushing as jmethodID is being used 2281 post_compiled_method_unload(); 2282 2283 // Register for flushing when it is safe. For concurrent class unloading, 2284 // that would be after the unloading handshake, and for STW class unloading 2285 // that would be when getting back to the VM thread. 2286 ClassUnloadingContext::context()->register_unlinked_nmethod(this); 2287 } 2288 2289 void nmethod::purge(bool unregister_nmethod) { 2290 2291 MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2292 2293 // completely deallocate this method 2294 Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, compile_kind(), p2i(this)); 2295 2296 LogTarget(Debug, codecache) lt; 2297 if (lt.is_enabled()) { 2298 ResourceMark rm; 2299 LogStream ls(lt); 2300 const char* method_name = method()->name()->as_C_string(); 2301 const size_t codecache_capacity = CodeCache::capacity()/1024; 2302 const size_t codecache_free_space = CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024; 2303 ls.print("Flushing %s nmethod %6d/" INTPTR_FORMAT ", level=%d, cold=%d, epoch=" UINT64_FORMAT ", cold_count=" UINT64_FORMAT ". " 2304 "Cache capacity: %zuKb, free space: %zuKb. method %s (%s)", 2305 compile_kind(), _compile_id, p2i(this), _comp_level, is_cold(), _gc_epoch, CodeCache::cold_gc_count(), 2306 codecache_capacity, codecache_free_space, method_name, compiler_name()); 2307 } 2308 2309 // We need to deallocate any ExceptionCache data. 2310 // Note that we do not need to grab the nmethod lock for this, it 2311 // better be thread safe if we're disposing of it! 2312 ExceptionCache* ec = exception_cache(); 2313 while(ec != nullptr) { 2314 ExceptionCache* next = ec->next(); 2315 delete ec; 2316 ec = next; 2317 } 2318 if (_pc_desc_container != nullptr) { 2319 delete _pc_desc_container; 2320 } 2321 if (_compiled_ic_data != nullptr) { 2322 delete[] _compiled_ic_data; 2323 } 2324 2325 if (_immutable_data != data_end() && !AOTCodeCache::is_address_in_aot_cache((address)_oop_maps)) { 2326 os::free(_immutable_data); 2327 _immutable_data = blob_end(); // Valid not null address 2328 } 2329 if (unregister_nmethod) { 2330 Universe::heap()->unregister_nmethod(this); 2331 } 2332 CodeCache::unregister_old_nmethod(this); 2333 2334 JVMCI_ONLY( _metadata_size = 0; ) 2335 CodeBlob::purge(); 2336 } 2337 2338 oop nmethod::oop_at(int index) const { 2339 if (index == 0) { 2340 return nullptr; 2341 } 2342 2343 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); 2344 return bs_nm->oop_load_no_keepalive(this, index); 2345 } 2346 2347 oop nmethod::oop_at_phantom(int index) const { 2348 if (index == 0) { 2349 return nullptr; 2350 } 2351 2352 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); 2353 return bs_nm->oop_load_phantom(this, index); 2354 } 2355 2356 // 2357 // Notify all classes this nmethod is dependent on that it is no 2358 // longer dependent. 2359 2360 void nmethod::flush_dependencies() { 2361 if (!has_flushed_dependencies()) { 2362 set_has_flushed_dependencies(true); 2363 for (Dependencies::DepStream deps(this); deps.next(); ) { 2364 if (deps.type() == Dependencies::call_site_target_value) { 2365 // CallSite dependencies are managed on per-CallSite instance basis. 2366 oop call_site = deps.argument_oop(0); 2367 MethodHandles::clean_dependency_context(call_site); 2368 } else { 2369 InstanceKlass* ik = deps.context_type(); 2370 if (ik == nullptr) { 2371 continue; // ignore things like evol_method 2372 } 2373 // During GC liveness of dependee determines class that needs to be updated. 2374 // The GC may clean dependency contexts concurrently and in parallel. 2375 ik->clean_dependency_context(); 2376 } 2377 } 2378 } 2379 } 2380 2381 void nmethod::post_compiled_method(CompileTask* task) { 2382 task->mark_success(); 2383 task->set_nm_content_size(content_size()); 2384 task->set_nm_insts_size(insts_size()); 2385 task->set_nm_total_size(total_size()); 2386 2387 // task->is_aot_load() is true only for loaded AOT code. 2388 // nmethod::_aot_code_entry is set for loaded and stored AOT code 2389 // to invalidate the entry when nmethod is deoptimized. 2390 // VerifyAOTCode is option to not store in archive AOT code. 2391 guarantee((_aot_code_entry != nullptr) || !task->is_aot_load() || VerifyAOTCode, "sanity"); 2392 2393 // JVMTI -- compiled method notification (must be done outside lock) 2394 post_compiled_method_load_event(); 2395 2396 if (CompilationLog::log() != nullptr) { 2397 CompilationLog::log()->log_nmethod(JavaThread::current(), this); 2398 } 2399 2400 const DirectiveSet* directive = task->directive(); 2401 maybe_print_nmethod(directive); 2402 } 2403 2404 // ------------------------------------------------------------------ 2405 // post_compiled_method_load_event 2406 // new method for install_code() path 2407 // Transfer information from compilation to jvmti 2408 void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) { 2409 // This is a bad time for a safepoint. We don't want 2410 // this nmethod to get unloaded while we're queueing the event. 2411 NoSafepointVerifier nsv; 2412 2413 Method* m = method(); 2414 HOTSPOT_COMPILED_METHOD_LOAD( 2415 (char *) m->klass_name()->bytes(), 2416 m->klass_name()->utf8_length(), 2417 (char *) m->name()->bytes(), 2418 m->name()->utf8_length(), 2419 (char *) m->signature()->bytes(), 2420 m->signature()->utf8_length(), 2421 insts_begin(), insts_size()); 2422 2423 2424 if (JvmtiExport::should_post_compiled_method_load()) { 2425 // Only post unload events if load events are found. 2426 set_load_reported(); 2427 // If a JavaThread hasn't been passed in, let the Service thread 2428 // (which is a real Java thread) post the event 2429 JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this); 2430 if (state == nullptr) { 2431 // Execute any barrier code for this nmethod as if it's called, since 2432 // keeping it alive looks like stack walking. 2433 run_nmethod_entry_barrier(); 2434 ServiceThread::enqueue_deferred_event(&event); 2435 } else { 2436 // This enters the nmethod barrier outside in the caller. 2437 state->enqueue_event(&event); 2438 } 2439 } 2440 } 2441 2442 void nmethod::post_compiled_method_unload() { 2443 assert(_method != nullptr, "just checking"); 2444 DTRACE_METHOD_UNLOAD_PROBE(method()); 2445 2446 // If a JVMTI agent has enabled the CompiledMethodUnload event then 2447 // post the event. The Method* will not be valid when this is freed. 2448 2449 // Don't bother posting the unload if the load event wasn't posted. 2450 if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) { 2451 JvmtiDeferredEvent event = 2452 JvmtiDeferredEvent::compiled_method_unload_event( 2453 method()->jmethod_id(), insts_begin()); 2454 ServiceThread::enqueue_deferred_event(&event); 2455 } 2456 } 2457 2458 // Iterate over metadata calling this function. Used by RedefineClasses 2459 void nmethod::metadata_do(MetadataClosure* f) { 2460 { 2461 // Visit all immediate references that are embedded in the instruction stream. 2462 RelocIterator iter(this, oops_reloc_begin()); 2463 while (iter.next()) { 2464 if (iter.type() == relocInfo::metadata_type) { 2465 metadata_Relocation* r = iter.metadata_reloc(); 2466 // In this metadata, we must only follow those metadatas directly embedded in 2467 // the code. Other metadatas (oop_index>0) are seen as part of 2468 // the metadata section below. 2469 assert(1 == (r->metadata_is_immediate()) + 2470 (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()), 2471 "metadata must be found in exactly one place"); 2472 if (r->metadata_is_immediate() && r->metadata_value() != nullptr) { 2473 Metadata* md = r->metadata_value(); 2474 if (md != _method) f->do_metadata(md); 2475 } 2476 } else if (iter.type() == relocInfo::virtual_call_type) { 2477 // Check compiledIC holders associated with this nmethod 2478 ResourceMark rm; 2479 CompiledIC *ic = CompiledIC_at(&iter); 2480 ic->metadata_do(f); 2481 } 2482 } 2483 } 2484 2485 // Visit the metadata section 2486 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) { 2487 if (*p == Universe::non_oop_word() || *p == nullptr) continue; // skip non-oops 2488 Metadata* md = *p; 2489 f->do_metadata(md); 2490 } 2491 2492 // Visit metadata not embedded in the other places. 2493 if (_method != nullptr) f->do_metadata(_method); 2494 } 2495 2496 // Heuristic for nuking nmethods even though their oops are live. 2497 // Main purpose is to reduce code cache pressure and get rid of 2498 // nmethods that don't seem to be all that relevant any longer. 2499 bool nmethod::is_cold() { 2500 if (!MethodFlushing || is_native_method() || is_not_installed()) { 2501 // No heuristic unloading at all 2502 return false; 2503 } 2504 2505 if (!is_maybe_on_stack() && is_not_entrant()) { 2506 // Not entrant nmethods that are not on any stack can just 2507 // be removed 2508 return true; 2509 } 2510 2511 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); 2512 if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) { 2513 // On platforms that don't support nmethod entry barriers, we can't 2514 // trust the temporal aspect of the gc epochs. So we can't detect 2515 // cold nmethods on such platforms. 2516 return false; 2517 } 2518 2519 if (!UseCodeCacheFlushing) { 2520 // Bail out if we don't heuristically remove nmethods 2521 return false; 2522 } 2523 2524 // Other code can be phased out more gradually after N GCs 2525 return CodeCache::previous_completed_gc_marking_cycle() > _gc_epoch + 2 * CodeCache::cold_gc_count(); 2526 } 2527 2528 // The _is_unloading_state encodes a tuple comprising the unloading cycle 2529 // and the result of IsUnloadingBehaviour::is_unloading() for that cycle. 2530 // This is the bit layout of the _is_unloading_state byte: 00000CCU 2531 // CC refers to the cycle, which has 2 bits, and U refers to the result of 2532 // IsUnloadingBehaviour::is_unloading() for that unloading cycle. 2533 2534 class IsUnloadingState: public AllStatic { 2535 static const uint8_t _is_unloading_mask = 1; 2536 static const uint8_t _is_unloading_shift = 0; 2537 static const uint8_t _unloading_cycle_mask = 6; 2538 static const uint8_t _unloading_cycle_shift = 1; 2539 2540 static uint8_t set_is_unloading(uint8_t state, bool value) { 2541 state &= (uint8_t)~_is_unloading_mask; 2542 if (value) { 2543 state |= 1 << _is_unloading_shift; 2544 } 2545 assert(is_unloading(state) == value, "unexpected unloading cycle overflow"); 2546 return state; 2547 } 2548 2549 static uint8_t set_unloading_cycle(uint8_t state, uint8_t value) { 2550 state &= (uint8_t)~_unloading_cycle_mask; 2551 state |= (uint8_t)(value << _unloading_cycle_shift); 2552 assert(unloading_cycle(state) == value, "unexpected unloading cycle overflow"); 2553 return state; 2554 } 2555 2556 public: 2557 static bool is_unloading(uint8_t state) { return (state & _is_unloading_mask) >> _is_unloading_shift == 1; } 2558 static uint8_t unloading_cycle(uint8_t state) { return (state & _unloading_cycle_mask) >> _unloading_cycle_shift; } 2559 2560 static uint8_t create(bool is_unloading, uint8_t unloading_cycle) { 2561 uint8_t state = 0; 2562 state = set_is_unloading(state, is_unloading); 2563 state = set_unloading_cycle(state, unloading_cycle); 2564 return state; 2565 } 2566 }; 2567 2568 bool nmethod::is_unloading() { 2569 uint8_t state = Atomic::load(&_is_unloading_state); 2570 bool state_is_unloading = IsUnloadingState::is_unloading(state); 2571 if (state_is_unloading) { 2572 return true; 2573 } 2574 uint8_t state_unloading_cycle = IsUnloadingState::unloading_cycle(state); 2575 uint8_t current_cycle = CodeCache::unloading_cycle(); 2576 if (state_unloading_cycle == current_cycle) { 2577 return false; 2578 } 2579 2580 // The IsUnloadingBehaviour is responsible for calculating if the nmethod 2581 // should be unloaded. This can be either because there is a dead oop, 2582 // or because is_cold() heuristically determines it is time to unload. 2583 state_unloading_cycle = current_cycle; 2584 state_is_unloading = IsUnloadingBehaviour::is_unloading(this); 2585 uint8_t new_state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle); 2586 2587 // Note that if an nmethod has dead oops, everyone will agree that the 2588 // nmethod is_unloading. However, the is_cold heuristics can yield 2589 // different outcomes, so we guard the computed result with a CAS 2590 // to ensure all threads have a shared view of whether an nmethod 2591 // is_unloading or not. 2592 uint8_t found_state = Atomic::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed); 2593 2594 if (found_state == state) { 2595 // First to change state, we win 2596 return state_is_unloading; 2597 } else { 2598 // State already set, so use it 2599 return IsUnloadingState::is_unloading(found_state); 2600 } 2601 } 2602 2603 void nmethod::clear_unloading_state() { 2604 uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle()); 2605 Atomic::store(&_is_unloading_state, state); 2606 } 2607 2608 2609 // This is called at the end of the strong tracing/marking phase of a 2610 // GC to unload an nmethod if it contains otherwise unreachable 2611 // oops or is heuristically found to be not important. 2612 void nmethod::do_unloading(bool unloading_occurred) { 2613 // Make sure the oop's ready to receive visitors 2614 if (is_unloading()) { 2615 unlink(); 2616 } else { 2617 unload_nmethod_caches(unloading_occurred); 2618 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); 2619 if (bs_nm != nullptr) { 2620 bs_nm->disarm(this); 2621 } 2622 } 2623 } 2624 2625 void nmethod::oops_do(OopClosure* f) { 2626 // Prevent extra code cache walk for platforms that don't have immediate oops. 2627 if (relocInfo::mustIterateImmediateOopsInCode()) { 2628 RelocIterator iter(this, oops_reloc_begin()); 2629 2630 while (iter.next()) { 2631 if (iter.type() == relocInfo::oop_type ) { 2632 oop_Relocation* r = iter.oop_reloc(); 2633 // In this loop, we must only follow those oops directly embedded in 2634 // the code. Other oops (oop_index>0) are seen as part of scopes_oops. 2635 assert(1 == (r->oop_is_immediate()) + 2636 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), 2637 "oop must be found in exactly one place"); 2638 if (r->oop_is_immediate() && r->oop_value() != nullptr) { 2639 f->do_oop(r->oop_addr()); 2640 } 2641 } 2642 } 2643 } 2644 2645 // Scopes 2646 // This includes oop constants not inlined in the code stream. 2647 for (oop* p = oops_begin(); p < oops_end(); p++) { 2648 if (*p == Universe::non_oop_word()) continue; // skip non-oops 2649 f->do_oop(p); 2650 } 2651 } 2652 2653 void nmethod::follow_nmethod(OopIterateClosure* cl) { 2654 // Process oops in the nmethod 2655 oops_do(cl); 2656 2657 // CodeCache unloading support 2658 mark_as_maybe_on_stack(); 2659 2660 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); 2661 bs_nm->disarm(this); 2662 2663 // There's an assumption made that this function is not used by GCs that 2664 // relocate objects, and therefore we don't call fix_oop_relocations. 2665 } 2666 2667 nmethod* volatile nmethod::_oops_do_mark_nmethods; 2668 2669 void nmethod::oops_do_log_change(const char* state) { 2670 LogTarget(Trace, gc, nmethod) lt; 2671 if (lt.is_enabled()) { 2672 LogStream ls(lt); 2673 CompileTask::print(&ls, this, state, true /* short_form */); 2674 } 2675 } 2676 2677 bool nmethod::oops_do_try_claim() { 2678 if (oops_do_try_claim_weak_request()) { 2679 nmethod* result = oops_do_try_add_to_list_as_weak_done(); 2680 assert(result == nullptr, "adding to global list as weak done must always succeed."); 2681 return true; 2682 } 2683 return false; 2684 } 2685 2686 bool nmethod::oops_do_try_claim_weak_request() { 2687 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); 2688 2689 if ((_oops_do_mark_link == nullptr) && 2690 (Atomic::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) { 2691 oops_do_log_change("oops_do, mark weak request"); 2692 return true; 2693 } 2694 return false; 2695 } 2696 2697 void nmethod::oops_do_set_strong_done(nmethod* old_head) { 2698 _oops_do_mark_link = mark_link(old_head, claim_strong_done_tag); 2699 } 2700 2701 nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() { 2702 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); 2703 2704 oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, mark_link(nullptr, claim_weak_request_tag), mark_link(this, claim_strong_done_tag)); 2705 if (old_next == nullptr) { 2706 oops_do_log_change("oops_do, mark strong done"); 2707 } 2708 return old_next; 2709 } 2710 2711 nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oops_do_mark_link* next) { 2712 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); 2713 assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak"); 2714 2715 oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag)); 2716 if (old_next == next) { 2717 oops_do_log_change("oops_do, mark strong request"); 2718 } 2719 return old_next; 2720 } 2721 2722 bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_link* next) { 2723 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); 2724 assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done"); 2725 2726 oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag)); 2727 if (old_next == next) { 2728 oops_do_log_change("oops_do, mark weak done -> mark strong done"); 2729 return true; 2730 } 2731 return false; 2732 } 2733 2734 nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() { 2735 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); 2736 2737 assert(extract_state(_oops_do_mark_link) == claim_weak_request_tag || 2738 extract_state(_oops_do_mark_link) == claim_strong_request_tag, 2739 "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link)); 2740 2741 nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this); 2742 // Self-loop if needed. 2743 if (old_head == nullptr) { 2744 old_head = this; 2745 } 2746 // Try to install end of list and weak done tag. 2747 if (Atomic::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) { 2748 oops_do_log_change("oops_do, mark weak done"); 2749 return nullptr; 2750 } else { 2751 return old_head; 2752 } 2753 } 2754 2755 void nmethod::oops_do_add_to_list_as_strong_done() { 2756 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); 2757 2758 nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this); 2759 // Self-loop if needed. 2760 if (old_head == nullptr) { 2761 old_head = this; 2762 } 2763 assert(_oops_do_mark_link == mark_link(this, claim_strong_done_tag), "must be but is nmethod " PTR_FORMAT " state %u", 2764 p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link)); 2765 2766 oops_do_set_strong_done(old_head); 2767 } 2768 2769 void nmethod::oops_do_process_weak(OopsDoProcessor* p) { 2770 if (!oops_do_try_claim_weak_request()) { 2771 // Failed to claim for weak processing. 2772 oops_do_log_change("oops_do, mark weak request fail"); 2773 return; 2774 } 2775 2776 p->do_regular_processing(this); 2777 2778 nmethod* old_head = oops_do_try_add_to_list_as_weak_done(); 2779 if (old_head == nullptr) { 2780 return; 2781 } 2782 oops_do_log_change("oops_do, mark weak done fail"); 2783 // Adding to global list failed, another thread added a strong request. 2784 assert(extract_state(_oops_do_mark_link) == claim_strong_request_tag, 2785 "must be but is %u", extract_state(_oops_do_mark_link)); 2786 2787 oops_do_log_change("oops_do, mark weak request -> mark strong done"); 2788 2789 oops_do_set_strong_done(old_head); 2790 // Do missing strong processing. 2791 p->do_remaining_strong_processing(this); 2792 } 2793 2794 void nmethod::oops_do_process_strong(OopsDoProcessor* p) { 2795 oops_do_mark_link* next_raw = oops_do_try_claim_strong_done(); 2796 if (next_raw == nullptr) { 2797 p->do_regular_processing(this); 2798 oops_do_add_to_list_as_strong_done(); 2799 return; 2800 } 2801 // Claim failed. Figure out why and handle it. 2802 if (oops_do_has_weak_request(next_raw)) { 2803 oops_do_mark_link* old = next_raw; 2804 // Claim failed because being weak processed (state == "weak request"). 2805 // Try to request deferred strong processing. 2806 next_raw = oops_do_try_add_strong_request(old); 2807 if (next_raw == old) { 2808 // Successfully requested deferred strong processing. 2809 return; 2810 } 2811 // Failed because of a concurrent transition. No longer in "weak request" state. 2812 } 2813 if (oops_do_has_any_strong_state(next_raw)) { 2814 // Already claimed for strong processing or requested for such. 2815 return; 2816 } 2817 if (oops_do_try_claim_weak_done_as_strong_done(next_raw)) { 2818 // Successfully claimed "weak done" as "strong done". Do the missing marking. 2819 p->do_remaining_strong_processing(this); 2820 return; 2821 } 2822 // Claim failed, some other thread got it. 2823 } 2824 2825 void nmethod::oops_do_marking_prologue() { 2826 assert_at_safepoint(); 2827 2828 log_trace(gc, nmethod)("oops_do_marking_prologue"); 2829 assert(_oops_do_mark_nmethods == nullptr, "must be empty"); 2830 } 2831 2832 void nmethod::oops_do_marking_epilogue() { 2833 assert_at_safepoint(); 2834 2835 nmethod* next = _oops_do_mark_nmethods; 2836 _oops_do_mark_nmethods = nullptr; 2837 if (next != nullptr) { 2838 nmethod* cur; 2839 do { 2840 cur = next; 2841 next = extract_nmethod(cur->_oops_do_mark_link); 2842 cur->_oops_do_mark_link = nullptr; 2843 DEBUG_ONLY(cur->verify_oop_relocations()); 2844 2845 LogTarget(Trace, gc, nmethod) lt; 2846 if (lt.is_enabled()) { 2847 LogStream ls(lt); 2848 CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true); 2849 } 2850 // End if self-loop has been detected. 2851 } while (cur != next); 2852 } 2853 log_trace(gc, nmethod)("oops_do_marking_epilogue"); 2854 } 2855 2856 inline bool includes(void* p, void* from, void* to) { 2857 return from <= p && p < to; 2858 } 2859 2860 2861 void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) { 2862 assert(count >= 2, "must be sentinel values, at least"); 2863 2864 #ifdef ASSERT 2865 // must be sorted and unique; we do a binary search in find_pc_desc() 2866 int prev_offset = pcs[0].pc_offset(); 2867 assert(prev_offset == PcDesc::lower_offset_limit, 2868 "must start with a sentinel"); 2869 for (int i = 1; i < count; i++) { 2870 int this_offset = pcs[i].pc_offset(); 2871 assert(this_offset > prev_offset, "offsets must be sorted"); 2872 prev_offset = this_offset; 2873 } 2874 assert(prev_offset == PcDesc::upper_offset_limit, 2875 "must end with a sentinel"); 2876 #endif //ASSERT 2877 2878 // Search for MethodHandle invokes and tag the nmethod. 2879 for (int i = 0; i < count; i++) { 2880 if (pcs[i].is_method_handle_invoke()) { 2881 set_has_method_handle_invokes(true); 2882 break; 2883 } 2884 } 2885 assert(has_method_handle_invokes() == (_deopt_mh_handler_offset != -1), "must have deopt mh handler"); 2886 2887 int size = count * sizeof(PcDesc); 2888 assert(scopes_pcs_size() >= size, "oob"); 2889 memcpy(scopes_pcs_begin(), pcs, size); 2890 2891 // Adjust the final sentinel downward. 2892 PcDesc* last_pc = &scopes_pcs_begin()[count-1]; 2893 assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity"); 2894 last_pc->set_pc_offset(content_size() + 1); 2895 for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) { 2896 // Fill any rounding gaps with copies of the last record. 2897 last_pc[1] = last_pc[0]; 2898 } 2899 // The following assert could fail if sizeof(PcDesc) is not 2900 // an integral multiple of oopSize (the rounding term). 2901 // If it fails, change the logic to always allocate a multiple 2902 // of sizeof(PcDesc), and fill unused words with copies of *last_pc. 2903 assert(last_pc + 1 == scopes_pcs_end(), "must match exactly"); 2904 } 2905 2906 void nmethod::copy_scopes_data(u_char* buffer, int size) { 2907 assert(scopes_data_size() >= size, "oob"); 2908 memcpy(scopes_data_begin(), buffer, size); 2909 } 2910 2911 #ifdef ASSERT 2912 static PcDesc* linear_search(int pc_offset, bool approximate, PcDesc* lower, PcDesc* upper) { 2913 PcDesc* res = nullptr; 2914 assert(lower != nullptr && lower->pc_offset() == PcDesc::lower_offset_limit, 2915 "must start with a sentinel"); 2916 // lower + 1 to exclude initial sentinel 2917 for (PcDesc* p = lower + 1; p < upper; p++) { 2918 NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests); // don't count this call to match_desc 2919 if (match_desc(p, pc_offset, approximate)) { 2920 if (res == nullptr) { 2921 res = p; 2922 } else { 2923 res = (PcDesc*) badAddress; 2924 } 2925 } 2926 } 2927 return res; 2928 } 2929 #endif 2930 2931 2932 #ifndef PRODUCT 2933 // Version of method to collect statistic 2934 PcDesc* PcDescContainer::find_pc_desc(address pc, bool approximate, address code_begin, 2935 PcDesc* lower, PcDesc* upper) { 2936 ++pc_nmethod_stats.pc_desc_queries; 2937 if (approximate) ++pc_nmethod_stats.pc_desc_approx; 2938 2939 PcDesc* desc = _pc_desc_cache.last_pc_desc(); 2940 assert(desc != nullptr, "PcDesc cache should be initialized already"); 2941 if (desc->pc_offset() == (pc - code_begin)) { 2942 // Cached value matched 2943 ++pc_nmethod_stats.pc_desc_tests; 2944 ++pc_nmethod_stats.pc_desc_repeats; 2945 return desc; 2946 } 2947 return find_pc_desc_internal(pc, approximate, code_begin, lower, upper); 2948 } 2949 #endif 2950 2951 // Finds a PcDesc with real-pc equal to "pc" 2952 PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, address code_begin, 2953 PcDesc* lower_incl, PcDesc* upper_incl) { 2954 if ((pc < code_begin) || 2955 (pc - code_begin) >= (ptrdiff_t) PcDesc::upper_offset_limit) { 2956 return nullptr; // PC is wildly out of range 2957 } 2958 int pc_offset = (int) (pc - code_begin); 2959 2960 // Check the PcDesc cache if it contains the desired PcDesc 2961 // (This as an almost 100% hit rate.) 2962 PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate); 2963 if (res != nullptr) { 2964 assert(res == linear_search(pc_offset, approximate, lower_incl, upper_incl), "cache ok"); 2965 return res; 2966 } 2967 2968 // Fallback algorithm: quasi-linear search for the PcDesc 2969 // Find the last pc_offset less than the given offset. 2970 // The successor must be the required match, if there is a match at all. 2971 // (Use a fixed radix to avoid expensive affine pointer arithmetic.) 2972 PcDesc* lower = lower_incl; // this is initial sentinel 2973 PcDesc* upper = upper_incl - 1; // exclude final sentinel 2974 if (lower >= upper) return nullptr; // no PcDescs at all 2975 2976 #define assert_LU_OK \ 2977 /* invariant on lower..upper during the following search: */ \ 2978 assert(lower->pc_offset() < pc_offset, "sanity"); \ 2979 assert(upper->pc_offset() >= pc_offset, "sanity") 2980 assert_LU_OK; 2981 2982 // Use the last successful return as a split point. 2983 PcDesc* mid = _pc_desc_cache.last_pc_desc(); 2984 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches); 2985 if (mid->pc_offset() < pc_offset) { 2986 lower = mid; 2987 } else { 2988 upper = mid; 2989 } 2990 2991 // Take giant steps at first (4096, then 256, then 16, then 1) 2992 const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ DEBUG_ONLY(-1); 2993 const int RADIX = (1 << LOG2_RADIX); 2994 for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) { 2995 while ((mid = lower + step) < upper) { 2996 assert_LU_OK; 2997 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches); 2998 if (mid->pc_offset() < pc_offset) { 2999 lower = mid; 3000 } else { 3001 upper = mid; 3002 break; 3003 } 3004 } 3005 assert_LU_OK; 3006 } 3007 3008 // Sneak up on the value with a linear search of length ~16. 3009 while (true) { 3010 assert_LU_OK; 3011 mid = lower + 1; 3012 NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches); 3013 if (mid->pc_offset() < pc_offset) { 3014 lower = mid; 3015 } else { 3016 upper = mid; 3017 break; 3018 } 3019 } 3020 #undef assert_LU_OK 3021 3022 if (match_desc(upper, pc_offset, approximate)) { 3023 assert(upper == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch"); 3024 if (!Thread::current_in_asgct()) { 3025 // we don't want to modify the cache if we're in ASGCT 3026 // which is typically called in a signal handler 3027 _pc_desc_cache.add_pc_desc(upper); 3028 } 3029 return upper; 3030 } else { 3031 assert(nullptr == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch"); 3032 return nullptr; 3033 } 3034 } 3035 3036 bool nmethod::check_dependency_on(DepChange& changes) { 3037 // What has happened: 3038 // 1) a new class dependee has been added 3039 // 2) dependee and all its super classes have been marked 3040 bool found_check = false; // set true if we are upset 3041 for (Dependencies::DepStream deps(this); deps.next(); ) { 3042 // Evaluate only relevant dependencies. 3043 if (deps.spot_check_dependency_at(changes) != nullptr) { 3044 found_check = true; 3045 NOT_DEBUG(break); 3046 } 3047 } 3048 return found_check; 3049 } 3050 3051 // Called from mark_for_deoptimization, when dependee is invalidated. 3052 bool nmethod::is_dependent_on_method(Method* dependee) { 3053 for (Dependencies::DepStream deps(this); deps.next(); ) { 3054 if (deps.type() != Dependencies::evol_method) 3055 continue; 3056 Method* method = deps.method_argument(0); 3057 if (method == dependee) return true; 3058 } 3059 return false; 3060 } 3061 3062 void nmethod_init() { 3063 // make sure you didn't forget to adjust the filler fields 3064 assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word"); 3065 } 3066 3067 // ----------------------------------------------------------------------------- 3068 // Verification 3069 3070 class VerifyOopsClosure: public OopClosure { 3071 nmethod* _nm; 3072 bool _ok; 3073 public: 3074 VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { } 3075 bool ok() { return _ok; } 3076 virtual void do_oop(oop* p) { 3077 if (oopDesc::is_oop_or_null(*p)) return; 3078 // Print diagnostic information before calling print_nmethod(). 3079 // Assertions therein might prevent call from returning. 3080 tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)", 3081 p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm)); 3082 if (_ok) { 3083 _nm->print_nmethod(true); 3084 _ok = false; 3085 } 3086 } 3087 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } 3088 }; 3089 3090 class VerifyMetadataClosure: public MetadataClosure { 3091 public: 3092 void do_metadata(Metadata* md) { 3093 if (md->is_method()) { 3094 Method* method = (Method*)md; 3095 assert(!method->is_old(), "Should not be installing old methods"); 3096 } 3097 } 3098 }; 3099 3100 3101 void nmethod::verify() { 3102 if (is_not_entrant()) 3103 return; 3104 3105 // assert(oopDesc::is_oop(method()), "must be valid"); 3106 3107 ResourceMark rm; 3108 3109 if (!CodeCache::contains(this)) { 3110 fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this)); 3111 } 3112 3113 if(is_native_method() ) 3114 return; 3115 3116 nmethod* nm = CodeCache::find_nmethod(verified_entry_point()); 3117 if (nm != this) { 3118 fatal("find_nmethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this)); 3119 } 3120 3121 // Verification can triggered during shutdown after AOTCodeCache is closed. 3122 // If the Scopes data is in the AOT code cache, then we should avoid verification during shutdown. 3123 if (!is_aot() || AOTCodeCache::is_on()) { 3124 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { 3125 if (! p->verify(this)) { 3126 tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this)); 3127 } 3128 } 3129 3130 #ifdef ASSERT 3131 #if INCLUDE_JVMCI 3132 { 3133 // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap 3134 ImmutableOopMapSet* oms = oop_maps(); 3135 ImplicitExceptionTable implicit_table(this); 3136 for (uint i = 0; i < implicit_table.len(); i++) { 3137 int exec_offset = (int) implicit_table.get_exec_offset(i); 3138 if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) { 3139 assert(pc_desc_at(code_begin() + exec_offset) != nullptr, "missing PcDesc"); 3140 bool found = false; 3141 for (int i = 0, imax = oms->count(); i < imax; i++) { 3142 if (oms->pair_at(i)->pc_offset() == exec_offset) { 3143 found = true; 3144 break; 3145 } 3146 } 3147 assert(found, "missing oopmap"); 3148 } 3149 } 3150 } 3151 #endif 3152 #endif 3153 } 3154 3155 VerifyOopsClosure voc(this); 3156 oops_do(&voc); 3157 assert(voc.ok(), "embedded oops must be OK"); 3158 Universe::heap()->verify_nmethod(this); 3159 3160 assert(_oops_do_mark_link == nullptr, "_oops_do_mark_link for %s should be nullptr but is " PTR_FORMAT, 3161 nm->method()->external_name(), p2i(_oops_do_mark_link)); 3162 if (!is_aot() || AOTCodeCache::is_on()) { 3163 verify_scopes(); 3164 } 3165 3166 CompiledICLocker nm_verify(this); 3167 VerifyMetadataClosure vmc; 3168 metadata_do(&vmc); 3169 } 3170 3171 3172 void nmethod::verify_interrupt_point(address call_site, bool is_inline_cache) { 3173 3174 // Verify IC only when nmethod installation is finished. 3175 if (!is_not_installed()) { 3176 if (CompiledICLocker::is_safe(this)) { 3177 if (is_inline_cache) { 3178 CompiledIC_at(this, call_site); 3179 } else { 3180 CompiledDirectCall::at(call_site); 3181 } 3182 } else { 3183 CompiledICLocker ml_verify(this); 3184 if (is_inline_cache) { 3185 CompiledIC_at(this, call_site); 3186 } else { 3187 CompiledDirectCall::at(call_site); 3188 } 3189 } 3190 } 3191 3192 HandleMark hm(Thread::current()); 3193 3194 PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address()); 3195 assert(pd != nullptr, "PcDesc must exist"); 3196 for (ScopeDesc* sd = new ScopeDesc(this, pd); 3197 !sd->is_top(); sd = sd->sender()) { 3198 sd->verify(); 3199 } 3200 } 3201 3202 void nmethod::verify_scopes() { 3203 if( !method() ) return; // Runtime stubs have no scope 3204 if (method()->is_native()) return; // Ignore stub methods. 3205 // iterate through all interrupt point 3206 // and verify the debug information is valid. 3207 RelocIterator iter(this); 3208 while (iter.next()) { 3209 address stub = nullptr; 3210 switch (iter.type()) { 3211 case relocInfo::virtual_call_type: 3212 verify_interrupt_point(iter.addr(), true /* is_inline_cache */); 3213 break; 3214 case relocInfo::opt_virtual_call_type: 3215 stub = iter.opt_virtual_call_reloc()->static_stub(); 3216 verify_interrupt_point(iter.addr(), false /* is_inline_cache */); 3217 break; 3218 case relocInfo::static_call_type: 3219 stub = iter.static_call_reloc()->static_stub(); 3220 verify_interrupt_point(iter.addr(), false /* is_inline_cache */); 3221 break; 3222 case relocInfo::runtime_call_type: 3223 case relocInfo::runtime_call_w_cp_type: { 3224 address destination = iter.reloc()->value(); 3225 // Right now there is no way to find out which entries support 3226 // an interrupt point. It would be nice if we had this 3227 // information in a table. 3228 break; 3229 } 3230 default: 3231 break; 3232 } 3233 assert(stub == nullptr || stub_contains(stub), "static call stub outside stub section"); 3234 } 3235 } 3236 3237 3238 // ----------------------------------------------------------------------------- 3239 // Printing operations 3240 3241 void nmethod::print_on_impl(outputStream* st) const { 3242 ResourceMark rm; 3243 3244 st->print("Compiled method "); 3245 3246 if (is_compiled_by_c1()) { 3247 st->print("(c1) "); 3248 } else if (is_compiled_by_c2()) { 3249 st->print("(c2) "); 3250 } else if (is_compiled_by_jvmci()) { 3251 st->print("(JVMCI) "); 3252 } else { 3253 st->print("(n/a) "); 3254 } 3255 3256 print_on_with_msg(st, nullptr); 3257 3258 if (WizardMode) { 3259 st->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this)); 3260 st->print(" for method " INTPTR_FORMAT , p2i(method())); 3261 st->print(" { "); 3262 st->print_cr("%s ", state()); 3263 st->print_cr("}:"); 3264 } 3265 if (size () > 0) st->print_cr(" total in heap [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3266 p2i(this), 3267 p2i(this) + size(), 3268 size()); 3269 if (consts_size () > 0) st->print_cr(" constants [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3270 p2i(consts_begin()), 3271 p2i(consts_end()), 3272 consts_size()); 3273 if (insts_size () > 0) st->print_cr(" main code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3274 p2i(insts_begin()), 3275 p2i(insts_end()), 3276 insts_size()); 3277 if (stub_size () > 0) st->print_cr(" stub code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3278 p2i(stub_begin()), 3279 p2i(stub_end()), 3280 stub_size()); 3281 if (oops_size () > 0) st->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3282 p2i(oops_begin()), 3283 p2i(oops_end()), 3284 oops_size()); 3285 if (mutable_data_size() > 0) st->print_cr(" mutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3286 p2i(mutable_data_begin()), 3287 p2i(mutable_data_end()), 3288 mutable_data_size()); 3289 if (relocation_size() > 0) st->print_cr(" relocation [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3290 p2i(relocation_begin()), 3291 p2i(relocation_end()), 3292 relocation_size()); 3293 if (metadata_size () > 0) st->print_cr(" metadata [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3294 p2i(metadata_begin()), 3295 p2i(metadata_end()), 3296 metadata_size()); 3297 #if INCLUDE_JVMCI 3298 if (jvmci_data_size () > 0) st->print_cr(" JVMCI data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3299 p2i(jvmci_data_begin()), 3300 p2i(jvmci_data_end()), 3301 jvmci_data_size()); 3302 #endif 3303 if (immutable_data_size() > 0) st->print_cr(" immutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3304 p2i(immutable_data_begin()), 3305 p2i(immutable_data_end()), 3306 immutable_data_size()); 3307 if (dependencies_size () > 0) st->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3308 p2i(dependencies_begin()), 3309 p2i(dependencies_end()), 3310 dependencies_size()); 3311 if (nul_chk_table_size() > 0) st->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3312 p2i(nul_chk_table_begin()), 3313 p2i(nul_chk_table_end()), 3314 nul_chk_table_size()); 3315 if (handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3316 p2i(handler_table_begin()), 3317 p2i(handler_table_end()), 3318 handler_table_size()); 3319 if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3320 p2i(scopes_pcs_begin()), 3321 p2i(scopes_pcs_end()), 3322 scopes_pcs_size()); 3323 if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3324 p2i(scopes_data_begin()), 3325 p2i(scopes_data_end()), 3326 scopes_data_size()); 3327 #if INCLUDE_JVMCI 3328 if (speculations_size () > 0) st->print_cr(" speculations [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 3329 p2i(speculations_begin()), 3330 p2i(speculations_end()), 3331 speculations_size()); 3332 #endif 3333 if (AOTCodeCache::is_on() && _aot_code_entry != nullptr) { 3334 _aot_code_entry->print(st); 3335 } 3336 } 3337 3338 void nmethod::print_code() { 3339 ResourceMark m; 3340 ttyLocker ttyl; 3341 // Call the specialized decode method of this class. 3342 decode(tty); 3343 } 3344 3345 #ifndef PRODUCT // called InstanceKlass methods are available only then. Declared as PRODUCT_RETURN 3346 3347 void nmethod::print_dependencies_on(outputStream* out) { 3348 ResourceMark rm; 3349 stringStream st; 3350 st.print_cr("Dependencies:"); 3351 for (Dependencies::DepStream deps(this); deps.next(); ) { 3352 deps.print_dependency(&st); 3353 InstanceKlass* ctxk = deps.context_type(); 3354 if (ctxk != nullptr) { 3355 if (ctxk->is_dependent_nmethod(this)) { 3356 st.print_cr(" [nmethod<=klass]%s", ctxk->external_name()); 3357 } 3358 } 3359 deps.log_dependency(); // put it into the xml log also 3360 } 3361 out->print_raw(st.as_string()); 3362 } 3363 #endif 3364 3365 #if defined(SUPPORT_DATA_STRUCTS) 3366 3367 // Print the oops from the underlying CodeBlob. 3368 void nmethod::print_oops(outputStream* st) { 3369 ResourceMark m; 3370 st->print("Oops:"); 3371 if (oops_begin() < oops_end()) { 3372 st->cr(); 3373 for (oop* p = oops_begin(); p < oops_end(); p++) { 3374 Disassembler::print_location((unsigned char*)p, (unsigned char*)oops_begin(), (unsigned char*)oops_end(), st, true, false); 3375 st->print(PTR_FORMAT " ", *((uintptr_t*)p)); 3376 if (Universe::contains_non_oop_word(p)) { 3377 st->print_cr("NON_OOP"); 3378 continue; // skip non-oops 3379 } 3380 if (*p == nullptr) { 3381 st->print_cr("nullptr-oop"); 3382 continue; // skip non-oops 3383 } 3384 (*p)->print_value_on(st); 3385 st->cr(); 3386 } 3387 } else { 3388 st->print_cr(" <list empty>"); 3389 } 3390 } 3391 3392 // Print metadata pool. 3393 void nmethod::print_metadata(outputStream* st) { 3394 ResourceMark m; 3395 st->print("Metadata:"); 3396 if (metadata_begin() < metadata_end()) { 3397 st->cr(); 3398 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) { 3399 Disassembler::print_location((unsigned char*)p, (unsigned char*)metadata_begin(), (unsigned char*)metadata_end(), st, true, false); 3400 st->print(PTR_FORMAT " ", *((uintptr_t*)p)); 3401 if (*p && *p != Universe::non_oop_word()) { 3402 (*p)->print_value_on(st); 3403 } 3404 st->cr(); 3405 } 3406 } else { 3407 st->print_cr(" <list empty>"); 3408 } 3409 } 3410 3411 #ifndef PRODUCT // ScopeDesc::print_on() is available only then. Declared as PRODUCT_RETURN 3412 void nmethod::print_scopes_on(outputStream* st) { 3413 // Find the first pc desc for all scopes in the code and print it. 3414 ResourceMark rm; 3415 st->print("scopes:"); 3416 if (scopes_pcs_begin() < scopes_pcs_end()) { 3417 st->cr(); 3418 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { 3419 if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null) 3420 continue; 3421 3422 ScopeDesc* sd = scope_desc_at(p->real_pc(this)); 3423 while (sd != nullptr) { 3424 sd->print_on(st, p); // print output ends with a newline 3425 sd = sd->sender(); 3426 } 3427 } 3428 } else { 3429 st->print_cr(" <list empty>"); 3430 } 3431 } 3432 #endif 3433 3434 #ifndef PRODUCT // RelocIterator does support printing only then. 3435 void nmethod::print_relocations_on(outputStream* st) { 3436 ResourceMark m; // in case methods get printed via the debugger 3437 st->print_cr("relocations:"); 3438 RelocIterator iter(this); 3439 iter.print_on(st); 3440 } 3441 #endif 3442 3443 void nmethod::print_pcs_on(outputStream* st) { 3444 ResourceMark m; // in case methods get printed via debugger 3445 st->print("pc-bytecode offsets:"); 3446 if (scopes_pcs_begin() < scopes_pcs_end()) { 3447 st->cr(); 3448 for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { 3449 p->print_on(st, this); // print output ends with a newline 3450 } 3451 } else { 3452 st->print_cr(" <list empty>"); 3453 } 3454 } 3455 3456 void nmethod::print_handler_table() { 3457 ExceptionHandlerTable(this).print(code_begin()); 3458 } 3459 3460 void nmethod::print_nul_chk_table() { 3461 ImplicitExceptionTable(this).print(code_begin()); 3462 } 3463 3464 void nmethod::print_recorded_oop(int log_n, int i) { 3465 void* value; 3466 3467 if (i == 0) { 3468 value = nullptr; 3469 } else { 3470 // Be careful around non-oop words. Don't create an oop 3471 // with that value, or it will assert in verification code. 3472 if (Universe::contains_non_oop_word(oop_addr_at(i))) { 3473 value = Universe::non_oop_word(); 3474 } else { 3475 value = oop_at(i); 3476 } 3477 } 3478 3479 tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(value)); 3480 3481 if (value == Universe::non_oop_word()) { 3482 tty->print("non-oop word"); 3483 } else { 3484 if (value == nullptr) { 3485 tty->print("nullptr-oop"); 3486 } else { 3487 oop_at(i)->print_value_on(tty); 3488 } 3489 } 3490 3491 tty->cr(); 3492 } 3493 3494 void nmethod::print_recorded_oops() { 3495 const int n = oops_count(); 3496 const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6; 3497 tty->print("Recorded oops:"); 3498 if (n > 0) { 3499 tty->cr(); 3500 for (int i = 0; i < n; i++) { 3501 print_recorded_oop(log_n, i); 3502 } 3503 } else { 3504 tty->print_cr(" <list empty>"); 3505 } 3506 } 3507 3508 void nmethod::print_recorded_metadata() { 3509 const int n = metadata_count(); 3510 const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6; 3511 tty->print("Recorded metadata:"); 3512 if (n > 0) { 3513 tty->cr(); 3514 for (int i = 0; i < n; i++) { 3515 Metadata* m = metadata_at(i); 3516 tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(m)); 3517 if (m == (Metadata*)Universe::non_oop_word()) { 3518 tty->print("non-metadata word"); 3519 } else if (m == nullptr) { 3520 tty->print("nullptr-oop"); 3521 } else { 3522 Metadata::print_value_on_maybe_null(tty, m); 3523 } 3524 tty->cr(); 3525 } 3526 } else { 3527 tty->print_cr(" <list empty>"); 3528 } 3529 } 3530 #endif 3531 3532 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY) 3533 3534 void nmethod::print_constant_pool(outputStream* st) { 3535 //----------------------------------- 3536 //---< Print the constant pool >--- 3537 //----------------------------------- 3538 int consts_size = this->consts_size(); 3539 if ( consts_size > 0 ) { 3540 unsigned char* cstart = this->consts_begin(); 3541 unsigned char* cp = cstart; 3542 unsigned char* cend = cp + consts_size; 3543 unsigned int bytes_per_line = 4; 3544 unsigned int CP_alignment = 8; 3545 unsigned int n; 3546 3547 st->cr(); 3548 3549 //---< print CP header to make clear what's printed >--- 3550 if( ((uintptr_t)cp&(CP_alignment-1)) == 0 ) { 3551 n = bytes_per_line; 3552 st->print_cr("[Constant Pool]"); 3553 Disassembler::print_location(cp, cstart, cend, st, true, true); 3554 Disassembler::print_hexdata(cp, n, st, true); 3555 st->cr(); 3556 } else { 3557 n = (int)((uintptr_t)cp & (bytes_per_line-1)); 3558 st->print_cr("[Constant Pool (unaligned)]"); 3559 } 3560 3561 //---< print CP contents, bytes_per_line at a time >--- 3562 while (cp < cend) { 3563 Disassembler::print_location(cp, cstart, cend, st, true, false); 3564 Disassembler::print_hexdata(cp, n, st, false); 3565 cp += n; 3566 n = bytes_per_line; 3567 st->cr(); 3568 } 3569 3570 //---< Show potential alignment gap between constant pool and code >--- 3571 cend = code_begin(); 3572 if( cp < cend ) { 3573 n = 4; 3574 st->print_cr("[Code entry alignment]"); 3575 while (cp < cend) { 3576 Disassembler::print_location(cp, cstart, cend, st, false, false); 3577 cp += n; 3578 st->cr(); 3579 } 3580 } 3581 } else { 3582 st->print_cr("[Constant Pool (empty)]"); 3583 } 3584 st->cr(); 3585 } 3586 3587 #endif 3588 3589 // Disassemble this nmethod. 3590 // Print additional debug information, if requested. This could be code 3591 // comments, block comments, profiling counters, etc. 3592 // The undisassembled format is useful no disassembler library is available. 3593 // The resulting hex dump (with markers) can be disassembled later, or on 3594 // another system, when/where a disassembler library is available. 3595 void nmethod::decode2(outputStream* ost) const { 3596 3597 // Called from frame::back_trace_with_decode without ResourceMark. 3598 ResourceMark rm; 3599 3600 // Make sure we have a valid stream to print on. 3601 outputStream* st = ost ? ost : tty; 3602 3603 #if defined(SUPPORT_ABSTRACT_ASSEMBLY) && ! defined(SUPPORT_ASSEMBLY) 3604 const bool use_compressed_format = true; 3605 const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() || 3606 AbstractDisassembler::show_block_comment()); 3607 #else 3608 const bool use_compressed_format = Disassembler::is_abstract(); 3609 const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() || 3610 AbstractDisassembler::show_block_comment()); 3611 #endif 3612 3613 st->cr(); 3614 this->print_on(st); 3615 st->cr(); 3616 3617 #if defined(SUPPORT_ASSEMBLY) 3618 //---------------------------------- 3619 //---< Print real disassembly >--- 3620 //---------------------------------- 3621 if (! use_compressed_format) { 3622 st->print_cr("[Disassembly]"); 3623 Disassembler::decode(const_cast<nmethod*>(this), st); 3624 st->bol(); 3625 st->print_cr("[/Disassembly]"); 3626 return; 3627 } 3628 #endif 3629 3630 #if defined(SUPPORT_ABSTRACT_ASSEMBLY) 3631 3632 // Compressed undisassembled disassembly format. 3633 // The following status values are defined/supported: 3634 // = 0 - currently at bol() position, nothing printed yet on current line. 3635 // = 1 - currently at position after print_location(). 3636 // > 1 - in the midst of printing instruction stream bytes. 3637 int compressed_format_idx = 0; 3638 int code_comment_column = 0; 3639 const int instr_maxlen = Assembler::instr_maxlen(); 3640 const uint tabspacing = 8; 3641 unsigned char* start = this->code_begin(); 3642 unsigned char* p = this->code_begin(); 3643 unsigned char* end = this->code_end(); 3644 unsigned char* pss = p; // start of a code section (used for offsets) 3645 3646 if ((start == nullptr) || (end == nullptr)) { 3647 st->print_cr("PrintAssembly not possible due to uninitialized section pointers"); 3648 return; 3649 } 3650 #endif 3651 3652 #if defined(SUPPORT_ABSTRACT_ASSEMBLY) 3653 //---< plain abstract disassembly, no comments or anything, just section headers >--- 3654 if (use_compressed_format && ! compressed_with_comments) { 3655 const_cast<nmethod*>(this)->print_constant_pool(st); 3656 3657 st->bol(); 3658 st->cr(); 3659 st->print_cr("Loading hsdis library failed, undisassembled code is shown in MachCode section"); 3660 //---< Open the output (Marker for post-mortem disassembler) >--- 3661 st->print_cr("[MachCode]"); 3662 const char* header = nullptr; 3663 address p0 = p; 3664 while (p < end) { 3665 address pp = p; 3666 while ((p < end) && (header == nullptr)) { 3667 header = nmethod_section_label(p); 3668 pp = p; 3669 p += Assembler::instr_len(p); 3670 } 3671 if (pp > p0) { 3672 AbstractDisassembler::decode_range_abstract(p0, pp, start, end, st, Assembler::instr_maxlen()); 3673 p0 = pp; 3674 p = pp; 3675 header = nullptr; 3676 } else if (header != nullptr) { 3677 st->bol(); 3678 st->print_cr("%s", header); 3679 header = nullptr; 3680 } 3681 } 3682 //---< Close the output (Marker for post-mortem disassembler) >--- 3683 st->bol(); 3684 st->print_cr("[/MachCode]"); 3685 return; 3686 } 3687 #endif 3688 3689 #if defined(SUPPORT_ABSTRACT_ASSEMBLY) 3690 //---< abstract disassembly with comments and section headers merged in >--- 3691 if (compressed_with_comments) { 3692 const_cast<nmethod*>(this)->print_constant_pool(st); 3693 3694 st->bol(); 3695 st->cr(); 3696 st->print_cr("Loading hsdis library failed, undisassembled code is shown in MachCode section"); 3697 //---< Open the output (Marker for post-mortem disassembler) >--- 3698 st->print_cr("[MachCode]"); 3699 while ((p < end) && (p != nullptr)) { 3700 const int instruction_size_in_bytes = Assembler::instr_len(p); 3701 3702 //---< Block comments for nmethod. Interrupts instruction stream, if any. >--- 3703 // Outputs a bol() before and a cr() after, but only if a comment is printed. 3704 // Prints nmethod_section_label as well. 3705 if (AbstractDisassembler::show_block_comment()) { 3706 print_block_comment(st, p); 3707 if (st->position() == 0) { 3708 compressed_format_idx = 0; 3709 } 3710 } 3711 3712 //---< New location information after line break >--- 3713 if (compressed_format_idx == 0) { 3714 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false); 3715 compressed_format_idx = 1; 3716 } 3717 3718 //---< Code comment for current instruction. Address range [p..(p+len)) >--- 3719 unsigned char* p_end = p + (ssize_t)instruction_size_in_bytes; 3720 S390_ONLY(if (p_end > end) p_end = end;) // avoid getting past the end 3721 3722 if (AbstractDisassembler::show_comment() && const_cast<nmethod*>(this)->has_code_comment(p, p_end)) { 3723 //---< interrupt instruction byte stream for code comment >--- 3724 if (compressed_format_idx > 1) { 3725 st->cr(); // interrupt byte stream 3726 st->cr(); // add an empty line 3727 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false); 3728 } 3729 const_cast<nmethod*>(this)->print_code_comment_on(st, code_comment_column, p, p_end ); 3730 st->bol(); 3731 compressed_format_idx = 0; 3732 } 3733 3734 //---< New location information after line break >--- 3735 if (compressed_format_idx == 0) { 3736 code_comment_column = Disassembler::print_location(p, pss, end, st, false, false); 3737 compressed_format_idx = 1; 3738 } 3739 3740 //---< Nicely align instructions for readability >--- 3741 if (compressed_format_idx > 1) { 3742 Disassembler::print_delimiter(st); 3743 } 3744 3745 //---< Now, finally, print the actual instruction bytes >--- 3746 unsigned char* p0 = p; 3747 p = Disassembler::decode_instruction_abstract(p, st, instruction_size_in_bytes, instr_maxlen); 3748 compressed_format_idx += (int)(p - p0); 3749 3750 if (Disassembler::start_newline(compressed_format_idx-1)) { 3751 st->cr(); 3752 compressed_format_idx = 0; 3753 } 3754 } 3755 //---< Close the output (Marker for post-mortem disassembler) >--- 3756 st->bol(); 3757 st->print_cr("[/MachCode]"); 3758 return; 3759 } 3760 #endif 3761 } 3762 3763 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY) 3764 3765 const char* nmethod::reloc_string_for(u_char* begin, u_char* end) { 3766 RelocIterator iter(this, begin, end); 3767 bool have_one = false; 3768 while (iter.next()) { 3769 have_one = true; 3770 switch (iter.type()) { 3771 case relocInfo::none: { 3772 // Skip it and check next 3773 break; 3774 } 3775 case relocInfo::oop_type: { 3776 // Get a non-resizable resource-allocated stringStream. 3777 // Our callees make use of (nested) ResourceMarks. 3778 stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024); 3779 oop_Relocation* r = iter.oop_reloc(); 3780 oop obj = r->oop_value(); 3781 st.print("oop("); 3782 if (obj == nullptr) st.print("nullptr"); 3783 else obj->print_value_on(&st); 3784 st.print(")"); 3785 return st.as_string(); 3786 } 3787 case relocInfo::metadata_type: { 3788 stringStream st; 3789 metadata_Relocation* r = iter.metadata_reloc(); 3790 Metadata* obj = r->metadata_value(); 3791 st.print("metadata("); 3792 if (obj == nullptr) st.print("nullptr"); 3793 else obj->print_value_on(&st); 3794 st.print(")"); 3795 return st.as_string(); 3796 } 3797 case relocInfo::runtime_call_type: 3798 case relocInfo::runtime_call_w_cp_type: { 3799 stringStream st; 3800 st.print("runtime_call"); 3801 CallRelocation* r = (CallRelocation*)iter.reloc(); 3802 address dest = r->destination(); 3803 if (StubRoutines::contains(dest)) { 3804 StubCodeDesc* desc = StubCodeDesc::desc_for(dest); 3805 if (desc == nullptr) { 3806 desc = StubCodeDesc::desc_for(dest + frame::pc_return_offset); 3807 } 3808 if (desc != nullptr) { 3809 st.print(" Stub::%s", desc->name()); 3810 return st.as_string(); 3811 } 3812 } 3813 CodeBlob* cb = CodeCache::find_blob(dest); 3814 if (cb != nullptr) { 3815 st.print(" %s", cb->name()); 3816 } else { 3817 ResourceMark rm; 3818 const int buflen = 1024; 3819 char* buf = NEW_RESOURCE_ARRAY(char, buflen); 3820 int offset; 3821 if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) { 3822 st.print(" %s", buf); 3823 if (offset != 0) { 3824 st.print("+%d", offset); 3825 } 3826 } 3827 } 3828 return st.as_string(); 3829 } 3830 case relocInfo::virtual_call_type: { 3831 stringStream st; 3832 st.print_raw("virtual_call"); 3833 virtual_call_Relocation* r = iter.virtual_call_reloc(); 3834 Method* m = r->method_value(); 3835 if (m != nullptr) { 3836 assert(m->is_method(), ""); 3837 m->print_short_name(&st); 3838 } 3839 return st.as_string(); 3840 } 3841 case relocInfo::opt_virtual_call_type: { 3842 stringStream st; 3843 st.print_raw("optimized virtual_call"); 3844 opt_virtual_call_Relocation* r = iter.opt_virtual_call_reloc(); 3845 Method* m = r->method_value(); 3846 if (m != nullptr) { 3847 assert(m->is_method(), ""); 3848 m->print_short_name(&st); 3849 } 3850 return st.as_string(); 3851 } 3852 case relocInfo::static_call_type: { 3853 stringStream st; 3854 st.print_raw("static_call"); 3855 static_call_Relocation* r = iter.static_call_reloc(); 3856 Method* m = r->method_value(); 3857 if (m != nullptr) { 3858 assert(m->is_method(), ""); 3859 m->print_short_name(&st); 3860 } 3861 return st.as_string(); 3862 } 3863 case relocInfo::static_stub_type: return "static_stub"; 3864 case relocInfo::external_word_type: return "external_word"; 3865 case relocInfo::internal_word_type: return "internal_word"; 3866 case relocInfo::section_word_type: return "section_word"; 3867 case relocInfo::poll_type: return "poll"; 3868 case relocInfo::poll_return_type: return "poll_return"; 3869 case relocInfo::trampoline_stub_type: return "trampoline_stub"; 3870 case relocInfo::entry_guard_type: return "entry_guard"; 3871 case relocInfo::post_call_nop_type: return "post_call_nop"; 3872 case relocInfo::barrier_type: { 3873 barrier_Relocation* const reloc = iter.barrier_reloc(); 3874 stringStream st; 3875 st.print("barrier format=%d", reloc->format()); 3876 return st.as_string(); 3877 } 3878 3879 case relocInfo::type_mask: return "type_bit_mask"; 3880 3881 default: { 3882 stringStream st; 3883 st.print("unknown relocInfo=%d", (int) iter.type()); 3884 return st.as_string(); 3885 } 3886 } 3887 } 3888 return have_one ? "other" : nullptr; 3889 } 3890 3891 // Return the last scope in (begin..end] 3892 ScopeDesc* nmethod::scope_desc_in(address begin, address end) { 3893 PcDesc* p = pc_desc_near(begin+1); 3894 if (p != nullptr && p->real_pc(this) <= end) { 3895 return new ScopeDesc(this, p); 3896 } 3897 return nullptr; 3898 } 3899 3900 const char* nmethod::nmethod_section_label(address pos) const { 3901 const char* label = nullptr; 3902 if (pos == code_begin()) label = "[Instructions begin]"; 3903 if (pos == entry_point()) label = "[Entry Point]"; 3904 if (pos == verified_entry_point()) label = "[Verified Entry Point]"; 3905 if (has_method_handle_invokes() && (pos == deopt_mh_handler_begin())) label = "[Deopt MH Handler Code]"; 3906 if (pos == consts_begin() && pos != insts_begin()) label = "[Constants]"; 3907 // Check stub_code before checking exception_handler or deopt_handler. 3908 if (pos == this->stub_begin()) label = "[Stub Code]"; 3909 if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]"; 3910 if (JVMCI_ONLY(_deopt_handler_offset != -1 &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]"; 3911 return label; 3912 } 3913 3914 void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels) const { 3915 if (print_section_labels) { 3916 const char* label = nmethod_section_label(block_begin); 3917 if (label != nullptr) { 3918 stream->bol(); 3919 stream->print_cr("%s", label); 3920 } 3921 } 3922 3923 if (block_begin == entry_point()) { 3924 Method* m = method(); 3925 if (m != nullptr) { 3926 stream->print(" # "); 3927 m->print_value_on(stream); 3928 stream->cr(); 3929 } 3930 if (m != nullptr && !is_osr_method()) { 3931 ResourceMark rm; 3932 int sizeargs = m->size_of_parameters(); 3933 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs); 3934 VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs); 3935 { 3936 int sig_index = 0; 3937 if (!m->is_static()) 3938 sig_bt[sig_index++] = T_OBJECT; // 'this' 3939 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) { 3940 BasicType t = ss.type(); 3941 sig_bt[sig_index++] = t; 3942 if (type2size[t] == 2) { 3943 sig_bt[sig_index++] = T_VOID; 3944 } else { 3945 assert(type2size[t] == 1, "size is 1 or 2"); 3946 } 3947 } 3948 assert(sig_index == sizeargs, ""); 3949 } 3950 const char* spname = "sp"; // make arch-specific? 3951 SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs); 3952 int stack_slot_offset = this->frame_size() * wordSize; 3953 int tab1 = 14, tab2 = 24; 3954 int sig_index = 0; 3955 int arg_index = (m->is_static() ? 0 : -1); 3956 bool did_old_sp = false; 3957 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) { 3958 bool at_this = (arg_index == -1); 3959 bool at_old_sp = false; 3960 BasicType t = (at_this ? T_OBJECT : ss.type()); 3961 assert(t == sig_bt[sig_index], "sigs in sync"); 3962 if (at_this) 3963 stream->print(" # this: "); 3964 else 3965 stream->print(" # parm%d: ", arg_index); 3966 stream->move_to(tab1); 3967 VMReg fst = regs[sig_index].first(); 3968 VMReg snd = regs[sig_index].second(); 3969 if (fst->is_reg()) { 3970 stream->print("%s", fst->name()); 3971 if (snd->is_valid()) { 3972 stream->print(":%s", snd->name()); 3973 } 3974 } else if (fst->is_stack()) { 3975 int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset; 3976 if (offset == stack_slot_offset) at_old_sp = true; 3977 stream->print("[%s+0x%x]", spname, offset); 3978 } else { 3979 stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd); 3980 } 3981 stream->print(" "); 3982 stream->move_to(tab2); 3983 stream->print("= "); 3984 if (at_this) { 3985 m->method_holder()->print_value_on(stream); 3986 } else { 3987 bool did_name = false; 3988 if (!at_this && ss.is_reference()) { 3989 Symbol* name = ss.as_symbol(); 3990 name->print_value_on(stream); 3991 did_name = true; 3992 } 3993 if (!did_name) 3994 stream->print("%s", type2name(t)); 3995 } 3996 if (at_old_sp) { 3997 stream->print(" (%s of caller)", spname); 3998 did_old_sp = true; 3999 } 4000 stream->cr(); 4001 sig_index += type2size[t]; 4002 arg_index += 1; 4003 if (!at_this) ss.next(); 4004 } 4005 if (!did_old_sp) { 4006 stream->print(" # "); 4007 stream->move_to(tab1); 4008 stream->print("[%s+0x%x]", spname, stack_slot_offset); 4009 stream->print(" (%s of caller)", spname); 4010 stream->cr(); 4011 } 4012 } 4013 } 4014 } 4015 4016 // Returns whether this nmethod has code comments. 4017 bool nmethod::has_code_comment(address begin, address end) { 4018 // scopes? 4019 ScopeDesc* sd = scope_desc_in(begin, end); 4020 if (sd != nullptr) return true; 4021 4022 // relocations? 4023 const char* str = reloc_string_for(begin, end); 4024 if (str != nullptr) return true; 4025 4026 // implicit exceptions? 4027 int cont_offset = ImplicitExceptionTable(this).continuation_offset((uint)(begin - code_begin())); 4028 if (cont_offset != 0) return true; 4029 4030 return false; 4031 } 4032 4033 void nmethod::print_code_comment_on(outputStream* st, int column, address begin, address end) { 4034 ImplicitExceptionTable implicit_table(this); 4035 int pc_offset = (int)(begin - code_begin()); 4036 int cont_offset = implicit_table.continuation_offset(pc_offset); 4037 bool oop_map_required = false; 4038 if (cont_offset != 0) { 4039 st->move_to(column, 6, 0); 4040 if (pc_offset == cont_offset) { 4041 st->print("; implicit exception: deoptimizes"); 4042 oop_map_required = true; 4043 } else { 4044 st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset)); 4045 } 4046 } 4047 4048 // Find an oopmap in (begin, end]. We use the odd half-closed 4049 // interval so that oop maps and scope descs which are tied to the 4050 // byte after a call are printed with the call itself. OopMaps 4051 // associated with implicit exceptions are printed with the implicit 4052 // instruction. 4053 address base = code_begin(); 4054 ImmutableOopMapSet* oms = oop_maps(); 4055 if (oms != nullptr) { 4056 for (int i = 0, imax = oms->count(); i < imax; i++) { 4057 const ImmutableOopMapPair* pair = oms->pair_at(i); 4058 const ImmutableOopMap* om = pair->get_from(oms); 4059 address pc = base + pair->pc_offset(); 4060 if (pc >= begin) { 4061 #if INCLUDE_JVMCI 4062 bool is_implicit_deopt = implicit_table.continuation_offset(pair->pc_offset()) == (uint) pair->pc_offset(); 4063 #else 4064 bool is_implicit_deopt = false; 4065 #endif 4066 if (is_implicit_deopt ? pc == begin : pc > begin && pc <= end) { 4067 st->move_to(column, 6, 0); 4068 st->print("; "); 4069 om->print_on(st); 4070 oop_map_required = false; 4071 } 4072 } 4073 if (pc > end) { 4074 break; 4075 } 4076 } 4077 } 4078 assert(!oop_map_required, "missed oopmap"); 4079 4080 Thread* thread = Thread::current(); 4081 4082 // Print any debug info present at this pc. 4083 ScopeDesc* sd = scope_desc_in(begin, end); 4084 if (sd != nullptr) { 4085 st->move_to(column, 6, 0); 4086 if (sd->bci() == SynchronizationEntryBCI) { 4087 st->print(";*synchronization entry"); 4088 } else if (sd->bci() == AfterBci) { 4089 st->print(";* method exit (unlocked if synchronized)"); 4090 } else if (sd->bci() == UnwindBci) { 4091 st->print(";* unwind (locked if synchronized)"); 4092 } else if (sd->bci() == AfterExceptionBci) { 4093 st->print(";* unwind (unlocked if synchronized)"); 4094 } else if (sd->bci() == UnknownBci) { 4095 st->print(";* unknown"); 4096 } else if (sd->bci() == InvalidFrameStateBci) { 4097 st->print(";* invalid frame state"); 4098 } else { 4099 if (sd->method() == nullptr) { 4100 st->print("method is nullptr"); 4101 } else if (sd->method()->is_native()) { 4102 st->print("method is native"); 4103 } else { 4104 Bytecodes::Code bc = sd->method()->java_code_at(sd->bci()); 4105 st->print(";*%s", Bytecodes::name(bc)); 4106 switch (bc) { 4107 case Bytecodes::_invokevirtual: 4108 case Bytecodes::_invokespecial: 4109 case Bytecodes::_invokestatic: 4110 case Bytecodes::_invokeinterface: 4111 { 4112 Bytecode_invoke invoke(methodHandle(thread, sd->method()), sd->bci()); 4113 st->print(" "); 4114 if (invoke.name() != nullptr) 4115 invoke.name()->print_symbol_on(st); 4116 else 4117 st->print("<UNKNOWN>"); 4118 break; 4119 } 4120 case Bytecodes::_getfield: 4121 case Bytecodes::_putfield: 4122 case Bytecodes::_getstatic: 4123 case Bytecodes::_putstatic: 4124 { 4125 Bytecode_field field(methodHandle(thread, sd->method()), sd->bci()); 4126 st->print(" "); 4127 if (field.name() != nullptr) 4128 field.name()->print_symbol_on(st); 4129 else 4130 st->print("<UNKNOWN>"); 4131 } 4132 default: 4133 break; 4134 } 4135 } 4136 st->print(" {reexecute=%d rethrow=%d return_oop=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop()); 4137 } 4138 4139 // Print all scopes 4140 for (;sd != nullptr; sd = sd->sender()) { 4141 st->move_to(column, 6, 0); 4142 st->print("; -"); 4143 if (sd->should_reexecute()) { 4144 st->print(" (reexecute)"); 4145 } 4146 if (sd->method() == nullptr) { 4147 st->print("method is nullptr"); 4148 } else { 4149 sd->method()->print_short_name(st); 4150 } 4151 int lineno = sd->method()->line_number_from_bci(sd->bci()); 4152 if (lineno != -1) { 4153 st->print("@%d (line %d)", sd->bci(), lineno); 4154 } else { 4155 st->print("@%d", sd->bci()); 4156 } 4157 st->cr(); 4158 } 4159 } 4160 4161 // Print relocation information 4162 // Prevent memory leak: allocating without ResourceMark. 4163 ResourceMark rm; 4164 const char* str = reloc_string_for(begin, end); 4165 if (str != nullptr) { 4166 if (sd != nullptr) st->cr(); 4167 st->move_to(column, 6, 0); 4168 st->print("; {%s}", str); 4169 } 4170 } 4171 4172 #endif 4173 4174 address nmethod::call_instruction_address(address pc) const { 4175 if (NativeCall::is_call_before(pc)) { 4176 NativeCall *ncall = nativeCall_before(pc); 4177 return ncall->instruction_address(); 4178 } 4179 return nullptr; 4180 } 4181 4182 void nmethod::print_value_on_impl(outputStream* st) const { 4183 st->print_cr("nmethod"); 4184 #if defined(SUPPORT_DATA_STRUCTS) 4185 print_on_with_msg(st, nullptr); 4186 #endif 4187 } 4188 4189 #ifndef PRODUCT 4190 4191 void nmethod::print_calls(outputStream* st) { 4192 RelocIterator iter(this); 4193 while (iter.next()) { 4194 switch (iter.type()) { 4195 case relocInfo::virtual_call_type: { 4196 CompiledICLocker ml_verify(this); 4197 CompiledIC_at(&iter)->print(); 4198 break; 4199 } 4200 case relocInfo::static_call_type: 4201 case relocInfo::opt_virtual_call_type: 4202 st->print_cr("Direct call at " INTPTR_FORMAT, p2i(iter.reloc()->addr())); 4203 CompiledDirectCall::at(iter.reloc())->print(); 4204 break; 4205 default: 4206 break; 4207 } 4208 } 4209 } 4210 4211 void nmethod::print_statistics() { 4212 ttyLocker ttyl; 4213 if (xtty != nullptr) xtty->head("statistics type='nmethod'"); 4214 native_nmethod_stats.print_native_nmethod_stats(); 4215 #ifdef COMPILER1 4216 c1_java_nmethod_stats.print_nmethod_stats("C1"); 4217 #endif 4218 #ifdef COMPILER2 4219 c2_java_nmethod_stats.print_nmethod_stats("C2"); 4220 #endif 4221 #if INCLUDE_JVMCI 4222 jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI"); 4223 #endif 4224 unknown_java_nmethod_stats.print_nmethod_stats("Unknown"); 4225 DebugInformationRecorder::print_statistics(); 4226 pc_nmethod_stats.print_pc_stats(); 4227 Dependencies::print_statistics(); 4228 ExternalsRecorder::print_statistics(); 4229 if (xtty != nullptr) xtty->tail("statistics"); 4230 } 4231 4232 #endif // !PRODUCT 4233 4234 #if INCLUDE_JVMCI 4235 void nmethod::update_speculation(JavaThread* thread) { 4236 jlong speculation = thread->pending_failed_speculation(); 4237 if (speculation != 0) { 4238 guarantee(jvmci_nmethod_data() != nullptr, "failed speculation in nmethod without failed speculation list"); 4239 jvmci_nmethod_data()->add_failed_speculation(this, speculation); 4240 thread->set_pending_failed_speculation(0); 4241 } 4242 } 4243 4244 const char* nmethod::jvmci_name() { 4245 if (jvmci_nmethod_data() != nullptr) { 4246 return jvmci_nmethod_data()->name(); 4247 } 4248 return nullptr; 4249 } 4250 4251 bool nmethod::jvmci_skip_profile_deopt() const { 4252 return jvmci_nmethod_data() != nullptr && !jvmci_nmethod_data()->profile_deopt(); 4253 } 4254 #endif 4255 4256 void nmethod::prepare_for_archiving_impl() { 4257 CodeBlob::prepare_for_archiving_impl(); 4258 _deoptimization_generation = 0; 4259 _gc_epoch = 0; 4260 _method_profiling_count = 0; 4261 _osr_link = nullptr; 4262 _method = nullptr; 4263 _immutable_data = nullptr; 4264 _pc_desc_container = nullptr; 4265 _exception_cache = nullptr; 4266 _gc_data = nullptr; 4267 _oops_do_mark_link = nullptr; 4268 _compiled_ic_data = nullptr; 4269 _osr_entry_point = nullptr; 4270 _compile_id = -1; 4271 _deoptimization_status = not_marked; 4272 _is_unloading_state = 0; 4273 _state = not_installed; 4274 }