1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/assembler.inline.hpp"
  26 #include "code/codeCache.hpp"
  27 #include "code/compiledIC.hpp"
  28 #include "code/dependencies.hpp"
  29 #include "code/nativeInst.hpp"
  30 #include "code/nmethod.inline.hpp"
  31 #include "code/relocInfo.hpp"
  32 #include "code/scopeDesc.hpp"
  33 #include "compiler/abstractCompiler.hpp"
  34 #include "compiler/compilationLog.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/compileLog.hpp"
  37 #include "compiler/compileTask.hpp"
  38 #include "compiler/compilerDirectives.hpp"
  39 #include "compiler/compilerOracle.hpp"
  40 #include "compiler/directivesParser.hpp"
  41 #include "compiler/disassembler.hpp"
  42 #include "compiler/oopMap.inline.hpp"
  43 #include "gc/shared/barrierSet.hpp"
  44 #include "gc/shared/barrierSetNMethod.hpp"
  45 #include "gc/shared/classUnloadingContext.hpp"
  46 #include "gc/shared/collectedHeap.hpp"
  47 #include "interpreter/bytecode.inline.hpp"
  48 #include "jvm.h"
  49 #include "logging/log.hpp"
  50 #include "logging/logStream.hpp"
  51 #include "memory/allocation.inline.hpp"
  52 #include "memory/resourceArea.hpp"
  53 #include "memory/universe.hpp"
  54 #include "oops/access.inline.hpp"
  55 #include "oops/klass.inline.hpp"
  56 #include "oops/method.inline.hpp"
  57 #include "oops/methodData.hpp"
  58 #include "oops/oop.inline.hpp"
  59 #include "oops/weakHandle.inline.hpp"
  60 #include "prims/jvmtiImpl.hpp"
  61 #include "prims/jvmtiThreadState.hpp"
  62 #include "prims/methodHandles.hpp"
  63 #include "runtime/continuation.hpp"
  64 #include "runtime/atomic.hpp"
  65 #include "runtime/deoptimization.hpp"
  66 #include "runtime/flags/flagSetting.hpp"
  67 #include "runtime/frame.inline.hpp"
  68 #include "runtime/handles.inline.hpp"
  69 #include "runtime/jniHandles.inline.hpp"
  70 #include "runtime/orderAccess.hpp"
  71 #include "runtime/os.hpp"
  72 #include "runtime/safepointVerifiers.hpp"
  73 #include "runtime/serviceThread.hpp"
  74 #include "runtime/sharedRuntime.hpp"
  75 #include "runtime/signature.hpp"
  76 #include "runtime/threadWXSetters.inline.hpp"
  77 #include "runtime/vmThread.hpp"
  78 #include "utilities/align.hpp"
  79 #include "utilities/copy.hpp"
  80 #include "utilities/dtrace.hpp"
  81 #include "utilities/events.hpp"
  82 #include "utilities/globalDefinitions.hpp"
  83 #include "utilities/resourceHash.hpp"
  84 #include "utilities/xmlstream.hpp"
  85 #if INCLUDE_JVMCI
  86 #include "jvmci/jvmciRuntime.hpp"
  87 #endif
  88 
  89 #ifdef DTRACE_ENABLED
  90 
  91 // Only bother with this argument setup if dtrace is available
  92 
  93 #define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
  94   {                                                                       \
  95     Method* m = (method);                                                 \
  96     if (m != nullptr) {                                                   \
  97       Symbol* klass_name = m->klass_name();                               \
  98       Symbol* name = m->name();                                           \
  99       Symbol* signature = m->signature();                                 \
 100       HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
 101         (char *) klass_name->bytes(), klass_name->utf8_length(),          \
 102         (char *) name->bytes(), name->utf8_length(),                      \
 103         (char *) signature->bytes(), signature->utf8_length());           \
 104     }                                                                     \
 105   }
 106 
 107 #else //  ndef DTRACE_ENABLED
 108 
 109 #define DTRACE_METHOD_UNLOAD_PROBE(method)
 110 
 111 #endif
 112 
 113 // Cast from int value to narrow type
 114 #define CHECKED_CAST(result, T, thing)      \
 115   result = static_cast<T>(thing); \
 116   guarantee(static_cast<int>(result) == thing, "failed: %d != %d", static_cast<int>(result), thing);
 117 
 118 //---------------------------------------------------------------------------------
 119 // NMethod statistics
 120 // They are printed under various flags, including:
 121 //   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
 122 // (In the latter two cases, they like other stats are printed to the log only.)
 123 
 124 #ifndef PRODUCT
 125 // These variables are put into one block to reduce relocations
 126 // and make it simpler to print from the debugger.
 127 struct java_nmethod_stats_struct {
 128   uint nmethod_count;
 129   uint total_nm_size;
 130   uint total_immut_size;
 131   uint total_mut_size;
 132   uint relocation_size;
 133   uint consts_size;
 134   uint insts_size;
 135   uint stub_size;
 136   uint oops_size;
 137   uint metadata_size;
 138   uint dependencies_size;
 139   uint nul_chk_table_size;
 140   uint handler_table_size;
 141   uint scopes_pcs_size;
 142   uint scopes_data_size;
 143 #if INCLUDE_JVMCI
 144   uint speculations_size;
 145   uint jvmci_data_size;
 146 #endif
 147 
 148   void note_nmethod(nmethod* nm) {
 149     nmethod_count += 1;
 150     total_nm_size       += nm->size();
 151     total_immut_size    += nm->immutable_data_size();
 152     total_mut_size      += nm->mutable_data_size();
 153     relocation_size     += nm->relocation_size();
 154     consts_size         += nm->consts_size();
 155     insts_size          += nm->insts_size();
 156     stub_size           += nm->stub_size();
 157     oops_size           += nm->oops_size();
 158     metadata_size       += nm->metadata_size();
 159     scopes_data_size    += nm->scopes_data_size();
 160     scopes_pcs_size     += nm->scopes_pcs_size();
 161     dependencies_size   += nm->dependencies_size();
 162     handler_table_size  += nm->handler_table_size();
 163     nul_chk_table_size  += nm->nul_chk_table_size();
 164 #if INCLUDE_JVMCI
 165     speculations_size   += nm->speculations_size();
 166     jvmci_data_size     += nm->jvmci_data_size();
 167 #endif
 168   }
 169   void print_nmethod_stats(const char* name) {
 170     if (nmethod_count == 0)  return;
 171     tty->print_cr("Statistics for %u bytecoded nmethods for %s:", nmethod_count, name);
 172     uint total_size = total_nm_size + total_immut_size + total_mut_size;
 173     if (total_nm_size != 0) {
 174       tty->print_cr(" total size      = %u (100%%)", total_size);
 175       tty->print_cr(" in CodeCache    = %u (%f%%)", total_nm_size, (total_nm_size * 100.0f)/total_size);
 176     }
 177     uint header_size = (uint)(nmethod_count * sizeof(nmethod));
 178     if (nmethod_count != 0) {
 179       tty->print_cr("   header        = %u (%f%%)", header_size, (header_size * 100.0f)/total_nm_size);
 180     }
 181     if (consts_size != 0) {
 182       tty->print_cr("   constants     = %u (%f%%)", consts_size, (consts_size * 100.0f)/total_nm_size);
 183     }
 184     if (insts_size != 0) {
 185       tty->print_cr("   main code     = %u (%f%%)", insts_size, (insts_size * 100.0f)/total_nm_size);
 186     }
 187     if (stub_size != 0) {
 188       tty->print_cr("   stub code     = %u (%f%%)", stub_size, (stub_size * 100.0f)/total_nm_size);
 189     }
 190     if (oops_size != 0) {
 191       tty->print_cr("   oops          = %u (%f%%)", oops_size, (oops_size * 100.0f)/total_nm_size);
 192     }
 193     if (total_mut_size != 0) {
 194       tty->print_cr(" mutable data    = %u (%f%%)", total_mut_size, (total_mut_size * 100.0f)/total_size);
 195     }
 196     if (relocation_size != 0) {
 197       tty->print_cr("   relocation    = %u (%f%%)", relocation_size, (relocation_size * 100.0f)/total_mut_size);
 198     }
 199     if (metadata_size != 0) {
 200       tty->print_cr("   metadata      = %u (%f%%)", metadata_size, (metadata_size * 100.0f)/total_mut_size);
 201     }
 202 #if INCLUDE_JVMCI
 203     if (jvmci_data_size != 0) {
 204       tty->print_cr("   JVMCI data    = %u (%f%%)", jvmci_data_size, (jvmci_data_size * 100.0f)/total_mut_size);
 205     }
 206 #endif
 207     if (total_immut_size != 0) {
 208       tty->print_cr(" immutable data  = %u (%f%%)", total_immut_size, (total_immut_size * 100.0f)/total_size);
 209     }
 210     if (dependencies_size != 0) {
 211       tty->print_cr("   dependencies  = %u (%f%%)", dependencies_size, (dependencies_size * 100.0f)/total_immut_size);
 212     }
 213     if (nul_chk_table_size != 0) {
 214       tty->print_cr("   nul chk table = %u (%f%%)", nul_chk_table_size, (nul_chk_table_size * 100.0f)/total_immut_size);
 215     }
 216     if (handler_table_size != 0) {
 217       tty->print_cr("   handler table = %u (%f%%)", handler_table_size, (handler_table_size * 100.0f)/total_immut_size);
 218     }
 219     if (scopes_pcs_size != 0) {
 220       tty->print_cr("   scopes pcs    = %u (%f%%)", scopes_pcs_size, (scopes_pcs_size * 100.0f)/total_immut_size);
 221     }
 222     if (scopes_data_size != 0) {
 223       tty->print_cr("   scopes data   = %u (%f%%)", scopes_data_size, (scopes_data_size * 100.0f)/total_immut_size);
 224     }
 225 #if INCLUDE_JVMCI
 226     if (speculations_size != 0) {
 227       tty->print_cr("   speculations  = %u (%f%%)", speculations_size, (speculations_size * 100.0f)/total_immut_size);
 228     }
 229 #endif
 230   }
 231 };
 232 
 233 struct native_nmethod_stats_struct {
 234   uint native_nmethod_count;
 235   uint native_total_size;
 236   uint native_relocation_size;
 237   uint native_insts_size;
 238   uint native_oops_size;
 239   uint native_metadata_size;
 240   void note_native_nmethod(nmethod* nm) {
 241     native_nmethod_count += 1;
 242     native_total_size       += nm->size();
 243     native_relocation_size  += nm->relocation_size();
 244     native_insts_size       += nm->insts_size();
 245     native_oops_size        += nm->oops_size();
 246     native_metadata_size    += nm->metadata_size();
 247   }
 248   void print_native_nmethod_stats() {
 249     if (native_nmethod_count == 0)  return;
 250     tty->print_cr("Statistics for %u native nmethods:", native_nmethod_count);
 251     if (native_total_size != 0)       tty->print_cr(" N. total size  = %u", native_total_size);
 252     if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %u", native_relocation_size);
 253     if (native_insts_size != 0)       tty->print_cr(" N. main code   = %u", native_insts_size);
 254     if (native_oops_size != 0)        tty->print_cr(" N. oops        = %u", native_oops_size);
 255     if (native_metadata_size != 0)    tty->print_cr(" N. metadata    = %u", native_metadata_size);
 256   }
 257 };
 258 
 259 struct pc_nmethod_stats_struct {
 260   uint pc_desc_init;     // number of initialization of cache (= number of caches)
 261   uint pc_desc_queries;  // queries to nmethod::find_pc_desc
 262   uint pc_desc_approx;   // number of those which have approximate true
 263   uint pc_desc_repeats;  // number of _pc_descs[0] hits
 264   uint pc_desc_hits;     // number of LRU cache hits
 265   uint pc_desc_tests;    // total number of PcDesc examinations
 266   uint pc_desc_searches; // total number of quasi-binary search steps
 267   uint pc_desc_adds;     // number of LUR cache insertions
 268 
 269   void print_pc_stats() {
 270     tty->print_cr("PcDesc Statistics:  %u queries, %.2f comparisons per query",
 271                   pc_desc_queries,
 272                   (double)(pc_desc_tests + pc_desc_searches)
 273                   / pc_desc_queries);
 274     tty->print_cr("  caches=%d queries=%u/%u, hits=%u+%u, tests=%u+%u, adds=%u",
 275                   pc_desc_init,
 276                   pc_desc_queries, pc_desc_approx,
 277                   pc_desc_repeats, pc_desc_hits,
 278                   pc_desc_tests, pc_desc_searches, pc_desc_adds);
 279   }
 280 };
 281 
 282 #ifdef COMPILER1
 283 static java_nmethod_stats_struct c1_java_nmethod_stats;
 284 #endif
 285 #ifdef COMPILER2
 286 static java_nmethod_stats_struct c2_java_nmethod_stats;
 287 #endif
 288 #if INCLUDE_JVMCI
 289 static java_nmethod_stats_struct jvmci_java_nmethod_stats;
 290 #endif
 291 static java_nmethod_stats_struct unknown_java_nmethod_stats;
 292 
 293 static native_nmethod_stats_struct native_nmethod_stats;
 294 static pc_nmethod_stats_struct pc_nmethod_stats;
 295 
 296 static void note_java_nmethod(nmethod* nm) {
 297 #ifdef COMPILER1
 298   if (nm->is_compiled_by_c1()) {
 299     c1_java_nmethod_stats.note_nmethod(nm);
 300   } else
 301 #endif
 302 #ifdef COMPILER2
 303   if (nm->is_compiled_by_c2()) {
 304     c2_java_nmethod_stats.note_nmethod(nm);
 305   } else
 306 #endif
 307 #if INCLUDE_JVMCI
 308   if (nm->is_compiled_by_jvmci()) {
 309     jvmci_java_nmethod_stats.note_nmethod(nm);
 310   } else
 311 #endif
 312   {
 313     unknown_java_nmethod_stats.note_nmethod(nm);
 314   }
 315 }
 316 #endif // !PRODUCT
 317 
 318 //---------------------------------------------------------------------------------
 319 
 320 
 321 ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
 322   assert(pc != nullptr, "Must be non null");
 323   assert(exception.not_null(), "Must be non null");
 324   assert(handler != nullptr, "Must be non null");
 325 
 326   _count = 0;
 327   _exception_type = exception->klass();
 328   _next = nullptr;
 329   _purge_list_next = nullptr;
 330 
 331   add_address_and_handler(pc,handler);
 332 }
 333 
 334 
 335 address ExceptionCache::match(Handle exception, address pc) {
 336   assert(pc != nullptr,"Must be non null");
 337   assert(exception.not_null(),"Must be non null");
 338   if (exception->klass() == exception_type()) {
 339     return (test_address(pc));
 340   }
 341 
 342   return nullptr;
 343 }
 344 
 345 
 346 bool ExceptionCache::match_exception_with_space(Handle exception) {
 347   assert(exception.not_null(),"Must be non null");
 348   if (exception->klass() == exception_type() && count() < cache_size) {
 349     return true;
 350   }
 351   return false;
 352 }
 353 
 354 
 355 address ExceptionCache::test_address(address addr) {
 356   int limit = count();
 357   for (int i = 0; i < limit; i++) {
 358     if (pc_at(i) == addr) {
 359       return handler_at(i);
 360     }
 361   }
 362   return nullptr;
 363 }
 364 
 365 
 366 bool ExceptionCache::add_address_and_handler(address addr, address handler) {
 367   if (test_address(addr) == handler) return true;
 368 
 369   int index = count();
 370   if (index < cache_size) {
 371     set_pc_at(index, addr);
 372     set_handler_at(index, handler);
 373     increment_count();
 374     return true;
 375   }
 376   return false;
 377 }
 378 
 379 ExceptionCache* ExceptionCache::next() {
 380   return Atomic::load(&_next);
 381 }
 382 
 383 void ExceptionCache::set_next(ExceptionCache *ec) {
 384   Atomic::store(&_next, ec);
 385 }
 386 
 387 //-----------------------------------------------------------------------------
 388 
 389 
 390 // Helper used by both find_pc_desc methods.
 391 static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
 392   NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
 393   if (!approximate) {
 394     return pc->pc_offset() == pc_offset;
 395   } else {
 396     // Do not look before the sentinel
 397     assert(pc_offset > PcDesc::lower_offset_limit, "illegal pc_offset");
 398     return pc_offset <= pc->pc_offset() && (pc-1)->pc_offset() < pc_offset;
 399   }
 400 }
 401 
 402 void PcDescCache::init_to(PcDesc* initial_pc_desc) {
 403   NOT_PRODUCT(++pc_nmethod_stats.pc_desc_init);
 404   // initialize the cache by filling it with benign (non-null) values
 405   assert(initial_pc_desc != nullptr && initial_pc_desc->pc_offset() == PcDesc::lower_offset_limit,
 406          "must start with a sentinel");
 407   for (int i = 0; i < cache_size; i++) {
 408     _pc_descs[i] = initial_pc_desc;
 409   }
 410 }
 411 
 412 PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
 413   // Note: one might think that caching the most recently
 414   // read value separately would be a win, but one would be
 415   // wrong.  When many threads are updating it, the cache
 416   // line it's in would bounce between caches, negating
 417   // any benefit.
 418 
 419   // In order to prevent race conditions do not load cache elements
 420   // repeatedly, but use a local copy:
 421   PcDesc* res;
 422 
 423   // Step one:  Check the most recently added value.
 424   res = _pc_descs[0];
 425   assert(res != nullptr, "PcDesc cache should be initialized already");
 426 
 427   // Approximate only here since PcDescContainer::find_pc_desc() checked for exact case.
 428   if (approximate && match_desc(res, pc_offset, approximate)) {
 429     NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
 430     return res;
 431   }
 432 
 433   // Step two:  Check the rest of the LRU cache.
 434   for (int i = 1; i < cache_size; ++i) {
 435     res = _pc_descs[i];
 436     if (res->pc_offset() < 0) break;  // optimization: skip empty cache
 437     if (match_desc(res, pc_offset, approximate)) {
 438       NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits);
 439       return res;
 440     }
 441   }
 442 
 443   // Report failure.
 444   return nullptr;
 445 }
 446 
 447 void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
 448   NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
 449   // Update the LRU cache by shifting pc_desc forward.
 450   for (int i = 0; i < cache_size; i++)  {
 451     PcDesc* next = _pc_descs[i];
 452     _pc_descs[i] = pc_desc;
 453     pc_desc = next;
 454   }
 455 }
 456 
 457 // adjust pcs_size so that it is a multiple of both oopSize and
 458 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
 459 // of oopSize, then 2*sizeof(PcDesc) is)
 460 static int adjust_pcs_size(int pcs_size) {
 461   int nsize = align_up(pcs_size,   oopSize);
 462   if ((nsize % sizeof(PcDesc)) != 0) {
 463     nsize = pcs_size + sizeof(PcDesc);
 464   }
 465   assert((nsize % oopSize) == 0, "correct alignment");
 466   return nsize;
 467 }
 468 
 469 bool nmethod::is_method_handle_return(address return_pc) {
 470   if (!has_method_handle_invokes())  return false;
 471   PcDesc* pd = pc_desc_at(return_pc);
 472   if (pd == nullptr)
 473     return false;
 474   return pd->is_method_handle_invoke();
 475 }
 476 
 477 // Returns a string version of the method state.
 478 const char* nmethod::state() const {
 479   int state = get_state();
 480   switch (state) {
 481   case not_installed:
 482     return "not installed";
 483   case in_use:
 484     return "in use";
 485   case not_entrant:
 486     return "not_entrant";
 487   default:
 488     fatal("unexpected method state: %d", state);
 489     return nullptr;
 490   }
 491 }
 492 
 493 void nmethod::set_deoptimized_done() {
 494   ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
 495   if (_deoptimization_status != deoptimize_done) { // can't go backwards
 496     Atomic::store(&_deoptimization_status, deoptimize_done);
 497   }
 498 }
 499 
 500 ExceptionCache* nmethod::exception_cache_acquire() const {
 501   return Atomic::load_acquire(&_exception_cache);
 502 }
 503 
 504 void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
 505   assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
 506   assert(new_entry != nullptr,"Must be non null");
 507   assert(new_entry->next() == nullptr, "Must be null");
 508 
 509   for (;;) {
 510     ExceptionCache *ec = exception_cache();
 511     if (ec != nullptr) {
 512       Klass* ex_klass = ec->exception_type();
 513       if (!ex_klass->is_loader_alive()) {
 514         // We must guarantee that entries are not inserted with new next pointer
 515         // edges to ExceptionCache entries with dead klasses, due to bad interactions
 516         // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
 517         // the head pointer forward to the first live ExceptionCache, so that the new
 518         // next pointers always point at live ExceptionCaches, that are not removed due
 519         // to concurrent ExceptionCache cleanup.
 520         ExceptionCache* next = ec->next();
 521         if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
 522           CodeCache::release_exception_cache(ec);
 523         }
 524         continue;
 525       }
 526       ec = exception_cache();
 527       if (ec != nullptr) {
 528         new_entry->set_next(ec);
 529       }
 530     }
 531     if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
 532       return;
 533     }
 534   }
 535 }
 536 
 537 void nmethod::clean_exception_cache() {
 538   // For each nmethod, only a single thread may call this cleanup function
 539   // at the same time, whether called in STW cleanup or concurrent cleanup.
 540   // Note that if the GC is processing exception cache cleaning in a concurrent phase,
 541   // then a single writer may contend with cleaning up the head pointer to the
 542   // first ExceptionCache node that has a Klass* that is alive. That is fine,
 543   // as long as there is no concurrent cleanup of next pointers from concurrent writers.
 544   // And the concurrent writers do not clean up next pointers, only the head.
 545   // Also note that concurrent readers will walk through Klass* pointers that are not
 546   // alive. That does not cause ABA problems, because Klass* is deleted after
 547   // a handshake with all threads, after all stale ExceptionCaches have been
 548   // unlinked. That is also when the CodeCache::exception_cache_purge_list()
 549   // is deleted, with all ExceptionCache entries that were cleaned concurrently.
 550   // That similarly implies that CAS operations on ExceptionCache entries do not
 551   // suffer from ABA problems as unlinking and deletion is separated by a global
 552   // handshake operation.
 553   ExceptionCache* prev = nullptr;
 554   ExceptionCache* curr = exception_cache_acquire();
 555 
 556   while (curr != nullptr) {
 557     ExceptionCache* next = curr->next();
 558 
 559     if (!curr->exception_type()->is_loader_alive()) {
 560       if (prev == nullptr) {
 561         // Try to clean head; this is contended by concurrent inserts, that
 562         // both lazily clean the head, and insert entries at the head. If
 563         // the CAS fails, the operation is restarted.
 564         if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
 565           prev = nullptr;
 566           curr = exception_cache_acquire();
 567           continue;
 568         }
 569       } else {
 570         // It is impossible to during cleanup connect the next pointer to
 571         // an ExceptionCache that has not been published before a safepoint
 572         // prior to the cleanup. Therefore, release is not required.
 573         prev->set_next(next);
 574       }
 575       // prev stays the same.
 576 
 577       CodeCache::release_exception_cache(curr);
 578     } else {
 579       prev = curr;
 580     }
 581 
 582     curr = next;
 583   }
 584 }
 585 
 586 // public method for accessing the exception cache
 587 // These are the public access methods.
 588 address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
 589   // We never grab a lock to read the exception cache, so we may
 590   // have false negatives. This is okay, as it can only happen during
 591   // the first few exception lookups for a given nmethod.
 592   ExceptionCache* ec = exception_cache_acquire();
 593   while (ec != nullptr) {
 594     address ret_val;
 595     if ((ret_val = ec->match(exception,pc)) != nullptr) {
 596       return ret_val;
 597     }
 598     ec = ec->next();
 599   }
 600   return nullptr;
 601 }
 602 
 603 void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
 604   // There are potential race conditions during exception cache updates, so we
 605   // must own the ExceptionCache_lock before doing ANY modifications. Because
 606   // we don't lock during reads, it is possible to have several threads attempt
 607   // to update the cache with the same data. We need to check for already inserted
 608   // copies of the current data before adding it.
 609 
 610   MutexLocker ml(ExceptionCache_lock);
 611   ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
 612 
 613   if (target_entry == nullptr || !target_entry->add_address_and_handler(pc,handler)) {
 614     target_entry = new ExceptionCache(exception,pc,handler);
 615     add_exception_cache_entry(target_entry);
 616   }
 617 }
 618 
 619 // private method for handling exception cache
 620 // These methods are private, and used to manipulate the exception cache
 621 // directly.
 622 ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
 623   ExceptionCache* ec = exception_cache_acquire();
 624   while (ec != nullptr) {
 625     if (ec->match_exception_with_space(exception)) {
 626       return ec;
 627     }
 628     ec = ec->next();
 629   }
 630   return nullptr;
 631 }
 632 
 633 bool nmethod::is_at_poll_return(address pc) {
 634   RelocIterator iter(this, pc, pc+1);
 635   while (iter.next()) {
 636     if (iter.type() == relocInfo::poll_return_type)
 637       return true;
 638   }
 639   return false;
 640 }
 641 
 642 
 643 bool nmethod::is_at_poll_or_poll_return(address pc) {
 644   RelocIterator iter(this, pc, pc+1);
 645   while (iter.next()) {
 646     relocInfo::relocType t = iter.type();
 647     if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
 648       return true;
 649   }
 650   return false;
 651 }
 652 
 653 void nmethod::verify_oop_relocations() {
 654   // Ensure sure that the code matches the current oop values
 655   RelocIterator iter(this, nullptr, nullptr);
 656   while (iter.next()) {
 657     if (iter.type() == relocInfo::oop_type) {
 658       oop_Relocation* reloc = iter.oop_reloc();
 659       if (!reloc->oop_is_immediate()) {
 660         reloc->verify_oop_relocation();
 661       }
 662     }
 663   }
 664 }
 665 
 666 
 667 ScopeDesc* nmethod::scope_desc_at(address pc) {
 668   PcDesc* pd = pc_desc_at(pc);
 669   guarantee(pd != nullptr, "scope must be present");
 670   return new ScopeDesc(this, pd);
 671 }
 672 
 673 ScopeDesc* nmethod::scope_desc_near(address pc) {
 674   PcDesc* pd = pc_desc_near(pc);
 675   guarantee(pd != nullptr, "scope must be present");
 676   return new ScopeDesc(this, pd);
 677 }
 678 
 679 address nmethod::oops_reloc_begin() const {
 680   // If the method is not entrant then a JMP is plastered over the
 681   // first few bytes.  If an oop in the old code was there, that oop
 682   // should not get GC'd.  Skip the first few bytes of oops on
 683   // not-entrant methods.
 684   if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
 685       code_begin() + frame_complete_offset() >
 686       verified_entry_point() + NativeJump::instruction_size)
 687   {
 688     // If we have a frame_complete_offset after the native jump, then there
 689     // is no point trying to look for oops before that. This is a requirement
 690     // for being allowed to scan oops concurrently.
 691     return code_begin() + frame_complete_offset();
 692   }
 693 
 694   address low_boundary = verified_entry_point();
 695   if (!is_in_use()) {
 696     low_boundary += NativeJump::instruction_size;
 697     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
 698     // This means that the low_boundary is going to be a little too high.
 699     // This shouldn't matter, since oops of non-entrant methods are never used.
 700     // In fact, why are we bothering to look at oops in a non-entrant method??
 701   }
 702   return low_boundary;
 703 }
 704 
 705 // Method that knows how to preserve outgoing arguments at call. This method must be
 706 // called with a frame corresponding to a Java invoke
 707 void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
 708   if (method() == nullptr) {
 709     return;
 710   }
 711 
 712   // handle the case of an anchor explicitly set in continuation code that doesn't have a callee
 713   JavaThread* thread = reg_map->thread();
 714   if ((thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp())
 715       JVMTI_ONLY(|| (method()->is_continuation_enter_intrinsic() && thread->on_monitor_waited_event()))) {
 716     return;
 717   }
 718 
 719   if (!method()->is_native()) {
 720     address pc = fr.pc();
 721     bool has_receiver, has_appendix;
 722     Symbol* signature;
 723 
 724     // The method attached by JIT-compilers should be used, if present.
 725     // Bytecode can be inaccurate in such case.
 726     Method* callee = attached_method_before_pc(pc);
 727     if (callee != nullptr) {
 728       has_receiver = !(callee->access_flags().is_static());
 729       has_appendix = false;
 730       signature    = callee->signature();
 731 
 732       // If inline types are passed as fields, use the extended signature
 733       // which contains the types of all (oop) fields of the inline type.
 734       if (is_compiled_by_c2() && callee->has_scalarized_args()) {
 735         const GrowableArray<SigEntry>* sig = callee->adapter()->get_sig_cc();
 736         assert(sig != nullptr, "sig should never be null");
 737         TempNewSymbol tmp_sig = SigEntry::create_symbol(sig);
 738         has_receiver = false; // The extended signature contains the receiver type
 739         fr.oops_compiled_arguments_do(tmp_sig, has_receiver, has_appendix, reg_map, f);
 740         return;
 741       }
 742     } else {
 743       SimpleScopeDesc ssd(this, pc);
 744 
 745       Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
 746       has_receiver = call.has_receiver();
 747       has_appendix = call.has_appendix();
 748       signature    = call.signature();
 749     }
 750 
 751     fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
 752   } else if (method()->is_continuation_enter_intrinsic()) {
 753     // This method only calls Continuation.enter()
 754     Symbol* signature = vmSymbols::continuationEnter_signature();
 755     fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
 756   }
 757 }
 758 
 759 Method* nmethod::attached_method(address call_instr) {
 760   assert(code_contains(call_instr), "not part of the nmethod");
 761   RelocIterator iter(this, call_instr, call_instr + 1);
 762   while (iter.next()) {
 763     if (iter.addr() == call_instr) {
 764       switch(iter.type()) {
 765         case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
 766         case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
 767         case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
 768         default:                               break;
 769       }
 770     }
 771   }
 772   return nullptr; // not found
 773 }
 774 
 775 Method* nmethod::attached_method_before_pc(address pc) {
 776   if (NativeCall::is_call_before(pc)) {
 777     NativeCall* ncall = nativeCall_before(pc);
 778     return attached_method(ncall->instruction_address());
 779   }
 780   return nullptr; // not a call
 781 }
 782 
 783 void nmethod::clear_inline_caches() {
 784   assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint");
 785   RelocIterator iter(this);
 786   while (iter.next()) {
 787     iter.reloc()->clear_inline_cache();
 788   }
 789 }
 790 
 791 #ifdef ASSERT
 792 // Check class_loader is alive for this bit of metadata.
 793 class CheckClass : public MetadataClosure {
 794   void do_metadata(Metadata* md) {
 795     Klass* klass = nullptr;
 796     if (md->is_klass()) {
 797       klass = ((Klass*)md);
 798     } else if (md->is_method()) {
 799       klass = ((Method*)md)->method_holder();
 800     } else if (md->is_methodData()) {
 801       klass = ((MethodData*)md)->method()->method_holder();
 802     } else {
 803       md->print();
 804       ShouldNotReachHere();
 805     }
 806     assert(klass->is_loader_alive(), "must be alive");
 807   }
 808 };
 809 #endif // ASSERT
 810 
 811 
 812 static void clean_ic_if_metadata_is_dead(CompiledIC *ic) {
 813   ic->clean_metadata();
 814 }
 815 
 816 // Clean references to unloaded nmethods at addr from this one, which is not unloaded.
 817 template <typename CallsiteT>
 818 static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, nmethod* from,
 819                                          bool clean_all) {
 820   CodeBlob* cb = CodeCache::find_blob(callsite->destination());
 821   if (!cb->is_nmethod()) {
 822     return;
 823   }
 824   nmethod* nm = cb->as_nmethod();
 825   if (clean_all || !nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
 826     callsite->set_to_clean();
 827   }
 828 }
 829 
 830 // Cleans caches in nmethods that point to either classes that are unloaded
 831 // or nmethods that are unloaded.
 832 //
 833 // Can be called either in parallel by G1 currently or after all
 834 // nmethods are unloaded.  Return postponed=true in the parallel case for
 835 // inline caches found that point to nmethods that are not yet visited during
 836 // the do_unloading walk.
 837 void nmethod::unload_nmethod_caches(bool unloading_occurred) {
 838   ResourceMark rm;
 839 
 840   // Exception cache only needs to be called if unloading occurred
 841   if (unloading_occurred) {
 842     clean_exception_cache();
 843   }
 844 
 845   cleanup_inline_caches_impl(unloading_occurred, false);
 846 
 847 #ifdef ASSERT
 848   // Check that the metadata embedded in the nmethod is alive
 849   CheckClass check_class;
 850   metadata_do(&check_class);
 851 #endif
 852 }
 853 
 854 void nmethod::run_nmethod_entry_barrier() {
 855   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
 856   if (bs_nm != nullptr) {
 857     // We want to keep an invariant that nmethods found through iterations of a Thread's
 858     // nmethods found in safepoints have gone through an entry barrier and are not armed.
 859     // By calling this nmethod entry barrier, it plays along and acts
 860     // like any other nmethod found on the stack of a thread (fewer surprises).
 861     nmethod* nm = this;
 862     bool alive = bs_nm->nmethod_entry_barrier(nm);
 863     assert(alive, "should be alive");
 864   }
 865 }
 866 
 867 // Only called by whitebox test
 868 void nmethod::cleanup_inline_caches_whitebox() {
 869   assert_locked_or_safepoint(CodeCache_lock);
 870   CompiledICLocker ic_locker(this);
 871   cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */);
 872 }
 873 
 874 address* nmethod::orig_pc_addr(const frame* fr) {
 875   return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
 876 }
 877 
 878 // Called to clean up after class unloading for live nmethods
 879 void nmethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
 880   assert(CompiledICLocker::is_safe(this), "mt unsafe call");
 881   ResourceMark rm;
 882 
 883   // Find all calls in an nmethod and clear the ones that point to bad nmethods.
 884   RelocIterator iter(this, oops_reloc_begin());
 885   bool is_in_static_stub = false;
 886   while(iter.next()) {
 887 
 888     switch (iter.type()) {
 889 
 890     case relocInfo::virtual_call_type:
 891       if (unloading_occurred) {
 892         // If class unloading occurred we first clear ICs where the cached metadata
 893         // is referring to an unloaded klass or method.
 894         clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
 895       }
 896 
 897       clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
 898       break;
 899 
 900     case relocInfo::opt_virtual_call_type:
 901     case relocInfo::static_call_type:
 902       clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), this, clean_all);
 903       break;
 904 
 905     case relocInfo::static_stub_type: {
 906       is_in_static_stub = true;
 907       break;
 908     }
 909 
 910     case relocInfo::metadata_type: {
 911       // Only the metadata relocations contained in static/opt virtual call stubs
 912       // contains the Method* passed to c2i adapters. It is the only metadata
 913       // relocation that needs to be walked, as it is the one metadata relocation
 914       // that violates the invariant that all metadata relocations have an oop
 915       // in the compiled method (due to deferred resolution and code patching).
 916 
 917       // This causes dead metadata to remain in compiled methods that are not
 918       // unloading. Unless these slippery metadata relocations of the static
 919       // stubs are at least cleared, subsequent class redefinition operations
 920       // will access potentially free memory, and JavaThread execution
 921       // concurrent to class unloading may call c2i adapters with dead methods.
 922       if (!is_in_static_stub) {
 923         // The first metadata relocation after a static stub relocation is the
 924         // metadata relocation of the static stub used to pass the Method* to
 925         // c2i adapters.
 926         continue;
 927       }
 928       is_in_static_stub = false;
 929       if (is_unloading()) {
 930         // If the nmethod itself is dying, then it may point at dead metadata.
 931         // Nobody should follow that metadata; it is strictly unsafe.
 932         continue;
 933       }
 934       metadata_Relocation* r = iter.metadata_reloc();
 935       Metadata* md = r->metadata_value();
 936       if (md != nullptr && md->is_method()) {
 937         Method* method = static_cast<Method*>(md);
 938         if (!method->method_holder()->is_loader_alive()) {
 939           Atomic::store(r->metadata_addr(), (Method*)nullptr);
 940 
 941           if (!r->metadata_is_immediate()) {
 942             r->fix_metadata_relocation();
 943           }
 944         }
 945       }
 946       break;
 947     }
 948 
 949     default:
 950       break;
 951     }
 952   }
 953 }
 954 
 955 address nmethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
 956   // Exception happened outside inline-cache check code => we are inside
 957   // an active nmethod => use cpc to determine a return address
 958   int exception_offset = int(pc - code_begin());
 959   int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset );
 960 #ifdef ASSERT
 961   if (cont_offset == 0) {
 962     Thread* thread = Thread::current();
 963     ResourceMark rm(thread);
 964     CodeBlob* cb = CodeCache::find_blob(pc);
 965     assert(cb != nullptr && cb == this, "");
 966 
 967     // Keep tty output consistent. To avoid ttyLocker, we buffer in stream, and print all at once.
 968     stringStream ss;
 969     ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
 970     print_on(&ss);
 971     method()->print_codes_on(&ss);
 972     print_code_on(&ss);
 973     print_pcs_on(&ss);
 974     tty->print("%s", ss.as_string()); // print all at once
 975   }
 976 #endif
 977   if (cont_offset == 0) {
 978     // Let the normal error handling report the exception
 979     return nullptr;
 980   }
 981   if (cont_offset == exception_offset) {
 982 #if INCLUDE_JVMCI
 983     Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
 984     JavaThread *thread = JavaThread::current();
 985     thread->set_jvmci_implicit_exception_pc(pc);
 986     thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
 987                                                                          Deoptimization::Action_reinterpret));
 988     return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
 989 #else
 990     ShouldNotReachHere();
 991 #endif
 992   }
 993   return code_begin() + cont_offset;
 994 }
 995 
 996 class HasEvolDependency : public MetadataClosure {
 997   bool _has_evol_dependency;
 998  public:
 999   HasEvolDependency() : _has_evol_dependency(false) {}
1000   void do_metadata(Metadata* md) {
1001     if (md->is_method()) {
1002       Method* method = (Method*)md;
1003       if (method->is_old()) {
1004         _has_evol_dependency = true;
1005       }
1006     }
1007   }
1008   bool has_evol_dependency() const { return _has_evol_dependency; }
1009 };
1010 
1011 bool nmethod::has_evol_metadata() {
1012   // Check the metadata in relocIter and CompiledIC and also deoptimize
1013   // any nmethod that has reference to old methods.
1014   HasEvolDependency check_evol;
1015   metadata_do(&check_evol);
1016   if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
1017     ResourceMark rm;
1018     log_debug(redefine, class, nmethod)
1019             ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on in nmethod metadata",
1020              _method->method_holder()->external_name(),
1021              _method->name()->as_C_string(),
1022              _method->signature()->as_C_string(),
1023              compile_id());
1024   }
1025   return check_evol.has_evol_dependency();
1026 }
1027 
1028 int nmethod::total_size() const {
1029   return
1030     consts_size()        +
1031     insts_size()         +
1032     stub_size()          +
1033     scopes_data_size()   +
1034     scopes_pcs_size()    +
1035     handler_table_size() +
1036     nul_chk_table_size();
1037 }
1038 
1039 const char* nmethod::compile_kind() const {
1040   if (is_osr_method())     return "osr";
1041   if (method() != nullptr && is_native_method()) {
1042     if (method()->is_continuation_native_intrinsic()) {
1043       return "cnt";
1044     }
1045     return "c2n";
1046   }
1047   return nullptr;
1048 }
1049 
1050 const char* nmethod::compiler_name() const {
1051   return compilertype2name(_compiler_type);
1052 }
1053 
1054 #ifdef ASSERT
1055 class CheckForOopsClosure : public OopClosure {
1056   bool _found_oop = false;
1057  public:
1058   virtual void do_oop(oop* o) { _found_oop = true; }
1059   virtual void do_oop(narrowOop* o) { _found_oop = true; }
1060   bool found_oop() { return _found_oop; }
1061 };
1062 class CheckForMetadataClosure : public MetadataClosure {
1063   bool _found_metadata = false;
1064   Metadata* _ignore = nullptr;
1065  public:
1066   CheckForMetadataClosure(Metadata* ignore) : _ignore(ignore) {}
1067   virtual void do_metadata(Metadata* md) { if (md != _ignore) _found_metadata = true; }
1068   bool found_metadata() { return _found_metadata; }
1069 };
1070 
1071 static void assert_no_oops_or_metadata(nmethod* nm) {
1072   if (nm == nullptr) return;
1073   assert(nm->oop_maps() == nullptr, "expectation");
1074 
1075   CheckForOopsClosure cfo;
1076   nm->oops_do(&cfo);
1077   assert(!cfo.found_oop(), "no oops allowed");
1078 
1079   // We allow an exception for the own Method, but require its class to be permanent.
1080   Method* own_method = nm->method();
1081   CheckForMetadataClosure cfm(/* ignore reference to own Method */ own_method);
1082   nm->metadata_do(&cfm);
1083   assert(!cfm.found_metadata(), "no metadata allowed");
1084 
1085   assert(own_method->method_holder()->class_loader_data()->is_permanent_class_loader_data(),
1086          "Method's class needs to be permanent");
1087 }
1088 #endif
1089 
1090 static int required_mutable_data_size(CodeBuffer* code_buffer,
1091                                       int jvmci_data_size = 0) {
1092   return align_up(code_buffer->total_relocation_size(), oopSize) +
1093          align_up(code_buffer->total_metadata_size(), oopSize) +
1094          align_up(jvmci_data_size, oopSize);
1095 }
1096 
1097 nmethod* nmethod::new_native_nmethod(const methodHandle& method,
1098   int compile_id,
1099   CodeBuffer *code_buffer,
1100   int vep_offset,
1101   int frame_complete,
1102   int frame_size,
1103   ByteSize basic_lock_owner_sp_offset,
1104   ByteSize basic_lock_sp_offset,
1105   OopMapSet* oop_maps,
1106   int exception_handler) {
1107   code_buffer->finalize_oop_references(method);
1108   // create nmethod
1109   nmethod* nm = nullptr;
1110   int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1111   {
1112     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1113 
1114     CodeOffsets offsets;
1115     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
1116     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
1117     if (exception_handler != -1) {
1118       offsets.set_value(CodeOffsets::Exceptions, exception_handler);
1119     }
1120 
1121     int mutable_data_size = required_mutable_data_size(code_buffer);
1122 
1123     // MH intrinsics are dispatch stubs which are compatible with NonNMethod space.
1124     // IsUnloadingBehaviour::is_unloading needs to handle them separately.
1125     bool allow_NonNMethod_space = method->can_be_allocated_in_NonNMethod_space();
1126     nm = new (native_nmethod_size, allow_NonNMethod_space)
1127     nmethod(method(), compiler_none, native_nmethod_size,
1128             compile_id, &offsets,
1129             code_buffer, frame_size,
1130             basic_lock_owner_sp_offset,
1131             basic_lock_sp_offset,
1132             oop_maps, mutable_data_size);
1133     DEBUG_ONLY( if (allow_NonNMethod_space) assert_no_oops_or_metadata(nm); )
1134     NOT_PRODUCT(if (nm != nullptr) native_nmethod_stats.note_native_nmethod(nm));
1135   }
1136 
1137   if (nm != nullptr) {
1138     // verify nmethod
1139     DEBUG_ONLY(nm->verify();) // might block
1140 
1141     nm->log_new_nmethod();
1142   }
1143   return nm;
1144 }
1145 
1146 nmethod* nmethod::new_nmethod(const methodHandle& method,
1147   int compile_id,
1148   int entry_bci,
1149   CodeOffsets* offsets,
1150   int orig_pc_offset,
1151   DebugInformationRecorder* debug_info,
1152   Dependencies* dependencies,
1153   CodeBuffer* code_buffer, int frame_size,
1154   OopMapSet* oop_maps,
1155   ExceptionHandlerTable* handler_table,
1156   ImplicitExceptionTable* nul_chk_table,
1157   AbstractCompiler* compiler,
1158   CompLevel comp_level
1159 #if INCLUDE_JVMCI
1160   , char* speculations,
1161   int speculations_len,
1162   JVMCINMethodData* jvmci_data
1163 #endif
1164 )
1165 {
1166   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1167   code_buffer->finalize_oop_references(method);
1168   // create nmethod
1169   nmethod* nm = nullptr;
1170   int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
1171 
1172   int immutable_data_size =
1173       adjust_pcs_size(debug_info->pcs_size())
1174     + align_up((int)dependencies->size_in_bytes(), oopSize)
1175     + align_up(handler_table->size_in_bytes()    , oopSize)
1176     + align_up(nul_chk_table->size_in_bytes()    , oopSize)
1177 #if INCLUDE_JVMCI
1178     + align_up(speculations_len                  , oopSize)
1179 #endif
1180     + align_up(debug_info->data_size()           , oopSize);
1181 
1182   // First, allocate space for immutable data in C heap.
1183   address immutable_data = nullptr;
1184   if (immutable_data_size > 0) {
1185     immutable_data = (address)os::malloc(immutable_data_size, mtCode);
1186     if (immutable_data == nullptr) {
1187       vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data");
1188       return nullptr;
1189     }
1190   }
1191 
1192   int mutable_data_size = required_mutable_data_size(code_buffer
1193     JVMCI_ONLY(COMMA (compiler->is_jvmci() ? jvmci_data->size() : 0)));
1194 
1195   {
1196     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1197 
1198     nm = new (nmethod_size, comp_level)
1199     nmethod(method(), compiler->type(), nmethod_size, immutable_data_size, mutable_data_size,
1200             compile_id, entry_bci, immutable_data, offsets, orig_pc_offset,
1201             debug_info, dependencies, code_buffer, frame_size, oop_maps,
1202             handler_table, nul_chk_table, compiler, comp_level
1203 #if INCLUDE_JVMCI
1204             , speculations,
1205             speculations_len,
1206             jvmci_data
1207 #endif
1208             );
1209 
1210     if (nm != nullptr) {
1211       // To make dependency checking during class loading fast, record
1212       // the nmethod dependencies in the classes it is dependent on.
1213       // This allows the dependency checking code to simply walk the
1214       // class hierarchy above the loaded class, checking only nmethods
1215       // which are dependent on those classes.  The slow way is to
1216       // check every nmethod for dependencies which makes it linear in
1217       // the number of methods compiled.  For applications with a lot
1218       // classes the slow way is too slow.
1219       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1220         if (deps.type() == Dependencies::call_site_target_value) {
1221           // CallSite dependencies are managed on per-CallSite instance basis.
1222           oop call_site = deps.argument_oop(0);
1223           MethodHandles::add_dependent_nmethod(call_site, nm);
1224         } else {
1225           InstanceKlass* ik = deps.context_type();
1226           if (ik == nullptr) {
1227             continue;  // ignore things like evol_method
1228           }
1229           // record this nmethod as dependent on this klass
1230           ik->add_dependent_nmethod(nm);
1231         }
1232       }
1233       NOT_PRODUCT(if (nm != nullptr)  note_java_nmethod(nm));
1234     }
1235   }
1236   // Do verification and logging outside CodeCache_lock.
1237   if (nm != nullptr) {
1238     // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
1239     DEBUG_ONLY(nm->verify();)
1240     nm->log_new_nmethod();
1241   }
1242   return nm;
1243 }
1244 
1245 // Fill in default values for various fields
1246 void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) {
1247   // avoid uninitialized fields, even for short time periods
1248   _exception_cache            = nullptr;
1249   _gc_data                    = nullptr;
1250   _oops_do_mark_link          = nullptr;
1251   _compiled_ic_data           = nullptr;
1252 
1253   _is_unloading_state         = 0;
1254   _state                      = not_installed;
1255 
1256   _has_unsafe_access          = 0;
1257   _has_method_handle_invokes  = 0;
1258   _has_wide_vectors           = 0;
1259   _has_monitors               = 0;
1260   _has_scoped_access          = 0;
1261   _has_flushed_dependencies   = 0;
1262   _is_unlinked                = 0;
1263   _load_reported              = 0; // jvmti state
1264 
1265   _deoptimization_status      = not_marked;
1266 
1267   // SECT_CONSTS is first in code buffer so the offset should be 0.
1268   int consts_offset = code_buffer->total_offset_of(code_buffer->consts());
1269   assert(consts_offset == 0, "const_offset: %d", consts_offset);
1270 
1271   _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
1272 
1273   CHECKED_CAST(_entry_offset,              uint16_t, (offsets->value(CodeOffsets::Entry)));
1274   CHECKED_CAST(_verified_entry_offset,     uint16_t, (offsets->value(CodeOffsets::Verified_Entry)));
1275 
1276   _inline_entry_point             = entry_point();
1277   _verified_inline_entry_point    = verified_entry_point();
1278   _verified_inline_ro_entry_point = verified_entry_point();
1279 
1280   _skipped_instructions_size = code_buffer->total_skipped_instructions_size();
1281 }
1282 
1283 // Post initialization
1284 void nmethod::post_init() {
1285   clear_unloading_state();
1286 
1287   finalize_relocations();
1288 
1289   Universe::heap()->register_nmethod(this);
1290   DEBUG_ONLY(Universe::heap()->verify_nmethod(this));
1291 
1292   CodeCache::commit(this);
1293 }
1294 
1295 // For native wrappers
1296 nmethod::nmethod(
1297   Method* method,
1298   CompilerType type,
1299   int nmethod_size,
1300   int compile_id,
1301   CodeOffsets* offsets,
1302   CodeBuffer* code_buffer,
1303   int frame_size,
1304   ByteSize basic_lock_owner_sp_offset,
1305   ByteSize basic_lock_sp_offset,
1306   OopMapSet* oop_maps,
1307   int mutable_data_size)
1308   : CodeBlob("native nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1309              offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1310   _deoptimization_generation(0),
1311   _gc_epoch(CodeCache::gc_epoch()),
1312   _method(method),
1313   _native_receiver_sp_offset(basic_lock_owner_sp_offset),
1314   _native_basic_lock_sp_offset(basic_lock_sp_offset)
1315 {
1316   {
1317     DEBUG_ONLY(NoSafepointVerifier nsv;)
1318     assert_locked_or_safepoint(CodeCache_lock);
1319     assert(!method->has_scalarized_args(), "scalarized native wrappers not supported yet");
1320     init_defaults(code_buffer, offsets);
1321 
1322     _osr_entry_point         = nullptr;
1323     _pc_desc_container       = nullptr;
1324     _entry_bci               = InvocationEntryBci;
1325     _compile_id              = compile_id;
1326     _comp_level              = CompLevel_none;
1327     _compiler_type           = type;
1328     _orig_pc_offset          = 0;
1329     _num_stack_arg_slots     = 0;
1330 
1331     if (offsets->value(CodeOffsets::Exceptions) != -1) {
1332       // Continuation enter intrinsic
1333       _exception_offset      = code_offset() + offsets->value(CodeOffsets::Exceptions);
1334     } else {
1335       _exception_offset      = 0;
1336     }
1337     // Native wrappers do not have deopt handlers. Make the values
1338     // something that will never match a pc like the nmethod vtable entry
1339     _deopt_handler_offset    = 0;
1340     _deopt_mh_handler_offset = 0;
1341     _unwind_handler_offset   = 0;
1342 
1343     CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1344     uint16_t metadata_size;
1345     CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1346     JVMCI_ONLY( _metadata_size = metadata_size; )
1347     assert(_mutable_data_size == _relocation_size + metadata_size,
1348            "wrong mutable data size: %d != %d + %d",
1349            _mutable_data_size, _relocation_size, metadata_size);
1350 
1351     // native wrapper does not have read-only data but we need unique not null address
1352     _immutable_data          = blob_end();
1353     _immutable_data_size     = 0;
1354     _nul_chk_table_offset    = 0;
1355     _handler_table_offset    = 0;
1356     _scopes_pcs_offset       = 0;
1357     _scopes_data_offset      = 0;
1358 #if INCLUDE_JVMCI
1359     _speculations_offset     = 0;
1360 #endif
1361 
1362     code_buffer->copy_code_and_locs_to(this);
1363     code_buffer->copy_values_to(this);
1364 
1365     post_init();
1366   }
1367 
1368   if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
1369     ttyLocker ttyl;  // keep the following output all in one block
1370     // This output goes directly to the tty, not the compiler log.
1371     // To enable tools to match it up with the compilation activity,
1372     // be sure to tag this tty output with the compile ID.
1373     if (xtty != nullptr) {
1374       xtty->begin_head("print_native_nmethod");
1375       xtty->method(_method);
1376       xtty->stamp();
1377       xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
1378     }
1379     // Print the header part, then print the requested information.
1380     // This is both handled in decode2(), called via print_code() -> decode()
1381     if (PrintNativeNMethods) {
1382       tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------");
1383       print_code();
1384       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1385 #if defined(SUPPORT_DATA_STRUCTS)
1386       if (AbstractDisassembler::show_structs()) {
1387         if (oop_maps != nullptr) {
1388           tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning
1389           oop_maps->print_on(tty);
1390           tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1391         }
1392       }
1393 #endif
1394     } else {
1395       print(); // print the header part only.
1396     }
1397 #if defined(SUPPORT_DATA_STRUCTS)
1398     if (AbstractDisassembler::show_structs()) {
1399       if (PrintRelocations) {
1400         print_relocations();
1401         tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1402       }
1403     }
1404 #endif
1405     if (xtty != nullptr) {
1406       xtty->tail("print_native_nmethod");
1407     }
1408   }
1409 }
1410 
1411 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
1412   return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
1413 }
1414 
1415 void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () {
1416   // Try MethodNonProfiled and MethodProfiled.
1417   void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled);
1418   if (return_value != nullptr || !allow_NonNMethod_space) return return_value;
1419   // Try NonNMethod or give up.
1420   return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);
1421 }
1422 
1423 // For normal JIT compiled code
1424 nmethod::nmethod(
1425   Method* method,
1426   CompilerType type,
1427   int nmethod_size,
1428   int immutable_data_size,
1429   int mutable_data_size,
1430   int compile_id,
1431   int entry_bci,
1432   address immutable_data,
1433   CodeOffsets* offsets,
1434   int orig_pc_offset,
1435   DebugInformationRecorder* debug_info,
1436   Dependencies* dependencies,
1437   CodeBuffer *code_buffer,
1438   int frame_size,
1439   OopMapSet* oop_maps,
1440   ExceptionHandlerTable* handler_table,
1441   ImplicitExceptionTable* nul_chk_table,
1442   AbstractCompiler* compiler,
1443   CompLevel comp_level
1444 #if INCLUDE_JVMCI
1445   , char* speculations,
1446   int speculations_len,
1447   JVMCINMethodData* jvmci_data
1448 #endif
1449   )
1450   : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
1451              offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
1452   _deoptimization_generation(0),
1453   _gc_epoch(CodeCache::gc_epoch()),
1454   _method(method),
1455   _osr_link(nullptr)
1456 {
1457   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
1458   {
1459     DEBUG_ONLY(NoSafepointVerifier nsv;)
1460     assert_locked_or_safepoint(CodeCache_lock);
1461 
1462     init_defaults(code_buffer, offsets);
1463 
1464     _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
1465     _entry_bci       = entry_bci;
1466     _compile_id      = compile_id;
1467     _comp_level      = comp_level;
1468     _compiler_type   = type;
1469     _orig_pc_offset  = orig_pc_offset;
1470 
1471     _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 0 : _method->constMethod()->num_stack_arg_slots();
1472 
1473     set_ctable_begin(header_begin() + content_offset());
1474 
1475 #if INCLUDE_JVMCI
1476     if (compiler->is_jvmci()) {
1477       // JVMCI might not produce any stub sections
1478       if (offsets->value(CodeOffsets::Exceptions) != -1) {
1479         _exception_offset        = code_offset() + offsets->value(CodeOffsets::Exceptions);
1480       } else {
1481         _exception_offset        = -1;
1482       }
1483       if (offsets->value(CodeOffsets::Deopt) != -1) {
1484         _deopt_handler_offset    = code_offset() + offsets->value(CodeOffsets::Deopt);
1485       } else {
1486         _deopt_handler_offset    = -1;
1487       }
1488       if (offsets->value(CodeOffsets::DeoptMH) != -1) {
1489         _deopt_mh_handler_offset = code_offset() + offsets->value(CodeOffsets::DeoptMH);
1490       } else {
1491         _deopt_mh_handler_offset = -1;
1492       }
1493     } else
1494 #endif
1495     {
1496       // Exception handler and deopt handler are in the stub section
1497       assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
1498       assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
1499 
1500       _exception_offset          = _stub_offset + offsets->value(CodeOffsets::Exceptions);
1501       _deopt_handler_offset      = _stub_offset + offsets->value(CodeOffsets::Deopt);
1502       if (offsets->value(CodeOffsets::DeoptMH) != -1) {
1503         _deopt_mh_handler_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
1504       } else {
1505         _deopt_mh_handler_offset = -1;
1506       }
1507     }
1508     if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
1509       // C1 generates UnwindHandler at the end of instructions section.
1510       // Calculate positive offset as distance between the start of stubs section
1511       // (which is also the end of instructions section) and the start of the handler.
1512       int unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
1513       CHECKED_CAST(_unwind_handler_offset, int16_t, (_stub_offset - unwind_handler_offset));
1514     } else {
1515       _unwind_handler_offset = -1;
1516     }
1517 
1518     CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
1519     uint16_t metadata_size;
1520     CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
1521     JVMCI_ONLY( _metadata_size = metadata_size; )
1522     int jvmci_data_size = 0 JVMCI_ONLY( + align_up(compiler->is_jvmci() ? jvmci_data->size() : 0, oopSize));
1523     _inline_entry_point             = code_begin() + offsets->value(CodeOffsets::Inline_Entry);
1524     _verified_inline_entry_point    = code_begin() + offsets->value(CodeOffsets::Verified_Inline_Entry);
1525     _verified_inline_ro_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Inline_Entry_RO);
1526 
1527     assert(_mutable_data_size == _relocation_size + metadata_size + jvmci_data_size,
1528            "wrong mutable data size: %d != %d + %d + %d",
1529            _mutable_data_size, _relocation_size, metadata_size, jvmci_data_size);
1530     assert(nmethod_size == data_end() - header_begin(), "wrong nmethod size: %d != %d",
1531            nmethod_size, (int)(code_end() - header_begin()));
1532 
1533     _immutable_data_size  = immutable_data_size;
1534     if (immutable_data_size > 0) {
1535       assert(immutable_data != nullptr, "required");
1536       _immutable_data     = immutable_data;
1537     } else {
1538       // We need unique not null address
1539       _immutable_data     = blob_end();
1540     }
1541     CHECKED_CAST(_nul_chk_table_offset, uint16_t, (align_up((int)dependencies->size_in_bytes(), oopSize)));
1542     CHECKED_CAST(_handler_table_offset, uint16_t, (_nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize)));
1543     _scopes_pcs_offset    = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
1544     _scopes_data_offset   = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
1545 
1546 #if INCLUDE_JVMCI
1547     _speculations_offset  = _scopes_data_offset   + align_up(debug_info->data_size(), oopSize);
1548     DEBUG_ONLY( int immutable_data_end_offset = _speculations_offset  + align_up(speculations_len, oopSize); )
1549 #else
1550     DEBUG_ONLY( int immutable_data_end_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize); )
1551 #endif
1552     assert(immutable_data_end_offset <= immutable_data_size, "wrong read-only data size: %d > %d",
1553            immutable_data_end_offset, immutable_data_size);
1554 
1555     // Copy code and relocation info
1556     code_buffer->copy_code_and_locs_to(this);
1557     // Copy oops and metadata
1558     code_buffer->copy_values_to(this);
1559     dependencies->copy_to(this);
1560     // Copy PcDesc and ScopeDesc data
1561     debug_info->copy_to(this);
1562 
1563     // Create cache after PcDesc data is copied - it will be used to initialize cache
1564     _pc_desc_container = new PcDescContainer(scopes_pcs_begin());
1565 
1566 #if INCLUDE_JVMCI
1567     if (compiler->is_jvmci()) {
1568       // Initialize the JVMCINMethodData object inlined into nm
1569       jvmci_nmethod_data()->copy(jvmci_data);
1570     }
1571 #endif
1572 
1573     // Copy contents of ExceptionHandlerTable to nmethod
1574     handler_table->copy_to(this);
1575     nul_chk_table->copy_to(this);
1576 
1577 #if INCLUDE_JVMCI
1578     // Copy speculations to nmethod
1579     if (speculations_size() != 0) {
1580       memcpy(speculations_begin(), speculations, speculations_len);
1581     }
1582 #endif
1583 
1584     post_init();
1585 
1586     // we use the information of entry points to find out if a method is
1587     // static or non static
1588     assert(compiler->is_c2() || compiler->is_jvmci() ||
1589            _method->is_static() == (entry_point() == verified_entry_point()),
1590            " entry points must be same for static methods and vice versa");
1591   }
1592 }
1593 
1594 // Print a short set of xml attributes to identify this nmethod.  The
1595 // output should be embedded in some other element.
1596 void nmethod::log_identity(xmlStream* log) const {
1597   log->print(" compile_id='%d'", compile_id());
1598   const char* nm_kind = compile_kind();
1599   if (nm_kind != nullptr)  log->print(" compile_kind='%s'", nm_kind);
1600   log->print(" compiler='%s'", compiler_name());
1601   if (TieredCompilation) {
1602     log->print(" level='%d'", comp_level());
1603   }
1604 #if INCLUDE_JVMCI
1605   if (jvmci_nmethod_data() != nullptr) {
1606     const char* jvmci_name = jvmci_nmethod_data()->name();
1607     if (jvmci_name != nullptr) {
1608       log->print(" jvmci_mirror_name='");
1609       log->text("%s", jvmci_name);
1610       log->print("'");
1611     }
1612   }
1613 #endif
1614 }
1615 
1616 
1617 #define LOG_OFFSET(log, name)                    \
1618   if (p2i(name##_end()) - p2i(name##_begin())) \
1619     log->print(" " XSTR(name) "_offset='%zd'"    , \
1620                p2i(name##_begin()) - p2i(this))
1621 
1622 
1623 void nmethod::log_new_nmethod() const {
1624   if (LogCompilation && xtty != nullptr) {
1625     ttyLocker ttyl;
1626     xtty->begin_elem("nmethod");
1627     log_identity(xtty);
1628     xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
1629     xtty->print(" address='" INTPTR_FORMAT "'", p2i(this));
1630 
1631     LOG_OFFSET(xtty, relocation);
1632     LOG_OFFSET(xtty, consts);
1633     LOG_OFFSET(xtty, insts);
1634     LOG_OFFSET(xtty, stub);
1635     LOG_OFFSET(xtty, scopes_data);
1636     LOG_OFFSET(xtty, scopes_pcs);
1637     LOG_OFFSET(xtty, dependencies);
1638     LOG_OFFSET(xtty, handler_table);
1639     LOG_OFFSET(xtty, nul_chk_table);
1640     LOG_OFFSET(xtty, oops);
1641     LOG_OFFSET(xtty, metadata);
1642 
1643     xtty->method(method());
1644     xtty->stamp();
1645     xtty->end_elem();
1646   }
1647 }
1648 
1649 #undef LOG_OFFSET
1650 
1651 
1652 // Print out more verbose output usually for a newly created nmethod.
1653 void nmethod::print_on_with_msg(outputStream* st, const char* msg) const {
1654   if (st != nullptr) {
1655     ttyLocker ttyl;
1656     if (WizardMode) {
1657       CompileTask::print(st, this, msg, /*short_form:*/ true);
1658       st->print_cr(" (" INTPTR_FORMAT ")", p2i(this));
1659     } else {
1660       CompileTask::print(st, this, msg, /*short_form:*/ false);
1661     }
1662   }
1663 }
1664 
1665 void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
1666   bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
1667   if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
1668     print_nmethod(printnmethods);
1669   }
1670 }
1671 
1672 void nmethod::print_nmethod(bool printmethod) {
1673   // Enter a critical section to prevent a race with deopts that patch code and updates the relocation info.
1674   // Unfortunately, we have to lock the NMethodState_lock before the tty lock due to the deadlock rules and
1675   // cannot lock in a more finely grained manner.
1676   ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
1677   ttyLocker ttyl;  // keep the following output all in one block
1678   if (xtty != nullptr) {
1679     xtty->begin_head("print_nmethod");
1680     log_identity(xtty);
1681     xtty->stamp();
1682     xtty->end_head();
1683   }
1684   // Print the header part, then print the requested information.
1685   // This is both handled in decode2().
1686   if (printmethod) {
1687     ResourceMark m;
1688     if (is_compiled_by_c1()) {
1689       tty->cr();
1690       tty->print_cr("============================= C1-compiled nmethod ==============================");
1691     }
1692     if (is_compiled_by_jvmci()) {
1693       tty->cr();
1694       tty->print_cr("=========================== JVMCI-compiled nmethod =============================");
1695     }
1696     tty->print_cr("----------------------------------- Assembly -----------------------------------");
1697     decode2(tty);
1698 #if defined(SUPPORT_DATA_STRUCTS)
1699     if (AbstractDisassembler::show_structs()) {
1700       // Print the oops from the underlying CodeBlob as well.
1701       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1702       print_oops(tty);
1703       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1704       print_metadata(tty);
1705       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1706       print_pcs_on(tty);
1707       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1708       if (oop_maps() != nullptr) {
1709         tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning
1710         oop_maps()->print_on(tty);
1711         tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1712       }
1713     }
1714 #endif
1715   } else {
1716     print(); // print the header part only.
1717   }
1718 
1719 #if defined(SUPPORT_DATA_STRUCTS)
1720   if (AbstractDisassembler::show_structs()) {
1721     methodHandle mh(Thread::current(), _method);
1722     if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDebugInfo)) {
1723       print_scopes();
1724       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1725     }
1726     if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommandEnum::PrintRelocations)) {
1727       print_relocations();
1728       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1729     }
1730     if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDependencies)) {
1731       print_dependencies_on(tty);
1732       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1733     }
1734     if (printmethod || PrintExceptionHandlers) {
1735       print_handler_table();
1736       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1737       print_nul_chk_table();
1738       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1739     }
1740 
1741     if (printmethod) {
1742       print_recorded_oops();
1743       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1744       print_recorded_metadata();
1745       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
1746     }
1747   }
1748 #endif
1749 
1750   if (xtty != nullptr) {
1751     xtty->tail("print_nmethod");
1752   }
1753 }
1754 
1755 
1756 // Promote one word from an assembly-time handle to a live embedded oop.
1757 inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
1758   if (handle == nullptr ||
1759       // As a special case, IC oops are initialized to 1 or -1.
1760       handle == (jobject) Universe::non_oop_word()) {
1761     *(void**)dest = handle;
1762   } else {
1763     *dest = JNIHandles::resolve_non_null(handle);
1764   }
1765 }
1766 
1767 
1768 // Have to have the same name because it's called by a template
1769 void nmethod::copy_values(GrowableArray<jobject>* array) {
1770   int length = array->length();
1771   assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
1772   oop* dest = oops_begin();
1773   for (int index = 0 ; index < length; index++) {
1774     initialize_immediate_oop(&dest[index], array->at(index));
1775   }
1776 
1777   // Now we can fix up all the oops in the code.  We need to do this
1778   // in the code because the assembler uses jobjects as placeholders.
1779   // The code and relocations have already been initialized by the
1780   // CodeBlob constructor, so it is valid even at this early point to
1781   // iterate over relocations and patch the code.
1782   fix_oop_relocations(nullptr, nullptr, /*initialize_immediates=*/ true);
1783 }
1784 
1785 void nmethod::copy_values(GrowableArray<Metadata*>* array) {
1786   int length = array->length();
1787   assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
1788   Metadata** dest = metadata_begin();
1789   for (int index = 0 ; index < length; index++) {
1790     dest[index] = array->at(index);
1791   }
1792 }
1793 
1794 void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
1795   // re-patch all oop-bearing instructions, just in case some oops moved
1796   RelocIterator iter(this, begin, end);
1797   while (iter.next()) {
1798     if (iter.type() == relocInfo::oop_type) {
1799       oop_Relocation* reloc = iter.oop_reloc();
1800       if (initialize_immediates && reloc->oop_is_immediate()) {
1801         oop* dest = reloc->oop_addr();
1802         jobject obj = *reinterpret_cast<jobject*>(dest);
1803         initialize_immediate_oop(dest, obj);
1804       }
1805       // Refresh the oop-related bits of this instruction.
1806       reloc->fix_oop_relocation();
1807     } else if (iter.type() == relocInfo::metadata_type) {
1808       metadata_Relocation* reloc = iter.metadata_reloc();
1809       reloc->fix_metadata_relocation();
1810     }
1811   }
1812 }
1813 
1814 static void install_post_call_nop_displacement(nmethod* nm, address pc) {
1815   NativePostCallNop* nop = nativePostCallNop_at((address) pc);
1816   intptr_t cbaddr = (intptr_t) nm;
1817   intptr_t offset = ((intptr_t) pc) - cbaddr;
1818 
1819   int oopmap_slot = nm->oop_maps()->find_slot_for_offset(int((intptr_t) pc - (intptr_t) nm->code_begin()));
1820   if (oopmap_slot < 0) { // this can happen at asynchronous (non-safepoint) stackwalks
1821     log_debug(codecache)("failed to find oopmap for cb: " INTPTR_FORMAT " offset: %d", cbaddr, (int) offset);
1822   } else if (!nop->patch(oopmap_slot, offset)) {
1823     log_debug(codecache)("failed to encode %d %d", oopmap_slot, (int) offset);
1824   }
1825 }
1826 
1827 void nmethod::finalize_relocations() {
1828   NoSafepointVerifier nsv;
1829 
1830   GrowableArray<NativeMovConstReg*> virtual_call_data;
1831 
1832   // Make sure that post call nops fill in nmethod offsets eagerly so
1833   // we don't have to race with deoptimization
1834   RelocIterator iter(this);
1835   while (iter.next()) {
1836     if (iter.type() == relocInfo::virtual_call_type) {
1837       virtual_call_Relocation* r = iter.virtual_call_reloc();
1838       NativeMovConstReg* value = nativeMovConstReg_at(r->cached_value());
1839       virtual_call_data.append(value);
1840     } else if (iter.type() == relocInfo::post_call_nop_type) {
1841       post_call_nop_Relocation* const reloc = iter.post_call_nop_reloc();
1842       address pc = reloc->addr();
1843       install_post_call_nop_displacement(this, pc);
1844     }
1845   }
1846 
1847   if (virtual_call_data.length() > 0) {
1848     // We allocate a block of CompiledICData per nmethod so the GC can purge this faster.
1849     _compiled_ic_data = new CompiledICData[virtual_call_data.length()];
1850     CompiledICData* next_data = _compiled_ic_data;
1851 
1852     for (NativeMovConstReg* value : virtual_call_data) {
1853       value->set_data((intptr_t)next_data);
1854       next_data++;
1855     }
1856   }
1857 }
1858 
1859 void nmethod::make_deoptimized() {
1860   if (!Continuations::enabled()) {
1861     // Don't deopt this again.
1862     set_deoptimized_done();
1863     return;
1864   }
1865 
1866   assert(method() == nullptr || can_be_deoptimized(), "");
1867 
1868   CompiledICLocker ml(this);
1869   assert(CompiledICLocker::is_safe(this), "mt unsafe call");
1870 
1871   // If post call nops have been already patched, we can just bail-out.
1872   if (has_been_deoptimized()) {
1873     return;
1874   }
1875 
1876   ResourceMark rm;
1877   RelocIterator iter(this, oops_reloc_begin());
1878 
1879   while (iter.next()) {
1880 
1881     switch (iter.type()) {
1882       case relocInfo::virtual_call_type: {
1883         CompiledIC *ic = CompiledIC_at(&iter);
1884         address pc = ic->end_of_call();
1885         NativePostCallNop* nop = nativePostCallNop_at(pc);
1886         if (nop != nullptr) {
1887           nop->make_deopt();
1888         }
1889         assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
1890         break;
1891       }
1892       case relocInfo::static_call_type:
1893       case relocInfo::opt_virtual_call_type: {
1894         CompiledDirectCall *csc = CompiledDirectCall::at(iter.reloc());
1895         address pc = csc->end_of_call();
1896         NativePostCallNop* nop = nativePostCallNop_at(pc);
1897         //tty->print_cr(" - static pc %p", pc);
1898         if (nop != nullptr) {
1899           nop->make_deopt();
1900         }
1901         // We can't assert here, there are some calls to stubs / runtime
1902         // that have reloc data and doesn't have a post call NOP.
1903         //assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
1904         break;
1905       }
1906       default:
1907         break;
1908     }
1909   }
1910   // Don't deopt this again.
1911   set_deoptimized_done();
1912 }
1913 
1914 void nmethod::verify_clean_inline_caches() {
1915   assert(CompiledICLocker::is_safe(this), "mt unsafe call");
1916 
1917   ResourceMark rm;
1918   RelocIterator iter(this, oops_reloc_begin());
1919   while(iter.next()) {
1920     switch(iter.type()) {
1921       case relocInfo::virtual_call_type: {
1922         CompiledIC *ic = CompiledIC_at(&iter);
1923         CodeBlob *cb = CodeCache::find_blob(ic->destination());
1924         assert(cb != nullptr, "destination not in CodeBlob?");
1925         nmethod* nm = cb->as_nmethod_or_null();
1926         if (nm != nullptr) {
1927           // Verify that inline caches pointing to bad nmethods are clean
1928           if (!nm->is_in_use() || nm->is_unloading()) {
1929             assert(ic->is_clean(), "IC should be clean");
1930           }
1931         }
1932         break;
1933       }
1934       case relocInfo::static_call_type:
1935       case relocInfo::opt_virtual_call_type: {
1936         CompiledDirectCall *cdc = CompiledDirectCall::at(iter.reloc());
1937         CodeBlob *cb = CodeCache::find_blob(cdc->destination());
1938         assert(cb != nullptr, "destination not in CodeBlob?");
1939         nmethod* nm = cb->as_nmethod_or_null();
1940         if (nm != nullptr) {
1941           // Verify that inline caches pointing to bad nmethods are clean
1942           if (!nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
1943             assert(cdc->is_clean(), "IC should be clean");
1944           }
1945         }
1946         break;
1947       }
1948       default:
1949         break;
1950     }
1951   }
1952 }
1953 
1954 void nmethod::mark_as_maybe_on_stack() {
1955   Atomic::store(&_gc_epoch, CodeCache::gc_epoch());
1956 }
1957 
1958 bool nmethod::is_maybe_on_stack() {
1959   // If the condition below is true, it means that the nmethod was found to
1960   // be alive the previous completed marking cycle.
1961   return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
1962 }
1963 
1964 void nmethod::inc_decompile_count() {
1965   if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1966   // Could be gated by ProfileTraps, but do not bother...
1967   Method* m = method();
1968   if (m == nullptr)  return;
1969   MethodData* mdo = m->method_data();
1970   if (mdo == nullptr)  return;
1971   // There is a benign race here.  See comments in methodData.hpp.
1972   mdo->inc_decompile_count();
1973 }
1974 
1975 bool nmethod::try_transition(signed char new_state_int) {
1976   signed char new_state = new_state_int;
1977   assert_lock_strong(NMethodState_lock);
1978   signed char old_state = _state;
1979   if (old_state >= new_state) {
1980     // Ensure monotonicity of transitions.
1981     return false;
1982   }
1983   Atomic::store(&_state, new_state);
1984   return true;
1985 }
1986 
1987 void nmethod::invalidate_osr_method() {
1988   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1989   // Remove from list of active nmethods
1990   if (method() != nullptr) {
1991     method()->method_holder()->remove_osr_nmethod(this);
1992   }
1993 }
1994 
1995 void nmethod::log_state_change(const char* reason) const {
1996   assert(reason != nullptr, "Must provide a reason");
1997 
1998   if (LogCompilation) {
1999     if (xtty != nullptr) {
2000       ttyLocker ttyl;  // keep the following output all in one block
2001       xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'",
2002                        os::current_thread_id(), reason);
2003       log_identity(xtty);
2004       xtty->stamp();
2005       xtty->end_elem();
2006     }
2007   }
2008 
2009   ResourceMark rm;
2010   stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
2011   ss.print("made not entrant: %s", reason);
2012 
2013   CompileTask::print_ul(this, ss.freeze());
2014   if (PrintCompilation) {
2015     print_on_with_msg(tty, ss.freeze());
2016   }
2017 }
2018 
2019 void nmethod::unlink_from_method() {
2020   if (method() != nullptr) {
2021     method()->unlink_code(this);
2022   }
2023 }
2024 
2025 // Invalidate code
2026 bool nmethod::make_not_entrant(const char* reason) {
2027   assert(reason != nullptr, "Must provide a reason");
2028 
2029   // This can be called while the system is already at a safepoint which is ok
2030   NoSafepointVerifier nsv;
2031 
2032   if (is_unloading()) {
2033     // If the nmethod is unloading, then it is already not entrant through
2034     // the nmethod entry barriers. No need to do anything; GC will unload it.
2035     return false;
2036   }
2037 
2038   if (Atomic::load(&_state) == not_entrant) {
2039     // Avoid taking the lock if already in required state.
2040     // This is safe from races because the state is an end-state,
2041     // which the nmethod cannot back out of once entered.
2042     // No need for fencing either.
2043     return false;
2044   }
2045 
2046   {
2047     // Enter critical section.  Does not block for safepoint.
2048     ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
2049 
2050     if (Atomic::load(&_state) == not_entrant) {
2051       // another thread already performed this transition so nothing
2052       // to do, but return false to indicate this.
2053       return false;
2054     }
2055 
2056     if (is_osr_method()) {
2057       // This logic is equivalent to the logic below for patching the
2058       // verified entry point of regular methods.
2059       // this effectively makes the osr nmethod not entrant
2060       invalidate_osr_method();
2061     } else {
2062       // The caller can be calling the method statically or through an inline
2063       // cache call.
2064       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
2065                                        SharedRuntime::get_handle_wrong_method_stub());
2066 
2067       // Update the relocation info for the patched entry.
2068       // First, get the old relocation info...
2069       RelocIterator iter(this, verified_entry_point(), verified_entry_point() + 8);
2070       if (iter.next() && iter.addr() == verified_entry_point()) {
2071         Relocation* old_reloc = iter.reloc();
2072         // ...then reset the iterator to update it.
2073         RelocIterator iter(this, verified_entry_point(), verified_entry_point() + 8);
2074         relocInfo::change_reloc_info_for_address(&iter, verified_entry_point(), old_reloc->type(),
2075                                                  relocInfo::relocType::runtime_call_type);
2076       }
2077     }
2078 
2079     if (update_recompile_counts()) {
2080       // Mark the method as decompiled.
2081       inc_decompile_count();
2082     }
2083 
2084     BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2085     if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2086       // If nmethod entry barriers are not supported, we won't mark
2087       // nmethods as on-stack when they become on-stack. So we
2088       // degrade to a less accurate flushing strategy, for now.
2089       mark_as_maybe_on_stack();
2090     }
2091 
2092     // Change state
2093     bool success = try_transition(not_entrant);
2094     assert(success, "Transition can't fail");
2095 
2096     // Log the transition once
2097     log_state_change(reason);
2098 
2099     // Remove nmethod from method.
2100     unlink_from_method();
2101 
2102   } // leave critical region under NMethodState_lock
2103 
2104 #if INCLUDE_JVMCI
2105   // Invalidate can't occur while holding the NMethodState_lock
2106   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2107   if (nmethod_data != nullptr) {
2108     nmethod_data->invalidate_nmethod_mirror(this);
2109   }
2110 #endif
2111 
2112 #ifdef ASSERT
2113   if (is_osr_method() && method() != nullptr) {
2114     // Make sure osr nmethod is invalidated, i.e. not on the list
2115     bool found = method()->method_holder()->remove_osr_nmethod(this);
2116     assert(!found, "osr nmethod should have been invalidated");
2117   }
2118 #endif
2119 
2120   return true;
2121 }
2122 
2123 // For concurrent GCs, there must be a handshake between unlink and flush
2124 void nmethod::unlink() {
2125   if (is_unlinked()) {
2126     // Already unlinked.
2127     return;
2128   }
2129 
2130   flush_dependencies();
2131 
2132   // unlink_from_method will take the NMethodState_lock.
2133   // In this case we don't strictly need it when unlinking nmethods from
2134   // the Method, because it is only concurrently unlinked by
2135   // the entry barrier, which acquires the per nmethod lock.
2136   unlink_from_method();
2137 
2138   if (is_osr_method()) {
2139     invalidate_osr_method();
2140   }
2141 
2142 #if INCLUDE_JVMCI
2143   // Clear the link between this nmethod and a HotSpotNmethod mirror
2144   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2145   if (nmethod_data != nullptr) {
2146     nmethod_data->invalidate_nmethod_mirror(this);
2147   }
2148 #endif
2149 
2150   // Post before flushing as jmethodID is being used
2151   post_compiled_method_unload();
2152 
2153   // Register for flushing when it is safe. For concurrent class unloading,
2154   // that would be after the unloading handshake, and for STW class unloading
2155   // that would be when getting back to the VM thread.
2156   ClassUnloadingContext::context()->register_unlinked_nmethod(this);
2157 }
2158 
2159 void nmethod::purge(bool unregister_nmethod) {
2160 
2161   MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2162 
2163   // completely deallocate this method
2164   Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, is_osr_method() ? "osr" : "", p2i(this));
2165   log_debug(codecache)("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
2166                        "/Free CodeCache:%zuKb",
2167                        is_osr_method() ? "osr" : "",_compile_id, p2i(this), CodeCache::blob_count(),
2168                        CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
2169 
2170   // We need to deallocate any ExceptionCache data.
2171   // Note that we do not need to grab the nmethod lock for this, it
2172   // better be thread safe if we're disposing of it!
2173   ExceptionCache* ec = exception_cache();
2174   while(ec != nullptr) {
2175     ExceptionCache* next = ec->next();
2176     delete ec;
2177     ec = next;
2178   }
2179   if (_pc_desc_container != nullptr) {
2180     delete _pc_desc_container;
2181   }
2182   delete[] _compiled_ic_data;
2183 
2184   if (_immutable_data != blob_end()) {
2185     os::free(_immutable_data);
2186     _immutable_data = blob_end(); // Valid not null address
2187   }
2188   if (unregister_nmethod) {
2189     Universe::heap()->unregister_nmethod(this);
2190   }
2191   CodeCache::unregister_old_nmethod(this);
2192 
2193   CodeBlob::purge();
2194 }
2195 
2196 oop nmethod::oop_at(int index) const {
2197   if (index == 0) {
2198     return nullptr;
2199   }
2200 
2201   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2202   return bs_nm->oop_load_no_keepalive(this, index);
2203 }
2204 
2205 oop nmethod::oop_at_phantom(int index) const {
2206   if (index == 0) {
2207     return nullptr;
2208   }
2209 
2210   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2211   return bs_nm->oop_load_phantom(this, index);
2212 }
2213 
2214 //
2215 // Notify all classes this nmethod is dependent on that it is no
2216 // longer dependent.
2217 
2218 void nmethod::flush_dependencies() {
2219   if (!has_flushed_dependencies()) {
2220     set_has_flushed_dependencies(true);
2221     for (Dependencies::DepStream deps(this); deps.next(); ) {
2222       if (deps.type() == Dependencies::call_site_target_value) {
2223         // CallSite dependencies are managed on per-CallSite instance basis.
2224         oop call_site = deps.argument_oop(0);
2225         MethodHandles::clean_dependency_context(call_site);
2226       } else {
2227         InstanceKlass* ik = deps.context_type();
2228         if (ik == nullptr) {
2229           continue;  // ignore things like evol_method
2230         }
2231         // During GC liveness of dependee determines class that needs to be updated.
2232         // The GC may clean dependency contexts concurrently and in parallel.
2233         ik->clean_dependency_context();
2234       }
2235     }
2236   }
2237 }
2238 
2239 void nmethod::post_compiled_method(CompileTask* task) {
2240   task->mark_success();
2241   task->set_nm_content_size(content_size());
2242   task->set_nm_insts_size(insts_size());
2243   task->set_nm_total_size(total_size());
2244 
2245   // JVMTI -- compiled method notification (must be done outside lock)
2246   post_compiled_method_load_event();
2247 
2248   if (CompilationLog::log() != nullptr) {
2249     CompilationLog::log()->log_nmethod(JavaThread::current(), this);
2250   }
2251 
2252   const DirectiveSet* directive = task->directive();
2253   maybe_print_nmethod(directive);
2254 }
2255 
2256 // ------------------------------------------------------------------
2257 // post_compiled_method_load_event
2258 // new method for install_code() path
2259 // Transfer information from compilation to jvmti
2260 void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
2261   // This is a bad time for a safepoint.  We don't want
2262   // this nmethod to get unloaded while we're queueing the event.
2263   NoSafepointVerifier nsv;
2264 
2265   Method* m = method();
2266   HOTSPOT_COMPILED_METHOD_LOAD(
2267       (char *) m->klass_name()->bytes(),
2268       m->klass_name()->utf8_length(),
2269       (char *) m->name()->bytes(),
2270       m->name()->utf8_length(),
2271       (char *) m->signature()->bytes(),
2272       m->signature()->utf8_length(),
2273       insts_begin(), insts_size());
2274 
2275 
2276   if (JvmtiExport::should_post_compiled_method_load()) {
2277     // Only post unload events if load events are found.
2278     set_load_reported();
2279     // If a JavaThread hasn't been passed in, let the Service thread
2280     // (which is a real Java thread) post the event
2281     JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this);
2282     if (state == nullptr) {
2283       // Execute any barrier code for this nmethod as if it's called, since
2284       // keeping it alive looks like stack walking.
2285       run_nmethod_entry_barrier();
2286       ServiceThread::enqueue_deferred_event(&event);
2287     } else {
2288       // This enters the nmethod barrier outside in the caller.
2289       state->enqueue_event(&event);
2290     }
2291   }
2292 }
2293 
2294 void nmethod::post_compiled_method_unload() {
2295   assert(_method != nullptr, "just checking");
2296   DTRACE_METHOD_UNLOAD_PROBE(method());
2297 
2298   // If a JVMTI agent has enabled the CompiledMethodUnload event then
2299   // post the event. The Method* will not be valid when this is freed.
2300 
2301   // Don't bother posting the unload if the load event wasn't posted.
2302   if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) {
2303     JvmtiDeferredEvent event =
2304       JvmtiDeferredEvent::compiled_method_unload_event(
2305           method()->jmethod_id(), insts_begin());
2306     ServiceThread::enqueue_deferred_event(&event);
2307   }
2308 }
2309 
2310 // Iterate over metadata calling this function.   Used by RedefineClasses
2311 void nmethod::metadata_do(MetadataClosure* f) {
2312   {
2313     // Visit all immediate references that are embedded in the instruction stream.
2314     RelocIterator iter(this, oops_reloc_begin());
2315     while (iter.next()) {
2316       if (iter.type() == relocInfo::metadata_type) {
2317         metadata_Relocation* r = iter.metadata_reloc();
2318         // In this metadata, we must only follow those metadatas directly embedded in
2319         // the code.  Other metadatas (oop_index>0) are seen as part of
2320         // the metadata section below.
2321         assert(1 == (r->metadata_is_immediate()) +
2322                (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
2323                "metadata must be found in exactly one place");
2324         if (r->metadata_is_immediate() && r->metadata_value() != nullptr) {
2325           Metadata* md = r->metadata_value();
2326           if (md != _method) f->do_metadata(md);
2327         }
2328       } else if (iter.type() == relocInfo::virtual_call_type) {
2329         // Check compiledIC holders associated with this nmethod
2330         ResourceMark rm;
2331         CompiledIC *ic = CompiledIC_at(&iter);
2332         ic->metadata_do(f);
2333       }
2334     }
2335   }
2336 
2337   // Visit the metadata section
2338   for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
2339     if (*p == Universe::non_oop_word() || *p == nullptr)  continue;  // skip non-oops
2340     Metadata* md = *p;
2341     f->do_metadata(md);
2342   }
2343 
2344   // Visit metadata not embedded in the other places.
2345   if (_method != nullptr) f->do_metadata(_method);
2346 }
2347 
2348 // Heuristic for nuking nmethods even though their oops are live.
2349 // Main purpose is to reduce code cache pressure and get rid of
2350 // nmethods that don't seem to be all that relevant any longer.
2351 bool nmethod::is_cold() {
2352   if (!MethodFlushing || is_native_method() || is_not_installed()) {
2353     // No heuristic unloading at all
2354     return false;
2355   }
2356 
2357   if (!is_maybe_on_stack() && is_not_entrant()) {
2358     // Not entrant nmethods that are not on any stack can just
2359     // be removed
2360     return true;
2361   }
2362 
2363   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2364   if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2365     // On platforms that don't support nmethod entry barriers, we can't
2366     // trust the temporal aspect of the gc epochs. So we can't detect
2367     // cold nmethods on such platforms.
2368     return false;
2369   }
2370 
2371   if (!UseCodeCacheFlushing) {
2372     // Bail out if we don't heuristically remove nmethods
2373     return false;
2374   }
2375 
2376   // Other code can be phased out more gradually after N GCs
2377   return CodeCache::previous_completed_gc_marking_cycle() > _gc_epoch + 2 * CodeCache::cold_gc_count();
2378 }
2379 
2380 // The _is_unloading_state encodes a tuple comprising the unloading cycle
2381 // and the result of IsUnloadingBehaviour::is_unloading() for that cycle.
2382 // This is the bit layout of the _is_unloading_state byte: 00000CCU
2383 // CC refers to the cycle, which has 2 bits, and U refers to the result of
2384 // IsUnloadingBehaviour::is_unloading() for that unloading cycle.
2385 
2386 class IsUnloadingState: public AllStatic {
2387   static const uint8_t _is_unloading_mask = 1;
2388   static const uint8_t _is_unloading_shift = 0;
2389   static const uint8_t _unloading_cycle_mask = 6;
2390   static const uint8_t _unloading_cycle_shift = 1;
2391 
2392   static uint8_t set_is_unloading(uint8_t state, bool value) {
2393     state &= (uint8_t)~_is_unloading_mask;
2394     if (value) {
2395       state |= 1 << _is_unloading_shift;
2396     }
2397     assert(is_unloading(state) == value, "unexpected unloading cycle overflow");
2398     return state;
2399   }
2400 
2401   static uint8_t set_unloading_cycle(uint8_t state, uint8_t value) {
2402     state &= (uint8_t)~_unloading_cycle_mask;
2403     state |= (uint8_t)(value << _unloading_cycle_shift);
2404     assert(unloading_cycle(state) == value, "unexpected unloading cycle overflow");
2405     return state;
2406   }
2407 
2408 public:
2409   static bool is_unloading(uint8_t state) { return (state & _is_unloading_mask) >> _is_unloading_shift == 1; }
2410   static uint8_t unloading_cycle(uint8_t state) { return (state & _unloading_cycle_mask) >> _unloading_cycle_shift; }
2411 
2412   static uint8_t create(bool is_unloading, uint8_t unloading_cycle) {
2413     uint8_t state = 0;
2414     state = set_is_unloading(state, is_unloading);
2415     state = set_unloading_cycle(state, unloading_cycle);
2416     return state;
2417   }
2418 };
2419 
2420 bool nmethod::is_unloading() {
2421   uint8_t state = Atomic::load(&_is_unloading_state);
2422   bool state_is_unloading = IsUnloadingState::is_unloading(state);
2423   if (state_is_unloading) {
2424     return true;
2425   }
2426   uint8_t state_unloading_cycle = IsUnloadingState::unloading_cycle(state);
2427   uint8_t current_cycle = CodeCache::unloading_cycle();
2428   if (state_unloading_cycle == current_cycle) {
2429     return false;
2430   }
2431 
2432   // The IsUnloadingBehaviour is responsible for calculating if the nmethod
2433   // should be unloaded. This can be either because there is a dead oop,
2434   // or because is_cold() heuristically determines it is time to unload.
2435   state_unloading_cycle = current_cycle;
2436   state_is_unloading = IsUnloadingBehaviour::is_unloading(this);
2437   uint8_t new_state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);
2438 
2439   // Note that if an nmethod has dead oops, everyone will agree that the
2440   // nmethod is_unloading. However, the is_cold heuristics can yield
2441   // different outcomes, so we guard the computed result with a CAS
2442   // to ensure all threads have a shared view of whether an nmethod
2443   // is_unloading or not.
2444   uint8_t found_state = Atomic::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed);
2445 
2446   if (found_state == state) {
2447     // First to change state, we win
2448     return state_is_unloading;
2449   } else {
2450     // State already set, so use it
2451     return IsUnloadingState::is_unloading(found_state);
2452   }
2453 }
2454 
2455 void nmethod::clear_unloading_state() {
2456   uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
2457   Atomic::store(&_is_unloading_state, state);
2458 }
2459 
2460 
2461 // This is called at the end of the strong tracing/marking phase of a
2462 // GC to unload an nmethod if it contains otherwise unreachable
2463 // oops or is heuristically found to be not important.
2464 void nmethod::do_unloading(bool unloading_occurred) {
2465   // Make sure the oop's ready to receive visitors
2466   if (is_unloading()) {
2467     unlink();
2468   } else {
2469     unload_nmethod_caches(unloading_occurred);
2470     BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2471     if (bs_nm != nullptr) {
2472       bs_nm->disarm(this);
2473     }
2474   }
2475 }
2476 
2477 void nmethod::oops_do(OopClosure* f, bool allow_dead) {
2478   // Prevent extra code cache walk for platforms that don't have immediate oops.
2479   if (relocInfo::mustIterateImmediateOopsInCode()) {
2480     RelocIterator iter(this, oops_reloc_begin());
2481 
2482     while (iter.next()) {
2483       if (iter.type() == relocInfo::oop_type ) {
2484         oop_Relocation* r = iter.oop_reloc();
2485         // In this loop, we must only follow those oops directly embedded in
2486         // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
2487         assert(1 == (r->oop_is_immediate()) +
2488                (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
2489                "oop must be found in exactly one place");
2490         if (r->oop_is_immediate() && r->oop_value() != nullptr) {
2491           f->do_oop(r->oop_addr());
2492         }
2493       }
2494     }
2495   }
2496 
2497   // Scopes
2498   // This includes oop constants not inlined in the code stream.
2499   for (oop* p = oops_begin(); p < oops_end(); p++) {
2500     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
2501     f->do_oop(p);
2502   }
2503 }
2504 
2505 void nmethod::follow_nmethod(OopIterateClosure* cl) {
2506   // Process oops in the nmethod
2507   oops_do(cl);
2508 
2509   // CodeCache unloading support
2510   mark_as_maybe_on_stack();
2511 
2512   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2513   bs_nm->disarm(this);
2514 
2515   // There's an assumption made that this function is not used by GCs that
2516   // relocate objects, and therefore we don't call fix_oop_relocations.
2517 }
2518 
2519 nmethod* volatile nmethod::_oops_do_mark_nmethods;
2520 
2521 void nmethod::oops_do_log_change(const char* state) {
2522   LogTarget(Trace, gc, nmethod) lt;
2523   if (lt.is_enabled()) {
2524     LogStream ls(lt);
2525     CompileTask::print(&ls, this, state, true /* short_form */);
2526   }
2527 }
2528 
2529 bool nmethod::oops_do_try_claim() {
2530   if (oops_do_try_claim_weak_request()) {
2531     nmethod* result = oops_do_try_add_to_list_as_weak_done();
2532     assert(result == nullptr, "adding to global list as weak done must always succeed.");
2533     return true;
2534   }
2535   return false;
2536 }
2537 
2538 bool nmethod::oops_do_try_claim_weak_request() {
2539   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2540 
2541   if ((_oops_do_mark_link == nullptr) &&
2542       (Atomic::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) {
2543     oops_do_log_change("oops_do, mark weak request");
2544     return true;
2545   }
2546   return false;
2547 }
2548 
2549 void nmethod::oops_do_set_strong_done(nmethod* old_head) {
2550   _oops_do_mark_link = mark_link(old_head, claim_strong_done_tag);
2551 }
2552 
2553 nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() {
2554   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2555 
2556   oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, mark_link(nullptr, claim_weak_request_tag), mark_link(this, claim_strong_done_tag));
2557   if (old_next == nullptr) {
2558     oops_do_log_change("oops_do, mark strong done");
2559   }
2560   return old_next;
2561 }
2562 
2563 nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oops_do_mark_link* next) {
2564   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2565   assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak");
2566 
2567   oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag));
2568   if (old_next == next) {
2569     oops_do_log_change("oops_do, mark strong request");
2570   }
2571   return old_next;
2572 }
2573 
2574 bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_link* next) {
2575   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2576   assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done");
2577 
2578   oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag));
2579   if (old_next == next) {
2580     oops_do_log_change("oops_do, mark weak done -> mark strong done");
2581     return true;
2582   }
2583   return false;
2584 }
2585 
2586 nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
2587   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2588 
2589   assert(extract_state(_oops_do_mark_link) == claim_weak_request_tag ||
2590          extract_state(_oops_do_mark_link) == claim_strong_request_tag,
2591          "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
2592 
2593   nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
2594   // Self-loop if needed.
2595   if (old_head == nullptr) {
2596     old_head = this;
2597   }
2598   // Try to install end of list and weak done tag.
2599   if (Atomic::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) {
2600     oops_do_log_change("oops_do, mark weak done");
2601     return nullptr;
2602   } else {
2603     return old_head;
2604   }
2605 }
2606 
2607 void nmethod::oops_do_add_to_list_as_strong_done() {
2608   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
2609 
2610   nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
2611   // Self-loop if needed.
2612   if (old_head == nullptr) {
2613     old_head = this;
2614   }
2615   assert(_oops_do_mark_link == mark_link(this, claim_strong_done_tag), "must be but is nmethod " PTR_FORMAT " state %u",
2616          p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
2617 
2618   oops_do_set_strong_done(old_head);
2619 }
2620 
2621 void nmethod::oops_do_process_weak(OopsDoProcessor* p) {
2622   if (!oops_do_try_claim_weak_request()) {
2623     // Failed to claim for weak processing.
2624     oops_do_log_change("oops_do, mark weak request fail");
2625     return;
2626   }
2627 
2628   p->do_regular_processing(this);
2629 
2630   nmethod* old_head = oops_do_try_add_to_list_as_weak_done();
2631   if (old_head == nullptr) {
2632     return;
2633   }
2634   oops_do_log_change("oops_do, mark weak done fail");
2635   // Adding to global list failed, another thread added a strong request.
2636   assert(extract_state(_oops_do_mark_link) == claim_strong_request_tag,
2637          "must be but is %u", extract_state(_oops_do_mark_link));
2638 
2639   oops_do_log_change("oops_do, mark weak request -> mark strong done");
2640 
2641   oops_do_set_strong_done(old_head);
2642   // Do missing strong processing.
2643   p->do_remaining_strong_processing(this);
2644 }
2645 
2646 void nmethod::oops_do_process_strong(OopsDoProcessor* p) {
2647   oops_do_mark_link* next_raw = oops_do_try_claim_strong_done();
2648   if (next_raw == nullptr) {
2649     p->do_regular_processing(this);
2650     oops_do_add_to_list_as_strong_done();
2651     return;
2652   }
2653   // Claim failed. Figure out why and handle it.
2654   if (oops_do_has_weak_request(next_raw)) {
2655     oops_do_mark_link* old = next_raw;
2656     // Claim failed because being weak processed (state == "weak request").
2657     // Try to request deferred strong processing.
2658     next_raw = oops_do_try_add_strong_request(old);
2659     if (next_raw == old) {
2660       // Successfully requested deferred strong processing.
2661       return;
2662     }
2663     // Failed because of a concurrent transition. No longer in "weak request" state.
2664   }
2665   if (oops_do_has_any_strong_state(next_raw)) {
2666     // Already claimed for strong processing or requested for such.
2667     return;
2668   }
2669   if (oops_do_try_claim_weak_done_as_strong_done(next_raw)) {
2670     // Successfully claimed "weak done" as "strong done". Do the missing marking.
2671     p->do_remaining_strong_processing(this);
2672     return;
2673   }
2674   // Claim failed, some other thread got it.
2675 }
2676 
2677 void nmethod::oops_do_marking_prologue() {
2678   assert_at_safepoint();
2679 
2680   log_trace(gc, nmethod)("oops_do_marking_prologue");
2681   assert(_oops_do_mark_nmethods == nullptr, "must be empty");
2682 }
2683 
2684 void nmethod::oops_do_marking_epilogue() {
2685   assert_at_safepoint();
2686 
2687   nmethod* next = _oops_do_mark_nmethods;
2688   _oops_do_mark_nmethods = nullptr;
2689   if (next != nullptr) {
2690     nmethod* cur;
2691     do {
2692       cur = next;
2693       next = extract_nmethod(cur->_oops_do_mark_link);
2694       cur->_oops_do_mark_link = nullptr;
2695       DEBUG_ONLY(cur->verify_oop_relocations());
2696 
2697       LogTarget(Trace, gc, nmethod) lt;
2698       if (lt.is_enabled()) {
2699         LogStream ls(lt);
2700         CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
2701       }
2702       // End if self-loop has been detected.
2703     } while (cur != next);
2704   }
2705   log_trace(gc, nmethod)("oops_do_marking_epilogue");
2706 }
2707 
2708 inline bool includes(void* p, void* from, void* to) {
2709   return from <= p && p < to;
2710 }
2711 
2712 
2713 void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
2714   assert(count >= 2, "must be sentinel values, at least");
2715 
2716 #ifdef ASSERT
2717   // must be sorted and unique; we do a binary search in find_pc_desc()
2718   int prev_offset = pcs[0].pc_offset();
2719   assert(prev_offset == PcDesc::lower_offset_limit,
2720          "must start with a sentinel");
2721   for (int i = 1; i < count; i++) {
2722     int this_offset = pcs[i].pc_offset();
2723     assert(this_offset > prev_offset, "offsets must be sorted");
2724     prev_offset = this_offset;
2725   }
2726   assert(prev_offset == PcDesc::upper_offset_limit,
2727          "must end with a sentinel");
2728 #endif //ASSERT
2729 
2730   // Search for MethodHandle invokes and tag the nmethod.
2731   for (int i = 0; i < count; i++) {
2732     if (pcs[i].is_method_handle_invoke()) {
2733       set_has_method_handle_invokes(true);
2734       break;
2735     }
2736   }
2737   assert(has_method_handle_invokes() == (_deopt_mh_handler_offset != -1), "must have deopt mh handler");
2738 
2739   int size = count * sizeof(PcDesc);
2740   assert(scopes_pcs_size() >= size, "oob");
2741   memcpy(scopes_pcs_begin(), pcs, size);
2742 
2743   // Adjust the final sentinel downward.
2744   PcDesc* last_pc = &scopes_pcs_begin()[count-1];
2745   assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
2746   last_pc->set_pc_offset(content_size() + 1);
2747   for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
2748     // Fill any rounding gaps with copies of the last record.
2749     last_pc[1] = last_pc[0];
2750   }
2751   // The following assert could fail if sizeof(PcDesc) is not
2752   // an integral multiple of oopSize (the rounding term).
2753   // If it fails, change the logic to always allocate a multiple
2754   // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
2755   assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
2756 }
2757 
2758 void nmethod::copy_scopes_data(u_char* buffer, int size) {
2759   assert(scopes_data_size() >= size, "oob");
2760   memcpy(scopes_data_begin(), buffer, size);
2761 }
2762 
2763 #ifdef ASSERT
2764 static PcDesc* linear_search(int pc_offset, bool approximate, PcDesc* lower, PcDesc* upper) {
2765   PcDesc* res = nullptr;
2766   assert(lower != nullptr && lower->pc_offset() == PcDesc::lower_offset_limit,
2767          "must start with a sentinel");
2768   // lower + 1 to exclude initial sentinel
2769   for (PcDesc* p = lower + 1; p < upper; p++) {
2770     NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
2771     if (match_desc(p, pc_offset, approximate)) {
2772       if (res == nullptr) {
2773         res = p;
2774       } else {
2775         res = (PcDesc*) badAddress;
2776       }
2777     }
2778   }
2779   return res;
2780 }
2781 #endif
2782 
2783 
2784 #ifndef PRODUCT
2785 // Version of method to collect statistic
2786 PcDesc* PcDescContainer::find_pc_desc(address pc, bool approximate, address code_begin,
2787                                       PcDesc* lower, PcDesc* upper) {
2788   ++pc_nmethod_stats.pc_desc_queries;
2789   if (approximate) ++pc_nmethod_stats.pc_desc_approx;
2790 
2791   PcDesc* desc = _pc_desc_cache.last_pc_desc();
2792   assert(desc != nullptr, "PcDesc cache should be initialized already");
2793   if (desc->pc_offset() == (pc - code_begin)) {
2794     // Cached value matched
2795     ++pc_nmethod_stats.pc_desc_tests;
2796     ++pc_nmethod_stats.pc_desc_repeats;
2797     return desc;
2798   }
2799   return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
2800 }
2801 #endif
2802 
2803 // Finds a PcDesc with real-pc equal to "pc"
2804 PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, address code_begin,
2805                                                PcDesc* lower_incl, PcDesc* upper_incl) {
2806   if ((pc < code_begin) ||
2807       (pc - code_begin) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
2808     return nullptr;  // PC is wildly out of range
2809   }
2810   int pc_offset = (int) (pc - code_begin);
2811 
2812   // Check the PcDesc cache if it contains the desired PcDesc
2813   // (This as an almost 100% hit rate.)
2814   PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
2815   if (res != nullptr) {
2816     assert(res == linear_search(pc_offset, approximate, lower_incl, upper_incl), "cache ok");
2817     return res;
2818   }
2819 
2820   // Fallback algorithm: quasi-linear search for the PcDesc
2821   // Find the last pc_offset less than the given offset.
2822   // The successor must be the required match, if there is a match at all.
2823   // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
2824   PcDesc* lower = lower_incl;     // this is initial sentinel
2825   PcDesc* upper = upper_incl - 1; // exclude final sentinel
2826   if (lower >= upper)  return nullptr;  // no PcDescs at all
2827 
2828 #define assert_LU_OK \
2829   /* invariant on lower..upper during the following search: */ \
2830   assert(lower->pc_offset() <  pc_offset, "sanity"); \
2831   assert(upper->pc_offset() >= pc_offset, "sanity")
2832   assert_LU_OK;
2833 
2834   // Use the last successful return as a split point.
2835   PcDesc* mid = _pc_desc_cache.last_pc_desc();
2836   NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2837   if (mid->pc_offset() < pc_offset) {
2838     lower = mid;
2839   } else {
2840     upper = mid;
2841   }
2842 
2843   // Take giant steps at first (4096, then 256, then 16, then 1)
2844   const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ DEBUG_ONLY(-1);
2845   const int RADIX = (1 << LOG2_RADIX);
2846   for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
2847     while ((mid = lower + step) < upper) {
2848       assert_LU_OK;
2849       NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2850       if (mid->pc_offset() < pc_offset) {
2851         lower = mid;
2852       } else {
2853         upper = mid;
2854         break;
2855       }
2856     }
2857     assert_LU_OK;
2858   }
2859 
2860   // Sneak up on the value with a linear search of length ~16.
2861   while (true) {
2862     assert_LU_OK;
2863     mid = lower + 1;
2864     NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2865     if (mid->pc_offset() < pc_offset) {
2866       lower = mid;
2867     } else {
2868       upper = mid;
2869       break;
2870     }
2871   }
2872 #undef assert_LU_OK
2873 
2874   if (match_desc(upper, pc_offset, approximate)) {
2875     assert(upper == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
2876     if (!Thread::current_in_asgct()) {
2877       // we don't want to modify the cache if we're in ASGCT
2878       // which is typically called in a signal handler
2879       _pc_desc_cache.add_pc_desc(upper);
2880     }
2881     return upper;
2882   } else {
2883     assert(nullptr == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
2884     return nullptr;
2885   }
2886 }
2887 
2888 bool nmethod::check_dependency_on(DepChange& changes) {
2889   // What has happened:
2890   // 1) a new class dependee has been added
2891   // 2) dependee and all its super classes have been marked
2892   bool found_check = false;  // set true if we are upset
2893   for (Dependencies::DepStream deps(this); deps.next(); ) {
2894     // Evaluate only relevant dependencies.
2895     if (deps.spot_check_dependency_at(changes) != nullptr) {
2896       found_check = true;
2897       NOT_DEBUG(break);
2898     }
2899   }
2900   return found_check;
2901 }
2902 
2903 // Called from mark_for_deoptimization, when dependee is invalidated.
2904 bool nmethod::is_dependent_on_method(Method* dependee) {
2905   for (Dependencies::DepStream deps(this); deps.next(); ) {
2906     if (deps.type() != Dependencies::evol_method)
2907       continue;
2908     Method* method = deps.method_argument(0);
2909     if (method == dependee) return true;
2910   }
2911   return false;
2912 }
2913 
2914 void nmethod_init() {
2915   // make sure you didn't forget to adjust the filler fields
2916   assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
2917 }
2918 
2919 // -----------------------------------------------------------------------------
2920 // Verification
2921 
2922 class VerifyOopsClosure: public OopClosure {
2923   nmethod* _nm;
2924   bool     _ok;
2925 public:
2926   VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
2927   bool ok() { return _ok; }
2928   virtual void do_oop(oop* p) {
2929     if (oopDesc::is_oop_or_null(*p)) return;
2930     // Print diagnostic information before calling print_nmethod().
2931     // Assertions therein might prevent call from returning.
2932     tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
2933                   p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
2934     if (_ok) {
2935       _nm->print_nmethod(true);
2936       _ok = false;
2937     }
2938   }
2939   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2940 };
2941 
2942 class VerifyMetadataClosure: public MetadataClosure {
2943  public:
2944   void do_metadata(Metadata* md) {
2945     if (md->is_method()) {
2946       Method* method = (Method*)md;
2947       assert(!method->is_old(), "Should not be installing old methods");
2948     }
2949   }
2950 };
2951 
2952 
2953 void nmethod::verify() {
2954   if (is_not_entrant())
2955     return;
2956 
2957   // Make sure all the entry points are correctly aligned for patching.
2958   NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2959 
2960   // assert(oopDesc::is_oop(method()), "must be valid");
2961 
2962   ResourceMark rm;
2963 
2964   if (!CodeCache::contains(this)) {
2965     fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this));
2966   }
2967 
2968   if(is_native_method() )
2969     return;
2970 
2971   nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2972   if (nm != this) {
2973     fatal("find_nmethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this));
2974   }
2975 
2976   for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2977     if (! p->verify(this)) {
2978       tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
2979     }
2980   }
2981 
2982 #ifdef ASSERT
2983 #if INCLUDE_JVMCI
2984   {
2985     // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap
2986     ImmutableOopMapSet* oms = oop_maps();
2987     ImplicitExceptionTable implicit_table(this);
2988     for (uint i = 0; i < implicit_table.len(); i++) {
2989       int exec_offset = (int) implicit_table.get_exec_offset(i);
2990       if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) {
2991         assert(pc_desc_at(code_begin() + exec_offset) != nullptr, "missing PcDesc");
2992         bool found = false;
2993         for (int i = 0, imax = oms->count(); i < imax; i++) {
2994           if (oms->pair_at(i)->pc_offset() == exec_offset) {
2995             found = true;
2996             break;
2997           }
2998         }
2999         assert(found, "missing oopmap");
3000       }
3001     }
3002   }
3003 #endif
3004 #endif
3005 
3006   VerifyOopsClosure voc(this);
3007   oops_do(&voc);
3008   assert(voc.ok(), "embedded oops must be OK");
3009   Universe::heap()->verify_nmethod(this);
3010 
3011   assert(_oops_do_mark_link == nullptr, "_oops_do_mark_link for %s should be nullptr but is " PTR_FORMAT,
3012          nm->method()->external_name(), p2i(_oops_do_mark_link));
3013   verify_scopes();
3014 
3015   CompiledICLocker nm_verify(this);
3016   VerifyMetadataClosure vmc;
3017   metadata_do(&vmc);
3018 }
3019 
3020 
3021 void nmethod::verify_interrupt_point(address call_site, bool is_inline_cache) {
3022 
3023   // Verify IC only when nmethod installation is finished.
3024   if (!is_not_installed()) {
3025     if (CompiledICLocker::is_safe(this)) {
3026       if (is_inline_cache) {
3027         CompiledIC_at(this, call_site);
3028       } else {
3029         CompiledDirectCall::at(call_site);
3030       }
3031     } else {
3032       CompiledICLocker ml_verify(this);
3033       if (is_inline_cache) {
3034         CompiledIC_at(this, call_site);
3035       } else {
3036         CompiledDirectCall::at(call_site);
3037       }
3038     }
3039   }
3040 
3041   HandleMark hm(Thread::current());
3042 
3043   PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
3044   assert(pd != nullptr, "PcDesc must exist");
3045   for (ScopeDesc* sd = new ScopeDesc(this, pd);
3046        !sd->is_top(); sd = sd->sender()) {
3047     sd->verify();
3048   }
3049 }
3050 
3051 void nmethod::verify_scopes() {
3052   if( !method() ) return;       // Runtime stubs have no scope
3053   if (method()->is_native()) return; // Ignore stub methods.
3054   // iterate through all interrupt point
3055   // and verify the debug information is valid.
3056   RelocIterator iter(this);
3057   while (iter.next()) {
3058     address stub = nullptr;
3059     switch (iter.type()) {
3060       case relocInfo::virtual_call_type:
3061         verify_interrupt_point(iter.addr(), true /* is_inline_cache */);
3062         break;
3063       case relocInfo::opt_virtual_call_type:
3064         stub = iter.opt_virtual_call_reloc()->static_stub();
3065         verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
3066         break;
3067       case relocInfo::static_call_type:
3068         stub = iter.static_call_reloc()->static_stub();
3069         verify_interrupt_point(iter.addr(), false /* is_inline_cache */);
3070         break;
3071       case relocInfo::runtime_call_type:
3072       case relocInfo::runtime_call_w_cp_type: {
3073         address destination = iter.reloc()->value();
3074         // Right now there is no way to find out which entries support
3075         // an interrupt point.  It would be nice if we had this
3076         // information in a table.
3077         break;
3078       }
3079       default:
3080         break;
3081     }
3082     assert(stub == nullptr || stub_contains(stub), "static call stub outside stub section");
3083   }
3084 }
3085 
3086 
3087 // -----------------------------------------------------------------------------
3088 // Printing operations
3089 
3090 void nmethod::print_on_impl(outputStream* st) const {
3091   ResourceMark rm;
3092 
3093   st->print("Compiled method ");
3094 
3095   if (is_compiled_by_c1()) {
3096     st->print("(c1) ");
3097   } else if (is_compiled_by_c2()) {
3098     st->print("(c2) ");
3099   } else if (is_compiled_by_jvmci()) {
3100     st->print("(JVMCI) ");
3101   } else {
3102     st->print("(n/a) ");
3103   }
3104 
3105   print_on_with_msg(st, nullptr);
3106 
3107   if (WizardMode) {
3108     st->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
3109     st->print(" for method " INTPTR_FORMAT , p2i(method()));
3110     st->print(" { ");
3111     st->print_cr("%s ", state());
3112     st->print_cr("}:");
3113   }
3114   if (size              () > 0) st->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3115                                              p2i(this),
3116                                              p2i(this) + size(),
3117                                              size());
3118   if (consts_size       () > 0) st->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3119                                              p2i(consts_begin()),
3120                                              p2i(consts_end()),
3121                                              consts_size());
3122   if (insts_size        () > 0) st->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3123                                              p2i(insts_begin()),
3124                                              p2i(insts_end()),
3125                                              insts_size());
3126   if (stub_size         () > 0) st->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3127                                              p2i(stub_begin()),
3128                                              p2i(stub_end()),
3129                                              stub_size());
3130   if (oops_size         () > 0) st->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3131                                              p2i(oops_begin()),
3132                                              p2i(oops_end()),
3133                                              oops_size());
3134   if (mutable_data_size() > 0) st->print_cr(" mutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3135                                              p2i(mutable_data_begin()),
3136                                              p2i(mutable_data_end()),
3137                                              mutable_data_size());
3138   if (relocation_size() > 0)   st->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3139                                              p2i(relocation_begin()),
3140                                              p2i(relocation_end()),
3141                                              relocation_size());
3142   if (metadata_size     () > 0) st->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3143                                              p2i(metadata_begin()),
3144                                              p2i(metadata_end()),
3145                                              metadata_size());
3146 #if INCLUDE_JVMCI
3147   if (jvmci_data_size   () > 0) st->print_cr(" JVMCI data     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3148                                              p2i(jvmci_data_begin()),
3149                                              p2i(jvmci_data_end()),
3150                                              jvmci_data_size());
3151 #endif
3152   if (immutable_data_size() > 0) st->print_cr(" immutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3153                                              p2i(immutable_data_begin()),
3154                                              p2i(immutable_data_end()),
3155                                              immutable_data_size());
3156   if (dependencies_size () > 0) st->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3157                                              p2i(dependencies_begin()),
3158                                              p2i(dependencies_end()),
3159                                              dependencies_size());
3160   if (nul_chk_table_size() > 0) st->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3161                                              p2i(nul_chk_table_begin()),
3162                                              p2i(nul_chk_table_end()),
3163                                              nul_chk_table_size());
3164   if (handler_table_size() > 0) st->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3165                                              p2i(handler_table_begin()),
3166                                              p2i(handler_table_end()),
3167                                              handler_table_size());
3168   if (scopes_pcs_size   () > 0) st->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3169                                              p2i(scopes_pcs_begin()),
3170                                              p2i(scopes_pcs_end()),
3171                                              scopes_pcs_size());
3172   if (scopes_data_size  () > 0) st->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3173                                              p2i(scopes_data_begin()),
3174                                              p2i(scopes_data_end()),
3175                                              scopes_data_size());
3176 #if INCLUDE_JVMCI
3177   if (speculations_size () > 0) st->print_cr(" speculations   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
3178                                              p2i(speculations_begin()),
3179                                              p2i(speculations_end()),
3180                                              speculations_size());
3181 #endif
3182 }
3183 
3184 void nmethod::print_code() {
3185   ResourceMark m;
3186   ttyLocker ttyl;
3187   // Call the specialized decode method of this class.
3188   decode(tty);
3189 }
3190 
3191 #ifndef PRODUCT  // called InstanceKlass methods are available only then. Declared as PRODUCT_RETURN
3192 
3193 void nmethod::print_dependencies_on(outputStream* out) {
3194   ResourceMark rm;
3195   stringStream st;
3196   st.print_cr("Dependencies:");
3197   for (Dependencies::DepStream deps(this); deps.next(); ) {
3198     deps.print_dependency(&st);
3199     InstanceKlass* ctxk = deps.context_type();
3200     if (ctxk != nullptr) {
3201       if (ctxk->is_dependent_nmethod(this)) {
3202         st.print_cr("   [nmethod<=klass]%s", ctxk->external_name());
3203       }
3204     }
3205     deps.log_dependency();  // put it into the xml log also
3206   }
3207   out->print_raw(st.as_string());
3208 }
3209 #endif
3210 
3211 #if defined(SUPPORT_DATA_STRUCTS)
3212 
3213 // Print the oops from the underlying CodeBlob.
3214 void nmethod::print_oops(outputStream* st) {
3215   ResourceMark m;
3216   st->print("Oops:");
3217   if (oops_begin() < oops_end()) {
3218     st->cr();
3219     for (oop* p = oops_begin(); p < oops_end(); p++) {
3220       Disassembler::print_location((unsigned char*)p, (unsigned char*)oops_begin(), (unsigned char*)oops_end(), st, true, false);
3221       st->print(PTR_FORMAT " ", *((uintptr_t*)p));
3222       if (Universe::contains_non_oop_word(p)) {
3223         st->print_cr("NON_OOP");
3224         continue;  // skip non-oops
3225       }
3226       if (*p == nullptr) {
3227         st->print_cr("nullptr-oop");
3228         continue;  // skip non-oops
3229       }
3230       (*p)->print_value_on(st);
3231       st->cr();
3232     }
3233   } else {
3234     st->print_cr(" <list empty>");
3235   }
3236 }
3237 
3238 // Print metadata pool.
3239 void nmethod::print_metadata(outputStream* st) {
3240   ResourceMark m;
3241   st->print("Metadata:");
3242   if (metadata_begin() < metadata_end()) {
3243     st->cr();
3244     for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
3245       Disassembler::print_location((unsigned char*)p, (unsigned char*)metadata_begin(), (unsigned char*)metadata_end(), st, true, false);
3246       st->print(PTR_FORMAT " ", *((uintptr_t*)p));
3247       if (*p && *p != Universe::non_oop_word()) {
3248         (*p)->print_value_on(st);
3249       }
3250       st->cr();
3251     }
3252   } else {
3253     st->print_cr(" <list empty>");
3254   }
3255 }
3256 
3257 #ifndef PRODUCT  // ScopeDesc::print_on() is available only then. Declared as PRODUCT_RETURN
3258 void nmethod::print_scopes_on(outputStream* st) {
3259   // Find the first pc desc for all scopes in the code and print it.
3260   ResourceMark rm;
3261   st->print("scopes:");
3262   if (scopes_pcs_begin() < scopes_pcs_end()) {
3263     st->cr();
3264     for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3265       if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
3266         continue;
3267 
3268       ScopeDesc* sd = scope_desc_at(p->real_pc(this));
3269       while (sd != nullptr) {
3270         sd->print_on(st, p);  // print output ends with a newline
3271         sd = sd->sender();
3272       }
3273     }
3274   } else {
3275     st->print_cr(" <list empty>");
3276   }
3277 }
3278 #endif
3279 
3280 #ifndef PRODUCT  // RelocIterator does support printing only then.
3281 void nmethod::print_relocations() {
3282   ResourceMark m;       // in case methods get printed via the debugger
3283   tty->print_cr("relocations:");
3284   RelocIterator iter(this);
3285   iter.print_on(tty);
3286 }
3287 #endif
3288 
3289 void nmethod::print_pcs_on(outputStream* st) {
3290   ResourceMark m;       // in case methods get printed via debugger
3291   st->print("pc-bytecode offsets:");
3292   if (scopes_pcs_begin() < scopes_pcs_end()) {
3293     st->cr();
3294     for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3295       p->print_on(st, this);  // print output ends with a newline
3296     }
3297   } else {
3298     st->print_cr(" <list empty>");
3299   }
3300 }
3301 
3302 void nmethod::print_handler_table() {
3303   ExceptionHandlerTable(this).print(code_begin());
3304 }
3305 
3306 void nmethod::print_nul_chk_table() {
3307   ImplicitExceptionTable(this).print(code_begin());
3308 }
3309 
3310 void nmethod::print_recorded_oop(int log_n, int i) {
3311   void* value;
3312 
3313   if (i == 0) {
3314     value = nullptr;
3315   } else {
3316     // Be careful around non-oop words. Don't create an oop
3317     // with that value, or it will assert in verification code.
3318     if (Universe::contains_non_oop_word(oop_addr_at(i))) {
3319       value = Universe::non_oop_word();
3320     } else {
3321       value = oop_at(i);
3322     }
3323   }
3324 
3325   tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(value));
3326 
3327   if (value == Universe::non_oop_word()) {
3328     tty->print("non-oop word");
3329   } else {
3330     if (value == nullptr) {
3331       tty->print("nullptr-oop");
3332     } else {
3333       oop_at(i)->print_value_on(tty);
3334     }
3335   }
3336 
3337   tty->cr();
3338 }
3339 
3340 void nmethod::print_recorded_oops() {
3341   const int n = oops_count();
3342   const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
3343   tty->print("Recorded oops:");
3344   if (n > 0) {
3345     tty->cr();
3346     for (int i = 0; i < n; i++) {
3347       print_recorded_oop(log_n, i);
3348     }
3349   } else {
3350     tty->print_cr(" <list empty>");
3351   }
3352 }
3353 
3354 void nmethod::print_recorded_metadata() {
3355   const int n = metadata_count();
3356   const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
3357   tty->print("Recorded metadata:");
3358   if (n > 0) {
3359     tty->cr();
3360     for (int i = 0; i < n; i++) {
3361       Metadata* m = metadata_at(i);
3362       tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(m));
3363       if (m == (Metadata*)Universe::non_oop_word()) {
3364         tty->print("non-metadata word");
3365       } else if (m == nullptr) {
3366         tty->print("nullptr-oop");
3367       } else {
3368         Metadata::print_value_on_maybe_null(tty, m);
3369       }
3370       tty->cr();
3371     }
3372   } else {
3373     tty->print_cr(" <list empty>");
3374   }
3375 }
3376 #endif
3377 
3378 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
3379 
3380 void nmethod::print_constant_pool(outputStream* st) {
3381   //-----------------------------------
3382   //---<  Print the constant pool  >---
3383   //-----------------------------------
3384   int consts_size = this->consts_size();
3385   if ( consts_size > 0 ) {
3386     unsigned char* cstart = this->consts_begin();
3387     unsigned char* cp     = cstart;
3388     unsigned char* cend   = cp + consts_size;
3389     unsigned int   bytes_per_line = 4;
3390     unsigned int   CP_alignment   = 8;
3391     unsigned int   n;
3392 
3393     st->cr();
3394 
3395     //---<  print CP header to make clear what's printed  >---
3396     if( ((uintptr_t)cp&(CP_alignment-1)) == 0 ) {
3397       n = bytes_per_line;
3398       st->print_cr("[Constant Pool]");
3399       Disassembler::print_location(cp, cstart, cend, st, true, true);
3400       Disassembler::print_hexdata(cp, n, st, true);
3401       st->cr();
3402     } else {
3403       n = (int)((uintptr_t)cp & (bytes_per_line-1));
3404       st->print_cr("[Constant Pool (unaligned)]");
3405     }
3406 
3407     //---<  print CP contents, bytes_per_line at a time  >---
3408     while (cp < cend) {
3409       Disassembler::print_location(cp, cstart, cend, st, true, false);
3410       Disassembler::print_hexdata(cp, n, st, false);
3411       cp += n;
3412       n   = bytes_per_line;
3413       st->cr();
3414     }
3415 
3416     //---<  Show potential alignment gap between constant pool and code  >---
3417     cend = code_begin();
3418     if( cp < cend ) {
3419       n = 4;
3420       st->print_cr("[Code entry alignment]");
3421       while (cp < cend) {
3422         Disassembler::print_location(cp, cstart, cend, st, false, false);
3423         cp += n;
3424         st->cr();
3425       }
3426     }
3427   } else {
3428     st->print_cr("[Constant Pool (empty)]");
3429   }
3430   st->cr();
3431 }
3432 
3433 #endif
3434 
3435 // Disassemble this nmethod.
3436 // Print additional debug information, if requested. This could be code
3437 // comments, block comments, profiling counters, etc.
3438 // The undisassembled format is useful no disassembler library is available.
3439 // The resulting hex dump (with markers) can be disassembled later, or on
3440 // another system, when/where a disassembler library is available.
3441 void nmethod::decode2(outputStream* ost) const {
3442 
3443   // Called from frame::back_trace_with_decode without ResourceMark.
3444   ResourceMark rm;
3445 
3446   // Make sure we have a valid stream to print on.
3447   outputStream* st = ost ? ost : tty;
3448 
3449 #if defined(SUPPORT_ABSTRACT_ASSEMBLY) && ! defined(SUPPORT_ASSEMBLY)
3450   const bool use_compressed_format    = true;
3451   const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
3452                                                                   AbstractDisassembler::show_block_comment());
3453 #else
3454   const bool use_compressed_format    = Disassembler::is_abstract();
3455   const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
3456                                                                   AbstractDisassembler::show_block_comment());
3457 #endif
3458 
3459   st->cr();
3460   this->print_on(st);
3461   st->cr();
3462 
3463 #if defined(SUPPORT_ASSEMBLY)
3464   //----------------------------------
3465   //---<  Print real disassembly  >---
3466   //----------------------------------
3467   if (! use_compressed_format) {
3468     st->print_cr("[Disassembly]");
3469     Disassembler::decode(const_cast<nmethod*>(this), st);
3470     st->bol();
3471     st->print_cr("[/Disassembly]");
3472     return;
3473   }
3474 #endif
3475 
3476 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3477 
3478   // Compressed undisassembled disassembly format.
3479   // The following status values are defined/supported:
3480   //   = 0 - currently at bol() position, nothing printed yet on current line.
3481   //   = 1 - currently at position after print_location().
3482   //   > 1 - in the midst of printing instruction stream bytes.
3483   int        compressed_format_idx    = 0;
3484   int        code_comment_column      = 0;
3485   const int  instr_maxlen             = Assembler::instr_maxlen();
3486   const uint tabspacing               = 8;
3487   unsigned char* start = this->code_begin();
3488   unsigned char* p     = this->code_begin();
3489   unsigned char* end   = this->code_end();
3490   unsigned char* pss   = p; // start of a code section (used for offsets)
3491 
3492   if ((start == nullptr) || (end == nullptr)) {
3493     st->print_cr("PrintAssembly not possible due to uninitialized section pointers");
3494     return;
3495   }
3496 #endif
3497 
3498 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3499   //---<  plain abstract disassembly, no comments or anything, just section headers  >---
3500   if (use_compressed_format && ! compressed_with_comments) {
3501     const_cast<nmethod*>(this)->print_constant_pool(st);
3502 
3503     //---<  Open the output (Marker for post-mortem disassembler)  >---
3504     st->print_cr("[MachCode]");
3505     const char* header = nullptr;
3506     address p0 = p;
3507     while (p < end) {
3508       address pp = p;
3509       while ((p < end) && (header == nullptr)) {
3510         header = nmethod_section_label(p);
3511         pp  = p;
3512         p  += Assembler::instr_len(p);
3513       }
3514       if (pp > p0) {
3515         AbstractDisassembler::decode_range_abstract(p0, pp, start, end, st, Assembler::instr_maxlen());
3516         p0 = pp;
3517         p  = pp;
3518         header = nullptr;
3519       } else if (header != nullptr) {
3520         st->bol();
3521         st->print_cr("%s", header);
3522         header = nullptr;
3523       }
3524     }
3525     //---<  Close the output (Marker for post-mortem disassembler)  >---
3526     st->bol();
3527     st->print_cr("[/MachCode]");
3528     return;
3529   }
3530 #endif
3531 
3532 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
3533   //---<  abstract disassembly with comments and section headers merged in  >---
3534   if (compressed_with_comments) {
3535     const_cast<nmethod*>(this)->print_constant_pool(st);
3536 
3537     //---<  Open the output (Marker for post-mortem disassembler)  >---
3538     st->print_cr("[MachCode]");
3539     while ((p < end) && (p != nullptr)) {
3540       const int instruction_size_in_bytes = Assembler::instr_len(p);
3541 
3542       //---<  Block comments for nmethod. Interrupts instruction stream, if any.  >---
3543       // Outputs a bol() before and a cr() after, but only if a comment is printed.
3544       // Prints nmethod_section_label as well.
3545       if (AbstractDisassembler::show_block_comment()) {
3546         print_block_comment(st, p);
3547         if (st->position() == 0) {
3548           compressed_format_idx = 0;
3549         }
3550       }
3551 
3552       //---<  New location information after line break  >---
3553       if (compressed_format_idx == 0) {
3554         code_comment_column   = Disassembler::print_location(p, pss, end, st, false, false);
3555         compressed_format_idx = 1;
3556       }
3557 
3558       //---<  Code comment for current instruction. Address range [p..(p+len))  >---
3559       unsigned char* p_end = p + (ssize_t)instruction_size_in_bytes;
3560       S390_ONLY(if (p_end > end) p_end = end;) // avoid getting past the end
3561 
3562       if (AbstractDisassembler::show_comment() && const_cast<nmethod*>(this)->has_code_comment(p, p_end)) {
3563         //---<  interrupt instruction byte stream for code comment  >---
3564         if (compressed_format_idx > 1) {
3565           st->cr();  // interrupt byte stream
3566           st->cr();  // add an empty line
3567           code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
3568         }
3569         const_cast<nmethod*>(this)->print_code_comment_on(st, code_comment_column, p, p_end );
3570         st->bol();
3571         compressed_format_idx = 0;
3572       }
3573 
3574       //---<  New location information after line break  >---
3575       if (compressed_format_idx == 0) {
3576         code_comment_column   = Disassembler::print_location(p, pss, end, st, false, false);
3577         compressed_format_idx = 1;
3578       }
3579 
3580       //---<  Nicely align instructions for readability  >---
3581       if (compressed_format_idx > 1) {
3582         Disassembler::print_delimiter(st);
3583       }
3584 
3585       //---<  Now, finally, print the actual instruction bytes  >---
3586       unsigned char* p0 = p;
3587       p = Disassembler::decode_instruction_abstract(p, st, instruction_size_in_bytes, instr_maxlen);
3588       compressed_format_idx += (int)(p - p0);
3589 
3590       if (Disassembler::start_newline(compressed_format_idx-1)) {
3591         st->cr();
3592         compressed_format_idx = 0;
3593       }
3594     }
3595     //---<  Close the output (Marker for post-mortem disassembler)  >---
3596     st->bol();
3597     st->print_cr("[/MachCode]");
3598     return;
3599   }
3600 #endif
3601 }
3602 
3603 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
3604 
3605 const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
3606   RelocIterator iter(this, begin, end);
3607   bool have_one = false;
3608   while (iter.next()) {
3609     have_one = true;
3610     switch (iter.type()) {
3611         case relocInfo::none: {
3612           // Skip it and check next
3613           break;
3614         }
3615         case relocInfo::oop_type: {
3616           // Get a non-resizable resource-allocated stringStream.
3617           // Our callees make use of (nested) ResourceMarks.
3618           stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
3619           oop_Relocation* r = iter.oop_reloc();
3620           oop obj = r->oop_value();
3621           st.print("oop(");
3622           if (obj == nullptr) st.print("nullptr");
3623           else obj->print_value_on(&st);
3624           st.print(")");
3625           return st.as_string();
3626         }
3627         case relocInfo::metadata_type: {
3628           stringStream st;
3629           metadata_Relocation* r = iter.metadata_reloc();
3630           Metadata* obj = r->metadata_value();
3631           st.print("metadata(");
3632           if (obj == nullptr) st.print("nullptr");
3633           else obj->print_value_on(&st);
3634           st.print(")");
3635           return st.as_string();
3636         }
3637         case relocInfo::runtime_call_type:
3638         case relocInfo::runtime_call_w_cp_type: {
3639           stringStream st;
3640           st.print("runtime_call");
3641           CallRelocation* r = (CallRelocation*)iter.reloc();
3642           address dest = r->destination();
3643           if (StubRoutines::contains(dest)) {
3644             StubCodeDesc* desc = StubCodeDesc::desc_for(dest);
3645             if (desc == nullptr) {
3646               desc = StubCodeDesc::desc_for(dest + frame::pc_return_offset);
3647             }
3648             if (desc != nullptr) {
3649               st.print(" Stub::%s", desc->name());
3650               return st.as_string();
3651             }
3652           }
3653           CodeBlob* cb = CodeCache::find_blob(dest);
3654           if (cb != nullptr) {
3655             st.print(" %s", cb->name());
3656           } else {
3657             ResourceMark rm;
3658             const int buflen = 1024;
3659             char* buf = NEW_RESOURCE_ARRAY(char, buflen);
3660             int offset;
3661             if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) {
3662               st.print(" %s", buf);
3663               if (offset != 0) {
3664                 st.print("+%d", offset);
3665               }
3666             }
3667           }
3668           return st.as_string();
3669         }
3670         case relocInfo::virtual_call_type: {
3671           stringStream st;
3672           st.print_raw("virtual_call");
3673           virtual_call_Relocation* r = iter.virtual_call_reloc();
3674           Method* m = r->method_value();
3675           if (m != nullptr) {
3676             assert(m->is_method(), "");
3677             m->print_short_name(&st);
3678           }
3679           return st.as_string();
3680         }
3681         case relocInfo::opt_virtual_call_type: {
3682           stringStream st;
3683           st.print_raw("optimized virtual_call");
3684           opt_virtual_call_Relocation* r = iter.opt_virtual_call_reloc();
3685           Method* m = r->method_value();
3686           if (m != nullptr) {
3687             assert(m->is_method(), "");
3688             m->print_short_name(&st);
3689           }
3690           return st.as_string();
3691         }
3692         case relocInfo::static_call_type: {
3693           stringStream st;
3694           st.print_raw("static_call");
3695           static_call_Relocation* r = iter.static_call_reloc();
3696           Method* m = r->method_value();
3697           if (m != nullptr) {
3698             assert(m->is_method(), "");
3699             m->print_short_name(&st);
3700           }
3701           return st.as_string();
3702         }
3703         case relocInfo::static_stub_type:      return "static_stub";
3704         case relocInfo::external_word_type:    return "external_word";
3705         case relocInfo::internal_word_type:    return "internal_word";
3706         case relocInfo::section_word_type:     return "section_word";
3707         case relocInfo::poll_type:             return "poll";
3708         case relocInfo::poll_return_type:      return "poll_return";
3709         case relocInfo::trampoline_stub_type:  return "trampoline_stub";
3710         case relocInfo::entry_guard_type:      return "entry_guard";
3711         case relocInfo::post_call_nop_type:    return "post_call_nop";
3712         case relocInfo::barrier_type: {
3713           barrier_Relocation* const reloc = iter.barrier_reloc();
3714           stringStream st;
3715           st.print("barrier format=%d", reloc->format());
3716           return st.as_string();
3717         }
3718 
3719         case relocInfo::type_mask:             return "type_bit_mask";
3720 
3721         default: {
3722           stringStream st;
3723           st.print("unknown relocInfo=%d", (int) iter.type());
3724           return st.as_string();
3725         }
3726     }
3727   }
3728   return have_one ? "other" : nullptr;
3729 }
3730 
3731 // Return the last scope in (begin..end]
3732 ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
3733   PcDesc* p = pc_desc_near(begin+1);
3734   if (p != nullptr && p->real_pc(this) <= end) {
3735     return new ScopeDesc(this, p);
3736   }
3737   return nullptr;
3738 }
3739 
3740 const char* nmethod::nmethod_section_label(address pos) const {
3741   const char* label = nullptr;
3742   if (pos == code_begin())                                              label = "[Instructions begin]";
3743   if (pos == entry_point())                                             label = "[Entry Point]";
3744   if (pos == inline_entry_point())                                      label = "[Inline Entry Point]";
3745   if (pos == verified_entry_point())                                    label = "[Verified Entry Point]";
3746   if (pos == verified_inline_entry_point())                             label = "[Verified Inline Entry Point]";
3747   if (pos == verified_inline_ro_entry_point())                          label = "[Verified Inline Entry Point (RO)]";
3748   if (has_method_handle_invokes() && (pos == deopt_mh_handler_begin())) label = "[Deopt MH Handler Code]";
3749   if (pos == consts_begin() && pos != insts_begin())                    label = "[Constants]";
3750   // Check stub_code before checking exception_handler or deopt_handler.
3751   if (pos == this->stub_begin())                                        label = "[Stub Code]";
3752   if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin())          label = "[Exception Handler]";
3753   if (JVMCI_ONLY(_deopt_handler_offset != -1 &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]";
3754   return label;
3755 }
3756 
3757 static int maybe_print_entry_label(outputStream* stream, address pos, address entry, const char* label) {
3758   if (pos == entry) {
3759     stream->bol();
3760     stream->print_cr("%s", label);
3761     return 1;
3762   } else {
3763     return 0;
3764   }
3765 }
3766 
3767 void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels) const {
3768   if (print_section_labels) {
3769     int n = 0;
3770     // Multiple entry points may be at the same position. Print them all.
3771     n += maybe_print_entry_label(stream, block_begin, entry_point(),                    "[Entry Point]");
3772     n += maybe_print_entry_label(stream, block_begin, inline_entry_point(),             "[Inline Entry Point]");
3773     n += maybe_print_entry_label(stream, block_begin, verified_entry_point(),           "[Verified Entry Point]");
3774     n += maybe_print_entry_label(stream, block_begin, verified_inline_entry_point(),    "[Verified Inline Entry Point]");
3775     n += maybe_print_entry_label(stream, block_begin, verified_inline_ro_entry_point(), "[Verified Inline Entry Point (RO)]");
3776     if (n == 0) {
3777       const char* label = nmethod_section_label(block_begin);
3778       if (label != nullptr) {
3779         stream->bol();
3780         stream->print_cr("%s", label);
3781       }
3782     }
3783   }
3784 
3785   Method* m = method();
3786   if (m == nullptr || is_osr_method()) {
3787     return;
3788   }
3789 
3790   // Print the name of the method (only once)
3791   address low = MIN4(entry_point(), verified_entry_point(), verified_inline_entry_point(), verified_inline_ro_entry_point());
3792   low = MIN2(low, inline_entry_point());
3793   assert(low != 0, "sanity");
3794   if (block_begin == low) {
3795     stream->print("  # ");
3796     m->print_value_on(stream);
3797     stream->cr();
3798   }
3799 
3800   // Print the arguments for the 3 types of verified entry points
3801   CompiledEntrySignature ces(m);
3802   ces.compute_calling_conventions(false);
3803   const GrowableArray<SigEntry>* sig_cc;
3804   const VMRegPair* regs;
3805   if (block_begin == verified_entry_point()) {
3806     sig_cc = ces.sig_cc();
3807     regs = ces.regs_cc();
3808   } else if (block_begin == verified_inline_entry_point()) {
3809     sig_cc = ces.sig();
3810     regs = ces.regs();
3811   } else if (block_begin == verified_inline_ro_entry_point()) {
3812     sig_cc = ces.sig_cc_ro();
3813     regs = ces.regs_cc_ro();
3814   } else {
3815     return;
3816   }
3817 
3818   bool has_this = !m->is_static();
3819   if (ces.has_inline_recv() && block_begin == verified_entry_point()) {
3820     // <this> argument is scalarized for verified_entry_point()
3821     has_this = false;
3822   }
3823   const char* spname = "sp"; // make arch-specific?
3824   int stack_slot_offset = this->frame_size() * wordSize;
3825   int tab1 = 14, tab2 = 24;
3826   int sig_index = 0;
3827   int arg_index = has_this ? -1 : 0;
3828   bool did_old_sp = false;
3829   for (ExtendedSignature sig = ExtendedSignature(sig_cc, SigEntryFilter()); !sig.at_end(); ++sig) {
3830     bool at_this = (arg_index == -1);
3831     bool at_old_sp = false;
3832     BasicType t = (*sig)._bt;
3833     if (at_this) {
3834       stream->print("  # this: ");
3835     } else {
3836       stream->print("  # parm%d: ", arg_index);
3837     }
3838     stream->move_to(tab1);
3839     VMReg fst = regs[sig_index].first();
3840     VMReg snd = regs[sig_index].second();
3841     if (fst->is_reg()) {
3842       stream->print("%s", fst->name());
3843       if (snd->is_valid())  {
3844         stream->print(":%s", snd->name());
3845       }
3846     } else if (fst->is_stack()) {
3847       int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
3848       if (offset == stack_slot_offset)  at_old_sp = true;
3849       stream->print("[%s+0x%x]", spname, offset);
3850     } else {
3851       stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
3852     }
3853     stream->print(" ");
3854     stream->move_to(tab2);
3855     stream->print("= ");
3856     if (at_this) {
3857       m->method_holder()->print_value_on(stream);
3858     } else {
3859       bool did_name = false;
3860       if (is_reference_type(t)) {
3861         Symbol* name = (*sig)._name;
3862         name->print_value_on(stream);
3863         did_name = true;
3864       }
3865       if (!did_name)
3866         stream->print("%s", type2name(t));
3867       if ((*sig)._null_marker) {
3868         stream->print(" (null marker)");
3869       }
3870     }
3871     if (at_old_sp) {
3872       stream->print("  (%s of caller)", spname);
3873       did_old_sp = true;
3874     }
3875     stream->cr();
3876     sig_index += type2size[t];
3877     arg_index += 1;
3878   }
3879   if (!did_old_sp) {
3880     stream->print("  # ");
3881     stream->move_to(tab1);
3882     stream->print("[%s+0x%x]", spname, stack_slot_offset);
3883     stream->print("  (%s of caller)", spname);
3884     stream->cr();
3885   }
3886 }
3887 
3888 // Returns whether this nmethod has code comments.
3889 bool nmethod::has_code_comment(address begin, address end) {
3890   // scopes?
3891   ScopeDesc* sd  = scope_desc_in(begin, end);
3892   if (sd != nullptr) return true;
3893 
3894   // relocations?
3895   const char* str = reloc_string_for(begin, end);
3896   if (str != nullptr) return true;
3897 
3898   // implicit exceptions?
3899   int cont_offset = ImplicitExceptionTable(this).continuation_offset((uint)(begin - code_begin()));
3900   if (cont_offset != 0) return true;
3901 
3902   return false;
3903 }
3904 
3905 void nmethod::print_code_comment_on(outputStream* st, int column, address begin, address end) {
3906   ImplicitExceptionTable implicit_table(this);
3907   int pc_offset = (int)(begin - code_begin());
3908   int cont_offset = implicit_table.continuation_offset(pc_offset);
3909   bool oop_map_required = false;
3910   if (cont_offset != 0) {
3911     st->move_to(column, 6, 0);
3912     if (pc_offset == cont_offset) {
3913       st->print("; implicit exception: deoptimizes");
3914       oop_map_required = true;
3915     } else {
3916       st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
3917     }
3918   }
3919 
3920   // Find an oopmap in (begin, end].  We use the odd half-closed
3921   // interval so that oop maps and scope descs which are tied to the
3922   // byte after a call are printed with the call itself.  OopMaps
3923   // associated with implicit exceptions are printed with the implicit
3924   // instruction.
3925   address base = code_begin();
3926   ImmutableOopMapSet* oms = oop_maps();
3927   if (oms != nullptr) {
3928     for (int i = 0, imax = oms->count(); i < imax; i++) {
3929       const ImmutableOopMapPair* pair = oms->pair_at(i);
3930       const ImmutableOopMap* om = pair->get_from(oms);
3931       address pc = base + pair->pc_offset();
3932       if (pc >= begin) {
3933 #if INCLUDE_JVMCI
3934         bool is_implicit_deopt = implicit_table.continuation_offset(pair->pc_offset()) == (uint) pair->pc_offset();
3935 #else
3936         bool is_implicit_deopt = false;
3937 #endif
3938         if (is_implicit_deopt ? pc == begin : pc > begin && pc <= end) {
3939           st->move_to(column, 6, 0);
3940           st->print("; ");
3941           om->print_on(st);
3942           oop_map_required = false;
3943         }
3944       }
3945       if (pc > end) {
3946         break;
3947       }
3948     }
3949   }
3950   assert(!oop_map_required, "missed oopmap");
3951 
3952   Thread* thread = Thread::current();
3953 
3954   // Print any debug info present at this pc.
3955   ScopeDesc* sd  = scope_desc_in(begin, end);
3956   if (sd != nullptr) {
3957     st->move_to(column, 6, 0);
3958     if (sd->bci() == SynchronizationEntryBCI) {
3959       st->print(";*synchronization entry");
3960     } else if (sd->bci() == AfterBci) {
3961       st->print(";* method exit (unlocked if synchronized)");
3962     } else if (sd->bci() == UnwindBci) {
3963       st->print(";* unwind (locked if synchronized)");
3964     } else if (sd->bci() == AfterExceptionBci) {
3965       st->print(";* unwind (unlocked if synchronized)");
3966     } else if (sd->bci() == UnknownBci) {
3967       st->print(";* unknown");
3968     } else if (sd->bci() == InvalidFrameStateBci) {
3969       st->print(";* invalid frame state");
3970     } else {
3971       if (sd->method() == nullptr) {
3972         st->print("method is nullptr");
3973       } else if (sd->method()->is_native()) {
3974         st->print("method is native");
3975       } else {
3976         Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
3977         st->print(";*%s", Bytecodes::name(bc));
3978         switch (bc) {
3979         case Bytecodes::_invokevirtual:
3980         case Bytecodes::_invokespecial:
3981         case Bytecodes::_invokestatic:
3982         case Bytecodes::_invokeinterface:
3983           {
3984             Bytecode_invoke invoke(methodHandle(thread, sd->method()), sd->bci());
3985             st->print(" ");
3986             if (invoke.name() != nullptr)
3987               invoke.name()->print_symbol_on(st);
3988             else
3989               st->print("<UNKNOWN>");
3990             break;
3991           }
3992         case Bytecodes::_getfield:
3993         case Bytecodes::_putfield:
3994         case Bytecodes::_getstatic:
3995         case Bytecodes::_putstatic:
3996           {
3997             Bytecode_field field(methodHandle(thread, sd->method()), sd->bci());
3998             st->print(" ");
3999             if (field.name() != nullptr)
4000               field.name()->print_symbol_on(st);
4001             else
4002               st->print("<UNKNOWN>");
4003           }
4004         default:
4005           break;
4006         }
4007       }
4008       st->print(" {reexecute=%d rethrow=%d return_oop=%d return_scalarized=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop(), sd->return_scalarized());
4009     }
4010 
4011     // Print all scopes
4012     for (;sd != nullptr; sd = sd->sender()) {
4013       st->move_to(column, 6, 0);
4014       st->print("; -");
4015       if (sd->should_reexecute()) {
4016         st->print(" (reexecute)");
4017       }
4018       if (sd->method() == nullptr) {
4019         st->print("method is nullptr");
4020       } else {
4021         sd->method()->print_short_name(st);
4022       }
4023       int lineno = sd->method()->line_number_from_bci(sd->bci());
4024       if (lineno != -1) {
4025         st->print("@%d (line %d)", sd->bci(), lineno);
4026       } else {
4027         st->print("@%d", sd->bci());
4028       }
4029       st->cr();
4030     }
4031   }
4032 
4033   // Print relocation information
4034   // Prevent memory leak: allocating without ResourceMark.
4035   ResourceMark rm;
4036   const char* str = reloc_string_for(begin, end);
4037   if (str != nullptr) {
4038     if (sd != nullptr) st->cr();
4039     st->move_to(column, 6, 0);
4040     st->print(";   {%s}", str);
4041   }
4042 }
4043 
4044 #endif
4045 
4046 address nmethod::call_instruction_address(address pc) const {
4047   if (NativeCall::is_call_before(pc)) {
4048     NativeCall *ncall = nativeCall_before(pc);
4049     return ncall->instruction_address();
4050   }
4051   return nullptr;
4052 }
4053 
4054 void nmethod::print_value_on_impl(outputStream* st) const {
4055   st->print_cr("nmethod");
4056 #if defined(SUPPORT_DATA_STRUCTS)
4057   print_on_with_msg(st, nullptr);
4058 #endif
4059 }
4060 
4061 #ifndef PRODUCT
4062 
4063 void nmethod::print_calls(outputStream* st) {
4064   RelocIterator iter(this);
4065   while (iter.next()) {
4066     switch (iter.type()) {
4067     case relocInfo::virtual_call_type: {
4068       CompiledICLocker ml_verify(this);
4069       CompiledIC_at(&iter)->print();
4070       break;
4071     }
4072     case relocInfo::static_call_type:
4073     case relocInfo::opt_virtual_call_type:
4074       st->print_cr("Direct call at " INTPTR_FORMAT, p2i(iter.reloc()->addr()));
4075       CompiledDirectCall::at(iter.reloc())->print();
4076       break;
4077     default:
4078       break;
4079     }
4080   }
4081 }
4082 
4083 void nmethod::print_statistics() {
4084   ttyLocker ttyl;
4085   if (xtty != nullptr)  xtty->head("statistics type='nmethod'");
4086   native_nmethod_stats.print_native_nmethod_stats();
4087 #ifdef COMPILER1
4088   c1_java_nmethod_stats.print_nmethod_stats("C1");
4089 #endif
4090 #ifdef COMPILER2
4091   c2_java_nmethod_stats.print_nmethod_stats("C2");
4092 #endif
4093 #if INCLUDE_JVMCI
4094   jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
4095 #endif
4096   unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
4097   DebugInformationRecorder::print_statistics();
4098   pc_nmethod_stats.print_pc_stats();
4099   Dependencies::print_statistics();
4100   ExternalsRecorder::print_statistics();
4101   if (xtty != nullptr)  xtty->tail("statistics");
4102 }
4103 
4104 #endif // !PRODUCT
4105 
4106 #if INCLUDE_JVMCI
4107 void nmethod::update_speculation(JavaThread* thread) {
4108   jlong speculation = thread->pending_failed_speculation();
4109   if (speculation != 0) {
4110     guarantee(jvmci_nmethod_data() != nullptr, "failed speculation in nmethod without failed speculation list");
4111     jvmci_nmethod_data()->add_failed_speculation(this, speculation);
4112     thread->set_pending_failed_speculation(0);
4113   }
4114 }
4115 
4116 const char* nmethod::jvmci_name() {
4117   if (jvmci_nmethod_data() != nullptr) {
4118     return jvmci_nmethod_data()->name();
4119   }
4120   return nullptr;
4121 }
4122 #endif