< prev index next >

src/hotspot/share/runtime/sharedRuntime.cpp

Print this page

  50 #include "metaprogramming/primitiveConversions.hpp"
  51 #include "oops/klass.hpp"
  52 #include "oops/method.inline.hpp"
  53 #include "oops/objArrayKlass.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "prims/forte.hpp"
  56 #include "prims/jvmtiExport.hpp"
  57 #include "prims/jvmtiThreadState.hpp"
  58 #include "prims/methodHandles.hpp"
  59 #include "prims/nativeLookup.hpp"
  60 #include "runtime/arguments.hpp"
  61 #include "runtime/atomic.hpp"
  62 #include "runtime/basicLock.inline.hpp"
  63 #include "runtime/frame.inline.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/init.hpp"
  66 #include "runtime/interfaceSupport.inline.hpp"
  67 #include "runtime/java.hpp"
  68 #include "runtime/javaCalls.hpp"
  69 #include "runtime/jniHandles.inline.hpp"
  70 #include "runtime/perfData.hpp"
  71 #include "runtime/sharedRuntime.hpp"
  72 #include "runtime/stackWatermarkSet.hpp"
  73 #include "runtime/stubRoutines.hpp"
  74 #include "runtime/synchronizer.inline.hpp"
  75 #include "runtime/timerTrace.hpp"
  76 #include "runtime/vframe.inline.hpp"
  77 #include "runtime/vframeArray.hpp"
  78 #include "runtime/vm_version.hpp"

  79 #include "utilities/copy.hpp"
  80 #include "utilities/dtrace.hpp"
  81 #include "utilities/events.hpp"
  82 #include "utilities/globalDefinitions.hpp"
  83 #include "utilities/resourceHash.hpp"
  84 #include "utilities/macros.hpp"
  85 #include "utilities/xmlstream.hpp"
  86 #ifdef COMPILER1
  87 #include "c1/c1_Runtime1.hpp"
  88 #endif
  89 #if INCLUDE_JFR
  90 #include "jfr/jfr.inline.hpp"
  91 #endif
  92 
  93 // Shared runtime stub routines reside in their own unique blob with a
  94 // single entry point
  95 
  96 
// Define one static blob field per shared stub; SHARED_STUBS_DO expands the
// macro once for each (name, type) pair.
  97 #define SHARED_STUB_FIELD_DEFINE(name, type) \
  98   type        SharedRuntime::BLOB_FIELD_NAME(name);
  99   SHARED_STUBS_DO(SHARED_STUB_FIELD_DEFINE)
 100 #undef SHARED_STUB_FIELD_DEFINE
 101 
// The continuation doYield stub is an nmethod, not a blob type covered by
// SHARED_STUBS_DO, so it is defined separately.
 102 nmethod*            SharedRuntime::_cont_doYield_stub;
 103 






// Printable name for each shared stub blob, generated in SHARED_STUBS_DO order
// so indexing matches the field definitions above.
 104 #define SHARED_STUB_NAME_DECLARE(name, type) "Shared Runtime " # name "_blob",
 105 const char *SharedRuntime::_stub_names[] = {
 106   SHARED_STUBS_DO(SHARED_STUB_NAME_DECLARE)
 107 };
 108 
 109 //----------------------------generate_stubs-----------------------------------
 110 void SharedRuntime::generate_initial_stubs() {
 111   // Build this early so it's available for the interpreter.
 112   _throw_StackOverflowError_blob =
 113     generate_throw_exception(SharedStubId::throw_StackOverflowError_id,
 114                              CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
 115 }
 116 
// Generate the remaining shared runtime blobs: the resolve blobs, the
// throw-exception blobs, the safepoint polling handler blobs, and the
// deoptimization blob.
 117 void SharedRuntime::generate_stubs() {
 118   _wrong_method_blob =
 119     generate_resolve_blob(SharedStubId::wrong_method_id,
 120                           CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method));
 121   _wrong_method_abstract_blob =
 122     generate_resolve_blob(SharedStubId::wrong_method_abstract_id,
 123                           CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract));

// NOTE(review): original lines 124-149 are elided from this webrev view.
 150     generate_throw_exception(SharedStubId::throw_NullPointerException_at_call_id,
 151                              CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
 152 
 153 #if COMPILER2_OR_JVMCI
 154   // Vectors are generated only by C2 and JVMCI.
 155   bool support_wide = is_wide_vector(MaxVectorSize);
 156   if (support_wide) {
// Wide vectors need a handler blob that also saves/restores vector registers.
 157     _polling_page_vectors_safepoint_handler_blob =
 158       generate_handler_blob(SharedStubId::polling_page_vectors_safepoint_handler_id,
 159                             CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
 160   }
 161 #endif // COMPILER2_OR_JVMCI
 162   _polling_page_safepoint_handler_blob =
 163     generate_handler_blob(SharedStubId::polling_page_safepoint_handler_id,
 164                           CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
 165   _polling_page_return_handler_blob =
 166     generate_handler_blob(SharedStubId::polling_page_return_handler_id,
 167                           CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
 168 
 169   generate_deopt_blob();












 170 }
 171 
// One-time setup hook for the adapter library; simply forwards to
// AdapterHandlerLibrary::initialize().
 172 void SharedRuntime::init_adapter_library() {
 173   AdapterHandlerLibrary::initialize();
 174 }
 175 







































 176 #if INCLUDE_JFR
 177 //------------------------------generate jfr runtime stubs ------
 178 void SharedRuntime::generate_jfr_stubs() {
 179   ResourceMark rm;
 180   const char* timer_msg = "SharedRuntime generate_jfr_stubs";
 181   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
 182 
 183   _jfr_write_checkpoint_blob = generate_jfr_write_checkpoint();
 184   _jfr_return_lease_blob = generate_jfr_return_lease();
 185 }
 186 
 187 #endif // INCLUDE_JFR
 188 
 189 #include <math.h>
 190 
 191 // Implementation of SharedRuntime
 192 
 193 #ifndef PRODUCT
 194 // For statistics
// Call resolution / IC-miss counters (updated with Atomic::inc).
 195 uint SharedRuntime::_ic_miss_ctr = 0;
 196 uint SharedRuntime::_wrong_method_ctr = 0;
 197 uint SharedRuntime::_resolve_static_ctr = 0;
 198 uint SharedRuntime::_resolve_virtual_ctr = 0;
 199 uint SharedRuntime::_resolve_opt_virtual_ctr = 0;


// Implicit exception counters.
 200 uint SharedRuntime::_implicit_null_throws = 0;
 201 uint SharedRuntime::_implicit_div0_throws = 0;
 202 
// Per-call-kind totals reported by print_call_statistics.
 203 int64_t SharedRuntime::_nof_normal_calls = 0;
 204 int64_t SharedRuntime::_nof_inlined_calls = 0;
 205 int64_t SharedRuntime::_nof_megamorphic_calls = 0;
 206 int64_t SharedRuntime::_nof_static_calls = 0;
 207 int64_t SharedRuntime::_nof_inlined_static_calls = 0;
 208 int64_t SharedRuntime::_nof_interface_calls = 0;
 209 int64_t SharedRuntime::_nof_inlined_interface_calls = 0;
 210 
// JRT_ENTRY counters dumped by print_statistics.
 211 uint SharedRuntime::_new_instance_ctr=0;
 212 uint SharedRuntime::_new_array_ctr=0;
 213 uint SharedRuntime::_multi2_ctr=0;
 214 uint SharedRuntime::_multi3_ctr=0;
 215 uint SharedRuntime::_multi4_ctr=0;
 216 uint SharedRuntime::_multi5_ctr=0;
 217 uint SharedRuntime::_mon_enter_stub_ctr=0;
 218 uint SharedRuntime::_mon_exit_stub_ctr=0;
 219 uint SharedRuntime::_mon_enter_ctr=0;

// NOTE(review): original lines 220-232 are elided from this webrev view.
 233 uint SharedRuntime::_unsafe_set_memory_ctr=0;
 234 
// Fixed-size IC-miss site table used by trace_ic_miss/print_ic_miss_histogram.
 235 int     SharedRuntime::_ICmiss_index                    = 0;
 236 int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
 237 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
 238 
 239 
 240 void SharedRuntime::trace_ic_miss(address at) {
 241   for (int i = 0; i < _ICmiss_index; i++) {
 242     if (_ICmiss_at[i] == at) {
 243       _ICmiss_count[i]++;
 244       return;
 245     }
 246   }
 247   int index = _ICmiss_index++;
 248   if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
 249   _ICmiss_at[index] = at;
 250   _ICmiss_count[index] = 1;
 251 }
 252 
 253 void SharedRuntime::print_ic_miss_histogram() {
 254   if (ICMissHistogram) {
 255     tty->print_cr("IC Miss Histogram:");
 256     int tot_misses = 0;
 257     for (int i = 0; i < _ICmiss_index; i++) {
 258       tty->print_cr("  at: " INTPTR_FORMAT "  nof: %d", p2i(_ICmiss_at[i]), _ICmiss_count[i]);
 259       tot_misses += _ICmiss_count[i];
 260     }
 261     tty->print_cr("Total IC misses: %7d", tot_misses);
 262   }
 263 }
 264 #endif // PRODUCT
 265 
 266 
// Runtime entry for 64-bit multiply: returns x * y. The parameters arrive
// in (y, x) order; for multiplication the order is irrelevant.
 267 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
 268   return x * y;
 269 JRT_END
 270 
 271 
 272 JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
 273   if (x == min_jlong && y == CONST64(-1)) {
 274     return x;
 275   } else {
 276     return x / y;
 277   }
 278 JRT_END
 279 
 280 
 281 JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
 282   if (x == min_jlong && y == CONST64(-1)) {
 283     return 0;
 284   } else {

 553       bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
 554       if (overflow_state->reserved_stack_activation() != current->stack_base()) {
 555         overflow_state->set_reserved_stack_activation(current->stack_base());
 556       }
 557       assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
 558       // The deferred StackWatermarkSet::after_unwind check will be performed in
 559       // Deoptimization::fetch_unroll_info (with exec_mode == Unpack_exception)
 560       return SharedRuntime::deopt_blob()->unpack_with_exception();
 561     } else {
 562       // The deferred StackWatermarkSet::after_unwind check will be performed in
 563       // * OptoRuntime::handle_exception_C_helper for C2 code
 564       // * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
 565       return nm->exception_begin();
 566     }
 567   }
 568 
 569   // Entry code
 570   if (StubRoutines::returns_to_call_stub(return_address)) {
 571     // The deferred StackWatermarkSet::after_unwind check will be performed in
 572     // JavaCallWrapper::~JavaCallWrapper

 573     return StubRoutines::catch_exception_entry();
 574   }
 575   if (blob != nullptr && blob->is_upcall_stub()) {
 576     return StubRoutines::upcall_stub_exception_handler();
 577   }
 578   // Interpreted code
 579   if (Interpreter::contains(return_address)) {
 580     // The deferred StackWatermarkSet::after_unwind check will be performed in
 581     // InterpreterRuntime::exception_handler_for_exception
 582     return Interpreter::rethrow_exception_entry();
 583   }
 584 
 585   guarantee(blob == nullptr || !blob->is_runtime_stub(), "caller should have skipped stub");
 586   guarantee(!VtableStubs::contains(return_address), "null exceptions in vtables should have been handled already!");
 587 
 588 #ifndef PRODUCT
 589   { ResourceMark rm;
 590     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
 591     os::print_location(tty, (intptr_t)return_address);
 592     tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");

 711   jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
 712   JvmtiVTMSTransitionDisabler::VTMS_vthread_unmount(vthread, hide);
 713   JNIHandles::destroy_local(vthread);
 714 JRT_END
 715 #endif // INCLUDE_JVMTI
 716 
 717 // The interpreter code to call this tracing function is only
 718 // called/generated when UL is on for redefine, class and has the right level
 719 // and tags. Since obsolete methods are never compiled, we don't have
 720 // to modify the compilers to generate calls to this function.
 721 //
 722 JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
 723     JavaThread* thread, Method* method))
 724   if (method->is_obsolete()) {
 725     // We are calling an obsolete method, but this is not necessarily
 726     // an error. Our method could have been redefined just after we
 727     // fetched the Method* from the constant pool.
 728     ResourceMark rm;
 729     log_trace(redefine, class, obsolete)("calling obsolete method '%s'", method->name_and_sig_as_C_string());
 730   }












 731   return 0;
 732 JRT_END
 733 
 734 // ret_pc points into caller; we are returning caller's exception handler
 735 // for given exception
 736 // Note that the implementation of this method assumes it's only called when an exception has actually occured
 737 address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
 738                                                     bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) {
 739   assert(nm != nullptr, "must exist");
 740   ResourceMark rm;
 741 
 742 #if INCLUDE_JVMCI
 743   if (nm->is_compiled_by_jvmci()) {
 744     // lookup exception handler for this pc
 745     int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
 746     ExceptionHandlerTable table(nm);
 747     HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
 748     if (t != nullptr) {
 749       return nm->code_begin() + t->pco();
 750     } else {

1350 
1351   // determine call info & receiver
1352   // note: a) receiver is null for static calls
1353   //       b) an exception is thrown if receiver is null for non-static calls
1354   CallInfo call_info;
1355   Bytecodes::Code invoke_code = Bytecodes::_illegal;
1356   Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1357 
1358   NoSafepointVerifier nsv;
1359 
1360   methodHandle callee_method(current, call_info.selected_method());
1361 
1362   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1363          (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1364          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1365          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1366          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1367 
1368   assert(!caller_nm->is_unloading(), "It should not be unloading");
1369 
1370 #ifndef PRODUCT
1371   // tracing/debugging/statistics
1372   uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1373                  (is_virtual) ? (&_resolve_virtual_ctr) :
1374                                 (&_resolve_static_ctr);
1375   Atomic::inc(addr);
1376 

1377   if (TraceCallFixup) {
1378     ResourceMark rm(current);
1379     tty->print("resolving %s%s (%s) call to",
1380                (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1381                Bytecodes::name(invoke_code));
1382     callee_method->print_short_name(tty);
1383     tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1384                   p2i(caller_frame.pc()), p2i(callee_method->code()));
1385   }
1386 #endif
1387 
1388   if (invoke_code == Bytecodes::_invokestatic) {
1389     assert(callee_method->method_holder()->is_initialized() ||
1390            callee_method->method_holder()->is_reentrant_initialization(current),
1391            "invalid class initialization state for invoke_static");
1392     if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1393       // In order to keep class initialization check, do not patch call
1394       // site for static call when the class is not fully initialized.
1395       // Proper check is enforced by call site re-resolution on every invocation.
1396       //

1412 
1413   // Make sure the callee nmethod does not get deoptimized and removed before
1414   // we are done patching the code.
1415 
1416 
1417   CompiledICLocker ml(caller_nm);
1418   if (is_virtual && !is_optimized) {
1419     CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1420     inline_cache->update(&call_info, receiver->klass());
1421   } else {
1422     // Callsite is a direct call - set it to the destination method
1423     CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1424     callsite->set(callee_method);
1425   }
1426 
1427   return callee_method;
1428 }
1429 
1430 // Inline caches exist only in compiled code
1431 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))


1432 #ifdef ASSERT
1433   RegisterMap reg_map(current,
1434                       RegisterMap::UpdateMap::skip,
1435                       RegisterMap::ProcessFrames::include,
1436                       RegisterMap::WalkContinuation::skip);
1437   frame stub_frame = current->last_frame();
1438   assert(stub_frame.is_runtime_frame(), "sanity check");
1439   frame caller_frame = stub_frame.sender(&reg_map);
1440   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1441 #endif /* ASSERT */
1442 
1443   methodHandle callee_method;
1444   JRT_BLOCK
1445     callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1446     // Return Method* through TLS
1447     current->set_vm_result_metadata(callee_method());
1448   JRT_BLOCK_END
1449   // return compiled code entry point after potential safepoints
1450   return get_resolved_entry(current, callee_method);
1451 JRT_END
1452 
1453 
1454 // Handle call site that has been made non-entrant
1455 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))


1456   // 6243940 We might end up in here if the callee is deoptimized
1457   // as we race to call it.  We don't want to take a safepoint if
1458   // the caller was interpreted because the caller frame will look
1459   // interpreted to the stack walkers and arguments are now
1460   // "compiled" so it is much better to make this transition
1461   // invisible to the stack walking code. The i2c path will
1462   // place the callee method in the callee_target. It is stashed
1463   // there because if we try and find the callee by normal means a
1464   // safepoint is possible and have trouble gc'ing the compiled args.
1465   RegisterMap reg_map(current,
1466                       RegisterMap::UpdateMap::skip,
1467                       RegisterMap::ProcessFrames::include,
1468                       RegisterMap::WalkContinuation::skip);
1469   frame stub_frame = current->last_frame();
1470   assert(stub_frame.is_runtime_frame(), "sanity check");
1471   frame caller_frame = stub_frame.sender(&reg_map);
1472 
// Interpreted / entry / upcall callers take the c2i path without safepointing.
1473   if (caller_frame.is_interpreted_frame() ||
1474       caller_frame.is_entry_frame() ||
1475       caller_frame.is_upcall_stub_frame()) {

// NOTE(review): original lines 1476-1487 are elided from this webrev view.
1488       // so bypassing it in c2i adapter is benign.
1489       return callee->get_c2i_no_clinit_check_entry();
1490     } else {
1491       return callee->get_c2i_entry();
1492     }
1493   }
1494 
1495   // Must be compiled to compiled path which is safe to stackwalk
1496   methodHandle callee_method;
1497   JRT_BLOCK
1498     // Force resolving of caller (if we called from compiled frame)
1499     callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1500     current->set_vm_result_metadata(callee_method());
1501   JRT_BLOCK_END
1502   // return compiled code entry point after potential safepoints
1503   return get_resolved_entry(current, callee_method);
1504 JRT_END
1505 
1506 // Handle abstract method call
1507 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))


1508   // Verbose error message for AbstractMethodError.
1509   // Get the called method from the invoke bytecode.
1510   vframeStream vfst(current, true);
1511   assert(!vfst.at_end(), "Java frame must exist");
1512   methodHandle caller(current, vfst.method());
1513   Bytecode_invoke invoke(caller, vfst.bci());
1514   DEBUG_ONLY( invoke.verify(); )
1515 
1516   // Find the compiled caller frame.
1517   RegisterMap reg_map(current,
1518                       RegisterMap::UpdateMap::include,
1519                       RegisterMap::ProcessFrames::include,
1520                       RegisterMap::WalkContinuation::skip);
1521   frame stubFrame = current->last_frame();
1522   assert(stubFrame.is_runtime_frame(), "must be");
1523   frame callerFrame = stubFrame.sender(&reg_map);
1524   assert(callerFrame.is_compiled_frame(), "must be");
1525 
1526   // Install exception and return forward entry.
1527   address res = SharedRuntime::throw_AbstractMethodError_entry();

// NOTE(review): original lines 1528-1533 are elided from this webrev view.
1534       LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1535     }
1536   JRT_BLOCK_END
1537   return res;
1538 JRT_END
1539 
1540 // return verified_code_entry if interp_only_mode is not set for the current thread;
1541 // otherwise return c2i entry.
1542 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method) {
1543   if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1544     // In interp_only_mode we need to go to the interpreted entry
1545     // The c2i won't patch in this mode -- see fixup_callers_callsite
1546     return callee_method->get_c2i_entry();
1547   }
1548   assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1549   return callee_method->verified_code_entry();
1550 }
1551 
1552 // resolve a static call and patch code
1553 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))


1554   methodHandle callee_method;
1555   bool enter_special = false;
1556   JRT_BLOCK
1557     callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1558     current->set_vm_result_metadata(callee_method());
1559   JRT_BLOCK_END
1560   // return compiled code entry point after potential safepoints
1561   return get_resolved_entry(current, callee_method);
1562 JRT_END
1563 
1564 // resolve virtual call and update inline cache to monomorphic
1565 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))


1566   methodHandle callee_method;
1567   JRT_BLOCK
1568     callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
1569     current->set_vm_result_metadata(callee_method());
1570   JRT_BLOCK_END
1571   // return compiled code entry point after potential safepoints
1572   return get_resolved_entry(current, callee_method);
1573 JRT_END
1574 
1575 
1576 // Resolve a virtual call that can be statically bound (e.g., always
1577 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1578 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))


1579   methodHandle callee_method;
1580   JRT_BLOCK
1581     callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
1582     current->set_vm_result_metadata(callee_method());
1583   JRT_BLOCK_END
1584   // return compiled code entry point after potential safepoints
1585   return get_resolved_entry(current, callee_method);
1586 JRT_END
1587 
1588 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1589   JavaThread* current = THREAD;
1590   ResourceMark rm(current);
1591   CallInfo call_info;
1592   Bytecodes::Code bc;
1593 
1594   // receiver is null for static calls. An exception is thrown for null
1595   // receivers for non-static calls
1596   Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1597 
1598   methodHandle callee_method(current, call_info.selected_method());
1599 
1600 #ifndef PRODUCT
1601   Atomic::inc(&_ic_miss_ctr);
1602 

1603   // Statistics & Tracing
1604   if (TraceCallFixup) {
1605     ResourceMark rm(current);
1606     tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1607     callee_method->print_short_name(tty);
1608     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1609   }
1610 
1611   if (ICMissHistogram) {
1612     MutexLocker m(VMStatistic_lock);
1613     RegisterMap reg_map(current,
1614                         RegisterMap::UpdateMap::skip,
1615                         RegisterMap::ProcessFrames::include,
1616                         RegisterMap::WalkContinuation::skip);
1617     frame f = current->last_frame().real_sender(&reg_map);// skip runtime stub
1618     // produce statistics under the lock
1619     trace_ic_miss(f.pc());
1620   }
1621 #endif
1622 

1705             CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1706             cdc->set_to_clean();
1707             break;
1708           }
1709 
1710           case relocInfo::virtual_call_type: {
1711             // compiled, dispatched call (which used to call an interpreted method)
1712             CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1713             inline_cache->set_to_clean();
1714             break;
1715           }
1716           default:
1717             break;
1718         }
1719       }
1720     }
1721   }
1722 
1723   methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1724 
1725 
1726 #ifndef PRODUCT
1727   Atomic::inc(&_wrong_method_ctr);
1728 

1729   if (TraceCallFixup) {
1730     ResourceMark rm(current);
1731     tty->print("handle_wrong_method reresolving call to");
1732     callee_method->print_short_name(tty);
1733     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1734   }
1735 #endif
1736 
1737   return callee_method;
1738 }
1739 
1740 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1741   // The faulting unsafe accesses should be changed to throw the error
1742   // synchronously instead. Meanwhile the faulting instruction will be
1743   // skipped over (effectively turning it into a no-op) and an
1744   // asynchronous exception will be raised which the thread will
1745   // handle at a later point. If the instruction is a load it will
1746   // return garbage.
1747 
1748   // Request an async exception.

2006 // This is only called when CheckJNICalls is true, and only
2007 // for virtual thread termination.
2008 JRT_LEAF(void,  SharedRuntime::log_jni_monitor_still_held())
2009   assert(CheckJNICalls, "Only call this when checking JNI usage");
2010   if (log_is_enabled(Debug, jni)) {
2011     JavaThread* current = JavaThread::current();
2012     int64_t vthread_id = java_lang_Thread::thread_id(current->vthread());
2013     int64_t carrier_id = java_lang_Thread::thread_id(current->threadObj());
2014     log_debug(jni)("VirtualThread (tid: " INT64_FORMAT ", carrier id: " INT64_FORMAT
2015                    ") exiting with Objects still locked by JNI MonitorEnter.",
2016                    vthread_id, carrier_id);
2017   }
2018 JRT_END
2019 
2020 #ifndef PRODUCT
2021 
// Dump all of SharedRuntime's non-product counters to tty, bracketed by a
// 'statistics' head/tail element when xtty output is available. Zero-valued
// optional counters are suppressed; the resolution counters always print.
2022 void SharedRuntime::print_statistics() {
2023   ttyLocker ttyl;
2024   if (xtty != nullptr)  xtty->head("statistics type='SharedRuntime'");
2025 
2026   SharedRuntime::print_ic_miss_histogram();
2027 
2028   // Dump the JRT_ENTRY counters
2029   if (_new_instance_ctr) tty->print_cr("%5u new instance requires GC", _new_instance_ctr);
2030   if (_new_array_ctr) tty->print_cr("%5u new array requires GC", _new_array_ctr);
2031   if (_multi2_ctr) tty->print_cr("%5u multianewarray 2 dim", _multi2_ctr);
2032   if (_multi3_ctr) tty->print_cr("%5u multianewarray 3 dim", _multi3_ctr);
2033   if (_multi4_ctr) tty->print_cr("%5u multianewarray 4 dim", _multi4_ctr);
2034   if (_multi5_ctr) tty->print_cr("%5u multianewarray 5 dim", _multi5_ctr);
2035 
// Call resolution statistics are printed unconditionally.
2036   tty->print_cr("%5u inline cache miss in compiled", _ic_miss_ctr);
2037   tty->print_cr("%5u wrong method", _wrong_method_ctr);
2038   tty->print_cr("%5u unresolved static call site", _resolve_static_ctr);
2039   tty->print_cr("%5u unresolved virtual call site", _resolve_virtual_ctr);
2040   tty->print_cr("%5u unresolved opt virtual call site", _resolve_opt_virtual_ctr);
2041 
2042   if (_mon_enter_stub_ctr) tty->print_cr("%5u monitor enter stub", _mon_enter_stub_ctr);
2043   if (_mon_exit_stub_ctr) tty->print_cr("%5u monitor exit stub", _mon_exit_stub_ctr);
2044   if (_mon_enter_ctr) tty->print_cr("%5u monitor enter slow", _mon_enter_ctr);
2045   if (_mon_exit_ctr) tty->print_cr("%5u monitor exit slow", _mon_exit_ctr);
2046   if (_partial_subtype_ctr) tty->print_cr("%5u slow partial subtype", _partial_subtype_ctr);
2047   if (_jbyte_array_copy_ctr) tty->print_cr("%5u byte array copies", _jbyte_array_copy_ctr);
2048   if (_jshort_array_copy_ctr) tty->print_cr("%5u short array copies", _jshort_array_copy_ctr);
2049   if (_jint_array_copy_ctr) tty->print_cr("%5u int array copies", _jint_array_copy_ctr);
2050   if (_jlong_array_copy_ctr) tty->print_cr("%5u long array copies", _jlong_array_copy_ctr);
2051   if (_oop_array_copy_ctr) tty->print_cr("%5u oop array copies", _oop_array_copy_ctr);
2052   if (_checkcast_array_copy_ctr) tty->print_cr("%5u checkcast array copies", _checkcast_array_copy_ctr);
2053   if (_unsafe_array_copy_ctr) tty->print_cr("%5u unsafe array copies", _unsafe_array_copy_ctr);
2054   if (_generic_array_copy_ctr) tty->print_cr("%5u generic array copies", _generic_array_copy_ctr);
2055   if (_slow_array_copy_ctr) tty->print_cr("%5u slow array copies", _slow_array_copy_ctr);
2056   if (_find_handler_ctr) tty->print_cr("%5u find exception handler", _find_handler_ctr);
2057   if (_rethrow_ctr) tty->print_cr("%5u rethrow handler", _rethrow_ctr);
2058   if (_unsafe_set_memory_ctr) tty->print_cr("%5u unsafe set memorys", _unsafe_set_memory_ctr);
2059 
2060   AdapterHandlerLibrary::print_statistics();
2061 
2062   if (xtty != nullptr)  xtty->tail("statistics");
2063 }
2064 


































// Fraction of x over y expressed as a percentage. The denominator is
// clamped to at least 1, so a zero (or negative) y cannot divide by zero.
inline double percent(int64_t x, int64_t y) {
  const int64_t denom = (y > 1) ? y : 1;
  return 100.0 * (double)x / (double)denom;
}
2068 
2069 class MethodArityHistogram {
2070  public:
2071   enum { MAX_ARITY = 256 };
2072  private:
2073   static uint64_t _arity_histogram[MAX_ARITY]; // histogram of #args
2074   static uint64_t _size_histogram[MAX_ARITY];  // histogram of arg size in words
2075   static uint64_t _total_compiled_calls;
2076   static uint64_t _max_compiled_calls_per_method;
2077   static int _max_arity;                       // max. arity seen
2078   static int _max_size;                        // max. arg size seen
2079 
2080   static void add_method_to_histogram(nmethod* nm) {
2081     Method* method = (nm == nullptr) ? nullptr : nm->method();
2082     if (method != nullptr) {
2083       ArgumentCount args(method->signature());
2084       int arity   = args.size() + (method->is_static() ? 0 : 1);

2129     // Take the Compile_lock to protect against changes in the CodeBlob structures
2130     MutexLocker mu1(Compile_lock, Mutex::_safepoint_check_flag);
2131     // Take the CodeCache_lock to protect against changes in the CodeHeap structure
2132     MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2133     _max_arity = _max_size = 0;
2134     _total_compiled_calls = 0;
2135     _max_compiled_calls_per_method = 0;
2136     for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
2137     CodeCache::nmethods_do(add_method_to_histogram);
2138     print_histogram();
2139   }
2140 };
2141 
// Storage for MethodArityHistogram's accumulated statistics (reset and
// filled in while holding Compile_lock and CodeCache_lock).
2142 uint64_t MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
2143 uint64_t MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
2144 uint64_t MethodArityHistogram::_total_compiled_calls;
2145 uint64_t MethodArityHistogram::_max_compiled_calls_per_method;
2146 int MethodArityHistogram::_max_arity;
2147 int MethodArityHistogram::_max_size;
2148 
2149 void SharedRuntime::print_call_statistics(uint64_t comp_total) {
2150   tty->print_cr("Calls from compiled code:");
2151   int64_t total  = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
2152   int64_t mono_c = _nof_normal_calls - _nof_megamorphic_calls;
2153   int64_t mono_i = _nof_interface_calls;
2154   tty->print_cr("\t" INT64_FORMAT_W(12) " (100%%)  total non-inlined   ", total);
2155   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- virtual calls       ", _nof_normal_calls, percent(_nof_normal_calls, total));
2156   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- inlined          ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
2157   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- monomorphic      ", mono_c, percent(mono_c, _nof_normal_calls));
2158   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- megamorphic      ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
2159   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- interface calls     ", _nof_interface_calls, percent(_nof_interface_calls, total));
2160   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- inlined          ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
2161   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- monomorphic      ", mono_i, percent(mono_i, _nof_interface_calls));
2162   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
2163   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- inlined          ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
2164   tty->cr();
2165   tty->print_cr("Note 1: counter updates are not MT-safe.");
2166   tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2167   tty->print_cr("        %% in nested categories are relative to their category");
2168   tty->print_cr("        (and thus add up to more than 100%% with inlining)");
2169   tty->cr();

2182 // A simple wrapper class around the calling convention information
2183 // that allows sharing of adapters for the same calling convention.
2184 class AdapterFingerPrint : public MetaspaceObj {
2185  private:
2186   enum {
2187     _basic_type_bits = 4,
2188     _basic_type_mask = right_n_bits(_basic_type_bits),
2189     _basic_types_per_int = BitsPerInt / _basic_type_bits,
2190   };
2191   // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
2192   // For now, 4 bits per components (plus T_VOID gaps after double/long) is not excessive.
2193 
2194   int _length;
2195 
  // The packed signature words are allocated inline, immediately after the
  // object header (allocate() reserves compute_size() bytes in one block),
  // so the payload starts at sizeof(AdapterFingerPrint).
2196   static int data_offset() { return sizeof(AdapterFingerPrint); }
  // Returns a pointer to the first packed signature word stored after 'this'.
2197   int* data_pointer() {
2198     return (int*)((address)this + data_offset());
2199   }
2200 
2201   // Private constructor. Use allocate() to get an instance.
2202   AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
2203     int* data = data_pointer();
2204     // Pack the BasicTypes, _basic_types_per_int (8) per 32-bit int
2205     _length = length(total_args_passed);

2206     int sig_index = 0;
2207     for (int index = 0; index < _length; index++) {
       // Each 4-bit code is shifted into the low end, so earlier arguments
       // end up in the higher-order nibbles of each packed word.
2208       int value = 0;
2209       for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
2210         int bt = adapter_encoding(sig_bt[sig_index++]);
2211         assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2212         value = (value << _basic_type_bits) | bt;
2213       }
2214       data[index] = value;
2215     }
2216   }
2217 
2218   // Call deallocate instead
  // The destructor releases the C-heap block itself, so it must only run via
  // deallocate()'s explicit destructor call — never through 'delete'.
2219   ~AdapterFingerPrint() {
2220     FreeHeap(this);
2221   }
2222 
  // Number of 32-bit words needed to pack 'total_args' 4-bit type codes
  // (ceiling division by _basic_types_per_int).
2223   static int length(int total_args) {
2224     return (total_args + (_basic_types_per_int-1)) / _basic_types_per_int;
2225   }
2226 
  // Total allocation size: object header plus the packed signature payload.
  // sig_bt is currently unused here; kept so the signature mirrors allocate().
2227   static int compute_size(int total_args_passed, BasicType* sig_bt) {
2228     int len = length(total_args_passed);
2229     return sizeof(AdapterFingerPrint) + (len * sizeof(int));
2230   }
2231 
2232   // Remap BasicTypes that are handled equivalently by the adapters.
2233   // These are correct for the current system but someday it might be
2234   // necessary to make this mapping platform dependent.
2235   static int adapter_encoding(BasicType in) {
2236     switch (in) {
2237       case T_BOOLEAN:
2238       case T_BYTE:
2239       case T_SHORT:
2240       case T_CHAR:
2241         // There are all promoted to T_INT in the calling convention
2242         return T_INT;
2243 
2244       case T_OBJECT:
2245       case T_ARRAY:
2246         // In other words, we assume that any register good enough for
2247         // an int or long is good enough for a managed pointer.
2248 #ifdef _LP64
2249         return T_LONG;

2272   }
2273 
2274   template<typename Function>
2275   void iterate_args(Function function) {
2276     for (int i = 0; i < length(); i++) {
2277       unsigned val = (unsigned)value(i);
2278       // args are packed so that first/lower arguments are in the highest
2279       // bits of each int value, so iterate from highest to the lowest
2280       for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2281         unsigned v = (val >> j) & _basic_type_mask;
2282         if (v == 0) {
2283           continue;
2284         }
2285         function(v);
2286       }
2287     }
2288   }
2289 
2290  public:
  // Factory: allocates one C-heap block sized for the header plus the packed
  // signature words, then placement-constructs the fingerprint into it.
2291   static AdapterFingerPrint* allocate(int total_args_passed, BasicType* sig_bt) {
2292     int size_in_bytes = compute_size(total_args_passed, sig_bt);
2293     return new (size_in_bytes) AdapterFingerPrint(total_args_passed, sig_bt);



2294   }
2295 
  // Explicit destruction; the destructor frees the underlying heap block.
2296   static void deallocate(AdapterFingerPrint* fp) {
2297     fp->~AdapterFingerPrint();
2298   }
2299 
  // Returns the index'th packed signature word.
2300   int value(int index) {
2301     int* data = data_pointer();
2302     return data[index];
2303   }
2304 
  // Number of packed words (not the number of arguments).
2305   int length() {
2306     return _length;
2307   }
2308 
  // Hash over all packed words; must be consistent with equals(): equal
  // fingerprints (same _length, same words) produce the same hash.
2309   unsigned int compute_hash() {
2310     int hash = 0;
2311     for (int i = 0; i < length(); i++) {
2312       int v = value(i);
       // Shift-xor mix, plus a small additive constant (+3) to improve
       // distribution of the resulting hash values.
2314       hash = ((hash << 8) ^ v ^ (hash >> 5)) + 3;
2315     }
2316     return (unsigned int)hash;
2317   }

2401     }
2402 #endif
2403     return sig_bt;
2404   }
2405 
2406   bool equals(AdapterFingerPrint* other) {
2407     if (other->_length != _length) {
2408       return false;
2409     } else {
2410       for (int i = 0; i < _length; i++) {
2411         if (value(i) != other->value(i)) {
2412           return false;
2413         }
2414       }
2415     }
2416     return true;
2417   }
2418 
2419   // methods required by virtue of being a MetaspaceObj
  // No embedded metaspace pointers to visit: the payload is plain ints.
2420   void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
  // Size in heap words of header plus packed payload (matches compute_size()).
2421   int size() const { return (int)heap_word_size(sizeof(AdapterFingerPrint) + (_length * sizeof(int))); }
2422   MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2423 
  // Hashtable adapter: equality on pointers-to-fingerprint keys. The
  // NOT_PRODUCT counter feeds the statistics printed by print_table_statistics().
2424   static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2425     NOT_PRODUCT(_equals++);
2426     return fp1->equals(fp2);
2427   }
2428 
  // Hashtable adapter: hash of a pointer-to-fingerprint key.
2429   static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2430     return fp->compute_hash();
2431   }
2432 };
2433 
2434 #if INCLUDE_CDS
  // Equality callback for the CDS compact hashtable: compares the stored
  // entry's fingerprint against the lookup key. The length parameter required
  // by the compact-hashtable interface is unused (hence 'len_unused').
2435 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2436   return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2437 }
2438 
2439 class ArchivedAdapterTable : public OffsetCompactHashtable<
2440   AdapterFingerPrint*,
2441   AdapterHandlerEntry*,

2470   }
2471 #endif // INCLUDE_CDS
2472   if (entry == nullptr) {
2473     assert_lock_strong(AdapterHandlerLibrary_lock);
2474     AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2475     if (entry_p != nullptr) {
2476       entry = *entry_p;
2477       assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2478              entry->fingerprint()->as_basic_args_string(), entry->fingerprint()->as_string(), entry->fingerprint()->compute_hash(),
2479              fp->as_basic_args_string(), fp->as_string(), fp->compute_hash());
2480   #ifndef PRODUCT
2481       _runtime_hits++;
2482   #endif
2483     }
2484   }
2485   AdapterFingerPrint::deallocate(fp);
2486   return entry;
2487 }
2488 
2489 #ifndef PRODUCT
// Non-product diagnostics: dump size/occupancy statistics for the runtime
// adapter handler table plus the lookup/hit counters maintained above.
2490 static void print_table_statistics() {
  // Per-entry footprint estimate handed to statistics_calculate(); counts only
  // the two fixed-size structs, not the fingerprints' variable payload.
2491   auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2492     return sizeof(*key) + sizeof(*a);
2493   };
2494   TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2495   ts.print(tty, "AdapterHandlerTable");
2496   tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2497                 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
  // Hits are split between AOT-archived entries and runtime-created ones.
2498   int total_hits = _archived_hits + _runtime_hits;
2499   tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2500                 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2501 }
2502 #endif
2503 
2504 // ---------------------------------------------------------------------------
2505 // Implementation of AdapterHandlerLibrary
// Pre-built shared adapters for the most common signatures; is_abstract_method_adapter()
// relies on the identity of _abstract_method_handler.
2506 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
2507 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2508 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2509 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2510 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2511 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2512 #if INCLUDE_CDS
// Adapters archived in the AOT/CDS image, consulted before the runtime table.
2513 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2514 #endif // INCLUDE_CDS
// Scratch buffer used while generating adapter code.
2515 static const int AdapterHandlerLibrary_size = 16*K;
2516 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2517 
// Accessor for the shared adapter-generation scratch buffer; initialize()
// must have run first (asserted below).
2518 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2519   assert(_buffer != nullptr, "should be initialized");
2520   return _buffer;
2521 }
2522 

3434   assert(found, "Should have found handler");
3435 }
3436 
// Prints this adapter entry: its address, fingerprint, and whichever of the
// four entry points (i2c, c2i, c2i-unverified, c2i-no-clinit-check) are set.
3437 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3438   st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3439   if (get_i2c_entry() != nullptr) {
3440     st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3441   }
3442   if (get_c2i_entry() != nullptr) {
3443     st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3444   }
3445   if (get_c2i_unverified_entry() != nullptr) {
3446     st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3447   }
3448   if (get_c2i_no_clinit_check_entry() != nullptr) {
3449     st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3450   }
3451   st->cr();
3452 }
3453 
3454 #ifndef PRODUCT
3455 
// Non-product: forwards to the file-local print_table_statistics() helper.
3456 void AdapterHandlerLibrary::print_statistics() {
3457   print_table_statistics();
3458 }
3459 
3460 #endif /* PRODUCT */
3461 
3462 bool AdapterHandlerLibrary::is_abstract_method_adapter(AdapterHandlerEntry* entry) {
3463   if (entry == _abstract_method_handler) {
3464     return true;
3465   }
3466   return false;
3467 }
3468 
// Re-enables the current thread's reserved stack zone (after a
// StackOverflowError unwound out of a @ReservedStackAccess region) and resets
// the reserved-stack activation watermark to the stack base.
3469 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3470   assert(current == JavaThread::current(), "pre-condition");
3471   StackOverflow* overflow_state = current->stack_overflow_state();
  // check_if_disabled=true: only re-enable if the zone was actually disabled.
3472   overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3473   overflow_state->set_reserved_stack_activation(current->stack_base());
3474 JRT_END
3475 
3476 frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) {
3477   ResourceMark rm(current);
3478   frame activation;
3479   nmethod* nm = nullptr;
3480   int count = 1;
3481 

  50 #include "metaprogramming/primitiveConversions.hpp"
  51 #include "oops/klass.hpp"
  52 #include "oops/method.inline.hpp"
  53 #include "oops/objArrayKlass.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "prims/forte.hpp"
  56 #include "prims/jvmtiExport.hpp"
  57 #include "prims/jvmtiThreadState.hpp"
  58 #include "prims/methodHandles.hpp"
  59 #include "prims/nativeLookup.hpp"
  60 #include "runtime/arguments.hpp"
  61 #include "runtime/atomic.hpp"
  62 #include "runtime/basicLock.inline.hpp"
  63 #include "runtime/frame.inline.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/init.hpp"
  66 #include "runtime/interfaceSupport.inline.hpp"
  67 #include "runtime/java.hpp"
  68 #include "runtime/javaCalls.hpp"
  69 #include "runtime/jniHandles.inline.hpp"
  70 #include "runtime/perfData.inline.hpp"
  71 #include "runtime/sharedRuntime.hpp"
  72 #include "runtime/stackWatermarkSet.hpp"
  73 #include "runtime/stubRoutines.hpp"
  74 #include "runtime/synchronizer.inline.hpp"
  75 #include "runtime/timerTrace.hpp"
  76 #include "runtime/vframe.inline.hpp"
  77 #include "runtime/vframeArray.hpp"
  78 #include "runtime/vm_version.hpp"
  79 #include "services/management.hpp"
  80 #include "utilities/copy.hpp"
  81 #include "utilities/dtrace.hpp"
  82 #include "utilities/events.hpp"
  83 #include "utilities/globalDefinitions.hpp"
  84 #include "utilities/resourceHash.hpp"
  85 #include "utilities/macros.hpp"
  86 #include "utilities/xmlstream.hpp"
  87 #ifdef COMPILER1
  88 #include "c1/c1_Runtime1.hpp"
  89 #endif
  90 #if INCLUDE_JFR
  91 #include "jfr/jfr.inline.hpp"
  92 #endif
  93 
  94 // Shared runtime stub routines reside in their own unique blob with a
  95 // single entry point
  96 
  97 
  98 #define SHARED_STUB_FIELD_DEFINE(name, type) \
  99   type        SharedRuntime::BLOB_FIELD_NAME(name);
 100   SHARED_STUBS_DO(SHARED_STUB_FIELD_DEFINE)
 101 #undef SHARED_STUB_FIELD_DEFINE
 102 
 103 nmethod*            SharedRuntime::_cont_doYield_stub;
 104 
 105 PerfTickCounters* SharedRuntime::_perf_resolve_opt_virtual_total_time = nullptr;
 106 PerfTickCounters* SharedRuntime::_perf_resolve_virtual_total_time     = nullptr;
 107 PerfTickCounters* SharedRuntime::_perf_resolve_static_total_time      = nullptr;
 108 PerfTickCounters* SharedRuntime::_perf_handle_wrong_method_total_time = nullptr;
 109 PerfTickCounters* SharedRuntime::_perf_ic_miss_total_time             = nullptr;
 110 
 111 #define SHARED_STUB_NAME_DECLARE(name, type) "Shared Runtime " # name "_blob",
 112 const char *SharedRuntime::_stub_names[] = {
 113   SHARED_STUBS_DO(SHARED_STUB_NAME_DECLARE)
 114 };
 115 
 116 //----------------------------generate_stubs-----------------------------------
// Generates only the StackOverflowError throw stub, which must exist before
// the interpreter is built; the remaining stubs come from generate_stubs().
 117 void SharedRuntime::generate_initial_stubs() {
 118   // Build this early so it's available for the interpreter.
 119   _throw_StackOverflowError_blob =
 120     generate_throw_exception(SharedStubId::throw_StackOverflowError_id,
 121                              CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
 122 }
 123 
 124 void SharedRuntime::generate_stubs() {
 125   _wrong_method_blob =
 126     generate_resolve_blob(SharedStubId::wrong_method_id,
 127                           CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method));
 128   _wrong_method_abstract_blob =
 129     generate_resolve_blob(SharedStubId::wrong_method_abstract_id,
 130                           CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract));

 157     generate_throw_exception(SharedStubId::throw_NullPointerException_at_call_id,
 158                              CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
 159 
 160 #if COMPILER2_OR_JVMCI
 161   // Vectors are generated only by C2 and JVMCI.
 162   bool support_wide = is_wide_vector(MaxVectorSize);
 163   if (support_wide) {
 164     _polling_page_vectors_safepoint_handler_blob =
 165       generate_handler_blob(SharedStubId::polling_page_vectors_safepoint_handler_id,
 166                             CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
 167   }
 168 #endif // COMPILER2_OR_JVMCI
 169   _polling_page_safepoint_handler_blob =
 170     generate_handler_blob(SharedStubId::polling_page_safepoint_handler_id,
 171                           CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
 172   _polling_page_return_handler_blob =
 173     generate_handler_blob(SharedStubId::polling_page_return_handler_id,
 174                           CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
 175 
 176   generate_deopt_blob();
 177 
 178   if (UsePerfData) {
 179     EXCEPTION_MARK;
 180     NEWPERFTICKCOUNTERS(_perf_resolve_opt_virtual_total_time, SUN_CI, "resovle_opt_virtual_call");
 181     NEWPERFTICKCOUNTERS(_perf_resolve_virtual_total_time,     SUN_CI, "resovle_virtual_call");
 182     NEWPERFTICKCOUNTERS(_perf_resolve_static_total_time,      SUN_CI, "resovle_static_call");
 183     NEWPERFTICKCOUNTERS(_perf_handle_wrong_method_total_time, SUN_CI, "handle_wrong_method");
 184     NEWPERFTICKCOUNTERS(_perf_ic_miss_total_time ,            SUN_CI, "ic_miss");
 185     if (HAS_PENDING_EXCEPTION) {
 186       vm_exit_during_initialization("SharedRuntime::generate_stubs() failed unexpectedly");
 187     }
 188   }
 189 }
 190 
// Thin forwarding entry point used during VM startup to initialize the
// adapter handler library (pre-built handlers, tables, scratch buffer).
 191 void SharedRuntime::init_adapter_library() {
 192   AdapterHandlerLibrary::initialize();
 193 }
 194 
// Prints one perf counter line: name, elapsed microseconds, optionally the
// thread-time microseconds (when TraceThreadTime is set), and the event count.
 195 static void print_counter_on(outputStream* st, const char* name, PerfTickCounters* counter, uint cnt) {
 196   st->print("  %-28s " JLONG_FORMAT_W(6) "us", name, counter->elapsed_counter_value_us());
 197   if (TraceThreadTime) {
 198     st->print(" (elapsed) " JLONG_FORMAT_W(6) "us (thread)", counter->thread_counter_value_us());
 199   }
 200   st->print(" / %5d events", cnt);
 201   st->cr();
 202 }
 203 
// Prints the five call-resolution/IC-miss perf counters plus their aggregate.
// Requires UsePerfData (the counters are only created then, see generate_stubs).
 204 void SharedRuntime::print_counters_on(outputStream* st) {
 205   st->print_cr("SharedRuntime:");
 206   if (UsePerfData) {
 207     print_counter_on(st, "resolve_opt_virtual_call:", _perf_resolve_opt_virtual_total_time, _resolve_opt_virtual_ctr);
 208     print_counter_on(st, "resolve_virtual_call:",     _perf_resolve_virtual_total_time,     _resolve_virtual_ctr);
 209     print_counter_on(st, "resolve_static_call:",      _perf_resolve_static_total_time,      _resolve_static_ctr);
 210     print_counter_on(st, "handle_wrong_method:",      _perf_handle_wrong_method_total_time, _wrong_method_ctr);
 211     print_counter_on(st, "ic_miss:",                  _perf_ic_miss_total_time,             _ic_miss_ctr);
 212 
    // Sum raw ticks across all five counters, then convert once to us.
 213     jlong total_elapsed_time_us = Management::ticks_to_us(_perf_resolve_opt_virtual_total_time->elapsed_counter_value() +
 214                                                           _perf_resolve_virtual_total_time->elapsed_counter_value() +
 215                                                           _perf_resolve_static_total_time->elapsed_counter_value() +
 216                                                           _perf_handle_wrong_method_total_time->elapsed_counter_value() +
 217                                                           _perf_ic_miss_total_time->elapsed_counter_value());
 218     st->print("Total:                      " JLONG_FORMAT_W(5) "us", total_elapsed_time_us);
 219     if (TraceThreadTime) {
 220       jlong total_thread_time_us = Management::ticks_to_us(_perf_resolve_opt_virtual_total_time->thread_counter_value() +
 221                                                            _perf_resolve_virtual_total_time->thread_counter_value() +
 222                                                            _perf_resolve_static_total_time->thread_counter_value() +
 223                                                            _perf_handle_wrong_method_total_time->thread_counter_value() +
 224                                                            _perf_ic_miss_total_time->thread_counter_value());
 225       st->print(" (elapsed) " JLONG_FORMAT_W(5) "us (thread)", total_thread_time_us);
 226 
 227     }
 228     st->cr();
 229   } else {
 230     st->print_cr("  no data (UsePerfData is turned off)");
 231   }
 232 }
 233 
 234 #if INCLUDE_JFR
 235 //------------------------------generate jfr runtime stubs ------
// Generates the two JFR runtime stub blobs (checkpoint write and lease
// return), timing the work under the startuptime log tag.
 236 void SharedRuntime::generate_jfr_stubs() {
 237   ResourceMark rm;
 238   const char* timer_msg = "SharedRuntime generate_jfr_stubs";
 239   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
 240 
 241   _jfr_write_checkpoint_blob = generate_jfr_write_checkpoint();
 242   _jfr_return_lease_blob = generate_jfr_return_lease();
 243 }
 244 
 245 #endif // INCLUDE_JFR
 246 
 247 #include <math.h>
 248 
 249 // Implementation of SharedRuntime
 250 

 251 // For statistics
 252 uint SharedRuntime::_ic_miss_ctr = 0;
 253 uint SharedRuntime::_wrong_method_ctr = 0;
 254 uint SharedRuntime::_resolve_static_ctr = 0;
 255 uint SharedRuntime::_resolve_virtual_ctr = 0;
 256 uint SharedRuntime::_resolve_opt_virtual_ctr = 0;
 257 
 258 #ifndef PRODUCT
 259 uint SharedRuntime::_implicit_null_throws = 0;
 260 uint SharedRuntime::_implicit_div0_throws = 0;
 261 
 262 int64_t SharedRuntime::_nof_normal_calls = 0;
 263 int64_t SharedRuntime::_nof_inlined_calls = 0;
 264 int64_t SharedRuntime::_nof_megamorphic_calls = 0;
 265 int64_t SharedRuntime::_nof_static_calls = 0;
 266 int64_t SharedRuntime::_nof_inlined_static_calls = 0;
 267 int64_t SharedRuntime::_nof_interface_calls = 0;
 268 int64_t SharedRuntime::_nof_inlined_interface_calls = 0;
 269 
 270 uint SharedRuntime::_new_instance_ctr=0;
 271 uint SharedRuntime::_new_array_ctr=0;
 272 uint SharedRuntime::_multi2_ctr=0;
 273 uint SharedRuntime::_multi3_ctr=0;
 274 uint SharedRuntime::_multi4_ctr=0;
 275 uint SharedRuntime::_multi5_ctr=0;
 276 uint SharedRuntime::_mon_enter_stub_ctr=0;
 277 uint SharedRuntime::_mon_exit_stub_ctr=0;
 278 uint SharedRuntime::_mon_enter_ctr=0;

 292 uint SharedRuntime::_unsafe_set_memory_ctr=0;
 293 
 294 int     SharedRuntime::_ICmiss_index                    = 0;
 295 int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
 296 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
 297 
 298 
// Records an inline-cache miss at pc 'at' for the IC-miss histogram:
// increments the existing slot if 'at' was seen before, otherwise appends a
// new slot. Not MT-safe (see the histogram notes elsewhere in this file).
 299 void SharedRuntime::trace_ic_miss(address at) {
 300   for (int i = 0; i < _ICmiss_index; i++) {
 301     if (_ICmiss_at[i] == at) {
 302       _ICmiss_count[i]++;
 303       return;
 304     }
 305   }
 306   int index = _ICmiss_index++;
  // Saturate at the table bound: once full, new addresses keep overwriting
  // the last slot instead of growing the arrays.
 307   if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
 308   _ICmiss_at[index] = at;
 309   _ICmiss_count[index] = 1;
 310 }
 311 
// Prints the per-pc IC-miss histogram collected by trace_ic_miss(), plus the
// total; a no-op unless -XX:+ICMissHistogram was specified.
 312 void SharedRuntime::print_ic_miss_histogram_on(outputStream* st) {
 313   if (ICMissHistogram) {
 314     st->print_cr("IC Miss Histogram:");
 315     int tot_misses = 0;
 316     for (int i = 0; i < _ICmiss_index; i++) {
 317       st->print_cr("  at: " INTPTR_FORMAT "  nof: %d", p2i(_ICmiss_at[i]), _ICmiss_count[i]);
 318       tot_misses += _ICmiss_count[i];
 319     }
 320     st->print_cr("Total IC misses: %7d", tot_misses);
 321   }
 322 }
 323 #endif // !PRODUCT
 324 
 325 
// 64-bit multiply runtime helper for platforms without a native long multiply.
// Parameters arrive in (y, x) order — presumably dictated by how the calling
// stub pushes arguments; harmless here since multiplication is commutative.
 326 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
 327   return x * y;
 328 JRT_END
 329 
 330 
// 64-bit divide runtime helper: computes x / y (note reversed parameter
// order). The min_jlong / -1 case would overflow (and traps on some CPUs),
// so it is special-cased to return min_jlong as the JLS requires.
 331 JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
 332   if (x == min_jlong && y == CONST64(-1)) {
 333     return x;
 334   } else {
 335     return x / y;
 336   }
 337 JRT_END
 338 
 339 
 340 JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
 341   if (x == min_jlong && y == CONST64(-1)) {
 342     return 0;
 343   } else {

 612       bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
 613       if (overflow_state->reserved_stack_activation() != current->stack_base()) {
 614         overflow_state->set_reserved_stack_activation(current->stack_base());
 615       }
 616       assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
 617       // The deferred StackWatermarkSet::after_unwind check will be performed in
 618       // Deoptimization::fetch_unroll_info (with exec_mode == Unpack_exception)
 619       return SharedRuntime::deopt_blob()->unpack_with_exception();
 620     } else {
 621       // The deferred StackWatermarkSet::after_unwind check will be performed in
 622       // * OptoRuntime::handle_exception_C_helper for C2 code
 623       // * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
 624       return nm->exception_begin();
 625     }
 626   }
 627 
 628   // Entry code
 629   if (StubRoutines::returns_to_call_stub(return_address)) {
 630     // The deferred StackWatermarkSet::after_unwind check will be performed in
 631     // JavaCallWrapper::~JavaCallWrapper
 632     assert (StubRoutines::catch_exception_entry() != nullptr, "must be generated before");
 633     return StubRoutines::catch_exception_entry();
 634   }
 635   if (blob != nullptr && blob->is_upcall_stub()) {
 636     return StubRoutines::upcall_stub_exception_handler();
 637   }
 638   // Interpreted code
 639   if (Interpreter::contains(return_address)) {
 640     // The deferred StackWatermarkSet::after_unwind check will be performed in
 641     // InterpreterRuntime::exception_handler_for_exception
 642     return Interpreter::rethrow_exception_entry();
 643   }
 644 
 645   guarantee(blob == nullptr || !blob->is_runtime_stub(), "caller should have skipped stub");
 646   guarantee(!VtableStubs::contains(return_address), "null exceptions in vtables should have been handled already!");
 647 
 648 #ifndef PRODUCT
 649   { ResourceMark rm;
 650     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
 651     os::print_location(tty, (intptr_t)return_address);
 652     tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");

 771   jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
 772   JvmtiVTMSTransitionDisabler::VTMS_vthread_unmount(vthread, hide);
 773   JNIHandles::destroy_local(vthread);
 774 JRT_END
 775 #endif // INCLUDE_JVMTI
 776 
 777 // The interpreter code to call this tracing function is only
 778 // called/generated when UL is on for redefine, class and has the right level
 779 // and tags. Since obsolete methods are never compiled, we don't have
 780 // to modify the compilers to generate calls to this function.
 781 //
// Interpreter-invoked tracing hook for RedefineClasses: logs a warning when
// an obsolete (redefined) method is entered, and optionally logs every method
// entry under the interpreter+bytecode trace tags. Returns 0 (ignored).
 782 JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
 783     JavaThread* thread, Method* method))
 784   if (method->is_obsolete()) {
 785     // We are calling an obsolete method, but this is not necessarily
 786     // an error. Our method could have been redefined just after we
 787     // fetched the Method* from the constant pool.
 788     ResourceMark rm;
 789     log_trace(redefine, class, obsolete)("calling obsolete method '%s'", method->name_and_sig_as_C_string())
;
 790   }
 791 
 792   LogStreamHandle(Trace, interpreter, bytecode) log;
 793   if (log.is_enabled()) {
 794     ResourceMark rm;
 795     log.print("method entry: " INTPTR_FORMAT " %s %s%s%s%s",
 796               p2i(thread),
 797               (method->is_static() ? "static" : "virtual"),
 798               method->name_and_sig_as_C_string(),
 799               (method->is_native() ? " native" : ""),
 800               (thread->class_being_initialized() != nullptr ? " clinit" : ""),
 801               (method->method_holder()->is_initialized() ? "" : " being_initialized"));
 802   }
 803   return 0;
 804 JRT_END
 805 
 806 // ret_pc points into caller; we are returning caller's exception handler
 807 // for given exception
 808 // Note that the implementation of this method assumes it's only called when an exception has actually occured
 809 address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
 810                                                     bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) {
 811   assert(nm != nullptr, "must exist");
 812   ResourceMark rm;
 813 
 814 #if INCLUDE_JVMCI
 815   if (nm->is_compiled_by_jvmci()) {
 816     // lookup exception handler for this pc
 817     int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
 818     ExceptionHandlerTable table(nm);
 819     HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
 820     if (t != nullptr) {
 821       return nm->code_begin() + t->pco();
 822     } else {

1422 
1423   // determine call info & receiver
1424   // note: a) receiver is null for static calls
1425   //       b) an exception is thrown if receiver is null for non-static calls
1426   CallInfo call_info;
1427   Bytecodes::Code invoke_code = Bytecodes::_illegal;
1428   Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1429 
1430   NoSafepointVerifier nsv;
1431 
1432   methodHandle callee_method(current, call_info.selected_method());
1433 
1434   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1435          (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1436          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1437          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1438          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1439 
1440   assert(!caller_nm->is_unloading(), "It should not be unloading");
1441 

1442   // tracing/debugging/statistics
1443   uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1444                  (is_virtual) ? (&_resolve_virtual_ctr) :
1445                                 (&_resolve_static_ctr);
1446   Atomic::inc(addr);
1447 
1448 #ifndef PRODUCT
1449   if (TraceCallFixup) {
1450     ResourceMark rm(current);
1451     tty->print("resolving %s%s (%s) call to",
1452                (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1453                Bytecodes::name(invoke_code));
1454     callee_method->print_short_name(tty);
1455     tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1456                   p2i(caller_frame.pc()), p2i(callee_method->code()));
1457   }
1458 #endif
1459 
1460   if (invoke_code == Bytecodes::_invokestatic) {
1461     assert(callee_method->method_holder()->is_initialized() ||
1462            callee_method->method_holder()->is_reentrant_initialization(current),
1463            "invalid class initialization state for invoke_static");
1464     if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1465       // In order to keep class initialization check, do not patch call
1466       // site for static call when the class is not fully initialized.
1467       // Proper check is enforced by call site re-resolution on every invocation.
1468       //

1484 
1485   // Make sure the callee nmethod does not get deoptimized and removed before
1486   // we are done patching the code.
1487 
1488 
1489   CompiledICLocker ml(caller_nm);
1490   if (is_virtual && !is_optimized) {
1491     CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1492     inline_cache->update(&call_info, receiver->klass());
1493   } else {
1494     // Callsite is a direct call - set it to the destination method
1495     CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1496     callsite->set(callee_method);
1497   }
1498 
1499   return callee_method;
1500 }
1501 
1502 // Inline caches exist only in compiled code
// Runtime entry reached on an inline-cache miss from compiled code: resolves
// the real callee via handle_ic_miss_helper, publishes the Method* through
// thread-local storage, and returns the compiled entry point to jump to.
1503 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1504   PerfTraceTime timer(_perf_ic_miss_total_time);
1505 
1506 #ifdef ASSERT
  // Debug-only sanity check: we must have been called from a compiled frame
  // through the IC-miss runtime stub (not interpreter/entry/upcall frames).
1507   RegisterMap reg_map(current,
1508                       RegisterMap::UpdateMap::skip,
1509                       RegisterMap::ProcessFrames::include,
1510                       RegisterMap::WalkContinuation::skip);
1511   frame stub_frame = current->last_frame();
1512   assert(stub_frame.is_runtime_frame(), "sanity check");
1513   frame caller_frame = stub_frame.sender(&reg_map);
1514   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1515 #endif /* ASSERT */
1516 
1517   methodHandle callee_method;
1518   JRT_BLOCK
1519     callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1520     // Return Method* through TLS
1521     current->set_vm_result_metadata(callee_method());
1522   JRT_BLOCK_END
1523   // return compiled code entry point after potential safepoints
1524   return get_resolved_entry(current, callee_method);
1525 JRT_END
1526 
1527 
1528 // Handle call site that has been made non-entrant
1529 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1530   PerfTraceTime timer(_perf_handle_wrong_method_total_time);
1531 
1532   // 6243940 We might end up in here if the callee is deoptimized
1533   // as we race to call it.  We don't want to take a safepoint if
1534   // the caller was interpreted because the caller frame will look
1535   // interpreted to the stack walkers and arguments are now
1536   // "compiled" so it is much better to make this transition
1537   // invisible to the stack walking code. The i2c path will
1538   // place the callee method in the callee_target. It is stashed
1539   // there because if we try and find the callee by normal means a
1540   // safepoint is possible and have trouble gc'ing the compiled args.
1541   RegisterMap reg_map(current,
1542                       RegisterMap::UpdateMap::skip,
1543                       RegisterMap::ProcessFrames::include,
1544                       RegisterMap::WalkContinuation::skip);
1545   frame stub_frame = current->last_frame();
1546   assert(stub_frame.is_runtime_frame(), "sanity check");
1547   frame caller_frame = stub_frame.sender(&reg_map);
1548 
1549   if (caller_frame.is_interpreted_frame() ||
1550       caller_frame.is_entry_frame() ||
1551       caller_frame.is_upcall_stub_frame()) {

1564       // so bypassing it in c2i adapter is benign.
1565       return callee->get_c2i_no_clinit_check_entry();
1566     } else {
1567       return callee->get_c2i_entry();
1568     }
1569   }
1570 
1571   // Must be compiled to compiled path which is safe to stackwalk
1572   methodHandle callee_method;
1573   JRT_BLOCK
1574     // Force resolving of caller (if we called from compiled frame)
1575     callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1576     current->set_vm_result_metadata(callee_method());
1577   JRT_BLOCK_END
1578   // return compiled code entry point after potential safepoints
1579   return get_resolved_entry(current, callee_method);
1580 JRT_END
1581 
1582 // Handle abstract method call
1583 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1584   PerfTraceTime timer(_perf_handle_wrong_method_total_time);
1585 
1586   // Verbose error message for AbstractMethodError.
1587   // Get the called method from the invoke bytecode.
1588   vframeStream vfst(current, true);
1589   assert(!vfst.at_end(), "Java frame must exist");
1590   methodHandle caller(current, vfst.method());
1591   Bytecode_invoke invoke(caller, vfst.bci());
1592   DEBUG_ONLY( invoke.verify(); )
1593 
1594   // Find the compiled caller frame.
1595   RegisterMap reg_map(current,
1596                       RegisterMap::UpdateMap::include,
1597                       RegisterMap::ProcessFrames::include,
1598                       RegisterMap::WalkContinuation::skip);
1599   frame stubFrame = current->last_frame();
1600   assert(stubFrame.is_runtime_frame(), "must be");
1601   frame callerFrame = stubFrame.sender(&reg_map);
1602   assert(callerFrame.is_compiled_frame(), "must be");
1603 
1604   // Install exception and return forward entry.
1605   address res = SharedRuntime::throw_AbstractMethodError_entry();

1612       LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1613     }
1614   JRT_BLOCK_END
1615   return res;
1616 JRT_END
1617 
1618 // return verified_code_entry if interp_only_mode is not set for the current thread;
1619 // otherwise return c2i entry.
1620 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method) {
1621   if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1622     // In interp_only_mode we need to go to the interpreted entry
1623     // The c2i won't patch in this mode -- see fixup_callers_callsite
1624     return callee_method->get_c2i_entry();
1625   }
1626   assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1627   return callee_method->verified_code_entry();
1628 }
1629 
1630 // resolve a static call and patch code
1631 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1632   PerfTraceTime timer(_perf_resolve_static_total_time);
1633 
1634   methodHandle callee_method;
1635   bool enter_special = false;
1636   JRT_BLOCK
1637     callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1638     current->set_vm_result_metadata(callee_method());
1639   JRT_BLOCK_END
1640   // return compiled code entry point after potential safepoints
1641   return get_resolved_entry(current, callee_method);
1642 JRT_END
1643 
// resolve virtual call and update inline cache to monomorphic
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
  PerfTraceTime timer(_perf_resolve_virtual_total_time);

  methodHandle callee_method;
  JRT_BLOCK
    // Resolution may safepoint / run Java; CHECK_NULL returns nullptr to the
    // compiled caller if an exception is pending. Flags mirror the
    // static/virtual/opt-virtual resolver trio (cf. resolve_static_call_C).
    callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
    // Publish the Method* through TLS so it survives safepoints before return.
    current->set_vm_result_metadata(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  return get_resolved_entry(current, callee_method);
JRT_END
1656 
1657 
// Resolve a virtual call that can be statically bound (e.g., always
// monomorphic, so it has no inline cache).  Patch code to resolved target.
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
  PerfTraceTime timer(_perf_resolve_opt_virtual_total_time);

  methodHandle callee_method;
  JRT_BLOCK
    // Resolution may safepoint / run Java; CHECK_NULL returns nullptr to the
    // compiled caller if an exception is pending. Flags mirror the
    // static/virtual/opt-virtual resolver trio (cf. resolve_static_call_C).
    callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
    // Publish the Method* through TLS so it survives safepoints before return.
    current->set_vm_result_metadata(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  return get_resolved_entry(current, callee_method);
JRT_END
1671 
1672 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1673   JavaThread* current = THREAD;
1674   ResourceMark rm(current);
1675   CallInfo call_info;
1676   Bytecodes::Code bc;
1677 
1678   // receiver is null for static calls. An exception is thrown for null
1679   // receivers for non-static calls
1680   Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1681 
1682   methodHandle callee_method(current, call_info.selected_method());
1683 

1684   Atomic::inc(&_ic_miss_ctr);
1685 
1686 #ifndef PRODUCT
1687   // Statistics & Tracing
1688   if (TraceCallFixup) {
1689     ResourceMark rm(current);
1690     tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1691     callee_method->print_short_name(tty);
1692     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1693   }
1694 
1695   if (ICMissHistogram) {
1696     MutexLocker m(VMStatistic_lock);
1697     RegisterMap reg_map(current,
1698                         RegisterMap::UpdateMap::skip,
1699                         RegisterMap::ProcessFrames::include,
1700                         RegisterMap::WalkContinuation::skip);
1701     frame f = current->last_frame().real_sender(&reg_map);// skip runtime stub
1702     // produce statistics under the lock
1703     trace_ic_miss(f.pc());
1704   }
1705 #endif
1706 

1789             CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1790             cdc->set_to_clean();
1791             break;
1792           }
1793 
1794           case relocInfo::virtual_call_type: {
1795             // compiled, dispatched call (which used to call an interpreted method)
1796             CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1797             inline_cache->set_to_clean();
1798             break;
1799           }
1800           default:
1801             break;
1802         }
1803       }
1804     }
1805   }
1806 
1807   methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1808 


1809   Atomic::inc(&_wrong_method_ctr);
1810 
1811 #ifndef PRODUCT
1812   if (TraceCallFixup) {
1813     ResourceMark rm(current);
1814     tty->print("handle_wrong_method reresolving call to");
1815     callee_method->print_short_name(tty);
1816     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1817   }
1818 #endif
1819 
1820   return callee_method;
1821 }
1822 
1823 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1824   // The faulting unsafe accesses should be changed to throw the error
1825   // synchronously instead. Meanwhile the faulting instruction will be
1826   // skipped over (effectively turning it into a no-op) and an
1827   // asynchronous exception will be raised which the thread will
1828   // handle at a later point. If the instruction is a load it will
1829   // return garbage.
1830 
1831   // Request an async exception.

// This is only called when CheckJNICalls is true, and only
// for virtual thread termination.
JRT_LEAF(void,  SharedRuntime::log_jni_monitor_still_held())
  assert(CheckJNICalls, "Only call this when checking JNI usage");
  if (log_is_enabled(Debug, jni)) {
    JavaThread* current = JavaThread::current();
    // Report both identities: the virtual thread that is terminating and the
    // carrier thread it is mounted on.
    int64_t vthread_id = java_lang_Thread::thread_id(current->vthread());
    int64_t carrier_id = java_lang_Thread::thread_id(current->threadObj());
    log_debug(jni)("VirtualThread (tid: " INT64_FORMAT ", carrier id: " INT64_FORMAT
                   ") exiting with Objects still locked by JNI MonitorEnter.",
                   vthread_id, carrier_id);
  }
JRT_END
2102 
2103 #ifndef PRODUCT
2104 
2105 void SharedRuntime::print_statistics() {
2106   ttyLocker ttyl;
2107   if (xtty != nullptr)  xtty->head("statistics type='SharedRuntime'");
2108 
2109   SharedRuntime::print_ic_miss_histogram_on(tty);
2110   SharedRuntime::print_counters_on(tty);
2111   AdapterHandlerLibrary::print_statistics_on(tty);
































2112 
2113   if (xtty != nullptr)  xtty->tail("statistics");
2114 }
2115 
2116 //void SharedRuntime::print_counters_on(outputStream* st) {
2117 //  // Dump the JRT_ENTRY counters
2118 //  if (_new_instance_ctr) st->print_cr("%5u new instance requires GC", _new_instance_ctr);
2119 //  if (_new_array_ctr)    st->print_cr("%5u new array requires GC", _new_array_ctr);
2120 //  if (_multi2_ctr)       st->print_cr("%5u multianewarray 2 dim", _multi2_ctr);
2121 //  if (_multi3_ctr)       st->print_cr("%5u multianewarray 3 dim", _multi3_ctr);
2122 //  if (_multi4_ctr)       st->print_cr("%5u multianewarray 4 dim", _multi4_ctr);
2123 //  if (_multi5_ctr)       st->print_cr("%5u multianewarray 5 dim", _multi5_ctr);
2124 //
2125 //  st->print_cr("%5u inline cache miss in compiled", _ic_miss_ctr);
2126 //  st->print_cr("%5u wrong method", _wrong_method_ctr);
2127 //  st->print_cr("%5u unresolved static call site", _resolve_static_ctr);
2128 //  st->print_cr("%5u unresolved virtual call site", _resolve_virtual_ctr);
2129 //  st->print_cr("%5u unresolved opt virtual call site", _resolve_opt_virtual_ctr);
2130 //
2131 //  if (_mon_enter_stub_ctr)       st->print_cr("%5u monitor enter stub", _mon_enter_stub_ctr);
2132 //  if (_mon_exit_stub_ctr)        st->print_cr("%5u monitor exit stub", _mon_exit_stub_ctr);
2133 //  if (_mon_enter_ctr)            st->print_cr("%5u monitor enter slow", _mon_enter_ctr);
2134 //  if (_mon_exit_ctr)             st->print_cr("%5u monitor exit slow", _mon_exit_ctr);
2135 //  if (_partial_subtype_ctr)      st->print_cr("%5u slow partial subtype", _partial_subtype_ctr);
2136 //  if (_jbyte_array_copy_ctr)     st->print_cr("%5u byte array copies", _jbyte_array_copy_ctr);
2137 //  if (_jshort_array_copy_ctr)    st->print_cr("%5u short array copies", _jshort_array_copy_ctr);
2138 //  if (_jint_array_copy_ctr)      st->print_cr("%5u int array copies", _jint_array_copy_ctr);
2139 //  if (_jlong_array_copy_ctr)     st->print_cr("%5u long array copies", _jlong_array_copy_ctr);
2140 //  if (_oop_array_copy_ctr)       st->print_cr("%5u oop array copies", _oop_array_copy_ctr);
2141 //  if (_checkcast_array_copy_ctr) st->print_cr("%5u checkcast array copies", _checkcast_array_copy_ctr);
2142 //  if (_unsafe_array_copy_ctr)    st->print_cr("%5u unsafe array copies", _unsafe_array_copy_ctr);
2143 //  if (_generic_array_copy_ctr)   st->print_cr("%5u generic array copies", _generic_array_copy_ctr);
2144 //  if (_slow_array_copy_ctr)      st->print_cr("%5u slow array copies", _slow_array_copy_ctr);
2145 //  if (_find_handler_ctr)         st->print_cr("%5u find exception handler", _find_handler_ctr);
2146 //  if (_rethrow_ctr)              st->print_cr("%5u rethrow handler", _rethrow_ctr);
//  if (_unsafe_set_memory_ctr)    st->print_cr("%5u unsafe set memory ops", _unsafe_set_memory_ctr);
2148 //}
2149 
// Fraction of x over y expressed in percent. A divisor <= 1 is clamped to 1
// so the result is always well-defined (no division by zero).
inline double percent(int64_t x, int64_t y) {
  const int64_t divisor = (y > 1) ? y : 1;
  return 100.0 * (double)x / (double)divisor;
}
2153 
2154 class MethodArityHistogram {
2155  public:
2156   enum { MAX_ARITY = 256 };
2157  private:
2158   static uint64_t _arity_histogram[MAX_ARITY]; // histogram of #args
2159   static uint64_t _size_histogram[MAX_ARITY];  // histogram of arg size in words
2160   static uint64_t _total_compiled_calls;
2161   static uint64_t _max_compiled_calls_per_method;
2162   static int _max_arity;                       // max. arity seen
2163   static int _max_size;                        // max. arg size seen
2164 
2165   static void add_method_to_histogram(nmethod* nm) {
2166     Method* method = (nm == nullptr) ? nullptr : nm->method();
2167     if (method != nullptr) {
2168       ArgumentCount args(method->signature());
2169       int arity   = args.size() + (method->is_static() ? 0 : 1);

2214     // Take the Compile_lock to protect against changes in the CodeBlob structures
2215     MutexLocker mu1(Compile_lock, Mutex::_safepoint_check_flag);
2216     // Take the CodeCache_lock to protect against changes in the CodeHeap structure
2217     MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2218     _max_arity = _max_size = 0;
2219     _total_compiled_calls = 0;
2220     _max_compiled_calls_per_method = 0;
2221     for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
2222     CodeCache::nmethods_do(add_method_to_histogram);
2223     print_histogram();
2224   }
2225 };
2226 
// Out-of-line storage for MethodArityHistogram's static counters
// (zero-initialized at VM start).
uint64_t MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
uint64_t MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
uint64_t MethodArityHistogram::_total_compiled_calls;
uint64_t MethodArityHistogram::_max_compiled_calls_per_method;
int MethodArityHistogram::_max_arity;
int MethodArityHistogram::_max_size;
2233 
2234 void SharedRuntime::print_call_statistics_on(outputStream* st) {
2235   tty->print_cr("Calls from compiled code:");
2236   int64_t total  = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
2237   int64_t mono_c = _nof_normal_calls - _nof_megamorphic_calls;
2238   int64_t mono_i = _nof_interface_calls;
2239   tty->print_cr("\t" INT64_FORMAT_W(12) " (100%%)  total non-inlined   ", total);
2240   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- virtual calls       ", _nof_normal_calls, percent(_nof_normal_calls, total));
2241   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- inlined          ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
2242   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- monomorphic      ", mono_c, percent(mono_c, _nof_normal_calls));
2243   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- megamorphic      ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
2244   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- interface calls     ", _nof_interface_calls, percent(_nof_interface_calls, total));
2245   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- inlined          ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
2246   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- monomorphic      ", mono_i, percent(mono_i, _nof_interface_calls));
2247   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
2248   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- inlined          ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
2249   tty->cr();
2250   tty->print_cr("Note 1: counter updates are not MT-safe.");
2251   tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2252   tty->print_cr("        %% in nested categories are relative to their category");
2253   tty->print_cr("        (and thus add up to more than 100%% with inlining)");
2254   tty->cr();

2267 // A simple wrapper class around the calling convention information
2268 // that allows sharing of adapters for the same calling convention.
2269 class AdapterFingerPrint : public MetaspaceObj {
2270  private:
2271   enum {
2272     _basic_type_bits = 4,
2273     _basic_type_mask = right_n_bits(_basic_type_bits),
2274     _basic_types_per_int = BitsPerInt / _basic_type_bits,
2275   };
2276   // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
2277   // For now, 4 bits per components (plus T_VOID gaps after double/long) is not excessive.
2278 
2279   int _length;
2280 
2281   static int data_offset() { return sizeof(AdapterFingerPrint); }
2282   int* data_pointer() {
2283     return (int*)((address)this + data_offset());
2284   }
2285 
  // Private constructor. Use allocate() to get an instance.
  AdapterFingerPrint(int total_args_passed, BasicType* sig_bt, int len) {
    int* data = data_pointer();
    // Pack the BasicTypes with 8 per int (4 bits each, see _basic_type_bits)
    assert(len == length(total_args_passed), "sanity");
    _length = len;
    int sig_index = 0;
    for (int index = 0; index < _length; index++) {
      int value = 0;
      // Shift earlier arguments toward the most-significant nibbles; a
      // partially-filled last word keeps its unused high nibbles zero.
      for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
        int bt = adapter_encoding(sig_bt[sig_index++]);
        assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
        value = (value << _basic_type_bits) | bt;
      }
      data[index] = value;
    }
  }
2303 
  // Call deallocate instead
  ~AdapterFingerPrint() {
    ShouldNotCallThis();
  }

  // Number of packed ints needed to hold total_args 4-bit type codes.
  static int length(int total_args) {
    return (total_args + (_basic_types_per_int-1)) / _basic_types_per_int;
  }
2312 
2313   static int compute_size_in_words(int len) {
2314     return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(int)));

2315   }
2316 
2317   // Remap BasicTypes that are handled equivalently by the adapters.
2318   // These are correct for the current system but someday it might be
2319   // necessary to make this mapping platform dependent.
2320   static int adapter_encoding(BasicType in) {
2321     switch (in) {
2322       case T_BOOLEAN:
2323       case T_BYTE:
2324       case T_SHORT:
2325       case T_CHAR:
2326         // There are all promoted to T_INT in the calling convention
2327         return T_INT;
2328 
2329       case T_OBJECT:
2330       case T_ARRAY:
2331         // In other words, we assume that any register good enough for
2332         // an int or long is good enough for a managed pointer.
2333 #ifdef _LP64
2334         return T_LONG;

2357   }
2358 
  // Apply 'function' to every encoded argument type, in argument order
  // (first argument first).
  template<typename Function>
  void iterate_args(Function function) {
    for (int i = 0; i < length(); i++) {
      unsigned val = (unsigned)value(i);
      // args are packed so that first/lower arguments are in the highest
      // bits of each int value, so iterate from highest to the lowest
      for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
        unsigned v = (val >> j) & _basic_type_mask;
        if (v == 0) {
          // Zero nibbles are unused slots of a partially-filled word
          // (see the constructor's packing loop); skip them.
          continue;
        }
        function(v);
      }
    }
  }
2374 
 public:
  // Heap-allocate a fingerprint large enough for total_args_passed entries
  // and pack the signature into it.
  static AdapterFingerPrint* allocate(int total_args_passed, BasicType* sig_bt) {
    int len = length(total_args_passed);
    int size_in_bytes = BytesPerWord * compute_size_in_words(len);
    AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(total_args_passed, sig_bt, len);
    assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
    return afp;
  }

  // Release a fingerprint obtained from allocate().
  static void deallocate(AdapterFingerPrint* fp) {
    FreeHeap(fp);
  }
2387 
  // Returns the index-th packed signature word.
  int value(int index) {
    int* data = data_pointer();
    return data[index];
  }

  // Number of packed signature words in this fingerprint.
  int length() {
    return _length;
  }
2396 
2397   unsigned int compute_hash() {
2398     int hash = 0;
2399     for (int i = 0; i < length(); i++) {
2400       int v = value(i);
2401       //Add arithmetic operation to the hash, like +3 to improve hashing
2402       hash = ((hash << 8) ^ v ^ (hash >> 5)) + 3;
2403     }
2404     return (unsigned int)hash;
2405   }

2489     }
2490 #endif
2491     return sig_bt;
2492   }
2493 
2494   bool equals(AdapterFingerPrint* other) {
2495     if (other->_length != _length) {
2496       return false;
2497     } else {
2498       for (int i = 0; i < _length; i++) {
2499         if (value(i) != other->value(i)) {
2500           return false;
2501         }
2502       }
2503     }
2504     return true;
2505   }
2506 
  // methods required by virtue of being a MetaspaceObj
  void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
  int size() const { return compute_size_in_words(_length); }
  MetaspaceObj::Type type() const { return AdapterFingerPrintType; }

  // Hashtable adapter: equality by fingerprint contents (counts comparisons
  // in non-product builds).
  static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
    NOT_PRODUCT(_equals++);
    return fp1->equals(fp2);
  }

  // Hashtable adapter: hash by fingerprint contents.
  static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
    return fp->compute_hash();
  }
2520 };
2521 
2522 #if INCLUDE_CDS
// Equality predicate for CompactHashtable lookups. The length argument
// supplied by the table is unused: the fingerprint records its own length.
static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
  return AdapterFingerPrint::equals(entry->fingerprint(), fp);
}
2526 
2527 class ArchivedAdapterTable : public OffsetCompactHashtable<
2528   AdapterFingerPrint*,
2529   AdapterHandlerEntry*,

2558   }
2559 #endif // INCLUDE_CDS
2560   if (entry == nullptr) {
2561     assert_lock_strong(AdapterHandlerLibrary_lock);
2562     AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2563     if (entry_p != nullptr) {
2564       entry = *entry_p;
2565       assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2566              entry->fingerprint()->as_basic_args_string(), entry->fingerprint()->as_string(), entry->fingerprint()->compute_hash(),
2567              fp->as_basic_args_string(), fp->as_string(), fp->compute_hash());
2568   #ifndef PRODUCT
2569       _runtime_hits++;
2570   #endif
2571     }
2572   }
2573   AdapterFingerPrint::deallocate(fp);
2574   return entry;
2575 }
2576 
2577 #ifndef PRODUCT
2578 void AdapterHandlerLibrary::print_statistics_on(outputStream* st) {
2579   auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2580     return sizeof(*key) + sizeof(*a);
2581   };
2582   TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2583   ts.print(st, "AdapterHandlerTable");
2584   st->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2585                _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2586   int total_hits = _archived_hits + _runtime_hits;
2587   st->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2588                _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2589 }
2590 #endif // !PRODUCT
2591 
// ---------------------------------------------------------------------------
// Implementation of AdapterHandlerLibrary
// Pre-built shared adapters for the most common signatures.
AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
#if INCLUDE_CDS
// Adapters archived in the AOT/CDS image.
ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
#endif // INCLUDE_CDS
// NOTE(review): presumably the size of the shared generation buffer backing
// _buffer -- confirm against the initialization code (not in this view).
static const int AdapterHandlerLibrary_size = 16*K;
BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2605 
// Accessor for the shared adapter buffer blob; callers rely on it having
// been initialized earlier.
BufferBlob* AdapterHandlerLibrary::buffer_blob() {
  assert(_buffer != nullptr, "should be initialized");
  return _buffer;
}
2610 

3522   assert(found, "Should have found handler");
3523 }
3524 
3525 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3526   st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3527   if (get_i2c_entry() != nullptr) {
3528     st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3529   }
3530   if (get_c2i_entry() != nullptr) {
3531     st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3532   }
3533   if (get_c2i_unverified_entry() != nullptr) {
3534     st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3535   }
3536   if (get_c2i_no_clinit_check_entry() != nullptr) {
3537     st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3538   }
3539   st->cr();
3540 }
3541 








3542 bool AdapterHandlerLibrary::is_abstract_method_adapter(AdapterHandlerEntry* entry) {
3543   if (entry == _abstract_method_handler) {
3544     return true;
3545   }
3546   return false;
3547 }
3548 
JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
  assert(current == JavaThread::current(), "pre-condition");
  StackOverflow* overflow_state = current->stack_overflow_state();
  // Re-arm the reserved zone; the true argument asks the callee to check
  // that the zone was actually disabled (see StackOverflow).
  overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
  // Reset the activation watermark back to the stack base.
  overflow_state->set_reserved_stack_activation(current->stack_base());
JRT_END
3555 
3556 frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) {
3557   ResourceMark rm(current);
3558   frame activation;
3559   nmethod* nm = nullptr;
3560   int count = 1;
3561 
< prev index next >