< prev index next >

src/hotspot/share/runtime/sharedRuntime.cpp

Print this page

  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/vmClasses.hpp"
  29 #include "classfile/vmSymbols.hpp"
  30 #include "code/codeCache.hpp"
  31 #include "code/compiledIC.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/compiledMethod.inline.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/abstractCompiler.hpp"
  37 #include "compiler/compileBroker.hpp"
  38 #include "compiler/disassembler.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "gc/shared/gcLocker.inline.hpp"
  42 #include "interpreter/interpreter.hpp"
  43 #include "interpreter/interpreterRuntime.hpp"
  44 #include "jvm.h"
  45 #include "jfr/jfrEvents.hpp"
  46 #include "logging/log.hpp"

  47 #include "memory/resourceArea.hpp"
  48 #include "memory/universe.hpp"


  49 #include "metaprogramming/primitiveConversions.hpp"
  50 #include "oops/compiledICHolder.inline.hpp"
  51 #include "oops/klass.hpp"
  52 #include "oops/method.inline.hpp"
  53 #include "oops/objArrayKlass.hpp"

  54 #include "oops/oop.inline.hpp"

  55 #include "prims/forte.hpp"
  56 #include "prims/jvmtiExport.hpp"
  57 #include "prims/jvmtiThreadState.hpp"
  58 #include "prims/methodHandles.hpp"
  59 #include "prims/nativeLookup.hpp"
  60 #include "runtime/atomic.hpp"
  61 #include "runtime/frame.inline.hpp"
  62 #include "runtime/handles.inline.hpp"
  63 #include "runtime/init.hpp"
  64 #include "runtime/interfaceSupport.inline.hpp"
  65 #include "runtime/java.hpp"
  66 #include "runtime/javaCalls.hpp"
  67 #include "runtime/jniHandles.inline.hpp"
  68 #include "runtime/sharedRuntime.hpp"
  69 #include "runtime/stackWatermarkSet.hpp"
  70 #include "runtime/stubRoutines.hpp"
  71 #include "runtime/synchronizer.hpp"
  72 #include "runtime/vframe.inline.hpp"
  73 #include "runtime/vframeArray.hpp"
  74 #include "runtime/vm_version.hpp"
  75 #include "utilities/copy.hpp"
  76 #include "utilities/dtrace.hpp"
  77 #include "utilities/events.hpp"
  78 #include "utilities/resourceHash.hpp"
  79 #include "utilities/macros.hpp"
  80 #include "utilities/xmlstream.hpp"
  81 #ifdef COMPILER1
  82 #include "c1/c1_Runtime1.hpp"
  83 #endif
  84 
  85 // Shared stub locations
  86 RuntimeStub*        SharedRuntime::_wrong_method_blob;
  87 RuntimeStub*        SharedRuntime::_wrong_method_abstract_blob;
  88 RuntimeStub*        SharedRuntime::_ic_miss_blob;
  89 RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
  90 RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
  91 RuntimeStub*        SharedRuntime::_resolve_static_call_blob;
  92 address             SharedRuntime::_resolve_static_call_entry;
  93 
  94 DeoptimizationBlob* SharedRuntime::_deopt_blob;
  95 SafepointBlob*      SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
  96 SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
  97 SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
  98 
  99 #ifdef COMPILER2
 100 UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
 101 #endif // COMPILER2
 102 
 103 nmethod*            SharedRuntime::_cont_doYield_stub;
 104 
 105 //----------------------------generate_stubs-----------------------------------
 106 void SharedRuntime::generate_stubs() {
 107   _wrong_method_blob                   = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),          "wrong_method_stub");
 108   _wrong_method_abstract_blob          = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
 109   _ic_miss_blob                        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),  "ic_miss_stub");
 110   _resolve_opt_virtual_call_blob       = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),   "resolve_opt_virtual_call");
 111   _resolve_virtual_call_blob           = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),       "resolve_virtual_call");
 112   _resolve_static_call_blob            = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),        "resolve_static_call");
 113   _resolve_static_call_entry           = _resolve_static_call_blob->entry_point();
 114 
 115   AdapterHandlerLibrary::initialize();
 116 
 117 #if COMPILER2_OR_JVMCI
 118   // Vectors are generated only by C2 and JVMCI.
 119   bool support_wide = is_wide_vector(MaxVectorSize);
 120   if (support_wide) {
 121     _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
 122   }
 123 #endif // COMPILER2_OR_JVMCI
 124   _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
 125   _polling_page_return_handler_blob    = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);
 126 
 127   generate_deopt_blob();
 128 
 129 #ifdef COMPILER2
 130   generate_uncommon_trap_blob();
 131 #endif // COMPILER2
 132 }
 133 

1114 // for a call current in progress, i.e., arguments has been pushed on stack
1115 // but callee has not been invoked yet.  Caller frame must be compiled.
1116 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1117                                               CallInfo& callinfo, TRAPS) {
1118   Handle receiver;
1119   Handle nullHandle;  // create a handy null handle for exception returns
1120   JavaThread* current = THREAD;
1121 
1122   assert(!vfst.at_end(), "Java frame must exist");
1123 
1124   // Find caller and bci from vframe
1125   methodHandle caller(current, vfst.method());
1126   int          bci   = vfst.bci();
1127 
1128   if (caller->is_continuation_enter_intrinsic()) {
1129     bc = Bytecodes::_invokestatic;
1130     LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1131     return receiver;
1132   }
1133 















1134   Bytecode_invoke bytecode(caller, bci);
1135   int bytecode_index = bytecode.index();
1136   bc = bytecode.invoke_code();
1137 
1138   methodHandle attached_method(current, extract_attached_method(vfst));
1139   if (attached_method.not_null()) {
1140     Method* callee = bytecode.static_target(CHECK_NH);
1141     vmIntrinsics::ID id = callee->intrinsic_id();
1142     // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1143     // it attaches statically resolved method to the call site.
1144     if (MethodHandles::is_signature_polymorphic(id) &&
1145         MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1146       bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1147 
1148       // Adjust invocation mode according to the attached method.
1149       switch (bc) {
1150         case Bytecodes::_invokevirtual:
1151           if (attached_method->method_holder()->is_interface()) {
1152             bc = Bytecodes::_invokeinterface;
1153           }
1154           break;
1155         case Bytecodes::_invokeinterface:
1156           if (!attached_method->method_holder()->is_interface()) {
1157             bc = Bytecodes::_invokevirtual;
1158           }
1159           break;
1160         case Bytecodes::_invokehandle:
1161           if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1162             bc = attached_method->is_static() ? Bytecodes::_invokestatic
1163                                               : Bytecodes::_invokevirtual;
1164           }
1165           break;
1166         default:
1167           break;
1168       }






1169     }
1170   }
1171 
1172   assert(bc != Bytecodes::_illegal, "not initialized");
1173 
1174   bool has_receiver = bc != Bytecodes::_invokestatic &&
1175                       bc != Bytecodes::_invokedynamic &&
1176                       bc != Bytecodes::_invokehandle;

1177 
1178   // Find receiver for non-static call
1179   if (has_receiver) {
1180     // This register map must be update since we need to find the receiver for
1181     // compiled frames. The receiver might be in a register.
1182     RegisterMap reg_map2(current,
1183                          RegisterMap::UpdateMap::include,
1184                          RegisterMap::ProcessFrames::include,
1185                          RegisterMap::WalkContinuation::skip);
1186     frame stubFrame   = current->last_frame();
1187     // Caller-frame is a compiled frame
1188     frame callerFrame = stubFrame.sender(&reg_map2);
1189 
1190     if (attached_method.is_null()) {
1191       Method* callee = bytecode.static_target(CHECK_NH);

1192       if (callee == nullptr) {
1193         THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1194       }
1195     }
1196 
1197     // Retrieve from a compiled argument list
1198     receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1199     assert(oopDesc::is_oop_or_null(receiver()), "");
1200 
1201     if (receiver.is_null()) {
1202       THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);










1203     }
1204   }
1205 
1206   // Resolve method
1207   if (attached_method.not_null()) {
1208     // Parameterized by attached method.
1209     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1210   } else {
1211     // Parameterized by bytecode.
1212     constantPoolHandle constants(current, caller->constants());
1213     LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1214   }
1215 
1216 #ifdef ASSERT
1217   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1218   if (has_receiver) {
1219     assert(receiver.not_null(), "should have thrown exception");
1220     Klass* receiver_klass = receiver->klass();
1221     Klass* rk = nullptr;
1222     if (attached_method.not_null()) {
1223       // In case there's resolved method attached, use its holder during the check.
1224       rk = attached_method->method_holder();
1225     } else {
1226       // Klass is already loaded.
1227       constantPoolHandle constants(current, caller->constants());
1228       rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1229     }
1230     Klass* static_receiver_klass = rk;
1231     assert(receiver_klass->is_subtype_of(static_receiver_klass),
1232            "actual receiver must be subclass of static receiver klass");
1233     if (receiver_klass->is_instance_klass()) {
1234       if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1235         tty->print_cr("ERROR: Klass not yet initialized!!");
1236         receiver_klass->print();
1237       }
1238       assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1239     }
1240   }
1241 #endif
1242 
1243   return receiver;
1244 }
1245 
1246 methodHandle SharedRuntime::find_callee_method(TRAPS) {
1247   JavaThread* current = THREAD;
1248   ResourceMark rm(current);
1249   // We need first to check if any Java activations (compiled, interpreted)
1250   // exist on the stack since last JavaCall.  If not, we need
1251   // to get the target method from the JavaCall wrapper.
1252   vframeStream vfst(current, true);  // Do not skip any javaCalls
1253   methodHandle callee_method;
1254   if (vfst.at_end()) {
1255     // No Java frames were found on stack since we did the JavaCall.
1256     // Hence the stack can only contain an entry_frame.  We need to
1257     // find the target method from the stub frame.
1258     RegisterMap reg_map(current,
1259                         RegisterMap::UpdateMap::skip,
1260                         RegisterMap::ProcessFrames::include,
1261                         RegisterMap::WalkContinuation::skip);
1262     frame fr = current->last_frame();
1263     assert(fr.is_runtime_frame(), "must be a runtimeStub");
1264     fr = fr.sender(&reg_map);
1265     assert(fr.is_entry_frame(), "must be");
1266     // fr is now pointing to the entry frame.
1267     callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1268   } else {
1269     Bytecodes::Code bc;
1270     CallInfo callinfo;
1271     find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));




1272     callee_method = methodHandle(current, callinfo.selected_method());
1273   }
1274   assert(callee_method()->is_method(), "must be");
1275   return callee_method;
1276 }
1277 
1278 // Resolves a call.
1279 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
1280   methodHandle callee_method;
1281   callee_method = resolve_sub_helper(is_virtual, is_optimized, THREAD);
1282   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
1283     int retry_count = 0;
1284     while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
1285            callee_method->method_holder() != vmClasses::Object_klass()) {
1286       // If has a pending exception then there is no need to re-try to
1287       // resolve this method.
1288       // If the method has been redefined, we need to try again.
1289       // Hack: we have no way to update the vtables of arrays, so don't
1290       // require that java.lang.Object has been updated.
1291 
1292       // It is very unlikely that method is redefined more than 100 times
1293       // in the middle of resolve. If it is looping here more than 100 times
1294       // means then there could be a bug here.
1295       guarantee((retry_count++ < 100),
1296                 "Could not resolve to latest version of redefined method");
1297       // method is redefined in the middle of resolve so re-try.
1298       callee_method = resolve_sub_helper(is_virtual, is_optimized, THREAD);
1299     }
1300   }
1301   return callee_method;
1302 }
1303 
1304 // This fails if resolution required refilling of IC stubs
1305 bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
1306                                                 CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
1307                                                 Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
1308   StaticCallInfo static_call_info;
1309   CompiledICInfo virtual_call_info;
1310 
1311   // Make sure the callee nmethod does not get deoptimized and removed before
1312   // we are done patching the code.
1313   CompiledMethod* callee = callee_method->code();
1314 
1315   if (callee != nullptr) {
1316     assert(callee->is_compiled(), "must be nmethod for patching");
1317   }
1318 
1319   if (callee != nullptr && !callee->is_in_use()) {
1320     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1321     callee = nullptr;
1322   }
1323 #ifdef ASSERT
1324   address dest_entry_point = callee == nullptr ? 0 : callee->entry_point(); // used below
1325 #endif
1326 
1327   bool is_nmethod = caller_nm->is_nmethod();
1328 
1329   if (is_virtual) {
1330     assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");







1331     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1332     Klass* klass = invoke_code == Bytecodes::_invokehandle ? nullptr : receiver->klass();
1333     CompiledIC::compute_monomorphic_entry(callee_method, klass,
1334                      is_optimized, static_bound, is_nmethod, virtual_call_info,
1335                      CHECK_false);
1336   } else {
1337     // static call
1338     CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
1339   }
1340 
1341   // grab lock, check for deoptimization and potentially patch caller
1342   {
1343     CompiledICLocker ml(caller_nm);
1344 
1345     // Lock blocks for safepoint during which both nmethods can change state.
1346 
1347     // Now that we are ready to patch if the Method* was redefined then
1348     // don't update call site and let the caller retry.
1349     // Don't update call site if callee nmethod was unloaded or deoptimized.
1350     // Don't update call site if callee nmethod was replaced by an other nmethod
1351     // which may happen when multiply alive nmethod (tiered compilation)
1352     // will be supported.
1353     if (!callee_method->is_old() &&
1354         (callee == nullptr || (callee->is_in_use() && callee_method->code() == callee))) {
1355       NoSafepointVerifier nsv;
1356 #ifdef ASSERT
1357       // We must not try to patch to jump to an already unloaded method.
1358       if (dest_entry_point != 0) {

1371       } else {
1372         if (VM_Version::supports_fast_class_init_checks() &&
1373             invoke_code == Bytecodes::_invokestatic &&
1374             callee_method->needs_clinit_barrier() &&
1375             callee != nullptr && callee->is_compiled_by_jvmci()) {
1376           return true; // skip patching for JVMCI
1377         }
1378         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
1379         if (is_nmethod && caller_nm->method()->is_continuation_enter_intrinsic()) {
1380           ssc->compute_entry_for_continuation_entry(callee_method, static_call_info);
1381         }
1382         if (ssc->is_clean()) ssc->set(static_call_info);
1383       }
1384     }
1385   } // unlock CompiledICLocker
1386   return true;
1387 }
1388 
1389 // Resolves a call.  The compilers generate code for calls that go here
1390 // and are patched with the real destination of the call.
1391 methodHandle SharedRuntime::resolve_sub_helper(bool is_virtual, bool is_optimized, TRAPS) {
1392   JavaThread* current = THREAD;
1393   ResourceMark rm(current);
1394   RegisterMap cbl_map(current,
1395                       RegisterMap::UpdateMap::skip,
1396                       RegisterMap::ProcessFrames::include,
1397                       RegisterMap::WalkContinuation::skip);
1398   frame caller_frame = current->last_frame().sender(&cbl_map);
1399 
1400   CodeBlob* caller_cb = caller_frame.cb();
1401   guarantee(caller_cb != nullptr && caller_cb->is_compiled(), "must be called from compiled method");
1402   CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
1403 
1404   // determine call info & receiver
1405   // note: a) receiver is null for static calls
1406   //       b) an exception is thrown if receiver is null for non-static calls
1407   CallInfo call_info;
1408   Bytecodes::Code invoke_code = Bytecodes::_illegal;
1409   Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1410   methodHandle callee_method(current, call_info.selected_method());




1411 
1412   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1413          (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1414          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1415          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1416          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1417 
1418   assert(!caller_nm->is_unloading(), "It should not be unloading");
1419 
1420 #ifndef PRODUCT
1421   // tracing/debugging/statistics
1422   uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1423                  (is_virtual) ? (&_resolve_virtual_ctr) :
1424                                 (&_resolve_static_ctr);
1425   Atomic::inc(addr);
1426 
1427   if (TraceCallFixup) {
1428     ResourceMark rm(current);
1429     tty->print("resolving %s%s (%s) call to",
1430                (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",

1455   // If the resolved method is a MethodHandle invoke target, the call
1456   // site must be a MethodHandle call site, because the lambda form might tail-call
1457   // leaving the stack in a state unknown to either caller or callee
1458   // TODO detune for now but we might need it again
1459 //  assert(!callee_method->is_compiled_lambda_form() ||
1460 //         caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
1461 
1462   // Compute entry points. This might require generation of C2I converter
1463   // frames, so we cannot be holding any locks here. Furthermore, the
1464   // computation of the entry points is independent of patching the call.  We
1465   // always return the entry-point, but we only patch the stub if the call has
1466   // not been deoptimized.  Return values: For a virtual call this is an
1467   // (cached_oop, destination address) pair. For a static call/optimized
1468   // virtual this is just a destination address.
1469 
1470   // Patching IC caches may fail if we run out if transition stubs.
1471   // We refill the ic stubs then and try again.
1472   for (;;) {
1473     ICRefillVerifier ic_refill_verifier;
1474     bool successful = resolve_sub_helper_internal(callee_method, caller_frame, caller_nm,
1475                                                   is_virtual, is_optimized, receiver,
1476                                                   call_info, invoke_code, CHECK_(methodHandle()));
1477     if (successful) {
1478       return callee_method;
1479     } else {
1480       InlineCacheBuffer::refill_ic_stubs();
1481     }
1482   }
1483 
1484 }
1485 
1486 
1487 // Inline caches exist only in compiled code
1488 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1489 #ifdef ASSERT
1490   RegisterMap reg_map(current,
1491                       RegisterMap::UpdateMap::skip,
1492                       RegisterMap::ProcessFrames::include,
1493                       RegisterMap::WalkContinuation::skip);
1494   frame stub_frame = current->last_frame();
1495   assert(stub_frame.is_runtime_frame(), "sanity check");
1496   frame caller_frame = stub_frame.sender(&reg_map);
1497   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1498 #endif /* ASSERT */
1499 
1500   methodHandle callee_method;


1501   JRT_BLOCK
1502     callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1503     // Return Method* through TLS
1504     current->set_vm_result_2(callee_method());
1505   JRT_BLOCK_END
1506   // return compiled code entry point after potential safepoints
1507   assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1508   return callee_method->verified_code_entry();
1509 JRT_END
1510 
1511 
1512 // Handle call site that has been made non-entrant
1513 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1514   // 6243940 We might end up in here if the callee is deoptimized
1515   // as we race to call it.  We don't want to take a safepoint if
1516   // the caller was interpreted because the caller frame will look
1517   // interpreted to the stack walkers and arguments are now
1518   // "compiled" so it is much better to make this transition
1519   // invisible to the stack walking code. The i2c path will
1520   // place the callee method in the callee_target. It is stashed
1521   // there because if we try and find the callee by normal means a
1522   // safepoint is possible and have trouble gc'ing the compiled args.
1523   RegisterMap reg_map(current,
1524                       RegisterMap::UpdateMap::skip,
1525                       RegisterMap::ProcessFrames::include,
1526                       RegisterMap::WalkContinuation::skip);
1527   frame stub_frame = current->last_frame();
1528   assert(stub_frame.is_runtime_frame(), "sanity check");
1529   frame caller_frame = stub_frame.sender(&reg_map);
1530 
1531   if (caller_frame.is_interpreted_frame() ||
1532       caller_frame.is_entry_frame() ||
1533       caller_frame.is_upcall_stub_frame()) {
1534     Method* callee = current->callee_target();
1535     guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1536     current->set_vm_result_2(callee);
1537     current->set_callee_target(nullptr);
1538     if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1539       // Bypass class initialization checks in c2i when caller is in native.
1540       // JNI calls to static methods don't have class initialization checks.
1541       // Fast class initialization checks are present in c2i adapters and call into
1542       // SharedRuntime::handle_wrong_method() on the slow path.
1543       //
1544       // JVM upcalls may land here as well, but there's a proper check present in
1545       // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1546       // so bypassing it in c2i adapter is benign.
1547       return callee->get_c2i_no_clinit_check_entry();
1548     } else {
1549       return callee->get_c2i_entry();




1550     }
1551   }
1552 
1553   // Must be compiled to compiled path which is safe to stackwalk
1554   methodHandle callee_method;



1555   JRT_BLOCK
1556     // Force resolving of caller (if we called from compiled frame)
1557     callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1558     current->set_vm_result_2(callee_method());
1559   JRT_BLOCK_END
1560   // return compiled code entry point after potential safepoints
1561   assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1562   return callee_method->verified_code_entry();
1563 JRT_END
1564 
1565 // Handle abstract method call
1566 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1567   // Verbose error message for AbstractMethodError.
1568   // Get the called method from the invoke bytecode.
1569   vframeStream vfst(current, true);
1570   assert(!vfst.at_end(), "Java frame must exist");
1571   methodHandle caller(current, vfst.method());
1572   Bytecode_invoke invoke(caller, vfst.bci());
1573   DEBUG_ONLY( invoke.verify(); )
1574 
1575   // Find the compiled caller frame.
1576   RegisterMap reg_map(current,
1577                       RegisterMap::UpdateMap::include,
1578                       RegisterMap::ProcessFrames::include,
1579                       RegisterMap::WalkContinuation::skip);
1580   frame stubFrame = current->last_frame();
1581   assert(stubFrame.is_runtime_frame(), "must be");
1582   frame callerFrame = stubFrame.sender(&reg_map);
1583   assert(callerFrame.is_compiled_frame(), "must be");
1584 
1585   // Install exception and return forward entry.
1586   address res = StubRoutines::throw_AbstractMethodError_entry();
1587   JRT_BLOCK
1588     methodHandle callee(current, invoke.static_target(current));
1589     if (!callee.is_null()) {
1590       oop recv = callerFrame.retrieve_receiver(&reg_map);
1591       Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1592       res = StubRoutines::forward_exception_entry();
1593       LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1594     }
1595   JRT_BLOCK_END
1596   return res;
1597 JRT_END
1598 
1599 
1600 // resolve a static call and patch code
1601 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1602   methodHandle callee_method;

1603   bool enter_special = false;
1604   JRT_BLOCK
1605     callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1606     current->set_vm_result_2(callee_method());
1607 
1608     if (current->is_interp_only_mode()) {
1609       RegisterMap reg_map(current,
1610                           RegisterMap::UpdateMap::skip,
1611                           RegisterMap::ProcessFrames::include,
1612                           RegisterMap::WalkContinuation::skip);
1613       frame stub_frame = current->last_frame();
1614       assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1615       frame caller = stub_frame.sender(&reg_map);
1616       enter_special = caller.cb() != nullptr && caller.cb()->is_compiled()
1617         && caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic();
1618     }
1619   JRT_BLOCK_END
1620 
1621   if (current->is_interp_only_mode() && enter_special) {
1622     // enterSpecial is compiled and calls this method to resolve the call to Continuation::enter
1623     // but in interp_only_mode we need to go to the interpreted entry
1624     // The c2i won't patch in this mode -- see fixup_callers_callsite
1625     //
1626     // This should probably be done in all cases, not just enterSpecial (see JDK-8218403),
1627     // but that's part of a larger fix, and the situation is worse for enterSpecial, as it has no
1628     // interpreted version.
1629     return callee_method->get_c2i_entry();
1630   }
1631 
1632   // return compiled code entry point after potential safepoints
1633   assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1634   return callee_method->verified_code_entry();


1635 JRT_END
1636 
1637 
1638 // resolve virtual call and update inline cache to monomorphic
1639 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1640   methodHandle callee_method;

1641   JRT_BLOCK
1642     callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
1643     current->set_vm_result_2(callee_method());
1644   JRT_BLOCK_END
1645   // return compiled code entry point after potential safepoints
1646   assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1647   return callee_method->verified_code_entry();


1648 JRT_END
1649 
1650 
1651 // Resolve a virtual call that can be statically bound (e.g., always
1652 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1653 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1654   methodHandle callee_method;

1655   JRT_BLOCK
1656     callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
1657     current->set_vm_result_2(callee_method());
1658   JRT_BLOCK_END
1659   // return compiled code entry point after potential safepoints
1660   assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1661   return callee_method->verified_code_entry();


1662 JRT_END
1663 
1664 // The handle_ic_miss_helper_internal function returns false if it failed due
1665 // to either running out of vtable stubs or ic stubs due to IC transitions
1666 // to transitional states. The needs_ic_stub_refill value will be set if
1667 // the failure was due to running out of IC stubs, in which case handle_ic_miss_helper
1668 // refills the IC stubs and tries again.
1669 bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
1670                                                    const frame& caller_frame, methodHandle callee_method,
1671                                                    Bytecodes::Code bc, CallInfo& call_info,
1672                                                    bool& needs_ic_stub_refill, TRAPS) {
1673   CompiledICLocker ml(caller_nm);
1674   CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1675   bool should_be_mono = false;
1676   if (inline_cache->is_optimized()) {
1677     if (TraceCallFixup) {
1678       ResourceMark rm(THREAD);
1679       tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1680       callee_method->print_short_name(tty);
1681       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1682     }

1683     should_be_mono = true;
1684   } else if (inline_cache->is_icholder_call()) {
1685     CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1686     if (ic_oop != nullptr) {
1687       if (!ic_oop->is_loader_alive()) {
1688         // Deferred IC cleaning due to concurrent class unloading
1689         if (!inline_cache->set_to_clean()) {
1690           needs_ic_stub_refill = true;
1691           return false;
1692         }
1693       } else if (receiver()->klass() == ic_oop->holder_klass()) {
1694         // This isn't a real miss. We must have seen that compiled code
1695         // is now available and we want the call site converted to a
1696         // monomorphic compiled call site.
1697         // We can't assert for callee_method->code() != nullptr because it
1698         // could have been deoptimized in the meantime
1699         if (TraceCallFixup) {
1700           ResourceMark rm(THREAD);
1701           tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1702           callee_method->print_short_name(tty);
1703           tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1704         }
1705         should_be_mono = true;
1706       }
1707     }
1708   }
1709 
1710   if (should_be_mono) {
1711     // We have a path that was monomorphic but was going interpreted
1712     // and now we have (or had) a compiled entry. We correct the IC
1713     // by using a new icBuffer.
1714     CompiledICInfo info;
1715     Klass* receiver_klass = receiver()->klass();
1716     inline_cache->compute_monomorphic_entry(callee_method,
1717                                             receiver_klass,
1718                                             inline_cache->is_optimized(),
1719                                             false, caller_nm->is_nmethod(),

1720                                             info, CHECK_false);
1721     if (!inline_cache->set_to_monomorphic(info)) {
1722       needs_ic_stub_refill = true;
1723       return false;
1724     }
1725   } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1726     // Potential change to megamorphic
1727 
1728     bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false);
1729     if (needs_ic_stub_refill) {
1730       return false;
1731     }
1732     if (!successful) {
1733       if (!inline_cache->set_to_clean()) {
1734         needs_ic_stub_refill = true;
1735         return false;
1736       }
1737     }
1738   } else {
1739     // Either clean or megamorphic
1740   }
1741   return true;
1742 }
1743 
1744 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1745   JavaThread* current = THREAD;
1746   ResourceMark rm(current);
1747   CallInfo call_info;
1748   Bytecodes::Code bc;
1749 
1750   // receiver is null for static calls. An exception is thrown for null
1751   // receivers for non-static calls
1752   Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1753   // Compiler1 can produce virtual call sites that can actually be statically bound
1754   // If we fell thru to below we would think that the site was going megamorphic
1755   // when in fact the site can never miss. Worse because we'd think it was megamorphic
1756   // we'd try and do a vtable dispatch however methods that can be statically bound
1757   // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
1758   // reresolution of the  call site (as if we did a handle_wrong_method and not an
1759   // plain ic_miss) and the site will be converted to an optimized virtual call site
1760   // never to miss again. I don't believe C2 will produce code like this but if it
1761   // did this would still be the correct thing to do for it too, hence no ifdef.
1762   //
1763   if (call_info.resolved_method()->can_be_statically_bound()) {
1764     methodHandle callee_method = SharedRuntime::reresolve_call_site(CHECK_(methodHandle()));


1765     if (TraceCallFixup) {
1766       RegisterMap reg_map(current,
1767                           RegisterMap::UpdateMap::skip,
1768                           RegisterMap::ProcessFrames::include,
1769                           RegisterMap::WalkContinuation::skip);
1770       frame caller_frame = current->last_frame().sender(&reg_map);
1771       ResourceMark rm(current);
1772       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1773       callee_method->print_short_name(tty);
1774       tty->print_cr(" from pc: " INTPTR_FORMAT, p2i(caller_frame.pc()));
1775       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1776     }
1777     return callee_method;
1778   }
1779 
1780   methodHandle callee_method(current, call_info.selected_method());
1781 
1782 #ifndef PRODUCT
1783   Atomic::inc(&_ic_miss_ctr);
1784 

1803 #endif
1804 
1805   // install an event collector so that when a vtable stub is created the
1806   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1807   // event can't be posted when the stub is created as locks are held
1808   // - instead the event will be deferred until the event collector goes
1809   // out of scope.
1810   JvmtiDynamicCodeEventCollector event_collector;
1811 
1812   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1813   // Transitioning IC caches may require transition stubs. If we run out
1814   // of transition stubs, we have to drop locks and perform a safepoint
1815   // that refills them.
1816   RegisterMap reg_map(current,
1817                       RegisterMap::UpdateMap::skip,
1818                       RegisterMap::ProcessFrames::include,
1819                       RegisterMap::WalkContinuation::skip);
1820   frame caller_frame = current->last_frame().sender(&reg_map);
1821   CodeBlob* cb = caller_frame.cb();
1822   CompiledMethod* caller_nm = cb->as_compiled_method();




1823 
1824   for (;;) {
1825     ICRefillVerifier ic_refill_verifier;
1826     bool needs_ic_stub_refill = false;
1827     bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1828                                                      bc, call_info, needs_ic_stub_refill, CHECK_(methodHandle()));
1829     if (successful || !needs_ic_stub_refill) {
1830       return callee_method;
1831     } else {
1832       InlineCacheBuffer::refill_ic_stubs();
1833     }
1834   }
1835 }
1836 
1837 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1838   CompiledICLocker ml(caller_nm);
1839   if (is_static_call) {
1840     CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1841     if (!ssc->is_clean()) {
1842       return ssc->set_to_clean();
1843     }
1844   } else {
1845     // compiled, dispatched call (which used to call an interpreted method)
1846     CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1847     if (!inline_cache->is_clean()) {
1848       return inline_cache->set_to_clean();
1849     }
1850   }
1851   return true;
1852 }
1853 
1854 //
1855 // Resets a call-site in compiled code so it will get resolved again.
1856 // This routines handles both virtual call sites, optimized virtual call
1857 // sites, and static call sites. Typically used to change a call sites
1858 // destination from compiled to interpreted.
1859 //
1860 methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
1861   JavaThread* current = THREAD;
1862   ResourceMark rm(current);
1863   RegisterMap reg_map(current,
1864                       RegisterMap::UpdateMap::skip,
1865                       RegisterMap::ProcessFrames::include,
1866                       RegisterMap::WalkContinuation::skip);
1867   frame stub_frame = current->last_frame();
1868   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1869   frame caller = stub_frame.sender(&reg_map);



1870 
1871   // Do nothing if the frame isn't a live compiled frame.
1872   // nmethod could be deoptimized by the time we get here
1873   // so no update to the caller is needed.
1874 
1875   if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1876       (caller.is_native_frame() && ((CompiledMethod*)caller.cb())->method()->is_continuation_enter_intrinsic())) {
1877 
1878     address pc = caller.pc();
1879 
1880     // Check for static or virtual call
1881     bool is_static_call = false;
1882     CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
1883 
1884     // Default call_addr is the location of the "basic" call.
1885     // Determine the address of the call we a reresolving. With
1886     // Inline Caches we will always find a recognizable call.
1887     // With Inline Caches disabled we may or may not find a
1888     // recognizable call. We will always find a call for static
1889     // calls and for optimized virtual calls. For vanilla virtual
1890     // calls it depends on the state of the UseInlineCaches switch.
1891     //
1892     // With Inline Caches disabled we can get here for a virtual call
1893     // for two reasons:
1894     //   1 - calling an abstract method. The vtable for abstract methods
1895     //       will run us thru handle_wrong_method and we will eventually
1896     //       end up in the interpreter to throw the ame.
1897     //   2 - a racing deoptimization. We could be doing a vanilla vtable
1898     //       call and between the time we fetch the entry address and
1899     //       we jump to it the target gets deoptimized. Similar to 1
1900     //       we will wind up in the interprter (thru a c2i with c2).
1901     //
1902     address call_addr = nullptr;
1903     {
1904       // Get call instruction under lock because another thread may be
1905       // busy patching it.
1906       CompiledICLocker ml(caller_nm);
1907       // Location of call instruction
1908       call_addr = caller_nm->call_instruction_address(pc);
1909     }
1910 
1911     // Check relocations for the matching call to 1) avoid false positives,
1912     // and 2) determine the type.
1913     if (call_addr != nullptr) {
1914       // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1915       // bytes back in the instruction stream so we must also check for reloc info.
1916       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1917       bool ret = iter.next(); // Get item
1918       if (ret) {
1919         bool is_static_call = false;

1920         switch (iter.type()) {
1921           case relocInfo::static_call_type:
1922             is_static_call = true;
1923 
1924           case relocInfo::virtual_call_type:
1925           case relocInfo::opt_virtual_call_type:

1926             // Cleaning the inline cache will force a new resolve. This is more robust
1927             // than directly setting it to the new destination, since resolving of calls
1928             // is always done through the same code path. (experience shows that it
1929             // leads to very hard to track down bugs, if an inline cache gets updated
1930             // to a wrong method). It should not be performance critical, since the
1931             // resolve is only done once.
1932             guarantee(iter.addr() == call_addr, "must find call");
1933             for (;;) {
1934               ICRefillVerifier ic_refill_verifier;
1935               if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
1936                 InlineCacheBuffer::refill_ic_stubs();
1937               } else {
1938                 break;
1939               }
1940             }
1941             break;
1942           default:
1943             break;
1944         }
1945       }
1946     }
1947   }
1948 
1949   methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1950 
1951 
1952 #ifndef PRODUCT
1953   Atomic::inc(&_wrong_method_ctr);
1954 
1955   if (TraceCallFixup) {
1956     ResourceMark rm(current);
1957     tty->print("handle_wrong_method reresolving call to");
1958     callee_method->print_short_name(tty);
1959     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1960   }
1961 #endif
1962 
1963   return callee_method;
1964 }
1965 
1966 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1967   // The faulting unsafe accesses should be changed to throw the error
1968   // synchronously instead. Meanwhile the faulting instruction will be
1969   // skipped over (effectively turning it into a no-op) and an
1970   // asynchronous exception will be raised which the thread will

2102       // there. If you're lucky you'll get the assert in the bugid, if not you've
2103       // just made a call site that could be megamorphic into a monomorphic site
2104       // for the rest of its life! Just another racing bug in the life of
2105       // fixup_callers_callsite ...
2106       //
2107       RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
2108       iter.next();
2109       assert(iter.has_current(), "must have a reloc at java call site");
2110       relocInfo::relocType typ = iter.reloc()->type();
2111       if (typ != relocInfo::static_call_type &&
2112            typ != relocInfo::opt_virtual_call_type &&
2113            typ != relocInfo::static_stub_type) {
2114         return;
2115       }
2116       if (nm->method()->is_continuation_enter_intrinsic()) {
2117         if (ContinuationEntry::is_interpreted_call(call->instruction_address())) {
2118           return;
2119         }
2120       }
2121       address destination = call->destination();
2122       address entry_point = callee->verified_entry_point();
2123       if (should_fixup_call_destination(destination, entry_point, caller_pc, moop, cb)) {
2124         call->set_destination_mt_safe(entry_point);
2125       }
2126     }
2127   }
2128 JRT_END
2129 
2130 
2131 // same as JVM_Arraycopy, but called directly from compiled code
2132 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
2133                                                 oopDesc* dest, jint dest_pos,
2134                                                 jint length,
2135                                                 JavaThread* current)) {
2136 #ifndef PRODUCT
2137   _slow_array_copy_ctr++;
2138 #endif
2139   // Check if we have null pointers
2140   if (src == nullptr || dest == nullptr) {
2141     THROW(vmSymbols::java_lang_NullPointerException());
2142   }

2414   tty->print_cr("        %% in nested categories are relative to their category");
2415   tty->print_cr("        (and thus add up to more than 100%% with inlining)");
2416   tty->cr();
2417 
2418   MethodArityHistogram h;
2419 }
2420 #endif
2421 
2422 #ifndef PRODUCT
2423 static int _lookups; // number of calls to lookup
2424 static int _equals;  // number of buckets checked with matching hash
2425 static int _hits;    // number of successful lookups
2426 static int _compact; // number of equals calls with compact signature
2427 #endif
2428 
2429 // A simple wrapper class around the calling convention information
2430 // that allows sharing of adapters for the same calling convention.
2431 class AdapterFingerPrint : public CHeapObj<mtCode> {
2432  private:
2433   enum {
2434     _basic_type_bits = 4,
2435     _basic_type_mask = right_n_bits(_basic_type_bits),
2436     _basic_types_per_int = BitsPerInt / _basic_type_bits,
2437     _compact_int_count = 3
2438   };
2439   // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
2440   // For now, 4 bits per components (plus T_VOID gaps after double/long) is not excessive.
2441 
2442   union {
2443     int  _compact[_compact_int_count];
2444     int* _fingerprint;
2445   } _value;
2446   int _length; // A negative length indicates the fingerprint is in the compact form,
2447                // Otherwise _value._fingerprint is the array.
2448 
2449   // Remap BasicTypes that are handled equivalently by the adapters.
2450   // These are correct for the current system but someday it might be
2451   // necessary to make this mapping platform dependent.
2452   static int adapter_encoding(BasicType in) {
2453     switch (in) {
2454       case T_BOOLEAN:
2455       case T_BYTE:
2456       case T_SHORT:
2457       case T_CHAR:
2458         // There are all promoted to T_INT in the calling convention
2459         return T_INT;
2460 
2461       case T_OBJECT:
2462       case T_ARRAY:
2463         // In other words, we assume that any register good enough for
2464         // an int or long is good enough for a managed pointer.
2465 #ifdef _LP64
2466         return T_LONG;
2467 #else
2468         return T_INT;
2469 #endif
2470 
2471       case T_INT:
2472       case T_LONG:
2473       case T_FLOAT:
2474       case T_DOUBLE:
2475       case T_VOID:
2476         return in;
2477 
2478       default:
2479         ShouldNotReachHere();
2480         return T_CONFLICT;
2481     }
2482   }
2483 
2484  public:
2485   AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
2486     // The fingerprint is based on the BasicType signature encoded
2487     // into an array of ints with eight entries per int.

2488     int* ptr;
2489     int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2490     if (len <= _compact_int_count) {
2491       assert(_compact_int_count == 3, "else change next line");
2492       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2493       // Storing the signature encoded as signed chars hits about 98%
2494       // of the time.
2495       _length = -len;
2496       ptr = _value._compact;
2497     } else {
2498       _length = len;
2499       _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2500       ptr = _value._fingerprint;
2501     }
2502 
2503     // Now pack the BasicTypes with 8 per int
2504     int sig_index = 0;


2505     for (int index = 0; index < len; index++) {
2506       int value = 0;
2507       for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
2508         int bt = adapter_encoding(sig_bt[sig_index++]);
2509         assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2510         value = (value << _basic_type_bits) | bt;























2511       }
2512       ptr[index] = value;
2513     }

2514   }
2515 
2516   ~AdapterFingerPrint() {
2517     if (_length > 0) {
2518       FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2519     }
2520   }
2521 
2522   int value(int index) {
2523     if (_length < 0) {
2524       return _value._compact[index];
2525     }
2526     return _value._fingerprint[index];
2527   }
2528   int length() {
2529     if (_length < 0) return -_length;
2530     return _length;
2531   }
2532 
2533   bool is_compact() {

2558   const char* as_basic_args_string() {
2559     stringStream st;
2560     bool long_prev = false;
2561     for (int i = 0; i < length(); i++) {
2562       unsigned val = (unsigned)value(i);
2563       // args are packed so that first/lower arguments are in the highest
2564       // bits of each int value, so iterate from highest to the lowest
2565       for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2566         unsigned v = (val >> j) & _basic_type_mask;
2567         if (v == 0) {
2568           assert(i == length() - 1, "Only expect zeroes in the last word");
2569           continue;
2570         }
2571         if (long_prev) {
2572           long_prev = false;
2573           if (v == T_VOID) {
2574             st.print("J");
2575           } else {
2576             st.print("L");
2577           }
2578         }
2579         switch (v) {
2580           case T_INT:    st.print("I");    break;
2581           case T_LONG:   long_prev = true; break;
2582           case T_FLOAT:  st.print("F");    break;
2583           case T_DOUBLE: st.print("D");    break;
2584           case T_VOID:   break;
2585           default: ShouldNotReachHere();
2586         }
2587       }
2588     }
2589     if (long_prev) {
2590       st.print("L");
2591     }
2592     return st.as_string();
2593   }
2594 #endif // !product
2595 
2596   bool equals(AdapterFingerPrint* other) {
2597     if (other->_length != _length) {
2598       return false;
2599     }
2600     if (_length < 0) {
2601       assert(_compact_int_count == 3, "else change next line");
2602       return _value._compact[0] == other->_value._compact[0] &&
2603              _value._compact[1] == other->_value._compact[1] &&
2604              _value._compact[2] == other->_value._compact[2];
2605     } else {

2613   }
2614 
2615   static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2616     NOT_PRODUCT(_equals++);
2617     return fp1->equals(fp2);
2618   }
2619 
2620   static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2621     return fp->compute_hash();
2622   }
2623 };
2624 
2625 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2626 using AdapterHandlerTable = ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2627                   AnyObj::C_HEAP, mtCode,
2628                   AdapterFingerPrint::compute_hash,
2629                   AdapterFingerPrint::equals>;
2630 static AdapterHandlerTable* _adapter_handler_table;
2631 
2632 // Find a entry with the same fingerprint if it exists
2633 static AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
2634   NOT_PRODUCT(_lookups++);
2635   assert_lock_strong(AdapterHandlerLibrary_lock);
2636   AdapterFingerPrint fp(total_args_passed, sig_bt);
2637   AdapterHandlerEntry** entry = _adapter_handler_table->get(&fp);
2638   if (entry != nullptr) {
2639 #ifndef PRODUCT
2640     if (fp.is_compact()) _compact++;
2641     _hits++;
2642 #endif
2643     return *entry;
2644   }
2645   return nullptr;
2646 }
2647 
2648 #ifndef PRODUCT
2649 static void print_table_statistics() {
2650   auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2651     return sizeof(*key) + sizeof(*a);
2652   };
2653   TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2654   ts.print(tty, "AdapterHandlerTable");
2655   tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2656                 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2657   tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2658                 _lookups, _equals, _hits, _compact);
2659 }
2660 #endif
2661 
2662 // ---------------------------------------------------------------------------
2663 // Implementation of AdapterHandlerLibrary
2664 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
2665 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2666 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2667 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2668 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2669 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2670 const int AdapterHandlerLibrary_size = 16*K;
2671 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2672 
2673 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2674   return _buffer;
2675 }
2676 
2677 static void post_adapter_creation(const AdapterBlob* new_adapter,
2678                                   const AdapterHandlerEntry* entry) {
2679   if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2680     char blob_id[256];
2681     jio_snprintf(blob_id,
2682                  sizeof(blob_id),
2683                  "%s(%s)",
2684                  new_adapter->name(),
2685                  entry->fingerprint()->as_string());
2686     if (Forte::is_enabled()) {
2687       Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2688     }
2689 
2690     if (JvmtiExport::should_post_dynamic_code_generated()) {

2693   }
2694 }
2695 
2696 void AdapterHandlerLibrary::initialize() {
2697   ResourceMark rm;
2698   AdapterBlob* no_arg_blob = nullptr;
2699   AdapterBlob* int_arg_blob = nullptr;
2700   AdapterBlob* obj_arg_blob = nullptr;
2701   AdapterBlob* obj_int_arg_blob = nullptr;
2702   AdapterBlob* obj_obj_arg_blob = nullptr;
2703   {
2704     _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2705     MutexLocker mu(AdapterHandlerLibrary_lock);
2706 
2707     // Create a special handler for abstract methods.  Abstract methods
2708     // are never compiled so an i2c entry is somewhat meaningless, but
2709     // throw AbstractMethodError just in case.
2710     // Pass wrong_method_abstract for the c2i transitions to return
2711     // AbstractMethodError for invalid invocations.
2712     address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2713     _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, nullptr),
2714                                                                 StubRoutines::throw_AbstractMethodError_entry(),

2715                                                                 wrong_method_abstract, wrong_method_abstract);
2716 
2717     _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2718     _no_arg_handler = create_adapter(no_arg_blob, 0, nullptr, true);
2719 
2720     BasicType obj_args[] = { T_OBJECT };
2721     _obj_arg_handler = create_adapter(obj_arg_blob, 1, obj_args, true);

2722 
2723     BasicType int_args[] = { T_INT };
2724     _int_arg_handler = create_adapter(int_arg_blob, 1, int_args, true);


2725 
2726     BasicType obj_int_args[] = { T_OBJECT, T_INT };
2727     _obj_int_arg_handler = create_adapter(obj_int_arg_blob, 2, obj_int_args, true);


2728 
2729     BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
2730     _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, 2, obj_obj_args, true);









2731 
2732     assert(no_arg_blob != nullptr &&
2733           obj_arg_blob != nullptr &&
2734           int_arg_blob != nullptr &&
2735           obj_int_arg_blob != nullptr &&
2736           obj_obj_arg_blob != nullptr, "Initial adapters must be properly created");
2737   }

2738 
2739   // Outside of the lock
2740   post_adapter_creation(no_arg_blob, _no_arg_handler);
2741   post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2742   post_adapter_creation(int_arg_blob, _int_arg_handler);
2743   post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2744   post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2745 }
2746 
2747 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2748                                                       address i2c_entry,
2749                                                       address c2i_entry,


2750                                                       address c2i_unverified_entry,

2751                                                       address c2i_no_clinit_check_entry) {
2752   // Insert an entry into the table
2753   return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry,
2754                                  c2i_no_clinit_check_entry);
2755 }
2756 
2757 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2758   if (method->is_abstract()) {
2759     return _abstract_method_handler;
2760   }
2761   int total_args_passed = method->size_of_parameters(); // All args on stack
2762   if (total_args_passed == 0) {
2763     return _no_arg_handler;
2764   } else if (total_args_passed == 1) {
2765     if (!method->is_static()) {



2766       return _obj_arg_handler;
2767     }
2768     switch (method->signature()->char_at(1)) {
2769       case JVM_SIGNATURE_CLASS:









2770       case JVM_SIGNATURE_ARRAY:
2771         return _obj_arg_handler;
2772       case JVM_SIGNATURE_INT:
2773       case JVM_SIGNATURE_BOOLEAN:
2774       case JVM_SIGNATURE_CHAR:
2775       case JVM_SIGNATURE_BYTE:
2776       case JVM_SIGNATURE_SHORT:
2777         return _int_arg_handler;
2778     }
2779   } else if (total_args_passed == 2 &&
2780              !method->is_static()) {
2781     switch (method->signature()->char_at(1)) {
2782       case JVM_SIGNATURE_CLASS:









2783       case JVM_SIGNATURE_ARRAY:
2784         return _obj_obj_arg_handler;
2785       case JVM_SIGNATURE_INT:
2786       case JVM_SIGNATURE_BOOLEAN:
2787       case JVM_SIGNATURE_CHAR:
2788       case JVM_SIGNATURE_BYTE:
2789       case JVM_SIGNATURE_SHORT:
2790         return _obj_int_arg_handler;
2791     }
2792   }
2793   return nullptr;
2794 }
2795 
2796 class AdapterSignatureIterator : public SignatureIterator {
2797  private:
2798   BasicType stack_sig_bt[16];
2799   BasicType* sig_bt;
2800   int index;




2801 
2802  public:
2803   AdapterSignatureIterator(Symbol* signature,
2804                            fingerprint_t fingerprint,
2805                            bool is_static,
2806                            int total_args_passed) :
2807     SignatureIterator(signature, fingerprint),
2808     index(0)
2809   {
2810     sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2811     if (!is_static) { // Pass in receiver first
2812       sig_bt[index++] = T_OBJECT;
2813     }
2814     do_parameters_on(this);
2815   }
2816 
2817   BasicType* basic_types() {
2818     return sig_bt;











2819   }
2820 




























































































2821 #ifdef ASSERT
2822   int slots() {
2823     return index;
2824   }





2825 #endif


















































2826 
2827  private:










2828 
2829   friend class SignatureIterator;  // so do_parameters_on can call do_type
2830   void do_type(BasicType type) {
2831     sig_bt[index++] = type;
2832     if (type == T_LONG || type == T_DOUBLE) {
2833       sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots



2834     }
2835   }
2836 };









2837 
2838 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2839   // Use customized signature handler.  Need to lock around updates to
2840   // the _adapter_handler_table (it is not safe for concurrent readers
2841   // and a single writer: this could be fixed if it becomes a
2842   // problem).
2843 
2844   // Fast-path for trivial adapters
2845   AdapterHandlerEntry* entry = get_simple_adapter(method);
2846   if (entry != nullptr) {
2847     return entry;
2848   }
2849 
2850   ResourceMark rm;
2851   AdapterBlob* new_adapter = nullptr;
2852 
2853   // Fill in the signature array, for the calling-convention call.
2854   int total_args_passed = method->size_of_parameters(); // All args on stack















2855 
2856   AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2857                               method->is_static(), total_args_passed);
2858   assert(si.slots() == total_args_passed, "");
2859   BasicType* sig_bt = si.basic_types();
2860   {
2861     MutexLocker mu(AdapterHandlerLibrary_lock);
2862 













2863     // Lookup method signature's fingerprint
2864     entry = lookup(total_args_passed, sig_bt);
2865 
2866     if (entry != nullptr) {
2867 #ifdef ASSERT
2868       if (VerifyAdapterSharing) {
2869         AdapterBlob* comparison_blob = nullptr;
2870         AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, total_args_passed, sig_bt, false);
2871         assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison");
2872         assert(comparison_entry->compare_code(entry), "code must match");
2873         // Release the one just created and return the original
2874         delete comparison_entry;
2875       }
2876 #endif
2877       return entry;
2878     }
2879 
2880     entry = create_adapter(new_adapter, total_args_passed, sig_bt, /* allocate_code_blob */ true);
2881   }
2882 
2883   // Outside of the lock
2884   if (new_adapter != nullptr) {
2885     post_adapter_creation(new_adapter, entry);
2886   }
2887   return entry;
2888 }
2889 
2890 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
2891                                                            int total_args_passed,
2892                                                            BasicType* sig_bt,
2893                                                            bool allocate_code_blob) {
2894 
2895   // StubRoutines::_final_stubs_code is initialized after this function can be called. As a result,
2896   // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that generated prior
2897   // to all StubRoutines::_final_stubs_code being set. Checks refer to runtime range checks generated
2898   // in an I2C stub that ensure that an I2C stub is called from an interpreter frame or stubs.
2899   bool contains_all_checks = StubRoutines::final_stubs_code() != nullptr;
2900 
2901   VMRegPair stack_regs[16];
2902   VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2903 
2904   // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2905   int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2906   BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2907   CodeBuffer buffer(buf);
2908   short buffer_locs[20];
2909   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2910                                           sizeof(buffer_locs)/sizeof(relocInfo));
2911 
2912   // Make a C heap allocated version of the fingerprint to store in the adapter
2913   AdapterFingerPrint* fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
2914   MacroAssembler _masm(&buffer);
2915   AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
2916                                                 total_args_passed,
2917                                                 comp_args_on_stack,
2918                                                 sig_bt,
2919                                                 regs,
2920                                                 fingerprint);












2921 
2922 #ifdef ASSERT
2923   if (VerifyAdapterSharing) {
2924     entry->save_code(buf->code_begin(), buffer.insts_size());
2925     if (!allocate_code_blob) {
2926       return entry;
2927     }
2928   }
2929 #endif
2930 
2931   new_adapter = AdapterBlob::create(&buffer);
2932   NOT_PRODUCT(int insts_size = buffer.insts_size());
2933   if (new_adapter == nullptr) {
2934     // CodeCache is full, disable compilation
2935     // Ought to log this but compile log is only per compile thread
2936     // and we're some non descript Java thread.
2937     return nullptr;
2938   }
2939   entry->relocate(new_adapter->content_begin());
2940 #ifndef PRODUCT
2941   // debugging support
2942   if (PrintAdapterHandlers || PrintStubCode) {
2943     ttyLocker ttyl;
2944     entry->print_adapter_on(tty);
2945     tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
2946                   _adapter_handler_table->number_of_entries(), fingerprint->as_basic_args_string(),
2947                   fingerprint->as_string(), insts_size);
2948     tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
2949     if (Verbose || PrintStubCode) {
2950       address first_pc = entry->base_address();
2951       if (first_pc != nullptr) {

2953                              NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
2954         tty->cr();
2955       }
2956     }
2957   }
2958 #endif
2959 
2960   // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
2961   // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
2962   if (contains_all_checks || !VerifyAdapterCalls) {
2963     assert_lock_strong(AdapterHandlerLibrary_lock);
2964     _adapter_handler_table->put(fingerprint, entry);
2965   }
2966   return entry;
2967 }
2968 
2969 address AdapterHandlerEntry::base_address() {
2970   address base = _i2c_entry;
2971   if (base == nullptr)  base = _c2i_entry;
2972   assert(base <= _c2i_entry || _c2i_entry == nullptr, "");


2973   assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, "");

2974   assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, "");
2975   return base;
2976 }
2977 
2978 void AdapterHandlerEntry::relocate(address new_base) {
2979   address old_base = base_address();
2980   assert(old_base != nullptr, "");
2981   ptrdiff_t delta = new_base - old_base;
2982   if (_i2c_entry != nullptr)
2983     _i2c_entry += delta;
2984   if (_c2i_entry != nullptr)
2985     _c2i_entry += delta;




2986   if (_c2i_unverified_entry != nullptr)
2987     _c2i_unverified_entry += delta;


2988   if (_c2i_no_clinit_check_entry != nullptr)
2989     _c2i_no_clinit_check_entry += delta;
2990   assert(base_address() == new_base, "");
2991 }
2992 
2993 
2994 AdapterHandlerEntry::~AdapterHandlerEntry() {
2995   delete _fingerprint;



2996 #ifdef ASSERT
2997   FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
2998 #endif
2999 }
3000 
3001 
3002 #ifdef ASSERT
3003 // Capture the code before relocation so that it can be compared
3004 // against other versions.  If the code is captured after relocation
3005 // then relative instructions won't be equivalent.
3006 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3007   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3008   _saved_code_length = length;
3009   memcpy(_saved_code, buffer, length);
3010 }
3011 
3012 
3013 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3014   assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3015 

3062 
3063       struct { double data[20]; } locs_buf;
3064       struct { double data[20]; } stubs_locs_buf;
3065       buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3066 #if defined(AARCH64) || defined(PPC64)
3067       // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3068       // in the constant pool to ensure ordering between the barrier and oops
3069       // accesses. For native_wrappers we need a constant.
3070       // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3071       // static java call that is resolved in the runtime.
3072       if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3073         buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3074       }
3075 #endif
3076       buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3077       MacroAssembler _masm(&buffer);
3078 
3079       // Fill in the signature array, for the calling-convention call.
3080       const int total_args_passed = method->size_of_parameters();
3081 

3082       VMRegPair stack_regs[16];

3083       VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3084 
3085       AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
3086                               method->is_static(), total_args_passed);
3087       BasicType* sig_bt = si.basic_types();
3088       assert(si.slots() == total_args_passed, "");
3089       BasicType ret_type = si.return_type();








3090 
3091       // Now get the compiled-Java arguments layout.
3092       int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3093 
3094       // Generate the compiled-to-native wrapper code
3095       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3096 
3097       if (nm != nullptr) {
3098         {
3099           MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
3100           if (nm->make_in_use()) {
3101             method->set_code(method, nm);
3102           }
3103         }
3104 
3105         DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
3106         if (directive->PrintAssemblyOption) {
3107           nm->print_code();
3108         }
3109         DirectivesStack::release(directive);

3314       st->print("Adapter for signature: ");
3315       a->print_adapter_on(st);
3316       return true;
3317     } else {
3318       return false; // keep looking
3319     }
3320   };
3321   assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3322   _adapter_handler_table->iterate(findblob);
3323   assert(found, "Should have found handler");
3324 }
3325 
3326 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3327   st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3328   if (get_i2c_entry() != nullptr) {
3329     st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3330   }
3331   if (get_c2i_entry() != nullptr) {
3332     st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3333   }






3334   if (get_c2i_unverified_entry() != nullptr) {
3335     st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));



3336   }
3337   if (get_c2i_no_clinit_check_entry() != nullptr) {
3338     st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3339   }
3340   st->cr();
3341 }
3342 
3343 #ifndef PRODUCT
3344 
3345 void AdapterHandlerLibrary::print_statistics() {
3346   print_table_statistics();
3347 }
3348 
3349 #endif /* PRODUCT */
3350 
3351 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3352   assert(current == JavaThread::current(), "pre-condition");
3353   StackOverflow* overflow_state = current->stack_overflow_state();
3354   overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3355   overflow_state->set_reserved_stack_activation(current->stack_base());

3404         event.set_method(method);
3405         event.commit();
3406       }
3407     }
3408   }
3409   return activation;
3410 }
3411 
3412 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3413   // After any safepoint, just before going back to compiled code,
3414   // we inform the GC that we will be doing initializing writes to
3415   // this object in the future without emitting card-marks, so
3416   // GC may take any compensating steps.
3417 
3418   oop new_obj = current->vm_result();
3419   if (new_obj == nullptr) return;
3420 
3421   BarrierSet *bs = BarrierSet::barrier_set();
3422   bs->on_slowpath_allocation_exit(current, new_obj);
3423 }





































































































































































































  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/vmClasses.hpp"
  29 #include "classfile/vmSymbols.hpp"
  30 #include "code/codeCache.hpp"
  31 #include "code/compiledIC.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/compiledMethod.inline.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/abstractCompiler.hpp"
  37 #include "compiler/compileBroker.hpp"
  38 #include "compiler/disassembler.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "gc/shared/gcLocker.inline.hpp"
  42 #include "interpreter/interpreter.hpp"
  43 #include "interpreter/interpreterRuntime.hpp"
  44 #include "jvm.h"
  45 #include "jfr/jfrEvents.hpp"
  46 #include "logging/log.hpp"
  47 #include "memory/oopFactory.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "memory/universe.hpp"
  50 #include "oops/access.hpp"
  51 #include "oops/fieldStreams.inline.hpp"
  52 #include "metaprogramming/primitiveConversions.hpp"
  53 #include "oops/compiledICHolder.inline.hpp"
  54 #include "oops/klass.hpp"
  55 #include "oops/method.inline.hpp"
  56 #include "oops/objArrayKlass.hpp"
  57 #include "oops/objArrayOop.inline.hpp"
  58 #include "oops/oop.inline.hpp"
  59 #include "oops/inlineKlass.inline.hpp"
  60 #include "prims/forte.hpp"
  61 #include "prims/jvmtiExport.hpp"
  62 #include "prims/jvmtiThreadState.hpp"
  63 #include "prims/methodHandles.hpp"
  64 #include "prims/nativeLookup.hpp"
  65 #include "runtime/atomic.hpp"
  66 #include "runtime/frame.inline.hpp"
  67 #include "runtime/handles.inline.hpp"
  68 #include "runtime/init.hpp"
  69 #include "runtime/interfaceSupport.inline.hpp"
  70 #include "runtime/java.hpp"
  71 #include "runtime/javaCalls.hpp"
  72 #include "runtime/jniHandles.inline.hpp"
  73 #include "runtime/sharedRuntime.hpp"
  74 #include "runtime/stackWatermarkSet.hpp"
  75 #include "runtime/stubRoutines.hpp"
  76 #include "runtime/synchronizer.hpp"
  77 #include "runtime/vframe.inline.hpp"
  78 #include "runtime/vframeArray.hpp"
  79 #include "runtime/vm_version.hpp"
  80 #include "utilities/copy.hpp"
  81 #include "utilities/dtrace.hpp"
  82 #include "utilities/events.hpp"
  83 #include "utilities/resourceHash.hpp"
  84 #include "utilities/macros.hpp"
  85 #include "utilities/xmlstream.hpp"
  86 #ifdef COMPILER1
  87 #include "c1/c1_Runtime1.hpp"
  88 #endif
  89 
  90 // Shared stub locations
  91 RuntimeStub*        SharedRuntime::_wrong_method_blob;
  92 RuntimeStub*        SharedRuntime::_wrong_method_abstract_blob;
  93 RuntimeStub*        SharedRuntime::_ic_miss_blob;
  94 RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
  95 RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
  96 RuntimeStub*        SharedRuntime::_resolve_static_call_blob;

  97 
  98 DeoptimizationBlob* SharedRuntime::_deopt_blob;
  99 SafepointBlob*      SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
 100 SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
 101 SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
 102 
 103 #ifdef COMPILER2
 104 UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
 105 #endif // COMPILER2
 106 
 107 nmethod*            SharedRuntime::_cont_doYield_stub;
 108 
 109 //----------------------------generate_stubs-----------------------------------
 110 void SharedRuntime::generate_stubs() {
 111   _wrong_method_blob                   = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),          "wrong_method_stub");
 112   _wrong_method_abstract_blob          = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
 113   _ic_miss_blob                        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),  "ic_miss_stub");
 114   _resolve_opt_virtual_call_blob       = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),   "resolve_opt_virtual_call");
 115   _resolve_virtual_call_blob           = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),       "resolve_virtual_call");
 116   _resolve_static_call_blob            = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),        "resolve_static_call");

 117 
 118   AdapterHandlerLibrary::initialize();
 119 
 120 #if COMPILER2_OR_JVMCI
 121   // Vectors are generated only by C2 and JVMCI.
 122   bool support_wide = is_wide_vector(MaxVectorSize);
 123   if (support_wide) {
 124     _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
 125   }
 126 #endif // COMPILER2_OR_JVMCI
 127   _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
 128   _polling_page_return_handler_blob    = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);
 129 
 130   generate_deopt_blob();
 131 
 132 #ifdef COMPILER2
 133   generate_uncommon_trap_blob();
 134 #endif // COMPILER2
 135 }
 136 

1117 // for a call current in progress, i.e., arguments has been pushed on stack
1118 // but callee has not been invoked yet.  Caller frame must be compiled.
1119 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1120                                               CallInfo& callinfo, TRAPS) {
1121   Handle receiver;
1122   Handle nullHandle;  // create a handy null handle for exception returns
1123   JavaThread* current = THREAD;
1124 
1125   assert(!vfst.at_end(), "Java frame must exist");
1126 
1127   // Find caller and bci from vframe
1128   methodHandle caller(current, vfst.method());
1129   int          bci   = vfst.bci();
1130 
1131   if (caller->is_continuation_enter_intrinsic()) {
1132     bc = Bytecodes::_invokestatic;
1133     LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1134     return receiver;
1135   }
1136 
1137   // Substitutability test implementation piggy backs on static call resolution
1138   Bytecodes::Code code = caller->java_code_at(bci);
1139   if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
1140     bc = Bytecodes::_invokestatic;
1141     methodHandle attached_method(THREAD, extract_attached_method(vfst));
1142     assert(attached_method.not_null(), "must have attached method");
1143     vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
1144     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
1145 #ifdef ASSERT
1146     Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(vmSymbols::isSubstitutable_name(), vmSymbols::object_object_boolean_signature());
1147     assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
1148 #endif
1149     return receiver;
1150   }
1151 
1152   Bytecode_invoke bytecode(caller, bci);
1153   int bytecode_index = bytecode.index();
1154   bc = bytecode.invoke_code();
1155 
1156   methodHandle attached_method(current, extract_attached_method(vfst));
1157   if (attached_method.not_null()) {
1158     Method* callee = bytecode.static_target(CHECK_NH);
1159     vmIntrinsics::ID id = callee->intrinsic_id();
1160     // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1161     // it attaches statically resolved method to the call site.
1162     if (MethodHandles::is_signature_polymorphic(id) &&
1163         MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1164       bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1165 
1166       // Adjust invocation mode according to the attached method.
1167       switch (bc) {
1168         case Bytecodes::_invokevirtual:
1169           if (attached_method->method_holder()->is_interface()) {
1170             bc = Bytecodes::_invokeinterface;
1171           }
1172           break;
1173         case Bytecodes::_invokeinterface:
1174           if (!attached_method->method_holder()->is_interface()) {
1175             bc = Bytecodes::_invokevirtual;
1176           }
1177           break;
1178         case Bytecodes::_invokehandle:
1179           if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1180             bc = attached_method->is_static() ? Bytecodes::_invokestatic
1181                                               : Bytecodes::_invokevirtual;
1182           }
1183           break;
1184         default:
1185           break;
1186       }
1187     } else {
1188       assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1189       if (!attached_method->method_holder()->is_inline_klass()) {
1190         // Ignore the attached method in this case to not confuse below code
1191         attached_method = methodHandle(current, nullptr);
1192       }
1193     }
1194   }
1195 
1196   assert(bc != Bytecodes::_illegal, "not initialized");
1197 
1198   bool has_receiver = bc != Bytecodes::_invokestatic &&
1199                       bc != Bytecodes::_invokedynamic &&
1200                       bc != Bytecodes::_invokehandle;
1201   bool check_null_and_abstract = true;
1202 
1203   // Find receiver for non-static call
1204   if (has_receiver) {
1205     // This register map must be update since we need to find the receiver for
1206     // compiled frames. The receiver might be in a register.
1207     RegisterMap reg_map2(current,
1208                          RegisterMap::UpdateMap::include,
1209                          RegisterMap::ProcessFrames::include,
1210                          RegisterMap::WalkContinuation::skip);
1211     frame stubFrame   = current->last_frame();
1212     // Caller-frame is a compiled frame
1213     frame callerFrame = stubFrame.sender(&reg_map2);
1214 
1215     Method* callee = attached_method();
1216     if (callee == nullptr) {
1217       callee = bytecode.static_target(CHECK_NH);
1218       if (callee == nullptr) {
1219         THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1220       }
1221     }
1222     bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->is_compiled_by_c1();
1223     if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
1224       // If the receiver is an inline type that is passed as fields, no oop is available
1225       // Resolve the call without receiver null checking.
1226       assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
1227       assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
1228       if (bc == Bytecodes::_invokeinterface) {
1229         bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
1230       }
1231       check_null_and_abstract = false;
1232     } else {
1233       // Retrieve from a compiled argument list
1234       receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1235       assert(oopDesc::is_oop_or_null(receiver()), "");
1236       if (receiver.is_null()) {
1237         THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1238       }
1239     }
1240   }
1241 
1242   // Resolve method
1243   if (attached_method.not_null()) {
1244     // Parameterized by attached method.
1245     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
1246   } else {
1247     // Parameterized by bytecode.
1248     constantPoolHandle constants(current, caller->constants());
1249     LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1250   }
1251 
1252 #ifdef ASSERT
1253   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1254   if (has_receiver && check_null_and_abstract) {
1255     assert(receiver.not_null(), "should have thrown exception");
1256     Klass* receiver_klass = receiver->klass();
1257     Klass* rk = nullptr;
1258     if (attached_method.not_null()) {
1259       // In case there's resolved method attached, use its holder during the check.
1260       rk = attached_method->method_holder();
1261     } else {
1262       // Klass is already loaded.
1263       constantPoolHandle constants(current, caller->constants());
1264       rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1265     }
1266     Klass* static_receiver_klass = rk;
1267     assert(receiver_klass->is_subtype_of(static_receiver_klass),
1268            "actual receiver must be subclass of static receiver klass");
1269     if (receiver_klass->is_instance_klass()) {
1270       if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1271         tty->print_cr("ERROR: Klass not yet initialized!!");
1272         receiver_klass->print();
1273       }
1274       assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1275     }
1276   }
1277 #endif
1278 
1279   return receiver;
1280 }
1281 
1282 methodHandle SharedRuntime::find_callee_method(bool is_optimized, bool& caller_is_c1, TRAPS) {
1283   JavaThread* current = THREAD;
1284   ResourceMark rm(current);
1285   // We need first to check if any Java activations (compiled, interpreted)
1286   // exist on the stack since last JavaCall.  If not, we need
1287   // to get the target method from the JavaCall wrapper.
1288   vframeStream vfst(current, true);  // Do not skip any javaCalls
1289   methodHandle callee_method;
1290   if (vfst.at_end()) {
1291     // No Java frames were found on stack since we did the JavaCall.
1292     // Hence the stack can only contain an entry_frame.  We need to
1293     // find the target method from the stub frame.
1294     RegisterMap reg_map(current,
1295                         RegisterMap::UpdateMap::skip,
1296                         RegisterMap::ProcessFrames::include,
1297                         RegisterMap::WalkContinuation::skip);
1298     frame fr = current->last_frame();
1299     assert(fr.is_runtime_frame(), "must be a runtimeStub");
1300     fr = fr.sender(&reg_map);
1301     assert(fr.is_entry_frame(), "must be");
1302     // fr is now pointing to the entry frame.
1303     callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1304   } else {
1305     Bytecodes::Code bc;
1306     CallInfo callinfo;
1307     find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1308     // Calls via mismatching methods are always non-scalarized
1309     if (callinfo.resolved_method()->mismatch() && !is_optimized) {
1310       caller_is_c1 = true;
1311     }
1312     callee_method = methodHandle(current, callinfo.selected_method());
1313   }
1314   assert(callee_method()->is_method(), "must be");
1315   return callee_method;
1316 }
1317 
1318 // Resolves a call.
1319 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_is_c1, TRAPS) {
1320   methodHandle callee_method;
1321   callee_method = resolve_sub_helper(is_virtual, is_optimized, caller_is_c1, THREAD);
1322   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
1323     int retry_count = 0;
1324     while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
1325            callee_method->method_holder() != vmClasses::Object_klass()) {
1326       // If has a pending exception then there is no need to re-try to
1327       // resolve this method.
1328       // If the method has been redefined, we need to try again.
1329       // Hack: we have no way to update the vtables of arrays, so don't
1330       // require that java.lang.Object has been updated.
1331 
1332       // It is very unlikely that method is redefined more than 100 times
1333       // in the middle of resolve. If it is looping here more than 100 times
1334       // means then there could be a bug here.
1335       guarantee((retry_count++ < 100),
1336                 "Could not resolve to latest version of redefined method");
1337       // method is redefined in the middle of resolve so re-try.
1338       callee_method = resolve_sub_helper(is_virtual, is_optimized, caller_is_c1, THREAD);
1339     }
1340   }
1341   return callee_method;
1342 }
1343 
1344 // This fails if resolution required refilling of IC stubs
1345 bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
1346                                                 CompiledMethod* caller_nm, bool is_virtual, bool is_optimized, bool& caller_is_c1,
1347                                                 Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
1348   StaticCallInfo static_call_info;
1349   CompiledICInfo virtual_call_info;
1350 
1351   // Make sure the callee nmethod does not get deoptimized and removed before
1352   // we are done patching the code.
1353   CompiledMethod* callee = callee_method->code();
1354 
1355   if (callee != nullptr) {
1356     assert(callee->is_compiled(), "must be nmethod for patching");
1357   }
1358 
1359   if (callee != nullptr && !callee->is_in_use()) {
1360     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1361     callee = nullptr;
1362   }
1363 #ifdef ASSERT
1364   address dest_entry_point = callee == nullptr ? 0 : callee->entry_point(); // used below
1365 #endif
1366 
1367   bool is_nmethod = caller_nm->is_nmethod();
1368 
1369   if (is_virtual) {
1370     Klass* receiver_klass = nullptr;
1371     if (!caller_is_c1 && callee_method->is_scalarized_arg(0)) {
1372       // If the receiver is an inline type that is passed as fields, no oop is available
1373       receiver_klass = callee_method->method_holder();
1374     } else {
1375       assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1376       receiver_klass = invoke_code == Bytecodes::_invokehandle ? nullptr : receiver->klass();
1377     }
1378     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1379     CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
1380                      is_optimized, static_bound, is_nmethod, caller_is_c1, virtual_call_info,

1381                      CHECK_false);
1382   } else {
1383     // static call
1384     CompiledStaticCall::compute_entry(callee_method, caller_nm, static_call_info);
1385   }
1386 
1387   // grab lock, check for deoptimization and potentially patch caller
1388   {
1389     CompiledICLocker ml(caller_nm);
1390 
1391     // Lock blocks for safepoint during which both nmethods can change state.
1392 
1393     // Now that we are ready to patch if the Method* was redefined then
1394     // don't update call site and let the caller retry.
1395     // Don't update call site if callee nmethod was unloaded or deoptimized.
1396     // Don't update call site if callee nmethod was replaced by an other nmethod
1397     // which may happen when multiply alive nmethod (tiered compilation)
1398     // will be supported.
1399     if (!callee_method->is_old() &&
1400         (callee == nullptr || (callee->is_in_use() && callee_method->code() == callee))) {
1401       NoSafepointVerifier nsv;
1402 #ifdef ASSERT
1403       // We must not try to patch to jump to an already unloaded method.
1404       if (dest_entry_point != 0) {

1417       } else {
1418         if (VM_Version::supports_fast_class_init_checks() &&
1419             invoke_code == Bytecodes::_invokestatic &&
1420             callee_method->needs_clinit_barrier() &&
1421             callee != nullptr && callee->is_compiled_by_jvmci()) {
1422           return true; // skip patching for JVMCI
1423         }
1424         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
1425         if (is_nmethod && caller_nm->method()->is_continuation_enter_intrinsic()) {
1426           ssc->compute_entry_for_continuation_entry(callee_method, static_call_info);
1427         }
1428         if (ssc->is_clean()) ssc->set(static_call_info);
1429       }
1430     }
1431   } // unlock CompiledICLocker
1432   return true;
1433 }
1434 
1435 // Resolves a call.  The compilers generate code for calls that go here
1436 // and are patched with the real destination of the call.
1437 methodHandle SharedRuntime::resolve_sub_helper(bool is_virtual, bool is_optimized, bool& caller_is_c1, TRAPS) {
1438   JavaThread* current = THREAD;
1439   ResourceMark rm(current);
1440   RegisterMap cbl_map(current,
1441                       RegisterMap::UpdateMap::skip,
1442                       RegisterMap::ProcessFrames::include,
1443                       RegisterMap::WalkContinuation::skip);
1444   frame caller_frame = current->last_frame().sender(&cbl_map);
1445 
1446   CodeBlob* caller_cb = caller_frame.cb();
1447   guarantee(caller_cb != nullptr && caller_cb->is_compiled(), "must be called from compiled method");
1448   CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
1449 
1450   // determine call info & receiver
1451   // note: a) receiver is null for static calls
1452   //       b) an exception is thrown if receiver is null for non-static calls
1453   CallInfo call_info;
1454   Bytecodes::Code invoke_code = Bytecodes::_illegal;
1455   Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1456   methodHandle callee_method(current, call_info.selected_method());
1457   // Calls via mismatching methods are always non-scalarized
1458   if (caller_nm->is_compiled_by_c1() || (call_info.resolved_method()->mismatch() && !is_optimized)) {
1459     caller_is_c1 = true;
1460   }
1461 
1462   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1463          (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1464          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1465          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1466          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1467 
1468   assert(!caller_nm->is_unloading(), "It should not be unloading");
1469 
1470 #ifndef PRODUCT
1471   // tracing/debugging/statistics
1472   uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1473                  (is_virtual) ? (&_resolve_virtual_ctr) :
1474                                 (&_resolve_static_ctr);
1475   Atomic::inc(addr);
1476 
1477   if (TraceCallFixup) {
1478     ResourceMark rm(current);
1479     tty->print("resolving %s%s (%s) call to",
1480                (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",

1505   // If the resolved method is a MethodHandle invoke target, the call
1506   // site must be a MethodHandle call site, because the lambda form might tail-call
1507   // leaving the stack in a state unknown to either caller or callee
1508   // TODO detune for now but we might need it again
1509 //  assert(!callee_method->is_compiled_lambda_form() ||
1510 //         caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
1511 
1512   // Compute entry points. This might require generation of C2I converter
1513   // frames, so we cannot be holding any locks here. Furthermore, the
1514   // computation of the entry points is independent of patching the call.  We
1515   // always return the entry-point, but we only patch the stub if the call has
1516   // not been deoptimized.  Return values: For a virtual call this is an
1517   // (cached_oop, destination address) pair. For a static call/optimized
1518   // virtual this is just a destination address.
1519 
1520   // Patching IC caches may fail if we run out if transition stubs.
1521   // We refill the ic stubs then and try again.
1522   for (;;) {
1523     ICRefillVerifier ic_refill_verifier;
1524     bool successful = resolve_sub_helper_internal(callee_method, caller_frame, caller_nm,
1525                                                   is_virtual, is_optimized, caller_is_c1, receiver,
1526                                                   call_info, invoke_code, CHECK_(methodHandle()));
1527     if (successful) {
1528       return callee_method;
1529     } else {
1530       InlineCacheBuffer::refill_ic_stubs();
1531     }
1532   }
1533 
1534 }
1535 
1536 
1537 // Inline caches exist only in compiled code
1538 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1539 #ifdef ASSERT
1540   RegisterMap reg_map(current,
1541                       RegisterMap::UpdateMap::skip,
1542                       RegisterMap::ProcessFrames::include,
1543                       RegisterMap::WalkContinuation::skip);
1544   frame stub_frame = current->last_frame();
1545   assert(stub_frame.is_runtime_frame(), "sanity check");
1546   frame caller_frame = stub_frame.sender(&reg_map);
1547   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1548 #endif /* ASSERT */
1549 
1550   methodHandle callee_method;
1551   bool is_optimized = false;
1552   bool caller_is_c1 = false;
1553   JRT_BLOCK
1554     callee_method = SharedRuntime::handle_ic_miss_helper(is_optimized, caller_is_c1, CHECK_NULL);
1555     // Return Method* through TLS
1556     current->set_vm_result_2(callee_method());
1557   JRT_BLOCK_END
1558   // return compiled code entry point after potential safepoints
1559   return entry_for_handle_wrong_method(callee_method, false, is_optimized, caller_is_c1);

1560 JRT_END
1561 
1562 
1563 // Handle call site that has been made non-entrant
1564 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1565   // 6243940 We might end up in here if the callee is deoptimized
1566   // as we race to call it.  We don't want to take a safepoint if
1567   // the caller was interpreted because the caller frame will look
1568   // interpreted to the stack walkers and arguments are now
1569   // "compiled" so it is much better to make this transition
1570   // invisible to the stack walking code. The i2c path will
1571   // place the callee method in the callee_target. It is stashed
1572   // there because if we try and find the callee by normal means a
1573   // safepoint is possible and have trouble gc'ing the compiled args.
1574   RegisterMap reg_map(current,
1575                       RegisterMap::UpdateMap::skip,
1576                       RegisterMap::ProcessFrames::include,
1577                       RegisterMap::WalkContinuation::skip);
1578   frame stub_frame = current->last_frame();
1579   assert(stub_frame.is_runtime_frame(), "sanity check");
1580   frame caller_frame = stub_frame.sender(&reg_map);
1581 
1582   if (caller_frame.is_interpreted_frame() ||
1583       caller_frame.is_entry_frame() ||
1584       caller_frame.is_upcall_stub_frame()) {
1585     Method* callee = current->callee_target();
1586     guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1587     current->set_vm_result_2(callee);
1588     current->set_callee_target(nullptr);
1589     if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1590       // Bypass class initialization checks in c2i when caller is in native.
1591       // JNI calls to static methods don't have class initialization checks.
1592       // Fast class initialization checks are present in c2i adapters and call into
1593       // SharedRuntime::handle_wrong_method() on the slow path.
1594       //
1595       // JVM upcalls may land here as well, but there's a proper check present in
1596       // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1597       // so bypassing it in c2i adapter is benign.
1598       return callee->get_c2i_no_clinit_check_entry();
1599     } else {
1600       if (caller_frame.is_interpreted_frame()) {
1601         return callee->get_c2i_inline_entry();
1602       } else {
1603         return callee->get_c2i_entry();
1604       }
1605     }
1606   }
1607 
1608   // Must be compiled to compiled path which is safe to stackwalk
1609   methodHandle callee_method;
1610   bool is_static_call = false;
1611   bool is_optimized = false;
1612   bool caller_is_c1 = false;
1613   JRT_BLOCK
1614     // Force resolving of caller (if we called from compiled frame)
1615     callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_is_c1, CHECK_NULL);
1616     current->set_vm_result_2(callee_method());
1617   JRT_BLOCK_END
1618   // return compiled code entry point after potential safepoints
1619   return entry_for_handle_wrong_method(callee_method, is_static_call, is_optimized, caller_is_c1);

1620 JRT_END
1621 
1622 // Handle abstract method call
1623 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1624   // Verbose error message for AbstractMethodError.
1625   // Get the called method from the invoke bytecode.
1626   vframeStream vfst(current, true);
1627   assert(!vfst.at_end(), "Java frame must exist");
1628   methodHandle caller(current, vfst.method());
1629   Bytecode_invoke invoke(caller, vfst.bci());
1630   DEBUG_ONLY( invoke.verify(); )
1631 
1632   // Find the compiled caller frame.
1633   RegisterMap reg_map(current,
1634                       RegisterMap::UpdateMap::include,
1635                       RegisterMap::ProcessFrames::include,
1636                       RegisterMap::WalkContinuation::skip);
1637   frame stubFrame = current->last_frame();
1638   assert(stubFrame.is_runtime_frame(), "must be");
1639   frame callerFrame = stubFrame.sender(&reg_map);
1640   assert(callerFrame.is_compiled_frame(), "must be");
1641 
1642   // Install exception and return forward entry.
1643   address res = StubRoutines::throw_AbstractMethodError_entry();
1644   JRT_BLOCK
1645     methodHandle callee(current, invoke.static_target(current));
1646     if (!callee.is_null()) {
1647       oop recv = callerFrame.retrieve_receiver(&reg_map);
1648       Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1649       res = StubRoutines::forward_exception_entry();
1650       LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1651     }
1652   JRT_BLOCK_END
1653   return res;
1654 JRT_END
1655 
1656 
1657 // resolve a static call and patch code
1658 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1659   methodHandle callee_method;
1660   bool caller_is_c1 = false;
1661   bool enter_special = false;
1662   JRT_BLOCK
1663     callee_method = SharedRuntime::resolve_helper(false, false, caller_is_c1, CHECK_NULL);
1664     current->set_vm_result_2(callee_method());
1665 
1666     if (current->is_interp_only_mode()) {
1667       RegisterMap reg_map(current,
1668                           RegisterMap::UpdateMap::skip,
1669                           RegisterMap::ProcessFrames::include,
1670                           RegisterMap::WalkContinuation::skip);
1671       frame stub_frame = current->last_frame();
1672       assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1673       frame caller = stub_frame.sender(&reg_map);
1674       enter_special = caller.cb() != nullptr && caller.cb()->is_compiled()
1675         && caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic();
1676     }
1677   JRT_BLOCK_END
1678 
1679   if (current->is_interp_only_mode() && enter_special) {
1680     // enterSpecial is compiled and calls this method to resolve the call to Continuation::enter
1681     // but in interp_only_mode we need to go to the interpreted entry
1682     // The c2i won't patch in this mode -- see fixup_callers_callsite
1683     //
1684     // This should probably be done in all cases, not just enterSpecial (see JDK-8218403),
1685     // but that's part of a larger fix, and the situation is worse for enterSpecial, as it has no
1686     // interpreted version.
1687     return callee_method->get_c2i_entry();
1688   }
1689 
1690   // return compiled code entry point after potential safepoints
1691   address entry = caller_is_c1 ?
1692     callee_method->verified_inline_code_entry() : callee_method->verified_code_entry();
1693   assert(entry != nullptr, "Jump to zero!");
1694   return entry;
1695 JRT_END
1696 
1697 
1698 // resolve virtual call and update inline cache to monomorphic
1699 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1700   methodHandle callee_method;
1701   bool caller_is_c1 = false;
1702   JRT_BLOCK
1703     callee_method = SharedRuntime::resolve_helper(true, false, caller_is_c1, CHECK_NULL);
1704     current->set_vm_result_2(callee_method());
1705   JRT_BLOCK_END
1706   // return compiled code entry point after potential safepoints
1707   address entry = caller_is_c1 ?
1708     callee_method->verified_inline_code_entry() : callee_method->verified_inline_ro_code_entry();
1709   assert(entry != nullptr, "Jump to zero!");
1710   return entry;
1711 JRT_END
1712 
1713 
1714 // Resolve a virtual call that can be statically bound (e.g., always
1715 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1716 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1717   methodHandle callee_method;
1718   bool caller_is_c1 = false;
1719   JRT_BLOCK
1720     callee_method = SharedRuntime::resolve_helper(true, true, caller_is_c1, CHECK_NULL);
1721     current->set_vm_result_2(callee_method());
1722   JRT_BLOCK_END
1723   // return compiled code entry point after potential safepoints
1724   address entry = caller_is_c1 ?
1725     callee_method->verified_inline_code_entry() : callee_method->verified_code_entry();
1726   assert(entry != nullptr, "Jump to zero!");
1727   return entry;
1728 JRT_END
1729 
1730 // The handle_ic_miss_helper_internal function returns false if it failed due
1731 // to either running out of vtable stubs or ic stubs due to IC transitions
1732 // to transitional states. The needs_ic_stub_refill value will be set if
1733 // the failure was due to running out of IC stubs, in which case handle_ic_miss_helper
1734 // refills the IC stubs and tries again.
1735 bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
1736                                                    const frame& caller_frame, methodHandle callee_method,
1737                                                    Bytecodes::Code bc, CallInfo& call_info,
1738                                                    bool& needs_ic_stub_refill, bool& is_optimized, bool caller_is_c1, TRAPS) {
1739   CompiledICLocker ml(caller_nm);
1740   CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1741   bool should_be_mono = false;
1742   if (inline_cache->is_optimized()) {
1743     if (TraceCallFixup) {
1744       ResourceMark rm(THREAD);
1745       tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1746       callee_method->print_short_name(tty);
1747       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1748     }
1749     is_optimized = true;
1750     should_be_mono = true;
1751   } else if (inline_cache->is_icholder_call()) {
1752     CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1753     if (ic_oop != nullptr) {
1754       if (!ic_oop->is_loader_alive()) {
1755         // Deferred IC cleaning due to concurrent class unloading
1756         if (!inline_cache->set_to_clean()) {
1757           needs_ic_stub_refill = true;
1758           return false;
1759         }
1760       } else if (receiver()->klass() == ic_oop->holder_klass()) {
1761         // This isn't a real miss. We must have seen that compiled code
1762         // is now available and we want the call site converted to a
1763         // monomorphic compiled call site.
1764         // We can't assert for callee_method->code() != nullptr because it
1765         // could have been deoptimized in the meantime
1766         if (TraceCallFixup) {
1767           ResourceMark rm(THREAD);
1768           tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1769           callee_method->print_short_name(tty);
1770           tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1771         }
1772         should_be_mono = true;
1773       }
1774     }
1775   }
1776 
1777   if (should_be_mono) {
1778     // We have a path that was monomorphic but was going interpreted
1779     // and now we have (or had) a compiled entry. We correct the IC
1780     // by using a new icBuffer.
1781     CompiledICInfo info;
1782     Klass* receiver_klass = receiver()->klass();
1783     inline_cache->compute_monomorphic_entry(callee_method,
1784                                             receiver_klass,
1785                                             inline_cache->is_optimized(),
1786                                             false, caller_nm->is_nmethod(),
1787                                             caller_is_c1,
1788                                             info, CHECK_false);
1789     if (!inline_cache->set_to_monomorphic(info)) {
1790       needs_ic_stub_refill = true;
1791       return false;
1792     }
1793   } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1794     // Potential change to megamorphic
1795 
1796     bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, caller_is_c1, CHECK_false);
1797     if (needs_ic_stub_refill) {
1798       return false;
1799     }
1800     if (!successful) {
1801       if (!inline_cache->set_to_clean()) {
1802         needs_ic_stub_refill = true;
1803         return false;
1804       }
1805     }
1806   } else {
1807     // Either clean or megamorphic
1808   }
1809   return true;
1810 }
1811 
1812 methodHandle SharedRuntime::handle_ic_miss_helper(bool& is_optimized, bool& caller_is_c1, TRAPS) {
1813   JavaThread* current = THREAD;
1814   ResourceMark rm(current);
1815   CallInfo call_info;
1816   Bytecodes::Code bc;
1817 
1818   // receiver is null for static calls. An exception is thrown for null
1819   // receivers for non-static calls
1820   Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1821   // Compiler1 can produce virtual call sites that can actually be statically bound
1822   // If we fell thru to below we would think that the site was going megamorphic
1823   // when in fact the site can never miss. Worse because we'd think it was megamorphic
1824   // we'd try and do a vtable dispatch however methods that can be statically bound
1825   // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
1826   // reresolution of the  call site (as if we did a handle_wrong_method and not an
1827   // plain ic_miss) and the site will be converted to an optimized virtual call site
1828   // never to miss again. I don't believe C2 will produce code like this but if it
1829   // did this would still be the correct thing to do for it too, hence no ifdef.
1830   //
1831   if (call_info.resolved_method()->can_be_statically_bound()) {
1832     bool is_static_call = false;
1833     methodHandle callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_is_c1, CHECK_(methodHandle()));
1834     assert(!is_static_call, "IC miss at static call?");
1835     if (TraceCallFixup) {
1836       RegisterMap reg_map(current,
1837                           RegisterMap::UpdateMap::skip,
1838                           RegisterMap::ProcessFrames::include,
1839                           RegisterMap::WalkContinuation::skip);
1840       frame caller_frame = current->last_frame().sender(&reg_map);
1841       ResourceMark rm(current);
1842       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1843       callee_method->print_short_name(tty);
1844       tty->print_cr(" from pc: " INTPTR_FORMAT, p2i(caller_frame.pc()));
1845       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1846     }
1847     return callee_method;
1848   }
1849 
1850   methodHandle callee_method(current, call_info.selected_method());
1851 
1852 #ifndef PRODUCT
1853   Atomic::inc(&_ic_miss_ctr);
1854 

1873 #endif
1874 
1875   // install an event collector so that when a vtable stub is created the
1876   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1877   // event can't be posted when the stub is created as locks are held
1878   // - instead the event will be deferred until the event collector goes
1879   // out of scope.
1880   JvmtiDynamicCodeEventCollector event_collector;
1881 
1882   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1883   // Transitioning IC caches may require transition stubs. If we run out
1884   // of transition stubs, we have to drop locks and perform a safepoint
1885   // that refills them.
1886   RegisterMap reg_map(current,
1887                       RegisterMap::UpdateMap::skip,
1888                       RegisterMap::ProcessFrames::include,
1889                       RegisterMap::WalkContinuation::skip);
1890   frame caller_frame = current->last_frame().sender(&reg_map);
1891   CodeBlob* cb = caller_frame.cb();
1892   CompiledMethod* caller_nm = cb->as_compiled_method();
1893   // Calls via mismatching methods are always non-scalarized
1894   if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1895     caller_is_c1 = true;
1896   }
1897 
1898   for (;;) {
1899     ICRefillVerifier ic_refill_verifier;
1900     bool needs_ic_stub_refill = false;
1901     bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1902                                                      bc, call_info, needs_ic_stub_refill, is_optimized, caller_is_c1, CHECK_(methodHandle()));
1903     if (successful || !needs_ic_stub_refill) {
1904       return callee_method;
1905     } else {
1906       InlineCacheBuffer::refill_ic_stubs();
1907     }
1908   }
1909 }
1910 
1911 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1912   CompiledICLocker ml(caller_nm);
1913   if (is_static_call) {
1914     CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1915     if (!ssc->is_clean()) {
1916       return ssc->set_to_clean();
1917     }
1918   } else {
1919     // compiled, dispatched call (which used to call an interpreted method)
1920     CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1921     if (!inline_cache->is_clean()) {
1922       return inline_cache->set_to_clean();
1923     }
1924   }
1925   return true;
1926 }
1927 
1928 //
1929 // Resets a call-site in compiled code so it will get resolved again.
1930 // This routines handles both virtual call sites, optimized virtual call
1931 // sites, and static call sites. Typically used to change a call sites
1932 // destination from compiled to interpreted.
1933 //
1934 methodHandle SharedRuntime::reresolve_call_site(bool& is_static_call, bool& is_optimized, bool& caller_is_c1, TRAPS) {
1935   JavaThread* current = THREAD;
1936   ResourceMark rm(current);
1937   RegisterMap reg_map(current,
1938                       RegisterMap::UpdateMap::skip,
1939                       RegisterMap::ProcessFrames::include,
1940                       RegisterMap::WalkContinuation::skip);
1941   frame stub_frame = current->last_frame();
1942   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1943   frame caller = stub_frame.sender(&reg_map);
1944   if (caller.is_compiled_frame()) {
1945     caller_is_c1 = caller.cb()->is_compiled_by_c1();
1946   }
1947 
1948   // Do nothing if the frame isn't a live compiled frame.
1949   // nmethod could be deoptimized by the time we get here
1950   // so no update to the caller is needed.
1951 
1952   if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1953       (caller.is_native_frame() && ((CompiledMethod*)caller.cb())->method()->is_continuation_enter_intrinsic())) {
1954 
1955     address pc = caller.pc();
1956 
1957     // Check for static or virtual call

1958     CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
1959 
1960     // Default call_addr is the location of the "basic" call.
1961     // Determine the address of the call we a reresolving. With
1962     // Inline Caches we will always find a recognizable call.
1963     // With Inline Caches disabled we may or may not find a
1964     // recognizable call. We will always find a call for static
1965     // calls and for optimized virtual calls. For vanilla virtual
1966     // calls it depends on the state of the UseInlineCaches switch.
1967     //
1968     // With Inline Caches disabled we can get here for a virtual call
1969     // for two reasons:
1970     //   1 - calling an abstract method. The vtable for abstract methods
1971     //       will run us thru handle_wrong_method and we will eventually
1972     //       end up in the interpreter to throw the ame.
1973     //   2 - a racing deoptimization. We could be doing a vanilla vtable
1974     //       call and between the time we fetch the entry address and
1975     //       we jump to it the target gets deoptimized. Similar to 1
1976     //       we will wind up in the interprter (thru a c2i with c2).
1977     //
1978     address call_addr = nullptr;
1979     {
1980       // Get call instruction under lock because another thread may be
1981       // busy patching it.
1982       CompiledICLocker ml(caller_nm);
1983       // Location of call instruction
1984       call_addr = caller_nm->call_instruction_address(pc);
1985     }
1986 
1987     // Check relocations for the matching call to 1) avoid false positives,
1988     // and 2) determine the type.
1989     if (call_addr != nullptr) {
1990       // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1991       // bytes back in the instruction stream so we must also check for reloc info.
1992       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1993       bool ret = iter.next(); // Get item
1994       if (ret) {
1995         is_static_call = false;
1996         is_optimized = false;
1997         switch (iter.type()) {
1998           case relocInfo::static_call_type:
1999             is_static_call = true;
2000 
2001           case relocInfo::virtual_call_type:
2002           case relocInfo::opt_virtual_call_type:
2003             is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
2004             // Cleaning the inline cache will force a new resolve. This is more robust
2005             // than directly setting it to the new destination, since resolving of calls
2006             // is always done through the same code path. (experience shows that it
2007             // leads to very hard to track down bugs, if an inline cache gets updated
2008             // to a wrong method). It should not be performance critical, since the
2009             // resolve is only done once.
2010             guarantee(iter.addr() == call_addr, "must find call");
2011             for (;;) {
2012               ICRefillVerifier ic_refill_verifier;
2013               if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
2014                 InlineCacheBuffer::refill_ic_stubs();
2015               } else {
2016                 break;
2017               }
2018             }
2019             break;
2020           default:
2021             break;
2022         }
2023       }
2024     }
2025   }
2026 
2027   methodHandle callee_method = find_callee_method(is_optimized, caller_is_c1, CHECK_(methodHandle()));

2028 
2029 #ifndef PRODUCT
2030   Atomic::inc(&_wrong_method_ctr);
2031 
2032   if (TraceCallFixup) {
2033     ResourceMark rm(current);
2034     tty->print("handle_wrong_method reresolving call to");
2035     callee_method->print_short_name(tty);
2036     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
2037   }
2038 #endif
2039 
2040   return callee_method;
2041 }
2042 
2043 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
2044   // The faulting unsafe accesses should be changed to throw the error
2045   // synchronously instead. Meanwhile the faulting instruction will be
2046   // skipped over (effectively turning it into a no-op) and an
2047   // asynchronous exception will be raised which the thread will

2179       // there. If you're lucky you'll get the assert in the bugid, if not you've
2180       // just made a call site that could be megamorphic into a monomorphic site
2181       // for the rest of its life! Just another racing bug in the life of
2182       // fixup_callers_callsite ...
2183       //
2184       RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
2185       iter.next();
2186       assert(iter.has_current(), "must have a reloc at java call site");
2187       relocInfo::relocType typ = iter.reloc()->type();
2188       if (typ != relocInfo::static_call_type &&
2189            typ != relocInfo::opt_virtual_call_type &&
2190            typ != relocInfo::static_stub_type) {
2191         return;
2192       }
2193       if (nm->method()->is_continuation_enter_intrinsic()) {
2194         if (ContinuationEntry::is_interpreted_call(call->instruction_address())) {
2195           return;
2196         }
2197       }
2198       address destination = call->destination();
2199       address entry_point = cb->is_compiled_by_c1() ? callee->verified_inline_entry_point() : callee->verified_entry_point();
2200       if (should_fixup_call_destination(destination, entry_point, caller_pc, moop, cb)) {
2201         call->set_destination_mt_safe(entry_point);
2202       }
2203     }
2204   }
2205 JRT_END
2206 
2207 
2208 // same as JVM_Arraycopy, but called directly from compiled code
2209 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
2210                                                 oopDesc* dest, jint dest_pos,
2211                                                 jint length,
2212                                                 JavaThread* current)) {
2213 #ifndef PRODUCT
2214   _slow_array_copy_ctr++;
2215 #endif
2216   // Check if we have null pointers
2217   if (src == nullptr || dest == nullptr) {
2218     THROW(vmSymbols::java_lang_NullPointerException());
2219   }

2491   tty->print_cr("        %% in nested categories are relative to their category");
2492   tty->print_cr("        (and thus add up to more than 100%% with inlining)");
2493   tty->cr();
2494 
2495   MethodArityHistogram h;
2496 }
2497 #endif
2498 
2499 #ifndef PRODUCT
2500 static int _lookups; // number of calls to lookup
2501 static int _equals;  // number of buckets checked with matching hash
2502 static int _hits;    // number of successful lookups
2503 static int _compact; // number of equals calls with compact signature
2504 #endif
2505 
2506 // A simple wrapper class around the calling convention information
2507 // that allows sharing of adapters for the same calling convention.
2508 class AdapterFingerPrint : public CHeapObj<mtCode> {
2509  private:
2510   enum {
2511     _basic_type_bits = 5,
2512     _basic_type_mask = right_n_bits(_basic_type_bits),
2513     _basic_types_per_int = BitsPerInt / _basic_type_bits,
2514     _compact_int_count = 3
2515   };
2516   // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
2517   // For now, 4 bits per components (plus T_VOID gaps after double/long) is not excessive.
2518 
2519   union {
2520     int  _compact[_compact_int_count];
2521     int* _fingerprint;
2522   } _value;
2523   int _length; // A negative length indicates the fingerprint is in the compact form,
2524                // Otherwise _value._fingerprint is the array.
2525 
2526   // Remap BasicTypes that are handled equivalently by the adapters.
2527   // These are correct for the current system but someday it might be
2528   // necessary to make this mapping platform dependent.
2529   static BasicType adapter_encoding(BasicType in) {
2530     switch (in) {
2531       case T_BOOLEAN:
2532       case T_BYTE:
2533       case T_SHORT:
2534       case T_CHAR:
2535         // They are all promoted to T_INT in the calling convention
2536         return T_INT;
2537 
2538       case T_OBJECT:
2539       case T_ARRAY:
2540         // In other words, we assume that any register good enough for
2541         // an int or long is good enough for a managed pointer.
2542 #ifdef _LP64
2543         return T_LONG;
2544 #else
2545         return T_INT;
2546 #endif
2547 
2548       case T_INT:
2549       case T_LONG:
2550       case T_FLOAT:
2551       case T_DOUBLE:
2552       case T_VOID:
2553         return in;
2554 
2555       default:
2556         ShouldNotReachHere();
2557         return T_CONFLICT;
2558     }
2559   }
2560 
2561  public:
2562   AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2563     // The fingerprint is based on the BasicType signature encoded
2564     // into an array of ints with eight entries per int.
2565     int total_args_passed = (sig != nullptr) ? sig->length() : 0;
2566     int* ptr;
2567     int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2568     if (len <= _compact_int_count) {
2569       assert(_compact_int_count == 3, "else change next line");
2570       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2571       // Storing the signature encoded as signed chars hits about 98%
2572       // of the time.
2573       _length = -len;
2574       ptr = _value._compact;
2575     } else {
2576       _length = len;
2577       _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2578       ptr = _value._fingerprint;
2579     }
2580 
2581     // Now pack the BasicTypes with 8 per int
2582     int sig_index = 0;
2583     BasicType prev_bt = T_ILLEGAL;
2584     int vt_count = 0;
2585     for (int index = 0; index < len; index++) {
2586       int value = 0;
2587       for (int byte = 0; byte < _basic_types_per_int; byte++) {
2588         BasicType bt = T_ILLEGAL;
2589         if (sig_index < total_args_passed) {
2590           bt = sig->at(sig_index++)._bt;
2591           if (bt == T_METADATA) {
2592             // Found start of inline type in signature
2593             assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2594             if (sig_index == 1 && has_ro_adapter) {
2595               // With a ro_adapter, replace receiver inline type delimiter by T_VOID to prevent matching
2596               // with other adapters that have the same inline type as first argument and no receiver.
2597               bt = T_VOID;
2598             }
2599             vt_count++;
2600           } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2601             // Found end of inline type in signature
2602             assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2603             vt_count--;
2604             assert(vt_count >= 0, "invalid vt_count");
2605           } else if (vt_count == 0) {
2606             // Widen fields that are not part of a scalarized inline type argument
2607             bt = adapter_encoding(bt);
2608           }
2609           prev_bt = bt;
2610         }
2611         int bt_val = (bt == T_ILLEGAL) ? 0 : bt;
2612         assert((bt_val & _basic_type_mask) == bt_val, "must fit in 4 bits");
2613         value = (value << _basic_type_bits) | bt_val;
2614       }
2615       ptr[index] = value;
2616     }
2617     assert(vt_count == 0, "invalid vt_count");
2618   }
2619 
2620   ~AdapterFingerPrint() {
2621     if (_length > 0) {
2622       FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2623     }
2624   }
2625 
2626   int value(int index) {
2627     if (_length < 0) {
2628       return _value._compact[index];
2629     }
2630     return _value._fingerprint[index];
2631   }
2632   int length() {
2633     if (_length < 0) return -_length;
2634     return _length;
2635   }
2636 
2637   bool is_compact() {

2662   const char* as_basic_args_string() {
2663     stringStream st;
2664     bool long_prev = false;
2665     for (int i = 0; i < length(); i++) {
2666       unsigned val = (unsigned)value(i);
2667       // args are packed so that first/lower arguments are in the highest
2668       // bits of each int value, so iterate from highest to the lowest
2669       for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2670         unsigned v = (val >> j) & _basic_type_mask;
2671         if (v == 0) {
2672           assert(i == length() - 1, "Only expect zeroes in the last word");
2673           continue;
2674         }
2675         if (long_prev) {
2676           long_prev = false;
2677           if (v == T_VOID) {
2678             st.print("J");
2679           } else {
2680             st.print("L");
2681           }
2682         } else if (v == T_LONG) {
2683           long_prev = true;
2684         } else if (v != T_VOID){
2685           st.print("%c", type2char((BasicType)v));




2686         }
2687       }
2688     }
2689     if (long_prev) {
2690       st.print("L");
2691     }
2692     return st.as_string();
2693   }
2694 #endif // !product
2695 
2696   bool equals(AdapterFingerPrint* other) {
2697     if (other->_length != _length) {
2698       return false;
2699     }
2700     if (_length < 0) {
2701       assert(_compact_int_count == 3, "else change next line");
2702       return _value._compact[0] == other->_value._compact[0] &&
2703              _value._compact[1] == other->_value._compact[1] &&
2704              _value._compact[2] == other->_value._compact[2];
2705     } else {

2713   }
2714 
2715   static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2716     NOT_PRODUCT(_equals++);
2717     return fp1->equals(fp2);
2718   }
2719 
2720   static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2721     return fp->compute_hash();
2722   }
2723 };
2724 
2725 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2726 using AdapterHandlerTable = ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2727                   AnyObj::C_HEAP, mtCode,
2728                   AdapterFingerPrint::compute_hash,
2729                   AdapterFingerPrint::equals>;
2730 static AdapterHandlerTable* _adapter_handler_table;
2731 
2732 // Find a entry with the same fingerprint if it exists
2733 static AdapterHandlerEntry* lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2734   NOT_PRODUCT(_lookups++);
2735   assert_lock_strong(AdapterHandlerLibrary_lock);
2736   AdapterFingerPrint fp(sig, has_ro_adapter);
2737   AdapterHandlerEntry** entry = _adapter_handler_table->get(&fp);
2738   if (entry != nullptr) {
2739 #ifndef PRODUCT
2740     if (fp.is_compact()) _compact++;
2741     _hits++;
2742 #endif
2743     return *entry;
2744   }
2745   return nullptr;
2746 }
2747 
2748 #ifndef PRODUCT
2749 static void print_table_statistics() {
2750   auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2751     return sizeof(*key) + sizeof(*a);
2752   };
2753   TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2754   ts.print(tty, "AdapterHandlerTable");
2755   tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2756                 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2757   tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2758                 _lookups, _equals, _hits, _compact);
2759 }
2760 #endif
2761 
2762 // ---------------------------------------------------------------------------
2763 // Implementation of AdapterHandlerLibrary
2764 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
2765 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2766 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2767 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2768 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2769 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2770 const int AdapterHandlerLibrary_size = 48*K;
2771 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2772 
2773 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2774   return _buffer;
2775 }
2776 
2777 static void post_adapter_creation(const AdapterBlob* new_adapter,
2778                                   const AdapterHandlerEntry* entry) {
2779   if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2780     char blob_id[256];
2781     jio_snprintf(blob_id,
2782                  sizeof(blob_id),
2783                  "%s(%s)",
2784                  new_adapter->name(),
2785                  entry->fingerprint()->as_string());
2786     if (Forte::is_enabled()) {
2787       Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2788     }
2789 
2790     if (JvmtiExport::should_post_dynamic_code_generated()) {

2793   }
2794 }
2795 
2796 void AdapterHandlerLibrary::initialize() {
2797   ResourceMark rm;
2798   AdapterBlob* no_arg_blob = nullptr;
2799   AdapterBlob* int_arg_blob = nullptr;
2800   AdapterBlob* obj_arg_blob = nullptr;
2801   AdapterBlob* obj_int_arg_blob = nullptr;
2802   AdapterBlob* obj_obj_arg_blob = nullptr;
2803   {
2804     _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2805     MutexLocker mu(AdapterHandlerLibrary_lock);
2806 
2807     // Create a special handler for abstract methods.  Abstract methods
2808     // are never compiled so an i2c entry is somewhat meaningless, but
2809     // throw AbstractMethodError just in case.
2810     // Pass wrong_method_abstract for the c2i transitions to return
2811     // AbstractMethodError for invalid invocations.
2812     address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2813     _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(nullptr),
2814                                                                 StubRoutines::throw_AbstractMethodError_entry(),
2815                                                                 wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
2816                                                                 wrong_method_abstract, wrong_method_abstract);

2817     _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);

2818 
2819     CompiledEntrySignature no_args;
2820     no_args.compute_calling_conventions();
2821     _no_arg_handler = create_adapter(no_arg_blob, no_args, true);
2822 
2823     CompiledEntrySignature obj_args;
2824     SigEntry::add_entry(obj_args.sig(), T_OBJECT, nullptr);
2825     obj_args.compute_calling_conventions();
2826     _obj_arg_handler = create_adapter(obj_arg_blob, obj_args, true);
2827 
2828     CompiledEntrySignature int_args;
2829     SigEntry::add_entry(int_args.sig(), T_INT, nullptr);
2830     int_args.compute_calling_conventions();
2831     _int_arg_handler = create_adapter(int_arg_blob, int_args, true);
2832 
2833     CompiledEntrySignature obj_int_args;
2834     SigEntry::add_entry(obj_int_args.sig(), T_OBJECT, nullptr);
2835     SigEntry::add_entry(obj_int_args.sig(), T_INT, nullptr);
2836     obj_int_args.compute_calling_conventions();
2837     _obj_int_arg_handler = create_adapter(obj_int_arg_blob, obj_int_args, true);
2838 
2839     CompiledEntrySignature obj_obj_args;
2840     SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT, nullptr);
2841     SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT, nullptr);
2842     obj_obj_args.compute_calling_conventions();
2843     _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, obj_obj_args, true);
2844 
2845     assert(no_arg_blob != nullptr &&
2846           obj_arg_blob != nullptr &&
2847           int_arg_blob != nullptr &&
2848           obj_int_arg_blob != nullptr &&
2849           obj_obj_arg_blob != nullptr, "Initial adapters must be properly created");
2850   }
2851   return;
2852 
2853   // Outside of the lock
2854   post_adapter_creation(no_arg_blob, _no_arg_handler);
2855   post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2856   post_adapter_creation(int_arg_blob, _int_arg_handler);
2857   post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2858   post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2859 }
2860 
2861 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2862                                                       address i2c_entry,
2863                                                       address c2i_entry,
2864                                                       address c2i_inline_entry,
2865                                                       address c2i_inline_ro_entry,
2866                                                       address c2i_unverified_entry,
2867                                                       address c2i_unverified_inline_entry,
2868                                                       address c2i_no_clinit_check_entry) {
2869   return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry,
2870                               c2i_unverified_inline_entry, c2i_no_clinit_check_entry);

2871 }
2872 
2873 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2874   if (method->is_abstract()) {
2875     return nullptr;
2876   }
2877   int total_args_passed = method->size_of_parameters(); // All args on stack
2878   if (total_args_passed == 0) {
2879     return _no_arg_handler;
2880   } else if (total_args_passed == 1) {
2881     if (!method->is_static()) {
2882       if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2883         return nullptr;
2884       }
2885       return _obj_arg_handler;
2886     }
2887     switch (method->signature()->char_at(1)) {
2888       case JVM_SIGNATURE_CLASS: {
2889         if (InlineTypePassFieldsAsArgs) {
2890           SignatureStream ss(method->signature());
2891           InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2892           if (vk != nullptr) {
2893             return nullptr;
2894           }
2895         }
2896         return _obj_arg_handler;
2897       }
2898       case JVM_SIGNATURE_ARRAY:
2899         return _obj_arg_handler;
2900       case JVM_SIGNATURE_INT:
2901       case JVM_SIGNATURE_BOOLEAN:
2902       case JVM_SIGNATURE_CHAR:
2903       case JVM_SIGNATURE_BYTE:
2904       case JVM_SIGNATURE_SHORT:
2905         return _int_arg_handler;
2906     }
2907   } else if (total_args_passed == 2 &&
2908              !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2909     switch (method->signature()->char_at(1)) {
2910       case JVM_SIGNATURE_CLASS: {
2911         if (InlineTypePassFieldsAsArgs) {
2912           SignatureStream ss(method->signature());
2913           InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2914           if (vk != nullptr) {
2915             return nullptr;
2916           }
2917         }
2918         return _obj_obj_arg_handler;
2919       }
2920       case JVM_SIGNATURE_ARRAY:
2921         return _obj_obj_arg_handler;
2922       case JVM_SIGNATURE_INT:
2923       case JVM_SIGNATURE_BOOLEAN:
2924       case JVM_SIGNATURE_CHAR:
2925       case JVM_SIGNATURE_BYTE:
2926       case JVM_SIGNATURE_SHORT:
2927         return _obj_int_arg_handler;
2928     }
2929   }
2930   return nullptr;
2931 }
2932 
2933 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2934   _method(method), _num_inline_args(0), _has_inline_recv(false),
2935   _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2936   _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2937   _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2938   _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2939   _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2940   _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2941 }
2942 
2943 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2944 // or the same entry for VEP and VIEP(RO).
2945 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2946   if (!has_scalarized_args()) {
2947     // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2948     return CodeOffsets::Verified_Entry;
2949   }
2950   if (_method->is_static()) {
2951     // Static methods don't need VIEP(RO)
2952     return CodeOffsets::Verified_Entry;



2953   }
2954 
2955   if (has_inline_recv()) {
2956     if (num_inline_args() == 1) {
2957       // Share same entry for VIEP and VIEP(RO).
2958       // This is quite common: we have an instance method in an InlineKlass that has
2959       // no inline type args other than <this>.
2960       return CodeOffsets::Verified_Inline_Entry;
2961     } else {
2962       assert(num_inline_args() > 1, "must be");
2963       // No sharing:
2964       //   VIEP(RO) -- <this> is passed as object
2965       //   VEP      -- <this> is passed as fields
2966       return CodeOffsets::Verified_Inline_Entry_RO;
2967     }
2968   }
2969 
2970   // Either a static method, or <this> is not an inline type
2971   if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2972     // No sharing:
2973     // Some arguments are passed on the stack, and we have inserted reserved entries
2974     // into the VEP, but we never insert reserved entries into the VIEP(RO).
2975     return CodeOffsets::Verified_Inline_Entry_RO;
2976   } else {
2977     // Share same entry for VEP and VIEP(RO).
2978     return CodeOffsets::Verified_Entry;
2979   }
2980 }
2981 
2982 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
2983 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2984   if (_supers != nullptr) {
2985     return _supers;
2986   }
2987   _supers = new GrowableArray<Method*>();
2988   // Skip private, static, and <init> methods
2989   if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2990     return _supers;
2991   }
2992   Symbol* name = _method->name();
2993   Symbol* signature = _method->signature();
2994   const Klass* holder = _method->method_holder()->super();
2995   Symbol* holder_name = holder->name();
2996   ThreadInVMfromUnknown tiv;
2997   JavaThread* current = JavaThread::current();
2998   HandleMark hm(current);
2999   Handle loader(current, _method->method_holder()->class_loader());
3000 
3001   // Walk up the class hierarchy and search for super methods
3002   while (holder != nullptr) {
3003     Method* super_method = holder->lookup_method(name, signature);
3004     if (super_method == nullptr) {
3005       break;
3006     }
3007     if (!super_method->is_static() && !super_method->is_private() &&
3008         (!super_method->is_package_private() ||
3009          super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
3010       _supers->push(super_method);
3011     }
3012     holder = super_method->method_holder()->super();
3013   }
3014   // Search interfaces for super methods
3015   Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
3016   for (int i = 0; i < interfaces->length(); ++i) {
3017     Method* m = interfaces->at(i)->lookup_method(name, signature);
3018     if (m != nullptr && !m->is_static() && m->is_public()) {
3019       _supers->push(m);
3020     }
3021   }
3022   return _supers;
3023 }
3024 
3025 // Iterate over arguments and compute scalarized and non-scalarized signatures
3026 void CompiledEntrySignature::compute_calling_conventions(bool init) {
3027   bool has_scalarized = false;
3028   if (_method != nullptr) {
3029     InstanceKlass* holder = _method->method_holder();
3030     int arg_num = 0;
3031     if (!_method->is_static()) {
3032       if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() &&
3033           (init || _method->is_scalarized_arg(arg_num))) {
3034         _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
3035         has_scalarized = true;
3036         _has_inline_recv = true;
3037         _num_inline_args++;
3038       } else {
3039         SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
3040       }
3041       SigEntry::add_entry(_sig, T_OBJECT, holder->name());
3042       SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
3043       arg_num++;
3044     }
3045     for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
3046       BasicType bt = ss.type();
3047       if (bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) {
3048         InlineKlass* vk = ss.as_inline_klass(holder);
3049         if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
3050           // Check for a calling convention mismatch with super method(s)
3051           bool scalar_super = false;
3052           bool non_scalar_super = false;
3053           GrowableArray<Method*>* supers = get_supers();
3054           for (int i = 0; i < supers->length(); ++i) {
3055             Method* super_method = supers->at(i);
3056             if (super_method->is_scalarized_arg(arg_num)) {
3057               scalar_super = true;
3058             } else {
3059               non_scalar_super = true;
3060             }
3061           }
3062 #ifdef ASSERT
3063           // Randomly enable below code paths for stress testing
3064           bool stress = init && StressCallingConvention;
3065           if (stress && (os::random() & 1) == 1) {
3066             non_scalar_super = true;
3067             if ((os::random() & 1) == 1) {
3068               scalar_super = true;
3069             }
3070           }
3071 #endif
3072           if (non_scalar_super) {
3073             // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
3074             if (scalar_super) {
3075               // Found non-scalar *and* scalar super methods. We can't handle both.
3076               // Mark the scalar method as mismatch and re-compile call sites to use non-scalarized calling convention.
3077               for (int i = 0; i < supers->length(); ++i) {
3078                 Method* super_method = supers->at(i);
3079                 if (super_method->is_scalarized_arg(arg_num) debug_only(|| (stress && (os::random() & 1) == 1))) {
3080                   super_method->set_mismatch();
3081                   MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
3082                   JavaThread* thread = JavaThread::current();
3083                   HandleMark hm(thread);
3084                   methodHandle mh(thread, super_method);
3085                   DeoptimizationScope deopt_scope;
3086                   CodeCache::mark_for_deoptimization(&deopt_scope, mh());
3087                   deopt_scope.deoptimize_marked();
3088                 }
3089               }
3090             }
3091             // Fall back to non-scalarized calling convention
3092             SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
3093             SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
3094           } else {
3095             _num_inline_args++;
3096             has_scalarized = true;
3097             int last = _sig_cc->length();
3098             int last_ro = _sig_cc_ro->length();
3099             _sig_cc->appendAll(vk->extended_sig());
3100             _sig_cc_ro->appendAll(vk->extended_sig());
3101             if (bt == T_OBJECT) {
3102               // Nullable inline type argument, insert InlineTypeNode::IsInit field right after T_METADATA delimiter
3103               _sig_cc->insert_before(last+1, SigEntry(T_BOOLEAN, -1, nullptr));
3104               _sig_cc_ro->insert_before(last_ro+1, SigEntry(T_BOOLEAN, -1, nullptr));
3105             }
3106           }
3107         } else {
3108           SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
3109           SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
3110         }
3111         bt = T_OBJECT;
3112       } else {
3113         SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
3114         SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
3115       }
3116       SigEntry::add_entry(_sig, bt, ss.as_symbol());
3117       if (bt != T_VOID) {
3118         arg_num++;
3119       }
3120     }
3121   }
3122 
3123   // Compute the non-scalarized calling convention
3124   _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3125   _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3126 
3127   // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3128   if (has_scalarized && !_method->is_native()) {
3129     _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3130     _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3131 
3132     _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3133     _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3134 
3135     _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3136     _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3137 
3138     // Upper bound on stack arguments to avoid hitting the argument limit and
3139     // bailing out of compilation ("unsupported incoming calling sequence").
3140     // TODO we need a reasonable limit (flag?) here
3141     if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 60) {
3142       return; // Success
3143     }
3144   }
3145 
3146   // No scalarized args
3147   _sig_cc = _sig;
3148   _regs_cc = _regs;
3149   _args_on_stack_cc = _args_on_stack;
3150 
3151   _sig_cc_ro = _sig;
3152   _regs_cc_ro = _regs;
3153   _args_on_stack_cc_ro = _args_on_stack;
3154 }
3155 
3156 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
3157   // Use customized signature handler.  Need to lock around updates to
3158   // the _adapter_handler_table (it is not safe for concurrent readers
3159   // and a single writer: this could be fixed if it becomes a
3160   // problem).
3161 
3162   // Fast-path for trivial adapters
3163   AdapterHandlerEntry* entry = get_simple_adapter(method);
3164   if (entry != nullptr) {
3165     return entry;
3166   }
3167 
3168   ResourceMark rm;
3169   AdapterBlob* new_adapter = nullptr;
3170 
3171   CompiledEntrySignature ces(method());
3172   ces.compute_calling_conventions();
3173   if (ces.has_scalarized_args()) {
3174     if (!method->has_scalarized_args()) {
3175       assert(!method()->constMethod()->is_shared(), "Cannot update shared const object");
3176       method->set_has_scalarized_args();
3177     }
3178     if (ces.c1_needs_stack_repair()) {
3179       method->set_c1_needs_stack_repair();
3180     }
3181     if (ces.c2_needs_stack_repair() && !method->c2_needs_stack_repair()) {
3182       assert(!method->constMethod()->is_shared(), "Cannot update a shared const object");
3183       method->set_c2_needs_stack_repair();
3184     }
3185   } else if (method->is_abstract()) {
3186     return _abstract_method_handler;
3187   }
3188 




3189   {
3190     MutexLocker mu(AdapterHandlerLibrary_lock);
3191 
3192     if (ces.has_scalarized_args() && method->is_abstract()) {
3193       // Save a C heap allocated version of the signature for abstract methods with scalarized inline type arguments
3194       address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
3195       entry = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(nullptr),
3196                                                StubRoutines::throw_AbstractMethodError_entry(),
3197                                                wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
3198                                                wrong_method_abstract, wrong_method_abstract);
3199       GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro()->length(), mtInternal);
3200       heap_sig->appendAll(ces.sig_cc_ro());
3201       entry->set_sig_cc(heap_sig);
3202       return entry;
3203     }
3204 
3205     // Lookup method signature's fingerprint
3206     entry = lookup(ces.sig_cc(), ces.has_inline_recv());
3207 
3208     if (entry != nullptr) {
3209 #ifdef ASSERT
3210       if (VerifyAdapterSharing) {
3211         AdapterBlob* comparison_blob = nullptr;
3212         AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, ces, false);
3213         assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison");
3214         assert(comparison_entry->compare_code(entry), "code must match");
3215         // Release the one just created and return the original
3216         delete comparison_entry;
3217       }
3218 #endif
3219       return entry;
3220     }
3221 
3222     entry = create_adapter(new_adapter, ces, /* allocate_code_blob */ true);
3223   }
3224 
3225   // Outside of the lock
3226   if (new_adapter != nullptr) {
3227     post_adapter_creation(new_adapter, entry);
3228   }
3229   return entry;
3230 }
3231 
3232 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
3233                                                            CompiledEntrySignature& ces,

3234                                                            bool allocate_code_blob) {
3235 
3236   // StubRoutines::_final_stubs_code is initialized after this function can be called. As a result,
3237   // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that generated prior
3238   // to all StubRoutines::_final_stubs_code being set. Checks refer to runtime range checks generated
3239   // in an I2C stub that ensure that an I2C stub is called from an interpreter frame or stubs.
3240   bool contains_all_checks = StubRoutines::final_stubs_code() != nullptr;
3241 





3242   BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3243   CodeBuffer buffer(buf);
3244   short buffer_locs[20];
3245   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3246                                           sizeof(buffer_locs)/sizeof(relocInfo));
3247 
3248   // Make a C heap allocated version of the fingerprint to store in the adapter
3249   AdapterFingerPrint* fingerprint = new AdapterFingerPrint(ces.sig_cc(), ces.has_inline_recv());
3250   MacroAssembler _masm(&buffer);
3251   AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
3252                                                 ces.args_on_stack(),
3253                                                 ces.sig(),
3254                                                 ces.regs(),
3255                                                 ces.sig_cc(),
3256                                                 ces.regs_cc(),
3257                                                 ces.sig_cc_ro(),
3258                                                 ces.regs_cc_ro(),
3259                                                 fingerprint,
3260                                                 new_adapter,
3261                                                 allocate_code_blob);
3262 
3263   if (ces.has_scalarized_args()) {
3264     // Save a C heap allocated version of the scalarized signature and store it in the adapter
3265     GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3266     heap_sig->appendAll(ces.sig_cc());
3267     entry->set_sig_cc(heap_sig);
3268   }
3269 
3270 #ifdef ASSERT
3271   if (VerifyAdapterSharing) {
3272     entry->save_code(buf->code_begin(), buffer.insts_size());
3273     if (!allocate_code_blob) {
3274       return entry;
3275     }
3276   }
3277 #endif
3278 

3279   NOT_PRODUCT(int insts_size = buffer.insts_size());
3280   if (new_adapter == nullptr) {
3281     // CodeCache is full, disable compilation
3282     // Ought to log this but compile log is only per compile thread
3283     // and we're some non descript Java thread.
3284     return nullptr;
3285   }
3286   entry->relocate(new_adapter->content_begin());
3287 #ifndef PRODUCT
3288   // debugging support
3289   if (PrintAdapterHandlers || PrintStubCode) {
3290     ttyLocker ttyl;
3291     entry->print_adapter_on(tty);
3292     tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
3293                   _adapter_handler_table->number_of_entries(), fingerprint->as_basic_args_string(),
3294                   fingerprint->as_string(), insts_size);
3295     tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
3296     if (Verbose || PrintStubCode) {
3297       address first_pc = entry->base_address();
3298       if (first_pc != nullptr) {

3300                              NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
3301         tty->cr();
3302       }
3303     }
3304   }
3305 #endif
3306 
3307   // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
3308   // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
3309   if (contains_all_checks || !VerifyAdapterCalls) {
3310     assert_lock_strong(AdapterHandlerLibrary_lock);
3311     _adapter_handler_table->put(fingerprint, entry);
3312   }
3313   return entry;
3314 }
3315 
3316 address AdapterHandlerEntry::base_address() {
3317   address base = _i2c_entry;
3318   if (base == nullptr)  base = _c2i_entry;
3319   assert(base <= _c2i_entry || _c2i_entry == nullptr, "");
3320   assert(base <= _c2i_inline_entry || _c2i_inline_entry == nullptr, "");
3321   assert(base <= _c2i_inline_ro_entry || _c2i_inline_ro_entry == nullptr, "");
3322   assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, "");
3323   assert(base <= _c2i_unverified_inline_entry || _c2i_unverified_inline_entry == nullptr, "");
3324   assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, "");
3325   return base;
3326 }
3327 
3328 void AdapterHandlerEntry::relocate(address new_base) {
3329   address old_base = base_address();
3330   assert(old_base != nullptr, "");
3331   ptrdiff_t delta = new_base - old_base;
3332   if (_i2c_entry != nullptr)
3333     _i2c_entry += delta;
3334   if (_c2i_entry != nullptr)
3335     _c2i_entry += delta;
3336   if (_c2i_inline_entry != nullptr)
3337     _c2i_inline_entry += delta;
3338   if (_c2i_inline_ro_entry != nullptr)
3339     _c2i_inline_ro_entry += delta;
3340   if (_c2i_unverified_entry != nullptr)
3341     _c2i_unverified_entry += delta;
3342   if (_c2i_unverified_inline_entry != nullptr)
3343     _c2i_unverified_inline_entry += delta;
3344   if (_c2i_no_clinit_check_entry != nullptr)
3345     _c2i_no_clinit_check_entry += delta;
3346   assert(base_address() == new_base, "");
3347 }
3348 
3349 
3350 AdapterHandlerEntry::~AdapterHandlerEntry() {
3351   delete _fingerprint;
3352   if (_sig_cc != nullptr) {
3353     delete _sig_cc;
3354   }
3355 #ifdef ASSERT
3356   FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3357 #endif
3358 }
3359 
3360 
3361 #ifdef ASSERT
3362 // Capture the code before relocation so that it can be compared
3363 // against other versions.  If the code is captured after relocation
3364 // then relative instructions won't be equivalent.
3365 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3366   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3367   _saved_code_length = length;
3368   memcpy(_saved_code, buffer, length);
3369 }
3370 
3371 
3372 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3373   assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3374 

3421 
3422       struct { double data[20]; } locs_buf;
3423       struct { double data[20]; } stubs_locs_buf;
3424       buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3425 #if defined(AARCH64) || defined(PPC64)
3426       // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3427       // in the constant pool to ensure ordering between the barrier and oops
3428       // accesses. For native_wrappers we need a constant.
3429       // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3430       // static java call that is resolved in the runtime.
3431       if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3432         buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3433       }
3434 #endif
3435       buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3436       MacroAssembler _masm(&buffer);
3437 
3438       // Fill in the signature array, for the calling-convention call.
3439       const int total_args_passed = method->size_of_parameters();
3440 
3441       BasicType stack_sig_bt[16];
3442       VMRegPair stack_regs[16];
3443       BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3444       VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3445 
3446       int i = 0;
3447       if (!method->is_static()) {  // Pass in receiver first
3448         sig_bt[i++] = T_OBJECT;
3449       }
3450       SignatureStream ss(method->signature());
3451       for (; !ss.at_return_type(); ss.next()) {
3452         sig_bt[i++] = ss.type();  // Collect remaining bits of signature
3453         if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3454           sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
3455         }
3456       }
3457       assert(i == total_args_passed, "");
3458       BasicType ret_type = ss.type();
3459 
3460       // Now get the compiled-Java arguments layout.
3461       int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3462 
3463       // Generate the compiled-to-native wrapper code
3464       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3465 
3466       if (nm != nullptr) {
3467         {
3468           MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
3469           if (nm->make_in_use()) {
3470             method->set_code(method, nm);
3471           }
3472         }
3473 
3474         DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
3475         if (directive->PrintAssemblyOption) {
3476           nm->print_code();
3477         }
3478         DirectivesStack::release(directive);

3683       st->print("Adapter for signature: ");
3684       a->print_adapter_on(st);
3685       return true;
3686     } else {
3687       return false; // keep looking
3688     }
3689   };
3690   assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3691   _adapter_handler_table->iterate(findblob);
3692   assert(found, "Should have found handler");
3693 }
3694 
3695 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3696   st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3697   if (get_i2c_entry() != nullptr) {
3698     st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3699   }
3700   if (get_c2i_entry() != nullptr) {
3701     st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3702   }
3703   if (get_c2i_entry() != nullptr) {
3704     st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3705   }
3706   if (get_c2i_entry() != nullptr) {
3707     st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3708   }
3709   if (get_c2i_unverified_entry() != nullptr) {
3710     st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3711   }
3712   if (get_c2i_unverified_entry() != nullptr) {
3713     st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3714   }
3715   if (get_c2i_no_clinit_check_entry() != nullptr) {
3716     st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3717   }
3718   st->cr();
3719 }
3720 
3721 #ifndef PRODUCT
3722 
3723 void AdapterHandlerLibrary::print_statistics() {
3724   print_table_statistics();
3725 }
3726 
3727 #endif /* PRODUCT */
3728 
3729 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3730   assert(current == JavaThread::current(), "pre-condition");
3731   StackOverflow* overflow_state = current->stack_overflow_state();
3732   overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3733   overflow_state->set_reserved_stack_activation(current->stack_base());

3782         event.set_method(method);
3783         event.commit();
3784       }
3785     }
3786   }
3787   return activation;
3788 }
3789 
3790 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3791   // After any safepoint, just before going back to compiled code,
3792   // we inform the GC that we will be doing initializing writes to
3793   // this object in the future without emitting card-marks, so
3794   // GC may take any compensating steps.
3795 
3796   oop new_obj = current->vm_result();
3797   if (new_obj == nullptr) return;
3798 
3799   BarrierSet *bs = BarrierSet::barrier_set();
3800   bs->on_slowpath_allocation_exit(current, new_obj);
3801 }
3802 
3803 // We are at a compiled code to interpreter call. We need backing
3804 // buffers for all inline type arguments. Allocate an object array to
3805 // hold them (convenient because once we're done with it we don't have
3806 // to worry about freeing it).
3807 oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS) {
3808   assert(InlineTypePassFieldsAsArgs, "no reason to call this");
3809   ResourceMark rm;
3810 
3811   int nb_slots = 0;
3812   InstanceKlass* holder = callee->method_holder();
3813   allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
3814   if (allocate_receiver) {
3815     nb_slots++;
3816   }
3817   int arg_num = callee->is_static() ? 0 : 1;
3818   for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3819     BasicType bt = ss.type();
3820     if ((bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) && callee->is_scalarized_arg(arg_num)) {
3821       nb_slots++;
3822     }
3823     if (bt != T_VOID) {
3824       arg_num++;
3825     }
3826   }
3827   objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
3828   objArrayHandle array(THREAD, array_oop);
3829   arg_num = callee->is_static() ? 0 : 1;
3830   int i = 0;
3831   if (allocate_receiver) {
3832     InlineKlass* vk = InlineKlass::cast(holder);
3833     oop res = vk->allocate_instance(CHECK_NULL);
3834     array->obj_at_put(i++, res);
3835   }
3836   for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3837     BasicType bt = ss.type();
3838     if ((bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) && callee->is_scalarized_arg(arg_num)) {
3839       InlineKlass* vk = ss.as_inline_klass(holder);
3840       assert(vk != nullptr, "Unexpected klass");
3841       oop res = vk->allocate_instance(CHECK_NULL);
3842       array->obj_at_put(i++, res);
3843     }
3844     if (bt != T_VOID) {
3845       arg_num++;
3846     }
3847   }
3848   return array();
3849 }
3850 
3851 JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
3852   methodHandle callee(current, callee_method);
3853   oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, CHECK);
3854   current->set_vm_result(array);
3855   current->set_vm_result_2(callee()); // TODO: required to keep callee live?
3856 JRT_END
3857 
3858 // We're returning from an interpreted method: load each field into a
3859 // register following the calling convention
3860 JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
3861 {
3862   assert(res->klass()->is_inline_klass(), "only inline types here");
3863   ResourceMark rm;
3864   RegisterMap reg_map(current,
3865                       RegisterMap::UpdateMap::include,
3866                       RegisterMap::ProcessFrames::include,
3867                       RegisterMap::WalkContinuation::skip);
3868   frame stubFrame = current->last_frame();
3869   frame callerFrame = stubFrame.sender(&reg_map);
3870   assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
3871 
3872   InlineKlass* vk = InlineKlass::cast(res->klass());
3873 
3874   const Array<SigEntry>* sig_vk = vk->extended_sig();
3875   const Array<VMRegPair>* regs = vk->return_regs();
3876 
3877   if (regs == nullptr) {
3878     // The fields of the inline klass don't fit in registers, bail out
3879     return;
3880   }
3881 
3882   int j = 1;
3883   for (int i = 0; i < sig_vk->length(); i++) {
3884     BasicType bt = sig_vk->at(i)._bt;
3885     if (bt == T_METADATA) {
3886       continue;
3887     }
3888     if (bt == T_VOID) {
3889       if (sig_vk->at(i-1)._bt == T_LONG ||
3890           sig_vk->at(i-1)._bt == T_DOUBLE) {
3891         j++;
3892       }
3893       continue;
3894     }
3895     int off = sig_vk->at(i)._offset;
3896     assert(off > 0, "offset in object should be positive");
3897     VMRegPair pair = regs->at(j);
3898     address loc = reg_map.location(pair.first(), nullptr);
3899     switch(bt) {
3900     case T_BOOLEAN:
3901       *(jboolean*)loc = res->bool_field(off);
3902       break;
3903     case T_CHAR:
3904       *(jchar*)loc = res->char_field(off);
3905       break;
3906     case T_BYTE:
3907       *(jbyte*)loc = res->byte_field(off);
3908       break;
3909     case T_SHORT:
3910       *(jshort*)loc = res->short_field(off);
3911       break;
3912     case T_INT: {
3913       *(jint*)loc = res->int_field(off);
3914       break;
3915     }
3916     case T_LONG:
3917 #ifdef _LP64
3918       *(intptr_t*)loc = res->long_field(off);
3919 #else
3920       Unimplemented();
3921 #endif
3922       break;
3923     case T_OBJECT:
3924     case T_ARRAY: {
3925       *(oop*)loc = res->obj_field(off);
3926       break;
3927     }
3928     case T_FLOAT:
3929       *(jfloat*)loc = res->float_field(off);
3930       break;
3931     case T_DOUBLE:
3932       *(jdouble*)loc = res->double_field(off);
3933       break;
3934     default:
3935       ShouldNotReachHere();
3936     }
3937     j++;
3938   }
3939   assert(j == regs->length(), "missed a field?");
3940 
3941 #ifdef ASSERT
3942   VMRegPair pair = regs->at(0);
3943   address loc = reg_map.location(pair.first(), nullptr);
3944   assert(*(oopDesc**)loc == res, "overwritten object");
3945 #endif
3946 
3947   current->set_vm_result(res);
3948 }
3949 JRT_END
3950 
3951 // We've returned to an interpreted method, the interpreter needs a
3952 // reference to an inline type instance. Allocate it and initialize it
3953 // from field's values in registers.
3954 JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
3955 {
3956   ResourceMark rm;
3957   RegisterMap reg_map(current,
3958                       RegisterMap::UpdateMap::include,
3959                       RegisterMap::ProcessFrames::include,
3960                       RegisterMap::WalkContinuation::skip);
3961   frame stubFrame = current->last_frame();
3962   frame callerFrame = stubFrame.sender(&reg_map);
3963 
3964 #ifdef ASSERT
3965   InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
3966 #endif
3967 
3968   if (!is_set_nth_bit(res, 0)) {
3969     // We're not returning with inline type fields in registers (the
3970     // calling convention didn't allow it for this inline klass)
3971     assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
3972     current->set_vm_result((oopDesc*)res);
3973     assert(verif_vk == nullptr, "broken calling convention");
3974     return;
3975   }
3976 
3977   clear_nth_bit(res, 0);
3978   InlineKlass* vk = (InlineKlass*)res;
3979   assert(verif_vk == vk, "broken calling convention");
3980   assert(Metaspace::contains((void*)res), "should be klass");
3981 
3982   // Allocate handles for every oop field so they are safe in case of
3983   // a safepoint when allocating
3984   GrowableArray<Handle> handles;
3985   vk->save_oop_fields(reg_map, handles);
3986 
3987   // It's unsafe to safepoint until we are here
3988   JRT_BLOCK;
3989   {
3990     JavaThread* THREAD = current;
3991     oop vt = vk->realloc_result(reg_map, handles, CHECK);
3992     current->set_vm_result(vt);
3993   }
3994   JRT_BLOCK_END;
3995 }
3996 JRT_END
3997 
< prev index next >