src/hotspot/share/runtime/sharedRuntime.cpp

  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/vmClasses.hpp"
  29 #include "classfile/vmSymbols.hpp"
  30 #include "code/codeCache.hpp"
  31 #include "code/compiledIC.hpp"
  32 #include "code/compiledMethod.inline.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/abstractCompiler.hpp"
  37 #include "compiler/compileBroker.hpp"
  38 #include "compiler/disassembler.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "gc/shared/gcLocker.inline.hpp"
  42 #include "interpreter/interpreter.hpp"
  43 #include "interpreter/interpreterRuntime.hpp"
  44 #include "jvm.h"
  45 #include "jfr/jfrEvents.hpp"
  46 #include "logging/log.hpp"
  47 #include "memory/resourceArea.hpp"
  48 #include "memory/universe.hpp"
  49 #include "oops/compiledICHolder.inline.hpp"
  50 #include "oops/klass.hpp"
  51 #include "oops/method.inline.hpp"
  52 #include "oops/objArrayKlass.hpp"
  53 #include "oops/oop.inline.hpp"
  54 #include "prims/forte.hpp"
  55 #include "prims/jvmtiExport.hpp"
  56 #include "prims/methodHandles.hpp"
  57 #include "prims/nativeLookup.hpp"
  58 #include "runtime/atomic.hpp"
  59 #include "runtime/frame.inline.hpp"
  60 #include "runtime/handles.inline.hpp"
  61 #include "runtime/init.hpp"
  62 #include "runtime/interfaceSupport.inline.hpp"
  63 #include "runtime/java.hpp"
  64 #include "runtime/javaCalls.hpp"
  65 #include "runtime/sharedRuntime.hpp"
  66 #include "runtime/stackWatermarkSet.hpp"
  67 #include "runtime/stubRoutines.hpp"
  68 #include "runtime/synchronizer.hpp"
  69 #include "runtime/vframe.inline.hpp"
  70 #include "runtime/vframeArray.hpp"
  71 #include "runtime/vm_version.hpp"
  72 #include "utilities/copy.hpp"
  73 #include "utilities/dtrace.hpp"
  74 #include "utilities/events.hpp"
  75 #include "utilities/macros.hpp"
  76 #include "utilities/resourceHash.hpp"
  77 #include "utilities/xmlstream.hpp"
  78 #ifdef COMPILER1
  79 #include "c1/c1_Runtime1.hpp"
  80 #endif
  81 
  82 // Shared stub locations
  83 RuntimeStub*        SharedRuntime::_wrong_method_blob;
  84 RuntimeStub*        SharedRuntime::_wrong_method_abstract_blob;
  85 RuntimeStub*        SharedRuntime::_ic_miss_blob;
  86 RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
  87 RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
  88 RuntimeStub*        SharedRuntime::_resolve_static_call_blob;
  89 address             SharedRuntime::_resolve_static_call_entry;
  90 
  91 DeoptimizationBlob* SharedRuntime::_deopt_blob;
  92 SafepointBlob*      SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
  93 SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
  94 SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
  95 
  96 #ifdef COMPILER2
  97 UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
  98 #endif // COMPILER2
  99 
 100 nmethod*            SharedRuntime::_cont_doYield_stub;
 101 
 102 //----------------------------generate_stubs-----------------------------------
 103 void SharedRuntime::generate_stubs() {
 104   _wrong_method_blob                   = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),          "wrong_method_stub");
 105   _wrong_method_abstract_blob          = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
 106   _ic_miss_blob                        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),  "ic_miss_stub");
 107   _resolve_opt_virtual_call_blob       = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),   "resolve_opt_virtual_call");
 108   _resolve_virtual_call_blob           = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),       "resolve_virtual_call");
 109   _resolve_static_call_blob            = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),        "resolve_static_call");
 110   _resolve_static_call_entry           = _resolve_static_call_blob->entry_point();
 111 
 112   AdapterHandlerLibrary::initialize();
 113 
 114 #if COMPILER2_OR_JVMCI
 115   // Vectors are generated only by C2 and JVMCI.
 116   bool support_wide = is_wide_vector(MaxVectorSize);
 117   if (support_wide) {
 118     _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
 119   }
 120 #endif // COMPILER2_OR_JVMCI
 121   _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
 122   _polling_page_return_handler_blob    = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);
 123 
 124   generate_deopt_blob();
 125 
 126 #ifdef COMPILER2
 127   generate_uncommon_trap_blob();
 128 #endif // COMPILER2
 129 }
 130 
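// How these stubs are used (a brief sketch): a compiled call site is initially
// bound to the matching resolve blob generated above; its first execution traps
// into the corresponding resolve_*_call_C entry point further below, which
// resolves the callee, patches the call site, and returns the verified entry
// point at which execution continues.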

 1170 // for a call currently in progress, i.e., arguments have been pushed on the stack
 1171 // but the callee has not been invoked yet.  The caller frame must be compiled.
1172 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1173                                               CallInfo& callinfo, TRAPS) {
1174   Handle receiver;
1175   Handle nullHandle;  // create a handy null handle for exception returns
1176   JavaThread* current = THREAD;
1177 
1178   assert(!vfst.at_end(), "Java frame must exist");
1179 
1180   // Find caller and bci from vframe
1181   methodHandle caller(current, vfst.method());
1182   int          bci   = vfst.bci();
1183 
1184   if (caller->is_continuation_enter_intrinsic()) {
1185     bc = Bytecodes::_invokestatic;
1186     LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1187     return receiver;
1188   }
1189 
1190   Bytecode_invoke bytecode(caller, bci);
1191   int bytecode_index = bytecode.index();
1192   bc = bytecode.invoke_code();
1193 
1194   methodHandle attached_method(current, extract_attached_method(vfst));
1195   if (attached_method.not_null()) {
1196     Method* callee = bytecode.static_target(CHECK_NH);
1197     vmIntrinsics::ID id = callee->intrinsic_id();
1198     // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1199     // it attaches statically resolved method to the call site.
1200     if (MethodHandles::is_signature_polymorphic(id) &&
1201         MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1202       bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1203 
1204       // Adjust invocation mode according to the attached method.
1205       switch (bc) {
1206         case Bytecodes::_invokevirtual:
1207           if (attached_method->method_holder()->is_interface()) {
1208             bc = Bytecodes::_invokeinterface;
1209           }
1210           break;
1211         case Bytecodes::_invokeinterface:
1212           if (!attached_method->method_holder()->is_interface()) {
1213             bc = Bytecodes::_invokevirtual;
1214           }
1215           break;
1216         case Bytecodes::_invokehandle:
1217           if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1218             bc = attached_method->is_static() ? Bytecodes::_invokestatic
1219                                               : Bytecodes::_invokevirtual;
1220           }
1221           break;
1222         default:
1223           break;
1224       }
1225     }
1226   }
1227 
1228   assert(bc != Bytecodes::_illegal, "not initialized");
1229 
1230   bool has_receiver = bc != Bytecodes::_invokestatic &&
1231                       bc != Bytecodes::_invokedynamic &&
1232                       bc != Bytecodes::_invokehandle;
1233 
1234   // Find receiver for non-static call
1235   if (has_receiver) {
 1236     // This register map must be updated since we need to find the receiver for
 1237     // compiled frames. The receiver might be in a register.
1238     RegisterMap reg_map2(current,
1239                          RegisterMap::UpdateMap::include,
1240                          RegisterMap::ProcessFrames::include,
1241                          RegisterMap::WalkContinuation::skip);
1242     frame stubFrame   = current->last_frame();
1243     // Caller-frame is a compiled frame
1244     frame callerFrame = stubFrame.sender(&reg_map2);
1245 
1246     if (attached_method.is_null()) {
1247       Method* callee = bytecode.static_target(CHECK_NH);
1248       if (callee == NULL) {
1249         THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1250       }
1251     }
1252 
1253     // Retrieve from a compiled argument list
1254     receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1255     assert(oopDesc::is_oop_or_null(receiver()), "");
1256 
1257     if (receiver.is_null()) {
1258       THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1259     }
1260   }
1261 
1262   // Resolve method
1263   if (attached_method.not_null()) {
1264     // Parameterized by attached method.
1265     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1266   } else {
1267     // Parameterized by bytecode.
1268     constantPoolHandle constants(current, caller->constants());
1269     LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1270   }
1271 
1272 #ifdef ASSERT
1273   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1274   if (has_receiver) {
1275     assert(receiver.not_null(), "should have thrown exception");
1276     Klass* receiver_klass = receiver->klass();
1277     Klass* rk = NULL;
1278     if (attached_method.not_null()) {
1279       // In case there's resolved method attached, use its holder during the check.
1280       rk = attached_method->method_holder();
1281     } else {
1282       // Klass is already loaded.
1283       constantPoolHandle constants(current, caller->constants());
1284       rk = constants->klass_ref_at(bytecode_index, CHECK_NH);
1285     }
1286     Klass* static_receiver_klass = rk;
1287     assert(receiver_klass->is_subtype_of(static_receiver_klass),
1288            "actual receiver must be subclass of static receiver klass");
1289     if (receiver_klass->is_instance_klass()) {
1290       if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1291         tty->print_cr("ERROR: Klass not yet initialized!!");
1292         receiver_klass->print();
1293       }
1294       assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");

1315                         RegisterMap::UpdateMap::skip,
1316                         RegisterMap::ProcessFrames::include,
1317                         RegisterMap::WalkContinuation::skip);
1318     frame fr = current->last_frame();
1319     assert(fr.is_runtime_frame(), "must be a runtimeStub");
1320     fr = fr.sender(&reg_map);
1321     assert(fr.is_entry_frame(), "must be");
1322     // fr is now pointing to the entry frame.
1323     callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1324   } else {
1325     Bytecodes::Code bc;
1326     CallInfo callinfo;
1327     find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1328     callee_method = methodHandle(current, callinfo.selected_method());
1329   }
1330   assert(callee_method()->is_method(), "must be");
1331   return callee_method;
1332 }
1333 
1334 // Resolves a call.
1335 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
1336   methodHandle callee_method;
1337   callee_method = resolve_sub_helper(is_virtual, is_optimized, THREAD);
1338   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
1339     int retry_count = 0;
1340     while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
1341            callee_method->method_holder() != vmClasses::Object_klass()) {
 1342       // If there is a pending exception then there is no need to re-try
 1343       // resolving this method.
 1344       // If the method has been redefined, we need to try again.
 1345       // Hack: we have no way to update the vtables of arrays, so don't
 1346       // require that java.lang.Object has been updated.
 1347 
 1348       // It is very unlikely that a method is redefined more than 100 times
 1349       // in the middle of resolve. If it loops here more than 100 times,
 1350       // there could be a bug.
1351       guarantee((retry_count++ < 100),
1352                 "Could not resolve to latest version of redefined method");
1353       // method is redefined in the middle of resolve so re-try.
1354       callee_method = resolve_sub_helper(is_virtual, is_optimized, THREAD);
1355     }
1356   }
1357   return callee_method;
1358 }
1359 
1360 // This fails if resolution required refilling of IC stubs
1361 bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
1362                                                 CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
1363                                                 Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
1364   StaticCallInfo static_call_info;
1365   CompiledICInfo virtual_call_info;
1366 
1367   // Make sure the callee nmethod does not get deoptimized and removed before
1368   // we are done patching the code.
1369   CompiledMethod* callee = callee_method->code();
1370 
1371   if (callee != NULL) {
1372     assert(callee->is_compiled(), "must be nmethod for patching");
1373   }
1374 
1375   if (callee != NULL && !callee->is_in_use()) {
1376     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1377     callee = NULL;
1378   }
1379 #ifdef ASSERT
1380   address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
1381 #endif
1382 
1383   bool is_nmethod = caller_nm->is_nmethod();
1384 
1385   if (is_virtual) {
1386     assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1387     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1388     Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
1389     CompiledIC::compute_monomorphic_entry(callee_method, klass,
1390                      is_optimized, static_bound, is_nmethod, virtual_call_info,
1391                      CHECK_false);
1392   } else {
1393     // static call
1394     CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
1395   }
1396 
1397   // grab lock, check for deoptimization and potentially patch caller
1398   {
1399     CompiledICLocker ml(caller_nm);
1400 
1401     // Lock blocks for safepoint during which both nmethods can change state.
1402 
 1403     // Now that we are ready to patch: if the Method* was redefined then
 1404     // don't update the call site and let the caller retry.
 1405     // Don't update the call site if the callee nmethod was unloaded or deoptimized.
 1406     // Don't update the call site if the callee nmethod was replaced by another
 1407     // nmethod, which may happen when multiple alive nmethods per method
 1408     // (tiered compilation) are supported.
1409     if (!callee_method->is_old() &&
1410         (callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
1411       NoSafepointVerifier nsv;
1412 #ifdef ASSERT
1413       // We must not try to patch to jump to an already unloaded method.
1414       if (dest_entry_point != 0) {

1427       } else {
1428         if (VM_Version::supports_fast_class_init_checks() &&
1429             invoke_code == Bytecodes::_invokestatic &&
1430             callee_method->needs_clinit_barrier() &&
1431             callee != NULL && callee->is_compiled_by_jvmci()) {
1432           return true; // skip patching for JVMCI
1433         }
1434         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
1435         if (is_nmethod && caller_nm->method()->is_continuation_enter_intrinsic()) {
1436           ssc->compute_entry_for_continuation_entry(callee_method, static_call_info);
1437         }
1438         if (ssc->is_clean()) ssc->set(static_call_info);
1439       }
1440     }
1441   } // unlock CompiledICLocker
1442   return true;
1443 }
1444 
1445 // Resolves a call.  The compilers generate code for calls that go here
1446 // and are patched with the real destination of the call.
1447 methodHandle SharedRuntime::resolve_sub_helper(bool is_virtual, bool is_optimized, TRAPS) {
1448   JavaThread* current = THREAD;
1449   ResourceMark rm(current);
1450   RegisterMap cbl_map(current,
1451                       RegisterMap::UpdateMap::skip,
1452                       RegisterMap::ProcessFrames::include,
1453                       RegisterMap::WalkContinuation::skip);
1454   frame caller_frame = current->last_frame().sender(&cbl_map);
1455 
1456   CodeBlob* caller_cb = caller_frame.cb();
1457   guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
1458   CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
1459 
1460   // determine call info & receiver
1461   // note: a) receiver is NULL for static calls
1462   //       b) an exception is thrown if receiver is NULL for non-static calls
1463   CallInfo call_info;
1464   Bytecodes::Code invoke_code = Bytecodes::_illegal;
1465   Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1466   methodHandle callee_method(current, call_info.selected_method());
1467 
1468   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1469          (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1470          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1471          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1472          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1473 
1474   assert(!caller_nm->is_unloading(), "It should not be unloading");
1475 
1476 #ifndef PRODUCT
1477   // tracing/debugging/statistics
1478   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :

1537     }
1538   }
1539 
1540 }
1541 
1542 
1543 // Inline caches exist only in compiled code
1544 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1545 #ifdef ASSERT
1546   RegisterMap reg_map(current,
1547                       RegisterMap::UpdateMap::skip,
1548                       RegisterMap::ProcessFrames::include,
1549                       RegisterMap::WalkContinuation::skip);
1550   frame stub_frame = current->last_frame();
1551   assert(stub_frame.is_runtime_frame(), "sanity check");
1552   frame caller_frame = stub_frame.sender(&reg_map);
1553   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1554 #endif /* ASSERT */
1555 
1556   methodHandle callee_method;
1557   JRT_BLOCK
1558     callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1559     // Return Method* through TLS
1560     current->set_vm_result_2(callee_method());
1561   JRT_BLOCK_END
1562   // return compiled code entry point after potential safepoints
1563   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1564   return callee_method->verified_code_entry();
1565 JRT_END
1566 
1567 
1568 // Handle call site that has been made non-entrant
1569 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1570   // 6243940 We might end up in here if the callee is deoptimized
1571   // as we race to call it.  We don't want to take a safepoint if
1572   // the caller was interpreted because the caller frame will look
1573   // interpreted to the stack walkers and arguments are now
1574   // "compiled" so it is much better to make this transition
1575   // invisible to the stack walking code. The i2c path will
1576   // place the callee method in the callee_target. It is stashed
 1577   // there because if we try to find the callee by normal means a
 1578   // safepoint is possible and we would have trouble GC'ing the compiled args.
1579   RegisterMap reg_map(current,
1580                       RegisterMap::UpdateMap::skip,
1581                       RegisterMap::ProcessFrames::include,
1582                       RegisterMap::WalkContinuation::skip);
1583   frame stub_frame = current->last_frame();
1584   assert(stub_frame.is_runtime_frame(), "sanity check");
1585   frame caller_frame = stub_frame.sender(&reg_map);
1586 
1587   if (caller_frame.is_interpreted_frame() ||
1588       caller_frame.is_entry_frame() ||
1589       caller_frame.is_upcall_stub_frame()) {
1590     Method* callee = current->callee_target();
1591     guarantee(callee != NULL && callee->is_method(), "bad handshake");
1592     current->set_vm_result_2(callee);
1593     current->set_callee_target(NULL);
1594     if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1595       // Bypass class initialization checks in c2i when caller is in native.
1596       // JNI calls to static methods don't have class initialization checks.
1597       // Fast class initialization checks are present in c2i adapters and call into
1598       // SharedRuntime::handle_wrong_method() on the slow path.
1599       //
1600       // JVM upcalls may land here as well, but there's a proper check present in
1601       // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1602       // so bypassing it in c2i adapter is benign.
1603       return callee->get_c2i_no_clinit_check_entry();
1604     } else {
1605       return callee->get_c2i_entry();
1606     }
1607   }
1608 
1609   // Must be compiled to compiled path which is safe to stackwalk
1610   methodHandle callee_method;
1611   JRT_BLOCK
1612     // Force resolving of caller (if we called from compiled frame)
1613     callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1614     current->set_vm_result_2(callee_method());
1615   JRT_BLOCK_END
1616   // return compiled code entry point after potential safepoints
1617   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1618   return callee_method->verified_code_entry();
1619 JRT_END
1620 
1621 // Handle abstract method call
1622 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1623   // Verbose error message for AbstractMethodError.
1624   // Get the called method from the invoke bytecode.
1625   vframeStream vfst(current, true);
1626   assert(!vfst.at_end(), "Java frame must exist");
1627   methodHandle caller(current, vfst.method());
1628   Bytecode_invoke invoke(caller, vfst.bci());
1629   DEBUG_ONLY( invoke.verify(); )
1630 
1631   // Find the compiled caller frame.
1632   RegisterMap reg_map(current,
1633                       RegisterMap::UpdateMap::include,
1634                       RegisterMap::ProcessFrames::include,
1635                       RegisterMap::WalkContinuation::skip);
1636   frame stubFrame = current->last_frame();
1637   assert(stubFrame.is_runtime_frame(), "must be");
1638   frame callerFrame = stubFrame.sender(&reg_map);
1639   assert(callerFrame.is_compiled_frame(), "must be");
1640 
1641   // Install exception and return forward entry.
1642   address res = StubRoutines::throw_AbstractMethodError_entry();
1643   JRT_BLOCK
1644     methodHandle callee(current, invoke.static_target(current));
1645     if (!callee.is_null()) {
1646       oop recv = callerFrame.retrieve_receiver(&reg_map);
1647       Klass *recv_klass = (recv != NULL) ? recv->klass() : NULL;
1648       res = StubRoutines::forward_exception_entry();
1649       LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1650     }
1651   JRT_BLOCK_END
1652   return res;
1653 JRT_END
1654 
1655 
1656 // resolve a static call and patch code
1657 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1658   methodHandle callee_method;
1659   bool enter_special = false;
1660   JRT_BLOCK
1661     callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1662     current->set_vm_result_2(callee_method());
1663 
1664     if (current->is_interp_only_mode()) {
1665       RegisterMap reg_map(current,
1666                           RegisterMap::UpdateMap::skip,
1667                           RegisterMap::ProcessFrames::include,
1668                           RegisterMap::WalkContinuation::skip);
1669       frame stub_frame = current->last_frame();
1670       assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1671       frame caller = stub_frame.sender(&reg_map);
1672       enter_special = caller.cb() != NULL && caller.cb()->is_compiled()
1673         && caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic();
1674     }
1675   JRT_BLOCK_END
1676 
1677   if (current->is_interp_only_mode() && enter_special) {
1678     // enterSpecial is compiled and calls this method to resolve the call to Continuation::enter
1679     // but in interp_only_mode we need to go to the interpreted entry
1680     // The c2i won't patch in this mode -- see fixup_callers_callsite
1681     //
1682     // This should probably be done in all cases, not just enterSpecial (see JDK-8218403),
1683     // but that's part of a larger fix, and the situation is worse for enterSpecial, as it has no
1684     // interpreted version.
1685     return callee_method->get_c2i_entry();
1686   }
1687 
1688   // return compiled code entry point after potential safepoints
1689   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1690   return callee_method->verified_code_entry();
1691 JRT_END
1692 
1693 
1694 // resolve virtual call and update inline cache to monomorphic
1695 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1696   methodHandle callee_method;
1697   JRT_BLOCK
1698     callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
1699     current->set_vm_result_2(callee_method());
1700   JRT_BLOCK_END
1701   // return compiled code entry point after potential safepoints
1702   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1703   return callee_method->verified_code_entry();
1704 JRT_END
1705 
1706 
1707 // Resolve a virtual call that can be statically bound (e.g., always
1708 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1709 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1710   methodHandle callee_method;
1711   JRT_BLOCK
1712     callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
1713     current->set_vm_result_2(callee_method());
1714   JRT_BLOCK_END
1715   // return compiled code entry point after potential safepoints
1716   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1717   return callee_method->verified_code_entry();
1718 JRT_END
1719 
 1720 // The handle_ic_miss_helper_internal function returns false if it failed
 1721 // to transition the inline cache because it ran out of vtable stubs or of
 1722 // IC stubs for the transitional states. The needs_ic_stub_refill value is
 1723 // set if the failure was due to running out of IC stubs, in which case
 1724 // handle_ic_miss_helper refills the IC stubs and tries again.
1725 bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
1726                                                    const frame& caller_frame, methodHandle callee_method,
1727                                                    Bytecodes::Code bc, CallInfo& call_info,
1728                                                    bool& needs_ic_stub_refill, TRAPS) {
1729   CompiledICLocker ml(caller_nm);
1730   CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1731   bool should_be_mono = false;
1732   if (inline_cache->is_optimized()) {
1733     if (TraceCallFixup) {
1734       ResourceMark rm(THREAD);
1735       tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1736       callee_method->print_short_name(tty);
1737       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1738     }
1739     should_be_mono = true;
1740   } else if (inline_cache->is_icholder_call()) {
1741     CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1742     if (ic_oop != NULL) {
1743       if (!ic_oop->is_loader_alive()) {
1744         // Deferred IC cleaning due to concurrent class unloading
1745         if (!inline_cache->set_to_clean()) {
1746           needs_ic_stub_refill = true;
1747           return false;
1748         }
1749       } else if (receiver()->klass() == ic_oop->holder_klass()) {
1750         // This isn't a real miss. We must have seen that compiled code
1751         // is now available and we want the call site converted to a
1752         // monomorphic compiled call site.
1753         // We can't assert for callee_method->code() != NULL because it
1754         // could have been deoptimized in the meantime
1755         if (TraceCallFixup) {
1756           ResourceMark rm(THREAD);
1757           tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1758           callee_method->print_short_name(tty);
1759           tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1760         }
1761         should_be_mono = true;
1762       }
1763     }
1764   }
1765 
1766   if (should_be_mono) {
1767     // We have a path that was monomorphic but was going interpreted
1768     // and now we have (or had) a compiled entry. We correct the IC
1769     // by using a new icBuffer.
1770     CompiledICInfo info;
1771     Klass* receiver_klass = receiver()->klass();
1772     inline_cache->compute_monomorphic_entry(callee_method,
1773                                             receiver_klass,
1774                                             inline_cache->is_optimized(),
1775                                             false, caller_nm->is_nmethod(),
1776                                             info, CHECK_false);
1777     if (!inline_cache->set_to_monomorphic(info)) {
1778       needs_ic_stub_refill = true;
1779       return false;
1780     }
1781   } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1782     // Potential change to megamorphic
1783 
1784     bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false);
1785     if (needs_ic_stub_refill) {
1786       return false;
1787     }
1788     if (!successful) {
1789       if (!inline_cache->set_to_clean()) {
1790         needs_ic_stub_refill = true;
1791         return false;
1792       }
1793     }
1794   } else {
1795     // Either clean or megamorphic
1796   }
1797   return true;
1798 }
1799 
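// handle_ic_miss_helper retries handle_ic_miss_helper_internal in a loop, each
// iteration under a fresh ICRefillVerifier scope. If the internal helper failed
// only because the IC stub buffer was exhausted (needs_ic_stub_refill), the
// buffer is refilled and the IC transition is attempted again.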
1800 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1801   JavaThread* current = THREAD;
1802   ResourceMark rm(current);
1803   CallInfo call_info;
1804   Bytecodes::Code bc;
1805 
1806   // receiver is NULL for static calls. An exception is thrown for NULL
1807   // receivers for non-static calls
1808   Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
 1809   // Compiler1 can produce virtual call sites that can actually be statically bound.
 1810   // If we fell through to below we would think that the site was going megamorphic
 1811   // when in fact the site can never miss. Worse, because we'd think it was megamorphic
 1812   // we'd try to do a vtable dispatch; however, methods that can be statically bound
 1813   // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
 1814   // reresolution of the call site (as if we did a handle_wrong_method and not a
 1815   // plain ic_miss) and the site will be converted to an optimized virtual call site
 1816   // never to miss again. I don't believe C2 will produce code like this, but if it
 1817   // did, this would still be the correct thing to do for it too, hence no ifdef.
 1818   //
1819   if (call_info.resolved_method()->can_be_statically_bound()) {
1820     methodHandle callee_method = SharedRuntime::reresolve_call_site(CHECK_(methodHandle()));
1821     if (TraceCallFixup) {
1822       RegisterMap reg_map(current,
1823                           RegisterMap::UpdateMap::skip,
1824                           RegisterMap::ProcessFrames::include,
1825                           RegisterMap::WalkContinuation::skip);
1826       frame caller_frame = current->last_frame().sender(&reg_map);
1827       ResourceMark rm(current);
1828       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1829       callee_method->print_short_name(tty);
1830       tty->print_cr(" from pc: " INTPTR_FORMAT, p2i(caller_frame.pc()));
1831       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1832     }
1833     return callee_method;
1834   }
1835 
1836   methodHandle callee_method(current, call_info.selected_method());
1837 
1838 #ifndef PRODUCT
1839   Atomic::inc(&_ic_miss_ctr);
1840 

1859 #endif
1860 
1861   // install an event collector so that when a vtable stub is created the
1862   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1863   // event can't be posted when the stub is created as locks are held
1864   // - instead the event will be deferred until the event collector goes
1865   // out of scope.
1866   JvmtiDynamicCodeEventCollector event_collector;
1867 
1868   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1869   // Transitioning IC caches may require transition stubs. If we run out
1870   // of transition stubs, we have to drop locks and perform a safepoint
1871   // that refills them.
1872   RegisterMap reg_map(current,
1873                       RegisterMap::UpdateMap::skip,
1874                       RegisterMap::ProcessFrames::include,
1875                       RegisterMap::WalkContinuation::skip);
1876   frame caller_frame = current->last_frame().sender(&reg_map);
1877   CodeBlob* cb = caller_frame.cb();
1878   CompiledMethod* caller_nm = cb->as_compiled_method();
1879 
1880   for (;;) {
1881     ICRefillVerifier ic_refill_verifier;
1882     bool needs_ic_stub_refill = false;
1883     bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1884                                                      bc, call_info, needs_ic_stub_refill, CHECK_(methodHandle()));
1885     if (successful || !needs_ic_stub_refill) {
1886       return callee_method;
1887     } else {
1888       InlineCacheBuffer::refill_ic_stubs();
1889     }
1890   }
1891 }
1892 
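// Resets the call site at call_addr to the clean state. Returns false only if
// set_to_clean could not complete for lack of IC stubs, in which case the
// caller (reresolve_call_site below) refills the IC stub buffer and retries.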
1893 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1894   CompiledICLocker ml(caller_nm);
1895   if (is_static_call) {
1896     CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1897     if (!ssc->is_clean()) {
1898       return ssc->set_to_clean();
1899     }
1900   } else {
1901     // compiled, dispatched call (which used to call an interpreted method)
1902     CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1903     if (!inline_cache->is_clean()) {
1904       return inline_cache->set_to_clean();
1905     }
1906   }
1907   return true;
1908 }
1909 
1910 //
 1911 // Resets a call site in compiled code so it will get resolved again.
 1912 // This routine handles virtual call sites, optimized virtual call
 1913 // sites, and static call sites. Typically used to change a call site's
 1914 // destination from compiled to interpreted.
1915 //
1916 methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
1917   JavaThread* current = THREAD;
1918   ResourceMark rm(current);
1919   RegisterMap reg_map(current,
1920                       RegisterMap::UpdateMap::skip,
1921                       RegisterMap::ProcessFrames::include,
1922                       RegisterMap::WalkContinuation::skip);
1923   frame stub_frame = current->last_frame();
1924   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1925   frame caller = stub_frame.sender(&reg_map);
1926 
1927   // Do nothing if the frame isn't a live compiled frame.
1928   // nmethod could be deoptimized by the time we get here
1929   // so no update to the caller is needed.
1930 
1931   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
1932 
1933     address pc = caller.pc();
1934 
1935     // Check for static or virtual call
1937     CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
1938 
 1939     // Default call_addr is the location of the "basic" call.
 1940     // Determine the address of the call we are re-resolving. With
 1941     // Inline Caches we will always find a recognizable call.
 1942     // With Inline Caches disabled we may or may not find a
 1943     // recognizable call. We will always find a call for static
 1944     // calls and for optimized virtual calls. For vanilla virtual
 1945     // calls it depends on the state of the UseInlineCaches switch.
 1946     //
 1947     // With Inline Caches disabled we can get here for a virtual call
 1948     // for two reasons:
 1949     //   1 - calling an abstract method. The vtable for abstract methods
 1950     //       will run us through handle_wrong_method and we will eventually
 1951     //       end up in the interpreter to throw the AbstractMethodError.
 1952     //   2 - a racing deoptimization. We could be doing a vanilla vtable
 1953     //       call and between the time we fetch the entry address and
 1954     //       the time we jump to it the target gets deoptimized. Similar to 1,
 1955     //       we will wind up in the interpreter (through a c2i with C2).
 1956     //
1957     address call_addr = NULL;
1958     {
1959       // Get call instruction under lock because another thread may be
1960       // busy patching it.
1961       CompiledICLocker ml(caller_nm);
1962       // Location of call instruction
1963       call_addr = caller_nm->call_instruction_address(pc);
1964     }
1965 
1966     // Check relocations for the matching call to 1) avoid false positives,
1967     // and 2) determine the type.
1968     if (call_addr != NULL) {
1969       // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1970       // bytes back in the instruction stream so we must also check for reloc info.
1971       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1972       bool ret = iter.next(); // Get item
1973       if (ret) {
1974         bool is_static_call = false;
1975         switch (iter.type()) {
1976           case relocInfo::static_call_type:
 1977             is_static_call = true;
 1978             // fall through: static call sites are reset below the same way as virtual ones
1979           case relocInfo::virtual_call_type:
1980           case relocInfo::opt_virtual_call_type:
 1981             // Cleaning the inline cache will force a new resolve. This is more robust
 1982             // than directly setting it to the new destination, since resolving of calls
 1983             // is always done through the same code path. (Experience shows that an
 1984             // inline cache updated to a wrong method leads to bugs that are very
 1985             // hard to track down.) It should not be performance critical, since the
 1986             // resolve is only done once.
1987             guarantee(iter.addr() == call_addr, "must find call");
1988             for (;;) {
1989               ICRefillVerifier ic_refill_verifier;
1990               if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
1991                 InlineCacheBuffer::refill_ic_stubs();
1992               } else {
1993                 break;
1994               }
1995             }
1996             break;
1997           default:
1998             break;
1999         }
2000       }
2001     }
2002   }
2003 
2004   methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
2005 
2006 
2007 #ifndef PRODUCT
2008   Atomic::inc(&_wrong_method_ctr);
2009 
2010   if (TraceCallFixup) {
2011     ResourceMark rm(current);
2012     tty->print("handle_wrong_method reresolving call to");
2013     callee_method->print_short_name(tty);
2014     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
2015   }
2016 #endif
2017 
2018   return callee_method;
2019 }
2020 
2021 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
2022   // The faulting unsafe accesses should be changed to throw the error
2023   // synchronously instead. Meanwhile the faulting instruction will be
2024   // skipped over (effectively turning it into a no-op) and an
2025   // asynchronous exception will be raised which the thread will
2026   // handle at a later point. If the instruction is a load it will

2159       // for the rest of its life! Just another racing bug in the life of
2160       // fixup_callers_callsite ...
2161       //
2162       RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
2163       iter.next();
2164       assert(iter.has_current(), "must have a reloc at java call site");
2165       relocInfo::relocType typ = iter.reloc()->type();
2166       if (typ != relocInfo::static_call_type &&
2167            typ != relocInfo::opt_virtual_call_type &&
2168            typ != relocInfo::static_stub_type) {
2169         return;
2170       }
2171       if (nm->method()->is_continuation_enter_intrinsic()) {
2172         assert(ContinuationEntry::is_interpreted_call(call->instruction_address()) == JavaThread::current()->is_interp_only_mode(),
2173           "mode: %d", JavaThread::current()->is_interp_only_mode());
2174         if (ContinuationEntry::is_interpreted_call(call->instruction_address())) {
2175           return;
2176         }
2177       }
2178       address destination = call->destination();
2179       address entry_point = callee->verified_entry_point();
2180       if (should_fixup_call_destination(destination, entry_point, caller_pc, moop, cb)) {
2181         call->set_destination_mt_safe(entry_point);
2182       }
2183     }
2184   }
2185 JRT_END
2186 
2187 
2188 // same as JVM_Arraycopy, but called directly from compiled code
2189 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
2190                                                 oopDesc* dest, jint dest_pos,
2191                                                 jint length,
2192                                                 JavaThread* current)) {
2193 #ifndef PRODUCT
2194   _slow_array_copy_ctr++;
2195 #endif
2196   // Check if we have null pointers
2197   if (src == NULL || dest == NULL) {
2198     THROW(vmSymbols::java_lang_NullPointerException());
2199   }

2489  private:
2490   enum {
2491     _basic_type_bits = 4,
2492     _basic_type_mask = right_n_bits(_basic_type_bits),
2493     _basic_types_per_int = BitsPerInt / _basic_type_bits,
2494     _compact_int_count = 3
2495   };
2496   // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
 2497   // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2498 
2499   union {
2500     int  _compact[_compact_int_count];
2501     int* _fingerprint;
2502   } _value;
 2503   int _length; // A negative length indicates the fingerprint is in the compact form;
 2504                // otherwise _value._fingerprint is the array.
2505 
2506   // Remap BasicTypes that are handled equivalently by the adapters.
2507   // These are correct for the current system but someday it might be
2508   // necessary to make this mapping platform dependent.
2509   static int adapter_encoding(BasicType in) {
2510     switch (in) {
2511       case T_BOOLEAN:
2512       case T_BYTE:
2513       case T_SHORT:
2514       case T_CHAR:
 2515         // These are all promoted to T_INT in the calling convention
2516         return T_INT;
2517 
2518       case T_OBJECT:
2519       case T_ARRAY:
2520         // In other words, we assume that any register good enough for
2521         // an int or long is good enough for a managed pointer.
2522 #ifdef _LP64
2523         return T_LONG;
2524 #else
2525         return T_INT;
2526 #endif
2527 
2528       case T_INT:
2529       case T_LONG:
2530       case T_FLOAT:
2531       case T_DOUBLE:
2532       case T_VOID:
2533         return in;
2534 
2535       default:
2536         ShouldNotReachHere();
2537         return T_CONFLICT;
2538     }
2539   }
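  // For example (a sketch, assuming the usual HotSpot BasicType numbering:
  // T_INT == 10, T_LONG == 11, T_OBJECT == 12): on LP64 an (Object, int)
  // argument list is remapped to { T_LONG, T_INT } == { 0xB, 0xA }, and every
  // remapped value fits in _basic_type_bits == 4 bits.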
2540 
2541  public:
2542   AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
2543     // The fingerprint is based on the BasicType signature encoded
2544     // into an array of ints with eight entries per int.
2545     int* ptr;
2546     int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2547     if (len <= _compact_int_count) {
2548       assert(_compact_int_count == 3, "else change next line");
2549       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2550       // Storing the signature encoded as signed chars hits about 98%
2551       // of the time.
2552       _length = -len;
2553       ptr = _value._compact;
2554     } else {
2555       _length = len;
2556       _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2557       ptr = _value._fingerprint;
2558     }
2559 
2560     // Now pack the BasicTypes with 8 per int
2561     int sig_index = 0;
2562     for (int index = 0; index < len; index++) {
2563       int value = 0;
2564       for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
2565         int bt = adapter_encoding(sig_bt[sig_index++]);
2566         assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2567         value = (value << _basic_type_bits) | bt;
2568       }
2569       ptr[index] = value;
2570     }
2571   }
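  // Worked example (a sketch, assuming T_INT == 10, T_LONG == 11, T_VOID == 14):
  // a static method taking (int, long) has sig_bt = { T_INT, T_LONG, T_VOID },
  // so len == 1 and the compact form is used (_length == -1). The packing loop
  // produces _value._compact[0] = ((0xA << 4 | 0xB) << 4) | 0xE == 0xABE, with
  // the first argument in the most significant occupied nibble.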
2572 
2573   ~AdapterFingerPrint() {
2574     if (_length > 0) {
2575       FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2576     }
2577   }
2578 
2579   int value(int index) {
2580     if (_length < 0) {
2581       return _value._compact[index];
2582     }
2583     return _value._fingerprint[index];
2584   }
2585   int length() {
2586     if (_length < 0) return -_length;
2587     return _length;
2588   }
2589 
2590   bool is_compact() {

2615   const char* as_basic_args_string() {
2616     stringStream st;
2617     bool long_prev = false;
2618     for (int i = 0; i < length(); i++) {
2619       unsigned val = (unsigned)value(i);
2620       // args are packed so that first/lower arguments are in the highest
2621       // bits of each int value, so iterate from highest to the lowest
2622       for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2623         unsigned v = (val >> j) & _basic_type_mask;
2624         if (v == 0) {
2625           assert(i == length() - 1, "Only expect zeroes in the last word");
2626           continue;
2627         }
2628         if (long_prev) {
2629           long_prev = false;
2630           if (v == T_VOID) {
2631             st.print("J");
2632           } else {
2633             st.print("L");
2634           }
2635         }
2636         switch (v) {
2637           case T_INT:    st.print("I");    break;
2638           case T_LONG:   long_prev = true; break;
2639           case T_FLOAT:  st.print("F");    break;
2640           case T_DOUBLE: st.print("D");    break;
2641           case T_VOID:   break;
2642           default: ShouldNotReachHere();
2643         }
2644       }
2645     }
2646     if (long_prev) {
2647       st.print("L");
2648     }
2649     return st.as_string();
2650   }
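  // For the 0xABE fingerprint from the packing example above, this returns
  // "IJ": the leading zero nibbles (padding) are skipped, 0xA prints "I", and
  // the T_LONG/T_VOID pair 0xB,0xE is folded into a single "J".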
 2651 #endif // !PRODUCT
2652 
2653   bool equals(AdapterFingerPrint* other) {
2654     if (other->_length != _length) {
2655       return false;
2656     }
2657     if (_length < 0) {
2658       assert(_compact_int_count == 3, "else change next line");
2659       return _value._compact[0] == other->_value._compact[0] &&
2660              _value._compact[1] == other->_value._compact[1] &&
2661              _value._compact[2] == other->_value._compact[2];
2662     } else {

2669     return true;
2670   }
2671 
2672   static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2673     NOT_PRODUCT(_equals++);
2674     return fp1->equals(fp2);
2675   }
2676 
2677   static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2678     return fp->compute_hash();
2679   }
2680 };
2681 
2682 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2683 ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2684                   AnyObj::C_HEAP, mtCode,
2685                   AdapterFingerPrint::compute_hash,
2686                   AdapterFingerPrint::equals> _adapter_handler_table;
2687 
 2688 // Find an entry with the same fingerprint if it exists
2689 static AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
2690   NOT_PRODUCT(_lookups++);
2691   assert_lock_strong(AdapterHandlerLibrary_lock);
2692   AdapterFingerPrint fp(total_args_passed, sig_bt);
2693   AdapterHandlerEntry** entry = _adapter_handler_table.get(&fp);
2694   if (entry != nullptr) {
2695 #ifndef PRODUCT
2696     if (fp.is_compact()) _compact++;
2697     _hits++;
2698 #endif
2699     return *entry;
2700   }
2701   return nullptr;
2702 }
2703 
2704 #ifndef PRODUCT
2705 static void print_table_statistics() {
2706   auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2707     return sizeof(*key) + sizeof(*a);
2708   };
2709   TableStatistics ts = _adapter_handler_table.statistics_calculate(size);
2710   ts.print(tty, "AdapterHandlerTable");
2711   tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2712                 _adapter_handler_table.table_size(), _adapter_handler_table.number_of_entries());
2713   tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2714                 _lookups, _equals, _hits, _compact);
2715 }
2716 #endif
2717 
2718 // ---------------------------------------------------------------------------
2719 // Implementation of AdapterHandlerLibrary
2720 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
2721 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = NULL;
2722 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = NULL;
2723 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = NULL;
2724 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = NULL;
2725 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = NULL;
2726 const int AdapterHandlerLibrary_size = 16*K;
2727 BufferBlob* AdapterHandlerLibrary::_buffer = NULL;
2728 
2729 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2730   return _buffer;
2731 }
2732 
2733 static void post_adapter_creation(const AdapterBlob* new_adapter,
2734                                   const AdapterHandlerEntry* entry) {
2735   if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2736     char blob_id[256];
2737     jio_snprintf(blob_id,
2738                  sizeof(blob_id),
2739                  "%s(%s)",
2740                  new_adapter->name(),
2741                  entry->fingerprint()->as_string());
2742     if (Forte::is_enabled()) {
2743       Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2744     }
2745 
2746     if (JvmtiExport::should_post_dynamic_code_generated()) {

2748     }
2749   }
2750 }
2751 
2752 void AdapterHandlerLibrary::initialize() {
2753   ResourceMark rm;
2754   AdapterBlob* no_arg_blob = NULL;
2755   AdapterBlob* int_arg_blob = NULL;
2756   AdapterBlob* obj_arg_blob = NULL;
2757   AdapterBlob* obj_int_arg_blob = NULL;
2758   AdapterBlob* obj_obj_arg_blob = NULL;
2759   {
2760     MutexLocker mu(AdapterHandlerLibrary_lock);
2761 
2762     // Create a special handler for abstract methods.  Abstract methods
2763     // are never compiled so an i2c entry is somewhat meaningless, but
2764     // throw AbstractMethodError just in case.
2765     // Pass wrong_method_abstract for the c2i transitions to return
2766     // AbstractMethodError for invalid invocations.
2767     address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2768     _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
2769                                                                 StubRoutines::throw_AbstractMethodError_entry(),
2770                                                                 wrong_method_abstract, wrong_method_abstract);
2771 
2772     _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2773     _no_arg_handler = create_adapter(no_arg_blob, 0, NULL, true);
2774 
2775     BasicType obj_args[] = { T_OBJECT };
2776     _obj_arg_handler = create_adapter(obj_arg_blob, 1, obj_args, true);
2777 
2778     BasicType int_args[] = { T_INT };
2779     _int_arg_handler = create_adapter(int_arg_blob, 1, int_args, true);
2780 
2781     BasicType obj_int_args[] = { T_OBJECT, T_INT };
2782     _obj_int_arg_handler = create_adapter(obj_int_arg_blob, 2, obj_int_args, true);
2783 
2784     BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
2785     _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, 2, obj_obj_args, true);
2786 
2787     assert(no_arg_blob != NULL &&
2788           obj_arg_blob != NULL &&
2789           int_arg_blob != NULL &&
2790           obj_int_arg_blob != NULL &&
2791           obj_obj_arg_blob != NULL, "Initial adapters must be properly created");
2792   }
2793 
2794   // Outside of the lock
2795   post_adapter_creation(no_arg_blob, _no_arg_handler);
2796   post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2797   post_adapter_creation(int_arg_blob, _int_arg_handler);
2798   post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2799   post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2800 }
2801 
2802 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2803                                                       address i2c_entry,
2804                                                       address c2i_entry,
2805                                                       address c2i_unverified_entry,
2806                                                       address c2i_no_clinit_check_entry) {
2807   // Insert an entry into the table
2808   return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry,
2809                                  c2i_no_clinit_check_entry);
2810 }
2811 
2812 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2813   if (method->is_abstract()) {
2814     return _abstract_method_handler;
2815   }
2816   int total_args_passed = method->size_of_parameters(); // All args on stack
2817   if (total_args_passed == 0) {
2818     return _no_arg_handler;
2819   } else if (total_args_passed == 1) {
2820     if (!method->is_static()) {
2821       return _obj_arg_handler;
2822     }
2823     switch (method->signature()->char_at(1)) {
2824       case JVM_SIGNATURE_CLASS:
2825       case JVM_SIGNATURE_ARRAY:
2826         return _obj_arg_handler;
2827       case JVM_SIGNATURE_INT:
2828       case JVM_SIGNATURE_BOOLEAN:
2829       case JVM_SIGNATURE_CHAR:
2830       case JVM_SIGNATURE_BYTE:
2831       case JVM_SIGNATURE_SHORT:
2832         return _int_arg_handler;
2833     }
2834   } else if (total_args_passed == 2 &&
2835              !method->is_static()) {
2836     switch (method->signature()->char_at(1)) {
2837       case JVM_SIGNATURE_CLASS:
2838       case JVM_SIGNATURE_ARRAY:
2839         return _obj_obj_arg_handler;
2840       case JVM_SIGNATURE_INT:
2841       case JVM_SIGNATURE_BOOLEAN:
2842       case JVM_SIGNATURE_CHAR:
2843       case JVM_SIGNATURE_BYTE:
2844       case JVM_SIGNATURE_SHORT:
2845         return _obj_int_arg_handler;
2846     }
2847   }
2848   return NULL;
2849 }
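// A few illustrative cases (a sketch using familiar JDK methods): for
// String::length(), total_args_passed is 1 (just the receiver) and the
// non-static check returns _obj_arg_handler; for the static Math::abs(int),
// signature "(I)I", char_at(1) is 'I' and _int_arg_handler is returned; for
// the static Math::max(long, long), total_args_passed is 4 (each long takes
// two slots), so NULL is returned and the caller falls back to the
// fingerprint table via get_adapter below.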
2850 
2851 class AdapterSignatureIterator : public SignatureIterator {
2852  private:
2853   BasicType stack_sig_bt[16];
2854   BasicType* sig_bt;
2855   int index;
2856 
2857  public:
2858   AdapterSignatureIterator(Symbol* signature,
2859                            fingerprint_t fingerprint,
2860                            bool is_static,
2861                            int total_args_passed) :
2862     SignatureIterator(signature, fingerprint),
2863     index(0)
2864   {
2865     sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2866     if (!is_static) { // Pass in receiver first
2867       sig_bt[index++] = T_OBJECT;
2868     }
2869     do_parameters_on(this);
2870   }
2871 
2872   BasicType* basic_types() {
2873     return sig_bt;
2874   }
2875 
2876 #ifdef ASSERT
2877   int slots() {
2878     return index;
2879   }
2880 #endif
2881 
2882  private:
2883 
2884   friend class SignatureIterator;  // so do_parameters_on can call do_type
2885   void do_type(BasicType type) {
2886     sig_bt[index++] = type;
2887     if (type == T_LONG || type == T_DOUBLE) {
2888       sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots
2889     }
2890   }
2891 };
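// Editor's note -- illustrative sketch, not part of this file. A simplified,
// hypothetical descriptor walker that counts Java argument slots the same way
// the iterator above does: longs and doubles take two slots, references
// (including arrays) take one, and a non-static receiver adds one.
static int count_java_arg_slots(const char* desc, bool is_static) {
  int slots = is_static ? 0 : 1;                 // receiver occupies one slot
  for (const char* p = desc + 1; *p != ')'; p++) {
    bool is_array = false;
    while (*p == '[') { is_array = true; p++; }  // strip array dimensions
    if (*p == 'L') { while (*p != ';') p++; }    // skip over the class name
    // arrays and references are one slot; bare long/double are two
    slots += (!is_array && (*p == 'J' || *p == 'D')) ? 2 : 1;
  }
  return slots;
}
// e.g. count_java_arg_slots("(IJLjava/lang/String;)V", true) == 4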
2892 
2893 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2894   // Use customized signature handler.  Need to lock around updates to
2895   // the _adapter_handler_table (it is not safe for concurrent readers
2896   // and a single writer: this could be fixed if it becomes a
2897   // problem).
2898 
2899   // Fast-path for trivial adapters
2900   AdapterHandlerEntry* entry = get_simple_adapter(method);
2901   if (entry != NULL) {
2902     return entry;
2903   }
2904 
2905   ResourceMark rm;
2906   AdapterBlob* new_adapter = NULL;
2907 
2908   // Fill in the signature array, for the calling-convention call.
2909   int total_args_passed = method->size_of_parameters(); // All args on stack
2910 
2911   AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2912                               method->is_static(), total_args_passed);
2913   assert(si.slots() == total_args_passed, "");
2914   BasicType* sig_bt = si.basic_types();
2915   {
2916     MutexLocker mu(AdapterHandlerLibrary_lock);
2917 
2918     // Lookup method signature's fingerprint
2919     entry = lookup(total_args_passed, sig_bt);
2920 
2921     if (entry != NULL) {
2922 #ifdef ASSERT
2923       if (VerifyAdapterSharing) {
2924         AdapterBlob* comparison_blob = NULL;
2925         AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, total_args_passed, sig_bt, false);
2926         assert(comparison_blob == NULL, "no blob should be created when creating an adapter for comparison");
2927         assert(comparison_entry->compare_code(entry), "code must match");
2928         // Release the one just created and return the original
2929         delete comparison_entry;
2930       }
2931 #endif
2932       return entry;
2933     }
2934 
2935     entry = create_adapter(new_adapter, total_args_passed, sig_bt, /* allocate_code_blob */ true);
2936   }
2937 
2938   // Outside of the lock
2939   if (new_adapter != NULL) {
2940     post_adapter_creation(new_adapter, entry);
2941   }
2942   return entry;
2943 }
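// Editor's note -- illustrative sketch, not part of this file. The locking
// discipline above in miniature: look up or create the entry under the lock,
// then run the slower publication step outside it. Hypothetical types,
// standard library only:
#include <map>
#include <mutex>
#include <string>

struct CachedEntry { /* stand-in for AdapterHandlerEntry */ };

static std::mutex sketch_table_lock;
static std::map<std::string, CachedEntry*> sketch_table;

CachedEntry* get_or_create(const std::string& fingerprint) {
  CachedEntry* created = nullptr;
  {
    std::lock_guard<std::mutex> guard(sketch_table_lock);
    auto it = sketch_table.find(fingerprint);
    if (it != sketch_table.end()) return it->second;  // fast path: cached
    created = new CachedEntry();
    sketch_table.emplace(fingerprint, created);
  }
  // Outside of the lock: slow publication (cf. post_adapter_creation above)
  return created;
}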
2944 
2945 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
2946                                                            int total_args_passed,
2947                                                            BasicType* sig_bt,
2948                                                            bool allocate_code_blob) {
2949 
2950   // StubRoutines::code2() is initialized after this function can be called. As a result,
2951   // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated
2952   // prior to StubRoutines::code2() being set. The checks in question are those generated in an I2C
2953   // stub that ensure that an I2C stub is called from an interpreter frame.
2954   bool contains_all_checks = StubRoutines::code2() != NULL;
2955 
2956   VMRegPair stack_regs[16];
2957   VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2958 
2959   // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2960   int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2961   BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2962   CodeBuffer buffer(buf);
2963   short buffer_locs[20];
2964   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2965                                           sizeof(buffer_locs)/sizeof(relocInfo));
2966 
2967   // Make a C heap allocated version of the fingerprint to store in the adapter
2968   AdapterFingerPrint* fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
2969   MacroAssembler _masm(&buffer);
2970   AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
2971                                                 total_args_passed,
2972                                                 comp_args_on_stack,
2973                                                 sig_bt,
2974                                                 regs,
2975                                                 fingerprint);
2976 
2977 #ifdef ASSERT
2978   if (VerifyAdapterSharing) {
2979     entry->save_code(buf->code_begin(), buffer.insts_size());
2980     if (!allocate_code_blob) {
2981       return entry;
2982     }
2983   }
2984 #endif
2985 
2986   new_adapter = AdapterBlob::create(&buffer);
2987   NOT_PRODUCT(int insts_size = buffer.insts_size());
2988   if (new_adapter == NULL) {
2989     // CodeCache is full, disable compilation
2990     // Ought to log this but the compile log is only per compile thread
2991     // and we're some nondescript Java thread.
2992     return NULL;
2993   }
2994   entry->relocate(new_adapter->content_begin());
2995 #ifndef PRODUCT
2996   // debugging support
2997   if (PrintAdapterHandlers || PrintStubCode) {
2998     ttyLocker ttyl;
2999     entry->print_adapter_on(tty);
3000     tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
3001                   _adapter_handler_table.number_of_entries(), fingerprint->as_basic_args_string(),
3002                   fingerprint->as_string(), insts_size);
3003     tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
3004     if (Verbose || PrintStubCode) {
3005       address first_pc = entry->base_address();
3006       if (first_pc != NULL) {
3007         Disassembler::decode(first_pc, first_pc + insts_size, tty
3008                              NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
3009         tty->cr();
3010       }
3011     }
3012   }
3013 #endif
3014 
3015   // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
3016   // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
3017   if (contains_all_checks || !VerifyAdapterCalls) {
3018     assert_lock_strong(AdapterHandlerLibrary_lock);
3019     _adapter_handler_table.put(fingerprint, entry);
3020   }
3021   return entry;
3022 }
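// Editor's note -- illustrative sketch, not part of this file. The
// small-buffer pattern used twice above (stack_sig_bt / stack_regs): a fixed
// stack array covers the common case, with a heap fallback standing in for
// NEW_RESOURCE_ARRAY when the signature is wider than 16 arguments.
#include <memory>

void layout_args_sketch(int total_args) {
  int stack_buf[16];
  std::unique_ptr<int[]> heap_buf;
  int* slots = stack_buf;
  if (total_args > 16) {
    heap_buf.reset(new int[total_args]);  // rare: very wide signature
    slots = heap_buf.get();
  }
  for (int i = 0; i < total_args; i++) {
    slots[i] = 0;                         // per-argument layout info goes here
  }
}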
3023 
3024 address AdapterHandlerEntry::base_address() {
3025   address base = _i2c_entry;
3026   if (base == NULL)  base = _c2i_entry;
3027   assert(base <= _c2i_entry || _c2i_entry == NULL, "");
3028   assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, "");
3029   assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == NULL, "");
3030   return base;
3031 }
3032 
3033 void AdapterHandlerEntry::relocate(address new_base) {
3034   address old_base = base_address();
3035   assert(old_base != NULL, "");
3036   ptrdiff_t delta = new_base - old_base;
3037   if (_i2c_entry != NULL)
3038     _i2c_entry += delta;
3039   if (_c2i_entry != NULL)
3040     _c2i_entry += delta;
3041   if (_c2i_unverified_entry != NULL)
3042     _c2i_unverified_entry += delta;
3043   if (_c2i_no_clinit_check_entry != NULL)
3044     _c2i_no_clinit_check_entry += delta;
3045   assert(base_address() == new_base, "");
3046 }
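// Editor's note -- illustrative sketch, not part of this file. relocate()
// above rebases every non-null entry point by one delta; the same idiom on a
// hypothetical pair of pointers:
#include <cstddef>

struct EntryPointsSketch {
  unsigned char* i2c;                    // may be null
  unsigned char* c2i;                    // may be null
  void rebase(unsigned char* old_base, unsigned char* new_base) {
    std::ptrdiff_t delta = new_base - old_base;
    if (i2c != nullptr) i2c += delta;
    if (c2i != nullptr) c2i += delta;
  }
};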
3047 
3048 
3049 AdapterHandlerEntry::~AdapterHandlerEntry() {
3050   delete _fingerprint;
3051 #ifdef ASSERT
3052   FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3053 #endif
3054 }
3055 
3056 
3057 #ifdef ASSERT
3058 // Capture the code before relocation so that it can be compared
3059 // against other versions.  If the code is captured after relocation
3060 // then relative instructions won't be equivalent.
3061 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3062   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3063   _saved_code_length = length;
3064   memcpy(_saved_code, buffer, length);
3065 }
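// Editor's note -- illustrative sketch, not part of this file. The
// save/compare pair above in standard C++: capture the bytes before
// relocation, then compare two captures with memcmp (hypothetical type):
#include <cstring>
#include <vector>

struct SavedCodeSketch {
  std::vector<unsigned char> bytes;
  void save(const unsigned char* buf, int len) { bytes.assign(buf, buf + len); }
  bool matches(const SavedCodeSketch& other) const {
    return bytes.size() == other.bytes.size() &&
           std::memcmp(bytes.data(), other.bytes.data(), bytes.size()) == 0;
  }
};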
3066 
3067 
3068 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3069   assert(_saved_code != NULL && other->_saved_code != NULL, "code not saved");
3070 

3117 
3118       struct { double data[20]; } locs_buf;
3119       struct { double data[20]; } stubs_locs_buf;
3120       buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3121 #if defined(AARCH64) || defined(PPC64)
3122       // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3123       // in the constant pool to ensure ordering between the barrier and oops
3124       // accesses. For native_wrappers we need a constant.
3125       // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3126       // static java call that is resolved in the runtime.
3127       if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3128         buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3129       }
3130 #endif
3131       buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3132       MacroAssembler _masm(&buffer);
3133 
3134       // Fill in the signature array, for the calling-convention call.
3135       const int total_args_passed = method->size_of_parameters();
3136 

3137       VMRegPair stack_regs[16];

3138       VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3139 
3140       AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
3141                               method->is_static(), total_args_passed);
3142       BasicType* sig_bt = si.basic_types();
3143       assert(si.slots() == total_args_passed, "");
3144       BasicType ret_type = si.return_type();
3145 
3146       // Now get the compiled-Java arguments layout.
3147       int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3148 
3149       // Generate the compiled-to-native wrapper code
3150       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3151 
3152       if (nm != NULL) {
3153         {
3154           MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
3155           if (nm->make_in_use()) {
3156             method->set_code(method, nm);
3157           }
3158         }
3159 
3160         DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
3161         if (directive->PrintAssemblyOption) {
3162           nm->print_code();
3163         }
3164         DirectivesStack::release(directive);

3361       st->print("Adapter for signature: ");
3362       a->print_adapter_on(st);
3363       return true;
3364     } else {
3365       return false; // keep looking
3366     }
3367   };
3368   assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3369   _adapter_handler_table.iterate(findblob);
3370   assert(found, "Should have found handler");
3371 }
3372 
3373 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3374   st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3375   if (get_i2c_entry() != NULL) {
3376     st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3377   }
3378   if (get_c2i_entry() != NULL) {
3379     st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3380   }
3381   if (get_c2i_unverified_entry() != NULL) {
3382     st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3383   }
3384   if (get_c2i_no_clinit_check_entry() != NULL) {
3385     st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3386   }
3387   st->cr();
3388 }
3389 
3390 #ifndef PRODUCT
3391 
3392 void AdapterHandlerLibrary::print_statistics() {
3393   print_table_statistics();
3394 }
3395 
3396 #endif /* PRODUCT */
3397 
3398 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3399   assert(current == JavaThread::current(), "pre-condition");
3400   StackOverflow* overflow_state = current->stack_overflow_state();
3401   overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3402   overflow_state->set_reserved_stack_activation(current->stack_base());

3451         event.set_method(method);
3452         event.commit();
3453       }
3454     }
3455   }
3456   return activation;
3457 }
3458 
3459 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3460   // After any safepoint, just before going back to compiled code,
3461   // we inform the GC that we will be doing initializing writes to
3462   // this object in the future without emitting card-marks, so
3463   // GC may take any compensating steps.
3464 
3465   oop new_obj = current->vm_result();
3466   if (new_obj == NULL) return;
3467 
3468   BarrierSet *bs = BarrierSet::barrier_set();
3469   bs->on_slowpath_allocation_exit(current, new_obj);
3470 }
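// Editor's note -- illustrative sketch, not part of this file. The shape of
// the hook used above: a virtual, GC-specific callback on the allocation
// slow path, with a no-op default. Hypothetical minimal hierarchy:
struct ObjSketch;

struct BarrierSetSketch {
  virtual ~BarrierSetSketch() {}
  // Most collectors need no compensation for initializing stores.
  virtual void on_slowpath_allocation_exit(ObjSketch* /*new_obj*/) {}
};

struct CardMarkingBarrierSetSketch : BarrierSetSketch {
  void on_slowpath_allocation_exit(ObjSketch* new_obj) override {
    // e.g. dirty the card covering new_obj so deferred card-marks stay sound
    // (hypothetical helper): dirty_card_for(new_obj);
    (void)new_obj;
  }
};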
 106 //----------------------------generate_stubs-----------------------------------
 107 void SharedRuntime::generate_stubs() {
 108   _wrong_method_blob                   = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),          "wrong_method_stub");
 109   _wrong_method_abstract_blob          = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
 110   _ic_miss_blob                        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),  "ic_miss_stub");
 111   _resolve_opt_virtual_call_blob       = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),   "resolve_opt_virtual_call");
 112   _resolve_virtual_call_blob           = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),       "resolve_virtual_call");
 113   _resolve_static_call_blob            = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),        "resolve_static_call");

 114 
 115   AdapterHandlerLibrary::initialize();
 116 
 117 #if COMPILER2_OR_JVMCI
 118   // Vectors are generated only by C2 and JVMCI.
 119   bool support_wide = is_wide_vector(MaxVectorSize);
 120   if (support_wide) {
 121     _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
 122   }
 123 #endif // COMPILER2_OR_JVMCI
 124   _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
 125   _polling_page_return_handler_blob    = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);
 126 
 127   generate_deopt_blob();
 128 
 129 #ifdef COMPILER2
 130   generate_uncommon_trap_blob();
 131 #endif // COMPILER2
 132 }
 133 

1173 // for a call currently in progress, i.e., arguments have been pushed on the stack
1174 // but the callee has not been invoked yet.  The caller frame must be compiled.
1175 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1176                                               CallInfo& callinfo, TRAPS) {
1177   Handle receiver;
1178   Handle nullHandle;  // create a handy null handle for exception returns
1179   JavaThread* current = THREAD;
1180 
1181   assert(!vfst.at_end(), "Java frame must exist");
1182 
1183   // Find caller and bci from vframe
1184   methodHandle caller(current, vfst.method());
1185   int          bci   = vfst.bci();
1186 
1187   if (caller->is_continuation_enter_intrinsic()) {
1188     bc = Bytecodes::_invokestatic;
1189     LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1190     return receiver;
1191   }
1192 
1193   // Substitutability test implementation piggybacks on static call resolution
1194   Bytecodes::Code code = caller->java_code_at(bci);
1195   if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
1196     bc = Bytecodes::_invokestatic;
1197     methodHandle attached_method(THREAD, extract_attached_method(vfst));
1198     assert(attached_method.not_null(), "must have attached method");
1199     vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
1200     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
1201 #ifdef ASSERT
1202     Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(vmSymbols::isSubstitutable_name(), vmSymbols::object_object_boolean_signature());
1203     assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
1204 #endif
1205     return receiver;
1206   }
1207 
1208   Bytecode_invoke bytecode(caller, bci);
1209   int bytecode_index = bytecode.index();
1210   bc = bytecode.invoke_code();
1211 
1212   methodHandle attached_method(current, extract_attached_method(vfst));
1213   if (attached_method.not_null()) {
1214     Method* callee = bytecode.static_target(CHECK_NH);
1215     vmIntrinsics::ID id = callee->intrinsic_id();
1216     // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1217     // it attaches statically resolved method to the call site.
1218     if (MethodHandles::is_signature_polymorphic(id) &&
1219         MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1220       bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1221 
1222       // Adjust invocation mode according to the attached method.
1223       switch (bc) {
1224         case Bytecodes::_invokevirtual:
1225           if (attached_method->method_holder()->is_interface()) {
1226             bc = Bytecodes::_invokeinterface;
1227           }
1228           break;
1229         case Bytecodes::_invokeinterface:
1230           if (!attached_method->method_holder()->is_interface()) {
1231             bc = Bytecodes::_invokevirtual;
1232           }
1233           break;
1234         case Bytecodes::_invokehandle:
1235           if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1236             bc = attached_method->is_static() ? Bytecodes::_invokestatic
1237                                               : Bytecodes::_invokevirtual;
1238           }
1239           break;
1240         default:
1241           break;
1242       }
1243     } else {
1244       assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1245       if (!attached_method->method_holder()->is_inline_klass()) {
1246         // Ignore the attached method in this case to not confuse below code
1247         attached_method = methodHandle(current, NULL);
1248       }
1249     }
1250   }
1251 
1252   assert(bc != Bytecodes::_illegal, "not initialized");
1253 
1254   bool has_receiver = bc != Bytecodes::_invokestatic &&
1255                       bc != Bytecodes::_invokedynamic &&
1256                       bc != Bytecodes::_invokehandle;
1257   bool check_null_and_abstract = true;
1258 
1259   // Find receiver for non-static call
1260   if (has_receiver) {
1261     // This register map must be updated since we need to find the receiver for
1262     // compiled frames. The receiver might be in a register.
1263     RegisterMap reg_map2(current,
1264                          RegisterMap::UpdateMap::include,
1265                          RegisterMap::ProcessFrames::include,
1266                          RegisterMap::WalkContinuation::skip);
1267     frame stubFrame   = current->last_frame();
1268     // Caller-frame is a compiled frame
1269     frame callerFrame = stubFrame.sender(&reg_map2);
1270     bool caller_is_c1 = false;
1271 
1272     if (callerFrame.is_compiled_frame()) {
1273       caller_is_c1 = callerFrame.cb()->is_compiled_by_c1();
1274     }
1275 
1276     Method* callee = attached_method();
1277     if (callee == NULL) {
1278       callee = bytecode.static_target(CHECK_NH);
1279       if (callee == NULL) {
1280         THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1281       }
1282     }
1283     if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
1284       // If the receiver is an inline type that is passed as fields, no oop is available
1285       // Resolve the call without receiver null checking.
1286       assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
1287       if (bc == Bytecodes::_invokeinterface) {
1288         bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
1289       }
1290       check_null_and_abstract = false;
1291     } else {
1292       // Retrieve from a compiled argument list
1293       receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1294       assert(oopDesc::is_oop_or_null(receiver()), "");
1295       if (receiver.is_null()) {
1296         THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1297       }
1298     }
1299   }
1300 
1301   // Resolve method
1302   if (attached_method.not_null()) {
1303     // Parameterized by attached method.
1304     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
1305   } else {
1306     // Parameterized by bytecode.
1307     constantPoolHandle constants(current, caller->constants());
1308     LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1309   }
1310 
1311 #ifdef ASSERT
1312   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1313   if (has_receiver && check_null_and_abstract) {
1314     assert(receiver.not_null(), "should have thrown exception");
1315     Klass* receiver_klass = receiver->klass();
1316     Klass* rk = NULL;
1317     if (attached_method.not_null()) {
1318       // In case there's resolved method attached, use its holder during the check.
1319       rk = attached_method->method_holder();
1320     } else {
1321       // Klass is already loaded.
1322       constantPoolHandle constants(current, caller->constants());
1323       rk = constants->klass_ref_at(bytecode_index, CHECK_NH);
1324     }
1325     Klass* static_receiver_klass = rk;
1326     assert(receiver_klass->is_subtype_of(static_receiver_klass),
1327            "actual receiver must be subclass of static receiver klass");
1328     if (receiver_klass->is_instance_klass()) {
1329       if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1330         tty->print_cr("ERROR: Klass not yet initialized!!");
1331         receiver_klass->print();
1332       }
1333       assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");

1354                         RegisterMap::UpdateMap::skip,
1355                         RegisterMap::ProcessFrames::include,
1356                         RegisterMap::WalkContinuation::skip);
1357     frame fr = current->last_frame();
1358     assert(fr.is_runtime_frame(), "must be a runtimeStub");
1359     fr = fr.sender(&reg_map);
1360     assert(fr.is_entry_frame(), "must be");
1361     // fr is now pointing to the entry frame.
1362     callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1363   } else {
1364     Bytecodes::Code bc;
1365     CallInfo callinfo;
1366     find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1367     callee_method = methodHandle(current, callinfo.selected_method());
1368   }
1369   assert(callee_method()->is_method(), "must be");
1370   return callee_method;
1371 }
1372 
1373 // Resolves a call.
1374 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool* caller_is_c1, TRAPS) {
1375   methodHandle callee_method;
1376   callee_method = resolve_sub_helper(is_virtual, is_optimized, caller_is_c1, THREAD);
1377   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
1378     int retry_count = 0;
1379     while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
1380            callee_method->method_holder() != vmClasses::Object_klass()) {
1381       // If has a pending exception then there is no need to re-try to
1382       // resolve this method.
1383       // If the method has been redefined, we need to try again.
1384       // Hack: we have no way to update the vtables of arrays, so don't
1385       // require that java.lang.Object has been updated.
1386 
1387       // It is very unlikely that a method is redefined more than 100 times
1388       // in the middle of resolve. If this loops here more than 100 times,
1389       // there could be a bug.
1390       guarantee((retry_count++ < 100),
1391                 "Could not resolve to latest version of redefined method");
1392       // method is redefined in the middle of resolve so re-try.
1393       callee_method = resolve_sub_helper(is_virtual, is_optimized, caller_is_c1, THREAD);
1394     }
1395   }
1396   return callee_method;
1397 }
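// Editor's note -- illustrative sketch, not part of this file. The
// bounded-retry idiom above: re-run a resolution step while an invalidating
// event (here, method redefinition) keeps occurring, and turn a livelock
// into a hard stop after a sane bound. Hypothetical resolve()/is_stale():
#include <cstdlib>

template <typename Resolve, typename IsStale>
auto resolve_with_retry(Resolve resolve, IsStale is_stale) {
  auto result = resolve();
  int retry_count = 0;
  while (is_stale(result)) {
    // cf. guarantee() above: >100 redefinitions mid-resolve means a bug
    if (++retry_count > 100) std::abort();
    result = resolve();   // redefined mid-resolve: try again
  }
  return result;
}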
1398 
1399 // This fails if resolution required refilling of IC stubs
1400 bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
1401                                                 CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
1402                                                 Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
1403   StaticCallInfo static_call_info;
1404   CompiledICInfo virtual_call_info;
1405 
1406   // Make sure the callee nmethod does not get deoptimized and removed before
1407   // we are done patching the code.
1408   CompiledMethod* callee = callee_method->code();
1409 
1410   if (callee != NULL) {
1411     assert(callee->is_compiled(), "must be nmethod for patching");
1412   }
1413 
1414   if (callee != NULL && !callee->is_in_use()) {
1415     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1416     callee = NULL;
1417   }
1418 #ifdef ASSERT
1419   address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
1420 #endif
1421 
1422   bool is_nmethod = caller_nm->is_nmethod();
1423   bool caller_is_c1 = caller_nm->is_compiled_by_c1();
1424 
1425   if (is_virtual) {
1426     Klass* receiver_klass = NULL;
1427     if (!caller_is_c1 && callee_method->is_scalarized_arg(0)) {
1428       // If the receiver is an inline type that is passed as fields, no oop is available
1429       receiver_klass = callee_method->method_holder();
1430     } else {
1431       assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1432       receiver_klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
1433     }
1434     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1435     CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
1436                      is_optimized, static_bound, is_nmethod, caller_is_c1, virtual_call_info,
1437                      CHECK_false);
1438   } else {
1439     // static call
1440     CompiledStaticCall::compute_entry(callee_method, caller_nm, static_call_info);
1441   }
1442 
1443   // grab lock, check for deoptimization and potentially patch caller
1444   {
1445     CompiledICLocker ml(caller_nm);
1446 
1447     // Lock blocks for safepoint during which both nmethods can change state.
1448 
1449     // Now that we are ready to patch: if the Method* was redefined then
1450     // don't update the call site and let the caller retry.
1451     // Don't update the call site if the callee nmethod was unloaded or deoptimized.
1452     // Don't update the call site if the callee nmethod was replaced by another
1453     // nmethod, which may happen when multiple simultaneously alive nmethods
1454     // (tiered compilation) are supported.
1455     if (!callee_method->is_old() &&
1456         (callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
1457       NoSafepointVerifier nsv;
1458 #ifdef ASSERT
1459       // We must not try to patch to jump to an already unloaded method.
1460       if (dest_entry_point != 0) {

1473       } else {
1474         if (VM_Version::supports_fast_class_init_checks() &&
1475             invoke_code == Bytecodes::_invokestatic &&
1476             callee_method->needs_clinit_barrier() &&
1477             callee != NULL && callee->is_compiled_by_jvmci()) {
1478           return true; // skip patching for JVMCI
1479         }
1480         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
1481         if (is_nmethod && caller_nm->method()->is_continuation_enter_intrinsic()) {
1482           ssc->compute_entry_for_continuation_entry(callee_method, static_call_info);
1483         }
1484         if (ssc->is_clean()) ssc->set(static_call_info);
1485       }
1486     }
1487   } // unlock CompiledICLocker
1488   return true;
1489 }
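// Editor's note -- illustrative sketch, not part of this file. The
// guarded-patch idiom above: take the patching lock, re-validate that the
// callee is still usable, and only then update the call site. Hypothetical
// types, a standard library mutex standing in for CompiledICLocker:
#include <mutex>

struct CalleeSketch   { bool in_use; bool redefined; };
struct CallSiteSketch { const CalleeSketch* target; };

static std::mutex patch_lock;

bool try_patch(CallSiteSketch& site, const CalleeSketch& callee) {
  std::lock_guard<std::mutex> guard(patch_lock);
  if (callee.redefined || !callee.in_use) {
    return false;            // let the caller re-resolve and retry
  }
  site.target = &callee;     // safe: state cannot change while we hold the lock
  return true;
}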
1490 
1491 // Resolves a call.  The compilers generate code for calls that go here
1492 // and are patched with the real destination of the call.
1493 methodHandle SharedRuntime::resolve_sub_helper(bool is_virtual, bool is_optimized, bool* caller_is_c1, TRAPS) {
1494   JavaThread* current = THREAD;
1495   ResourceMark rm(current);
1496   RegisterMap cbl_map(current,
1497                       RegisterMap::UpdateMap::skip,
1498                       RegisterMap::ProcessFrames::include,
1499                       RegisterMap::WalkContinuation::skip);
1500   frame caller_frame = current->last_frame().sender(&cbl_map);
1501 
1502   CodeBlob* caller_cb = caller_frame.cb();
1503   guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
1504   CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
1505   *caller_is_c1 = caller_nm->is_compiled_by_c1();
1506 
1507   // determine call info & receiver
1508   // note: a) receiver is NULL for static calls
1509   //       b) an exception is thrown if receiver is NULL for non-static calls
1510   CallInfo call_info;
1511   Bytecodes::Code invoke_code = Bytecodes::_illegal;
1512   Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1513   methodHandle callee_method(current, call_info.selected_method());
1514 
1515   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1516          (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1517          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1518          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1519          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1520 
1521   assert(!caller_nm->is_unloading(), "It should not be unloading");
1522 
1523 #ifndef PRODUCT
1524   // tracing/debugging/statistics
1525   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :

1584     }
1585   }
1586 
1587 }
1588 
1589 
1590 // Inline caches exist only in compiled code
1591 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1592 #ifdef ASSERT
1593   RegisterMap reg_map(current,
1594                       RegisterMap::UpdateMap::skip,
1595                       RegisterMap::ProcessFrames::include,
1596                       RegisterMap::WalkContinuation::skip);
1597   frame stub_frame = current->last_frame();
1598   assert(stub_frame.is_runtime_frame(), "sanity check");
1599   frame caller_frame = stub_frame.sender(&reg_map);
1600   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1601 #endif /* ASSERT */
1602 
1603   methodHandle callee_method;
1604   bool is_optimized = false;
1605   bool caller_is_c1 = false;
1606   JRT_BLOCK
1607     callee_method = SharedRuntime::handle_ic_miss_helper(is_optimized, caller_is_c1, CHECK_NULL);
1608     // Return Method* through TLS
1609     current->set_vm_result_2(callee_method());
1610   JRT_BLOCK_END
1611   // return compiled code entry point after potential safepoints
1612   return entry_for_handle_wrong_method(callee_method, false, is_optimized, caller_is_c1);
1613 JRT_END
1614 
1615 
1616 // Handle call site that has been made non-entrant
1617 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1618   // 6243940 We might end up in here if the callee is deoptimized
1619   // as we race to call it.  We don't want to take a safepoint if
1620   // the caller was interpreted because the caller frame will look
1621   // interpreted to the stack walkers and arguments are now
1622   // "compiled" so it is much better to make this transition
1623   // invisible to the stack walking code. The i2c path will
1624   // place the callee method in the callee_target. It is stashed
1625   // there because if we tried to find the callee by normal means a
1626   // safepoint would be possible and we would have trouble GC'ing the compiled args.
1627   RegisterMap reg_map(current,
1628                       RegisterMap::UpdateMap::skip,
1629                       RegisterMap::ProcessFrames::include,
1630                       RegisterMap::WalkContinuation::skip);
1631   frame stub_frame = current->last_frame();
1632   assert(stub_frame.is_runtime_frame(), "sanity check");
1633   frame caller_frame = stub_frame.sender(&reg_map);
1634 
1635   if (caller_frame.is_interpreted_frame() ||
1636       caller_frame.is_entry_frame() ||
1637       caller_frame.is_upcall_stub_frame()) {
1638     Method* callee = current->callee_target();
1639     guarantee(callee != NULL && callee->is_method(), "bad handshake");
1640     current->set_vm_result_2(callee);
1641     current->set_callee_target(NULL);
1642     if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1643       // Bypass class initialization checks in c2i when caller is in native.
1644       // JNI calls to static methods don't have class initialization checks.
1645       // Fast class initialization checks are present in c2i adapters and call into
1646       // SharedRuntime::handle_wrong_method() on the slow path.
1647       //
1648       // JVM upcalls may land here as well, but there's a proper check present in
1649       // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1650       // so bypassing it in c2i adapter is benign.
1651       return callee->get_c2i_no_clinit_check_entry();
1652     } else {
1653       if (caller_frame.is_interpreted_frame()) {
1654         return callee->get_c2i_inline_entry();
1655       } else {
1656         return callee->get_c2i_entry();
1657       }
1658     }
1659   }
1660 
1661   // Must be compiled to compiled path which is safe to stackwalk
1662   methodHandle callee_method;
1663   bool is_static_call = false;
1664   bool is_optimized = false;
1665   bool caller_is_c1 = false;
1666   JRT_BLOCK
1667     // Force resolving of caller (if we called from compiled frame)
1668     callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_is_c1, CHECK_NULL);
1669     current->set_vm_result_2(callee_method());
1670   JRT_BLOCK_END
1671   // return compiled code entry point after potential safepoints
1672   return entry_for_handle_wrong_method(callee_method, is_static_call, is_optimized, caller_is_c1);
1673 JRT_END
1674 
1675 // Handle abstract method call
1676 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1677   // Verbose error message for AbstractMethodError.
1678   // Get the called method from the invoke bytecode.
1679   vframeStream vfst(current, true);
1680   assert(!vfst.at_end(), "Java frame must exist");
1681   methodHandle caller(current, vfst.method());
1682   Bytecode_invoke invoke(caller, vfst.bci());
1683   DEBUG_ONLY( invoke.verify(); )
1684 
1685   // Find the compiled caller frame.
1686   RegisterMap reg_map(current,
1687                       RegisterMap::UpdateMap::include,
1688                       RegisterMap::ProcessFrames::include,
1689                       RegisterMap::WalkContinuation::skip);
1690   frame stubFrame = current->last_frame();
1691   assert(stubFrame.is_runtime_frame(), "must be");
1692   frame callerFrame = stubFrame.sender(&reg_map);
1693   assert(callerFrame.is_compiled_frame(), "must be");
1694 
1695   // Install exception and return forward entry.
1696   address res = StubRoutines::throw_AbstractMethodError_entry();
1697   JRT_BLOCK
1698     methodHandle callee(current, invoke.static_target(current));
1699     if (!callee.is_null()) {
1700       oop recv = callerFrame.retrieve_receiver(&reg_map);
1701       Klass *recv_klass = (recv != NULL) ? recv->klass() : NULL;
1702       res = StubRoutines::forward_exception_entry();
1703       LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1704     }
1705   JRT_BLOCK_END
1706   return res;
1707 JRT_END
1708 
1709 
1710 // resolve a static call and patch code
1711 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1712   methodHandle callee_method;
1713   bool caller_is_c1;
1714   bool enter_special = false;
1715   JRT_BLOCK
1716     callee_method = SharedRuntime::resolve_helper(false, false, &caller_is_c1, CHECK_NULL);
1717     current->set_vm_result_2(callee_method());
1718 
1719     if (current->is_interp_only_mode()) {
1720       RegisterMap reg_map(current,
1721                           RegisterMap::UpdateMap::skip,
1722                           RegisterMap::ProcessFrames::include,
1723                           RegisterMap::WalkContinuation::skip);
1724       frame stub_frame = current->last_frame();
1725       assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1726       frame caller = stub_frame.sender(&reg_map);
1727       enter_special = caller.cb() != NULL && caller.cb()->is_compiled()
1728         && caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic();
1729     }
1730   JRT_BLOCK_END
1731 
1732   if (current->is_interp_only_mode() && enter_special) {
1733     // enterSpecial is compiled and calls this method to resolve the call to Continuation::enter
1734     // but in interp_only_mode we need to go to the interpreted entry
1735     // The c2i won't patch in this mode -- see fixup_callers_callsite
1736     //
1737     // This should probably be done in all cases, not just enterSpecial (see JDK-8218403),
1738     // but that's part of a larger fix, and the situation is worse for enterSpecial, as it has no
1739     // interpreted version.
1740     return callee_method->get_c2i_entry();
1741   }
1742 
1743   // return compiled code entry point after potential safepoints
1744   address entry = caller_is_c1 ?
1745     callee_method->verified_inline_code_entry() : callee_method->verified_code_entry();
1746   assert(entry != NULL, "Jump to zero!");
1747   return entry;
1748 JRT_END
1749 
1750 
1751 // resolve virtual call and update inline cache to monomorphic
1752 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1753   methodHandle callee_method;
1754   bool caller_is_c1;
1755   JRT_BLOCK
1756     callee_method = SharedRuntime::resolve_helper(true, false, &caller_is_c1, CHECK_NULL);
1757     current->set_vm_result_2(callee_method());
1758   JRT_BLOCK_END
1759   // return compiled code entry point after potential safepoints
1760   address entry = caller_is_c1 ?
1761     callee_method->verified_inline_code_entry() : callee_method->verified_inline_ro_code_entry();
1762   assert(entry != NULL, "Jump to zero!");
1763   return entry;
1764 JRT_END
1765 
1766 
1767 // Resolve a virtual call that can be statically bound (e.g., always
1768 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1769 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1770   methodHandle callee_method;
1771   bool caller_is_c1;
1772   JRT_BLOCK
1773     callee_method = SharedRuntime::resolve_helper(true, true, &caller_is_c1, CHECK_NULL);
1774     current->set_vm_result_2(callee_method());
1775   JRT_BLOCK_END
1776   // return compiled code entry point after potential safepoints
1777   address entry = caller_is_c1 ?
1778     callee_method->verified_inline_code_entry() : callee_method->verified_code_entry();
1779   assert(entry != NULL, "Jump to zero!");
1780   return entry;
1781 JRT_END
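// Editor's note -- illustrative sketch, not part of this file. All three
// resolve stubs above pick the return entry by the caller's compiler: C1
// callers get the inline (non-scalarized) entry, C2 callers the standard
// verified entry. Hypothetical placeholder type:
struct MethodEntriesSketch {
  void* verified_entry;
  void* verified_inline_entry;
};

static void* entry_for_caller(const MethodEntriesSketch& m, bool caller_is_c1) {
  void* entry = caller_is_c1 ? m.verified_inline_entry : m.verified_entry;
  // assert(entry != nullptr, "Jump to zero!");  // mirrors the asserts above
  return entry;
}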
1782 
1783 // The handle_ic_miss_helper_internal function returns false if it failed, due
1784 // either to running out of vtable stubs or to running out of IC stubs while
1785 // transitioning the IC to a transitional state. The needs_ic_stub_refill value is set if
1786 // the failure was due to running out of IC stubs, in which case handle_ic_miss_helper
1787 // refills the IC stubs and tries again.
1788 bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
1789                                                    const frame& caller_frame, methodHandle callee_method,
1790                                                    Bytecodes::Code bc, CallInfo& call_info,
1791                                                    bool& needs_ic_stub_refill, bool& is_optimized, bool caller_is_c1, TRAPS) {
1792   CompiledICLocker ml(caller_nm);
1793   CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1794   bool should_be_mono = false;
1795   if (inline_cache->is_optimized()) {
1796     if (TraceCallFixup) {
1797       ResourceMark rm(THREAD);
1798       tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1799       callee_method->print_short_name(tty);
1800       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1801     }
1802     is_optimized = true;
1803     should_be_mono = true;
1804   } else if (inline_cache->is_icholder_call()) {
1805     CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1806     if (ic_oop != NULL) {
1807       if (!ic_oop->is_loader_alive()) {
1808         // Deferred IC cleaning due to concurrent class unloading
1809         if (!inline_cache->set_to_clean()) {
1810           needs_ic_stub_refill = true;
1811           return false;
1812         }
1813       } else if (receiver()->klass() == ic_oop->holder_klass()) {
1814         // This isn't a real miss. We must have seen that compiled code
1815         // is now available and we want the call site converted to a
1816         // monomorphic compiled call site.
1817         // We can't assert for callee_method->code() != NULL because it
1818         // could have been deoptimized in the meantime
1819         if (TraceCallFixup) {
1820           ResourceMark rm(THREAD);
1821           tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1822           callee_method->print_short_name(tty);
1823           tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1824         }
1825         should_be_mono = true;
1826       }
1827     }
1828   }
1829 
1830   if (should_be_mono) {
1831     // We have a path that was monomorphic but was going interpreted
1832     // and now we have (or had) a compiled entry. We correct the IC
1833     // by using a new icBuffer.
1834     CompiledICInfo info;
1835     Klass* receiver_klass = receiver()->klass();
1836     inline_cache->compute_monomorphic_entry(callee_method,
1837                                             receiver_klass,
1838                                             inline_cache->is_optimized(),
1839                                             false, caller_nm->is_nmethod(),
1840                                             caller_nm->is_compiled_by_c1(),
1841                                             info, CHECK_false);
1842     if (!inline_cache->set_to_monomorphic(info)) {
1843       needs_ic_stub_refill = true;
1844       return false;
1845     }
1846   } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1847     // Potential change to megamorphic
1848 
1849     bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, caller_is_c1, CHECK_false);
1850     if (needs_ic_stub_refill) {
1851       return false;
1852     }
1853     if (!successful) {
1854       if (!inline_cache->set_to_clean()) {
1855         needs_ic_stub_refill = true;
1856         return false;
1857       }
1858     }
1859   } else {
1860     // Either clean or megamorphic
1861   }
1862   return true;
1863 }
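// Editor's note -- illustrative sketch, not part of this file. The
// inline-cache transitions driven above as a plain state machine. Real ICs
// also patch code and may need stub refills; this models only the states:
enum class ICStateSketch { Clean, Monomorphic, Megamorphic };

struct InlineCacheSketch {
  ICStateSketch state = ICStateSketch::Clean;
  const void* cached_klass = nullptr;    // stands in for the receiver Klass*

  void on_miss(const void* receiver_klass) {
    switch (state) {
      case ICStateSketch::Clean:
        state = ICStateSketch::Monomorphic;   // first receiver: go monomorphic
        cached_klass = receiver_klass;
        break;
      case ICStateSketch::Monomorphic:
        if (receiver_klass != cached_klass) {
          state = ICStateSketch::Megamorphic; // second distinct receiver
          cached_klass = nullptr;
        }
        break;
      case ICStateSketch::Megamorphic:
        break;                                // terminal: vtable/itable dispatch
    }
  }
};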
1864 
1865 methodHandle SharedRuntime::handle_ic_miss_helper(bool& is_optimized, bool& caller_is_c1, TRAPS) {
1866   JavaThread* current = THREAD;
1867   ResourceMark rm(current);
1868   CallInfo call_info;
1869   Bytecodes::Code bc;
1870 
1871   // receiver is NULL for static calls. An exception is thrown for NULL
1872   // receivers for non-static calls
1873   Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1874   // Compiler1 can produce virtual call sites that can actually be statically bound
1875   // If we fell through to the code below we would think that the site was going megamorphic
1876   // when in fact the site can never miss. Worse, because we'd think it was megamorphic
1877   // we'd try to do a vtable dispatch; however, methods that can be statically bound
1878   // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
1879   // reresolution of the call site (as if we did a handle_wrong_method and not a
1880   // plain ic_miss) and the site will be converted to an optimized virtual call site,
1881   // never to miss again. I don't believe C2 will produce code like this, but if it
1882   // did this would still be the correct thing to do for it too, hence no ifdef.
1883   //
1884   if (call_info.resolved_method()->can_be_statically_bound()) {
1885     bool is_static_call = false;
1886     methodHandle callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_is_c1, CHECK_(methodHandle()));
1887     assert(!is_static_call, "IC miss at static call?");
1888     if (TraceCallFixup) {
1889       RegisterMap reg_map(current,
1890                           RegisterMap::UpdateMap::skip,
1891                           RegisterMap::ProcessFrames::include,
1892                           RegisterMap::WalkContinuation::skip);
1893       frame caller_frame = current->last_frame().sender(&reg_map);
1894       ResourceMark rm(current);
1895       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1896       callee_method->print_short_name(tty);
1897       tty->print_cr(" from pc: " INTPTR_FORMAT, p2i(caller_frame.pc()));
1898       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1899     }
1900     return callee_method;
1901   }
1902 
1903   methodHandle callee_method(current, call_info.selected_method());
1904 
1905 #ifndef PRODUCT
1906   Atomic::inc(&_ic_miss_ctr);
1907 

1926 #endif
1927 
1928   // install an event collector so that when a vtable stub is created the
1929   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1930   // event can't be posted when the stub is created as locks are held
1931   // - instead the event will be deferred until the event collector goes
1932   // out of scope.
1933   JvmtiDynamicCodeEventCollector event_collector;
1934 
1935   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1936   // Transitioning IC caches may require transition stubs. If we run out
1937   // of transition stubs, we have to drop locks and perform a safepoint
1938   // that refills them.
1939   RegisterMap reg_map(current,
1940                       RegisterMap::UpdateMap::skip,
1941                       RegisterMap::ProcessFrames::include,
1942                       RegisterMap::WalkContinuation::skip);
1943   frame caller_frame = current->last_frame().sender(&reg_map);
1944   CodeBlob* cb = caller_frame.cb();
1945   CompiledMethod* caller_nm = cb->as_compiled_method();
1946   caller_is_c1 = caller_nm->is_compiled_by_c1();
1947 
1948   for (;;) {
1949     ICRefillVerifier ic_refill_verifier;
1950     bool needs_ic_stub_refill = false;
1951     bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1952                                                      bc, call_info, needs_ic_stub_refill, is_optimized, caller_is_c1, CHECK_(methodHandle()));
1953     if (successful || !needs_ic_stub_refill) {
1954       return callee_method;
1955     } else {
1956       InlineCacheBuffer::refill_ic_stubs();
1957     }
1958   }
1959 }
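// Editor's note -- illustrative sketch, not part of this file. The
// refill-and-retry loop above, generalized: an operation that can fail only
// because a shared stub pool ran dry is retried after refilling the pool.
// Hypothetical try_op/refill callables:
template <typename TryOp, typename Refill>
void run_with_refill(TryOp try_op, Refill refill) {
  for (;;) {
    bool needs_refill = false;
    if (try_op(needs_refill) || !needs_refill) {
      return;   // succeeded, or failed for a reason a refill can't fix
    }
    refill();   // cf. InlineCacheBuffer::refill_ic_stubs() above
  }
}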
1960 
1961 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1962   CompiledICLocker ml(caller_nm);
1963   if (is_static_call) {
1964     CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1965     if (!ssc->is_clean()) {
1966       return ssc->set_to_clean();
1967     }
1968   } else {
1969     // compiled, dispatched call (which used to call an interpreted method)
1970     CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1971     if (!inline_cache->is_clean()) {
1972       return inline_cache->set_to_clean();
1973     }
1974   }
1975   return true;
1976 }
1977 
1978 //
1979 // Resets a call-site in compiled code so it will get resolved again.
1980 // This routine handles virtual call sites, optimized virtual call
1981 // sites, and static call sites. It is typically used to change a call site's
1982 // destination from compiled to interpreted.
1983 //
1984 methodHandle SharedRuntime::reresolve_call_site(bool& is_static_call, bool& is_optimized, bool& caller_is_c1, TRAPS) {
1985   JavaThread* current = THREAD;
1986   ResourceMark rm(current);
1987   RegisterMap reg_map(current,
1988                       RegisterMap::UpdateMap::skip,
1989                       RegisterMap::ProcessFrames::include,
1990                       RegisterMap::WalkContinuation::skip);
1991   frame stub_frame = current->last_frame();
1992   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1993   frame caller = stub_frame.sender(&reg_map);
1994   if (caller.is_compiled_frame()) {
1995     caller_is_c1 = caller.cb()->is_compiled_by_c1();
1996   }
1997 
1998   // Do nothing if the frame isn't a live compiled frame.
1999   // nmethod could be deoptimized by the time we get here
2000   // so no update to the caller is needed.
2001 
2002   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
2003 
2004     address pc = caller.pc();
2005 
2006     // Check for static or virtual call
2007     CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
2008 
2009     // Default call_addr is the location of the "basic" call.
2010     // Determine the address of the call we are reresolving. With
2011     // Inline Caches we will always find a recognizable call.
2012     // With Inline Caches disabled we may or may not find a
2013     // recognizable call. We will always find a call for static
2014     // calls and for optimized virtual calls. For vanilla virtual
2015     // calls it depends on the state of the UseInlineCaches switch.
2016     //
2017     // With Inline Caches disabled we can get here for a virtual call
2018     // for two reasons:
2019     //   1 - calling an abstract method. The vtable for abstract methods
2020     //       will run us thru handle_wrong_method and we will eventually
2021     //       end up in the interpreter to throw the AbstractMethodError.
2022     //   2 - a racing deoptimization. We could be doing a vanilla vtable
2023     //       call and between the time we fetch the entry address and
2024     //       the time we jump to it the target gets deoptimized. Similar to 1,
2025     //       we will wind up in the interpreter (thru a c2i with c2).
2026     //
2027     address call_addr = NULL;
2028     {
2029       // Get call instruction under lock because another thread may be
2030       // busy patching it.
2031       CompiledICLocker ml(caller_nm);
2032       // Location of call instruction
2033       call_addr = caller_nm->call_instruction_address(pc);
2034     }
2035 
2036     // Check relocations for the matching call to 1) avoid false positives,
2037     // and 2) determine the type.
2038     if (call_addr != NULL) {
2039       // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
2040       // bytes back in the instruction stream so we must also check for reloc info.
2041       RelocIterator iter(caller_nm, call_addr, call_addr+1);
2042       bool ret = iter.next(); // Get item
2043       if (ret) {
2044         is_static_call = false;
2045         is_optimized = false;
2046         switch (iter.type()) {
2047           case relocInfo::static_call_type:
2048             is_static_call = true;
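            // fall through: static call sites share the cleaning path below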
2049 
2050           case relocInfo::virtual_call_type:
2051           case relocInfo::opt_virtual_call_type:
2052             is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
2053             // Cleaning the inline cache will force a new resolve. This is more robust
2054             // than directly setting it to the new destination, since resolving of calls
2055             // is always done through the same code path. (experience shows that it
2056             // leads to very hard to track down bugs, if an inline cache gets updated
2057             // to a wrong method). It should not be performance critical, since the
2058             // resolve is only done once.
2059             guarantee(iter.addr() == call_addr, "must find call");
2060             for (;;) {
2061               ICRefillVerifier ic_refill_verifier;
2062               if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
2063                 InlineCacheBuffer::refill_ic_stubs();
2064               } else {
2065                 break;
2066               }
2067             }
2068             break;
2069           default:
2070             break;
2071         }
2072       }
2073     }
2074   }
2075 
2076   methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
2077 
2078 #ifndef PRODUCT
2079   Atomic::inc(&_wrong_method_ctr);
2080 
2081   if (TraceCallFixup) {
2082     ResourceMark rm(current);
2083     tty->print("handle_wrong_method reresolving call to");
2084     callee_method->print_short_name(tty);
2085     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
2086   }
2087 #endif
2088 
2089   return callee_method;
2090 }
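// Editorial note on the retry loop in reresolve_call_site() above: clearing
// the inline cache can fail transiently when the InlineCacheBuffer has no
// free stubs, so the for(;;) loop calls InlineCacheBuffer::refill_ic_stubs()
// and retries the clear under a fresh ICRefillVerifier scope until it
// succeeds.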
2091 
2092 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
2093   // The faulting unsafe accesses should be changed to throw the error
2094   // synchronously instead. Meanwhile the faulting instruction will be
2095   // skipped over (effectively turning it into a no-op) and an
2096   // asynchronous exception will be raised which the thread will
2097   // handle at a later point. If the instruction is a load it will

2230       // for the rest of its life! Just another racing bug in the life of
2231       // fixup_callers_callsite ...
2232       //
2233       RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
2234       iter.next();
2235       assert(iter.has_current(), "must have a reloc at java call site");
2236       relocInfo::relocType typ = iter.reloc()->type();
2237       if (typ != relocInfo::static_call_type &&
2238            typ != relocInfo::opt_virtual_call_type &&
2239            typ != relocInfo::static_stub_type) {
2240         return;
2241       }
2242       if (nm->method()->is_continuation_enter_intrinsic()) {
2243         assert(ContinuationEntry::is_interpreted_call(call->instruction_address()) == JavaThread::current()->is_interp_only_mode(),
2244           "mode: %d", JavaThread::current()->is_interp_only_mode());
2245         if (ContinuationEntry::is_interpreted_call(call->instruction_address())) {
2246           return;
2247         }
2248       }
2249       address destination = call->destination();
2250       address entry_point = cb->is_compiled_by_c1() ? callee->verified_inline_entry_point() : callee->verified_entry_point();
2251       if (should_fixup_call_destination(destination, entry_point, caller_pc, moop, cb)) {
2252         call->set_destination_mt_safe(entry_point);
2253       }
2254     }
2255   }
2256 JRT_END
2257 
2258 
2259 // same as JVM_Arraycopy, but called directly from compiled code
2260 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
2261                                                 oopDesc* dest, jint dest_pos,
2262                                                 jint length,
2263                                                 JavaThread* current)) {
2264 #ifndef PRODUCT
2265   _slow_array_copy_ctr++;
2266 #endif
2267   // Check if we have null pointers
2268   if (src == NULL || dest == NULL) {
2269     THROW(vmSymbols::java_lang_NullPointerException());
2270   }

2560  private:
2561   enum {
2562     _basic_type_bits = 4,
2563     _basic_type_mask = right_n_bits(_basic_type_bits),
2564     _basic_types_per_int = BitsPerInt / _basic_type_bits,
2565     _compact_int_count = 3
2566   };
2567   // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
2568   // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2569 
2570   union {
2571     int  _compact[_compact_int_count];
2572     int* _fingerprint;
2573   } _value;
2574   int _length; // A negative length indicates the fingerprint is in the compact form;
2575                // otherwise _value._fingerprint is the array.
2576 
2577   // Remap BasicTypes that are handled equivalently by the adapters.
2578   // These are correct for the current system but someday it might be
2579   // necessary to make this mapping platform dependent.
2580   static BasicType adapter_encoding(BasicType in) {
2581     switch (in) {
2582       case T_BOOLEAN:
2583       case T_BYTE:
2584       case T_SHORT:
2585       case T_CHAR:
2586         // They are all promoted to T_INT in the calling convention
2587         return T_INT;
2588 
2589       case T_OBJECT:
2590       case T_ARRAY:
2591         // In other words, we assume that any register good enough for
2592         // an int or long is good enough for a managed pointer.
2593 #ifdef _LP64
2594         return T_LONG;
2595 #else
2596         return T_INT;
2597 #endif
2598 
2599       case T_INT:
2600       case T_LONG:
2601       case T_FLOAT:
2602       case T_DOUBLE:
2603       case T_VOID:
2604         return in;
2605 
2606       default:
2607         ShouldNotReachHere();
2608         return T_CONFLICT;
2609     }
2610   }
2611 
2612  public:
2613   AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2614     // The fingerprint is based on the BasicType signature encoded
2615     // into an array of ints with eight entries per int.
2616     int total_args_passed = (sig != NULL) ? sig->length() : 0;
2617     int* ptr;
2618     int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2619     if (len <= _compact_int_count) {
2620       assert(_compact_int_count == 3, "else change next line");
2621       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2622       // Storing the signature encoded as signed chars hits about 98%
2623       // of the time.
2624       _length = -len;
2625       ptr = _value._compact;
2626     } else {
2627       _length = len;
2628       _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2629       ptr = _value._fingerprint;
2630     }
2631 
2632     // Now pack the BasicTypes with 8 per int
2633     int sig_index = 0;
2634     BasicType prev_bt = T_ILLEGAL;
2635     int vt_count = 0;
2636     for (int index = 0; index < len; index++) {
2637       int value = 0;
2638       for (int byte = 0; byte < _basic_types_per_int; byte++) {
2639         BasicType bt = T_ILLEGAL;
2640         if (sig_index < total_args_passed) {
2641           bt = sig->at(sig_index++)._bt;
2642           if (bt == T_PRIMITIVE_OBJECT) {
2643             // Found start of inline type in signature
2644             assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2645             if (sig_index == 1 && has_ro_adapter) {
2646               // With a ro_adapter, replace receiver inline type delimiter by T_VOID to prevent matching
2647               // with other adapters that have the same inline type as first argument and no receiver.
2648               bt = T_VOID;
2649             }
2650             vt_count++;
2651           } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2652             // Found end of inline type in signature
2653             assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2654             vt_count--;
2655             assert(vt_count >= 0, "invalid vt_count");
2656           } else if (vt_count == 0) {
2657             // Widen fields that are not part of a scalarized inline type argument
2658             bt = adapter_encoding(bt);
2659           }
2660           prev_bt = bt;
2661         }
2662         int bt_val = (bt == T_ILLEGAL) ? 0 : bt;
2663         assert((bt_val & _basic_type_mask) == bt_val, "must fit in 4 bits");
2664         value = (value << _basic_type_bits) | bt_val;
2665       }
2666       ptr[index] = value;
2667     }
2668     assert(vt_count == 0, "invalid vt_count");
2669   }
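  // Editorial worked example (enum values from globalDefinitions.hpp, not
  // part of the original source): a hypothetical static method taking
  // (Object, int) widens via adapter_encoding() on LP64 to T_LONG (0xB)
  // followed by T_INT (0xA). Both nibbles fit into one int, packed from the
  // highest bits down, giving _value._compact[0] == 0xBA000000 and
  // _length == -1 (compact form). Two methods share an adapter exactly when
  // their widened signatures pack to the same ints.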
2670 
2671   ~AdapterFingerPrint() {
2672     if (_length > 0) {
2673       FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2674     }
2675   }
2676 
2677   int value(int index) {
2678     if (_length < 0) {
2679       return _value._compact[index];
2680     }
2681     return _value._fingerprint[index];
2682   }
2683   int length() {
2684     if (_length < 0) return -_length;
2685     return _length;
2686   }
2687 
2688   bool is_compact() {

2713   const char* as_basic_args_string() {
2714     stringStream st;
2715     bool long_prev = false;
2716     for (int i = 0; i < length(); i++) {
2717       unsigned val = (unsigned)value(i);
2718       // args are packed so that first/lower arguments are in the highest
2719       // bits of each int value, so iterate from highest to the lowest
2720       for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2721         unsigned v = (val >> j) & _basic_type_mask;
2722         if (v == 0) {
2723           assert(i == length() - 1, "Only expect zeroes in the last word");
2724           continue;
2725         }
2726         if (long_prev) {
2727           long_prev = false;
2728           if (v == T_VOID) {
2729             st.print("J"); continue; // consumed the long's trailing T_VOID
2730           }
2731           st.print("L"); // the preceding T_LONG encoded an object pointer
2732         }
2733         if (v == T_LONG) {
2734           long_prev = true;
2735         } else if (v != T_VOID) {
2736           st.print("%c", type2char((BasicType)v));
2737         }
2738       }
2739     }
2740     if (long_prev) {
2741       st.print("L");
2742     }
2743     return st.as_string();
2744   }
2745 #endif // !PRODUCT
2746 
2747   bool equals(AdapterFingerPrint* other) {
2748     if (other->_length != _length) {
2749       return false;
2750     }
2751     if (_length < 0) {
2752       assert(_compact_int_count == 3, "else change next line");
2753       return _value._compact[0] == other->_value._compact[0] &&
2754              _value._compact[1] == other->_value._compact[1] &&
2755              _value._compact[2] == other->_value._compact[2];
2756     } else {

2763     return true;
2764   }
2765 
2766   static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2767     NOT_PRODUCT(_equals++);
2768     return fp1->equals(fp2);
2769   }
2770 
2771   static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2772     return fp->compute_hash();
2773   }
2774 };
2775 
2776 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2777 ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2778                   AnyObj::C_HEAP, mtCode,
2779                   AdapterFingerPrint::compute_hash,
2780                   AdapterFingerPrint::equals> _adapter_handler_table;
2781 
2782 // Find an entry with the same fingerprint if it exists
2783 static AdapterHandlerEntry* lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2784   NOT_PRODUCT(_lookups++);
2785   assert_lock_strong(AdapterHandlerLibrary_lock);
2786   AdapterFingerPrint fp(sig, has_ro_adapter);
2787   AdapterHandlerEntry** entry = _adapter_handler_table.get(&fp);
2788   if (entry != nullptr) {
2789 #ifndef PRODUCT
2790     if (fp.is_compact()) _compact++;
2791     _hits++;
2792 #endif
2793     return *entry;
2794   }
2795   return nullptr;
2796 }
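// Editorial example: since lookup() probes with the widened fingerprint,
// two hypothetical static methods f(Object, int) and g(String, boolean)
// both encode as (T_LONG, T_INT) on LP64 and would share a single
// AdapterHandlerEntry.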
2797 
2798 #ifndef PRODUCT
2799 static void print_table_statistics() {
2800   auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2801     return sizeof(*key) + sizeof(*a);
2802   };
2803   TableStatistics ts = _adapter_handler_table.statistics_calculate(size);
2804   ts.print(tty, "AdapterHandlerTable");
2805   tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2806                 _adapter_handler_table.table_size(), _adapter_handler_table.number_of_entries());
2807   tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2808                 _lookups, _equals, _hits, _compact);
2809 }
2810 #endif
2811 
2812 // ---------------------------------------------------------------------------
2813 // Implementation of AdapterHandlerLibrary
2814 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
2815 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = NULL;
2816 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = NULL;
2817 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = NULL;
2818 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = NULL;
2819 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = NULL;
2820 const int AdapterHandlerLibrary_size = 48*K;
2821 BufferBlob* AdapterHandlerLibrary::_buffer = NULL;
2822 
2823 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2824   return _buffer;
2825 }
2826 
2827 static void post_adapter_creation(const AdapterBlob* new_adapter,
2828                                   const AdapterHandlerEntry* entry) {
2829   if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2830     char blob_id[256];
2831     jio_snprintf(blob_id,
2832                  sizeof(blob_id),
2833                  "%s(%s)",
2834                  new_adapter->name(),
2835                  entry->fingerprint()->as_string());
2836     if (Forte::is_enabled()) {
2837       Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2838     }
2839 
2840     if (JvmtiExport::should_post_dynamic_code_generated()) {
2841       JvmtiExport::post_dynamic_code_generated(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2842     }
2843   }
2844 }
2845 
2846 void AdapterHandlerLibrary::initialize() {
2847   ResourceMark rm;
2848   AdapterBlob* no_arg_blob = NULL;
2849   AdapterBlob* int_arg_blob = NULL;
2850   AdapterBlob* obj_arg_blob = NULL;
2851   AdapterBlob* obj_int_arg_blob = NULL;
2852   AdapterBlob* obj_obj_arg_blob = NULL;
2853   {
2854     MutexLocker mu(AdapterHandlerLibrary_lock);
2855 
2856     // Create a special handler for abstract methods.  Abstract methods
2857     // are never compiled so an i2c entry is somewhat meaningless, but
2858     // throw AbstractMethodError just in case.
2859     // Pass wrong_method_abstract for the c2i transitions to return
2860     // AbstractMethodError for invalid invocations.
2861     address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2862     _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(NULL),
2863                                                                 StubRoutines::throw_AbstractMethodError_entry(),
2864                                                                 wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
2865                                                                 wrong_method_abstract, wrong_method_abstract);
2866     _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2867 
2868     CompiledEntrySignature no_args;
2869     no_args.compute_calling_conventions();
2870     _no_arg_handler = create_adapter(no_arg_blob, no_args, true);
2871 
2872     CompiledEntrySignature obj_args;
2873     SigEntry::add_entry(&obj_args.sig(), T_OBJECT, NULL);
2874     obj_args.compute_calling_conventions();
2875     _obj_arg_handler = create_adapter(obj_arg_blob, obj_args, true);
2876 
2877     CompiledEntrySignature int_args;
2878     SigEntry::add_entry(&int_args.sig(), T_INT, NULL);
2879     int_args.compute_calling_conventions();
2880     _int_arg_handler = create_adapter(int_arg_blob, int_args, true);
2881 
2882     CompiledEntrySignature obj_int_args;
2883     SigEntry::add_entry(&obj_int_args.sig(), T_OBJECT, NULL);
2884     SigEntry::add_entry(&obj_int_args.sig(), T_INT, NULL);
2885     obj_int_args.compute_calling_conventions();
2886     _obj_int_arg_handler = create_adapter(obj_int_arg_blob, obj_int_args, true);
2887 
2888     CompiledEntrySignature obj_obj_args;
2889     SigEntry::add_entry(&obj_obj_args.sig(), T_OBJECT, NULL);
2890     SigEntry::add_entry(&obj_obj_args.sig(), T_OBJECT, NULL);
2891     obj_obj_args.compute_calling_conventions();
2892     _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, obj_obj_args, true);
2893 
2894     assert(no_arg_blob != NULL &&
2895           obj_arg_blob != NULL &&
2896           int_arg_blob != NULL &&
2897           obj_int_arg_blob != NULL &&
2898           obj_obj_arg_blob != NULL, "Initial adapters must be properly created");
2899   }
2901 
2902   // Outside of the lock
2903   post_adapter_creation(no_arg_blob, _no_arg_handler);
2904   post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2905   post_adapter_creation(int_arg_blob, _int_arg_handler);
2906   post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2907   post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2908 }
2909 
2910 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2911                                                       address i2c_entry,
2912                                                       address c2i_entry,
2913                                                       address c2i_inline_entry,
2914                                                       address c2i_inline_ro_entry,
2915                                                       address c2i_unverified_entry,
2916                                                       address c2i_unverified_inline_entry,
2917                                                       address c2i_no_clinit_check_entry) {
2918   return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry,
2919                                  c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
2920 }
2921 
2922 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2923   if (method->is_abstract()) {
2924     return NULL;
2925   }
2926   int total_args_passed = method->size_of_parameters(); // All args on stack
2927   if (total_args_passed == 0) {
2928     return _no_arg_handler;
2929   } else if (total_args_passed == 1) {
2930     if (!method->is_static()) {
2931       if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2932         return NULL;
2933       }
2934       return _obj_arg_handler;
2935     }
2936     switch (method->signature()->char_at(1)) {
2937       case JVM_SIGNATURE_CLASS: {
2938         if (InlineTypePassFieldsAsArgs) {
2939           SignatureStream ss(method->signature());
2940           InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2941           if (vk != NULL) {
2942             return NULL;
2943           }
2944         }
2945         return _obj_arg_handler;
2946       }
2947       case JVM_SIGNATURE_ARRAY:
2948         return _obj_arg_handler;
2949       case JVM_SIGNATURE_INT:
2950       case JVM_SIGNATURE_BOOLEAN:
2951       case JVM_SIGNATURE_CHAR:
2952       case JVM_SIGNATURE_BYTE:
2953       case JVM_SIGNATURE_SHORT:
2954         return _int_arg_handler;
2955     }
2956   } else if (total_args_passed == 2 &&
2957              !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2958     switch (method->signature()->char_at(1)) {
2959       case JVM_SIGNATURE_CLASS: {
2960         if (InlineTypePassFieldsAsArgs) {
2961           SignatureStream ss(method->signature());
2962           InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2963           if (vk != NULL) {
2964             return NULL;
2965           }
2966         }
2967         return _obj_obj_arg_handler;
2968       }
2969       case JVM_SIGNATURE_ARRAY:
2970         return _obj_obj_arg_handler;
2971       case JVM_SIGNATURE_INT:
2972       case JVM_SIGNATURE_BOOLEAN:
2973       case JVM_SIGNATURE_CHAR:
2974       case JVM_SIGNATURE_BYTE:
2975       case JVM_SIGNATURE_SHORT:
2976         return _obj_int_arg_handler;
2977     }
2978   }
2979   return NULL;
2980 }
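// Editorial examples (not from the original source): a virtual method like
// String::isEmpty()Z passes only the receiver, so total_args_passed == 1 and
// the non-static path returns _obj_arg_handler; a static method like
// Math::abs(I)I has signature char_at(1) == 'I' and maps to _int_arg_handler.
// Anything this fast path cannot classify (e.g. a potential inline-type
// argument) returns NULL and falls back to the fingerprint lookup in
// get_adapter().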
2981 
2982 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2983   _method(method), _num_inline_args(0), _has_inline_recv(false),
2984   _regs(NULL), _regs_cc(NULL), _regs_cc_ro(NULL),
2985   _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2986   _c1_needs_stack_repair(false), _c2_needs_stack_repair(false) {
2987   _sig = new GrowableArray<SigEntry>((method != NULL) ? method->size_of_parameters() : 1);
2988   _sig_cc = new GrowableArray<SigEntry>((method != NULL) ? method->size_of_parameters() : 1);
2989   _sig_cc_ro = new GrowableArray<SigEntry>((method != NULL) ? method->size_of_parameters() : 1);
2990 }
2991 
2992 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2993 // or the same entry for VEP and VIEP(RO).
2994 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2995   if (!has_scalarized_args()) {
2996     // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2997     return CodeOffsets::Verified_Entry;
2998   }
2999   if (_method->is_static()) {
3000     // Static methods don't need VIEP(RO)
3001     return CodeOffsets::Verified_Entry;
3002   }
3003 
3004   if (has_inline_recv()) {
3005     if (num_inline_args() == 1) {
3006       // Share same entry for VIEP and VIEP(RO).
3007       // This is quite common: we have an instance method in an InlineKlass that has
3008       // no inline type args other than <this>.
3009       return CodeOffsets::Verified_Inline_Entry;
3010     } else {
3011       assert(num_inline_args() > 1, "must be");
3012       // No sharing:
3013       //   VIEP(RO) -- <this> is passed as object
3014       //   VEP      -- <this> is passed as fields
3015       return CodeOffsets::Verified_Inline_Entry_RO;
3016     }
3017   }
3018 
3019   // <this> is not an inline type (the static case already returned above)
3020   if (args_on_stack_cc() != args_on_stack_cc_ro()) {
3021     // No sharing:
3022     // Some arguments are passed on the stack, and we have inserted reserved entries
3023     // into the VEP, but we never insert reserved entries into the VIEP(RO).
3024     return CodeOffsets::Verified_Inline_Entry_RO;
3025   } else {
3026     // Share same entry for VEP and VIEP(RO).
3027     return CodeOffsets::Verified_Entry;
3028   }
3029 }
3030 
3031 void CompiledEntrySignature::compute_calling_conventions(bool init) {
3032   // Iterate over arguments and compute scalarized and non-scalarized signatures
3033   bool has_scalarized = false;
3034   if (_method != NULL) {
3035     InstanceKlass* holder = _method->method_holder();
3036     int arg_num = 0;
3037     if (!_method->is_static()) {
3038       if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() &&
3039           (init || _method->is_scalarized_arg(arg_num))) {
3040         _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
3041         has_scalarized = true;
3042         _has_inline_recv = true;
3043         _num_inline_args++;
3044       } else {
3045         SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
3046       }
3047       SigEntry::add_entry(_sig, T_OBJECT, holder->name());
3048       SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
3049       arg_num++;
3050     }
3051     for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
3052       BasicType bt = ss.type();
3053       if (bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) {
3054         InlineKlass* vk = ss.as_inline_klass(holder);
3055         // TODO 8284443 Mismatch handling, we need to check parent method args (look at klassVtable::needs_new_vtable_entry)
3056         if (vk != NULL && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
3057           _num_inline_args++;
3058           has_scalarized = true;
3059           int last = _sig_cc->length();
3060           int last_ro = _sig_cc_ro->length();
3061           _sig_cc->appendAll(vk->extended_sig());
3062           _sig_cc_ro->appendAll(vk->extended_sig());
3063           if (bt == T_OBJECT) {
3064             // Nullable inline type argument, insert InlineTypeNode::IsInit field right after T_PRIMITIVE_OBJECT
3065             _sig_cc->insert_before(last+1, SigEntry(T_BOOLEAN, -1, NULL));
3066             _sig_cc_ro->insert_before(last_ro+1, SigEntry(T_BOOLEAN, -1, NULL));
3067           }
3068         } else {
3069           SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
3070           SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
3071         }
3072         bt = T_OBJECT;
3073       } else {
3074         SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
3075         SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
3076       }
3077       SigEntry::add_entry(_sig, bt, ss.as_symbol());
3078       if (bt != T_VOID) {
3079         arg_num++;
3080       }
3081     }
3082   }
3083 
3084   // Compute the non-scalarized calling convention
3085   _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3086   _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3087 
3088   // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3089   if (has_scalarized && !_method->is_native()) {
3090     _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3091     _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3092 
3093     _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3094     _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3095 
3096     _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3097     _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3098 
3099     // Upper bound on stack arguments to avoid hitting the argument limit and
3100     // bailing out of compilation ("unsupported incoming calling sequence").
3101     // TODO we need a reasonable limit (flag?) here
3102     if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 60) {
3103       return; // Success
3104     }
3105   }
3106 
3107   // No scalarized args
3108   _sig_cc = _sig;
3109   _regs_cc = _regs;
3110   _args_on_stack_cc = _args_on_stack;
3111 
3112   _sig_cc_ro = _sig;
3113   _regs_cc_ro = _regs;
3114   _args_on_stack_cc_ro = _args_on_stack;
3115 }
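// Editorial sketch (assuming a hypothetical inline klass MyValue with two
// int fields for which can_be_passed_as_fields() holds): for an instance
// method void m(MyValue v) in a regular class, _sig stays
// [this, MyValue-as-T_OBJECT] while _sig_cc becomes
// [this, T_PRIMITIVE_OBJECT, T_INT, T_INT, T_VOID], i.e. MyValue's extended
// signature spliced in (plus a T_BOOLEAN IsInit slot right after the
// delimiter when the argument is nullable). The _cc variants then get their
// own VMRegPair mappings, and the stack-slot deltas computed above decide
// whether c1/c2 code needs stack repair.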
3116 
3117 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
3118   // Use customized signature handler.  Need to lock around updates to
3119   // the _adapter_handler_table (it is not safe for concurrent readers
3120   // and a single writer: this could be fixed if it becomes a
3121   // problem).
3122 
3123   // Fast-path for trivial adapters
3124   AdapterHandlerEntry* entry = get_simple_adapter(method);
3125   if (entry != NULL) {
3126     return entry;
3127   }
3128 
3129   ResourceMark rm;
3130   AdapterBlob* new_adapter = NULL;
3131 
3132   CompiledEntrySignature ces(method());
3133   ces.compute_calling_conventions();
3134   if (ces.has_scalarized_args()) {
3135     method->set_has_scalarized_args(true);
3136     method->set_c1_needs_stack_repair(ces.c1_needs_stack_repair());
3137     method->set_c2_needs_stack_repair(ces.c2_needs_stack_repair());
3138   } else if (method->is_abstract()) {
3139     return _abstract_method_handler;
3140   }
3141 
3142   {
3143     MutexLocker mu(AdapterHandlerLibrary_lock);
3144 
3145     if (ces.has_scalarized_args() && method->is_abstract()) {
3146       // Save a C heap allocated version of the signature for abstract methods with scalarized inline type arguments
3147       address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
3148       entry = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(NULL),
3149                                                StubRoutines::throw_AbstractMethodError_entry(),
3150                                                wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
3151                                                wrong_method_abstract, wrong_method_abstract);
3152       GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro().length(), mtInternal);
3153       heap_sig->appendAll(&ces.sig_cc_ro());
3154       entry->set_sig_cc(heap_sig);
3155       return entry;
3156     }
3157 
3158     // Lookup method signature's fingerprint
3159     entry = lookup(&ces.sig_cc(), ces.has_inline_recv());
3160 
3161     if (entry != NULL) {
3162 #ifdef ASSERT
3163       if (VerifyAdapterSharing) {
3164         AdapterBlob* comparison_blob = NULL;
3165         AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, ces, false);
3166         assert(comparison_blob == NULL, "no blob should be created when creating an adapter for comparison");
3167         assert(comparison_entry->compare_code(entry), "code must match");
3168         // Release the one just created and return the original
3169         delete comparison_entry;
3170       }
3171 #endif
3172       return entry;
3173     }
3174 
3175     entry = create_adapter(new_adapter, ces, /* allocate_code_blob */ true);
3176   }
3177 
3178   // Outside of the lock
3179   if (new_adapter != NULL) {
3180     post_adapter_creation(new_adapter, entry);
3181   }
3182   return entry;
3183 }
3184 
3185 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
3186                                                            CompiledEntrySignature& ces,
3187                                                            bool allocate_code_blob) {
3188 
3189   // StubRoutines::code2() is initialized after this function can be called. As a result,
3190   // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated
3191   // prior to StubRoutines::code2() being set. The checks in question are generated in the i2c
3192   // stub to ensure that it is only entered from an interpreter frame.
3193   bool contains_all_checks = StubRoutines::code2() != NULL;
3194 
3195   BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3196   CodeBuffer buffer(buf);
3197   short buffer_locs[20];
3198   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3199                                           sizeof(buffer_locs)/sizeof(relocInfo));
3200 
3201   // Make a C heap allocated version of the fingerprint to store in the adapter
3202   AdapterFingerPrint* fingerprint = new AdapterFingerPrint(&ces.sig_cc(), ces.has_inline_recv());
3203   MacroAssembler _masm(&buffer);
3204   AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
3205                                                 ces.args_on_stack(),
3206                                                 &ces.sig(),
3207                                                 ces.regs(),
3208                                                 &ces.sig_cc(),
3209                                                 ces.regs_cc(),
3210                                                 &ces.sig_cc_ro(),
3211                                                 ces.regs_cc_ro(),
3212                                                 fingerprint,
3213                                                 new_adapter,
3214                                                 allocate_code_blob);
3215 
3216   if (ces.has_scalarized_args()) {
3217     // Save a C heap allocated version of the scalarized signature and store it in the adapter
3218     GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc().length(), mtInternal);
3219     heap_sig->appendAll(&ces.sig_cc());
3220     entry->set_sig_cc(heap_sig);
3221   }
3222 
3223 #ifdef ASSERT
3224   if (VerifyAdapterSharing) {
3225     entry->save_code(buf->code_begin(), buffer.insts_size());
3226     if (!allocate_code_blob) {
3227       return entry;
3228     }
3229   }
3230 #endif
3231 

3232   NOT_PRODUCT(int insts_size = buffer.insts_size());
3233   if (new_adapter == NULL) {
3234     // CodeCache is full, disable compilation
3235     // Ought to log this, but the compile log is only per compile thread
3236     // and we're some nondescript Java thread.
3237     return NULL;
3238   }
3239   entry->relocate(new_adapter->content_begin());
3240 #ifndef PRODUCT
3241   // debugging support
3242   if (PrintAdapterHandlers || PrintStubCode) {
3243     ttyLocker ttyl;
3244     entry->print_adapter_on(tty);
3245     tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
3246                   _adapter_handler_table.number_of_entries(), fingerprint->as_basic_args_string(),
3247                   fingerprint->as_string(), insts_size);
3248     tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
3249     if (Verbose || PrintStubCode) {
3250       address first_pc = entry->base_address();
3251       if (first_pc != NULL) {
3252         Disassembler::decode(first_pc, first_pc + insts_size, tty
3253                              NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
3254         tty->cr();
3255       }
3256     }
3257   }
3258 #endif
3259 
3260   // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
3261   // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
3262   if (contains_all_checks || !VerifyAdapterCalls) {
3263     assert_lock_strong(AdapterHandlerLibrary_lock);
3264     _adapter_handler_table.put(fingerprint, entry);
3265   }
3266   return entry;
3267 }
3268 
3269 address AdapterHandlerEntry::base_address() {
3270   address base = _i2c_entry;
3271   if (base == NULL)  base = _c2i_entry;
3272   assert(base <= _c2i_entry || _c2i_entry == NULL, "");
3273   assert(base <= _c2i_inline_entry || _c2i_inline_entry == NULL, "");
3274   assert(base <= _c2i_inline_ro_entry || _c2i_inline_ro_entry == NULL, "");
3275   assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, "");
3276   assert(base <= _c2i_unverified_inline_entry || _c2i_unverified_inline_entry == NULL, "");
3277   assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == NULL, "");
3278   return base;
3279 }
3280 
3281 void AdapterHandlerEntry::relocate(address new_base) {
3282   address old_base = base_address();
3283   assert(old_base != NULL, "");
3284   ptrdiff_t delta = new_base - old_base;
3285   if (_i2c_entry != NULL)
3286     _i2c_entry += delta;
3287   if (_c2i_entry != NULL)
3288     _c2i_entry += delta;
3289   if (_c2i_inline_entry != NULL)
3290     _c2i_inline_entry += delta;
3291   if (_c2i_inline_ro_entry != NULL)
3292     _c2i_inline_ro_entry += delta;
3293   if (_c2i_unverified_entry != NULL)
3294     _c2i_unverified_entry += delta;
3295   if (_c2i_unverified_inline_entry != NULL)
3296     _c2i_unverified_inline_entry += delta;
3297   if (_c2i_no_clinit_check_entry != NULL)
3298     _c2i_no_clinit_check_entry += delta;
3299   assert(base_address() == new_base, "");
3300 }
3301 
3302 
3303 AdapterHandlerEntry::~AdapterHandlerEntry() {
3304   delete _fingerprint;
3305   if (_sig_cc != NULL) {
3306     delete _sig_cc;
3307   }
3308 #ifdef ASSERT
3309   FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3310 #endif
3311 }
3312 
3313 
3314 #ifdef ASSERT
3315 // Capture the code before relocation so that it can be compared
3316 // against other versions.  If the code is captured after relocation
3317 // then relative instructions won't be equivalent.
3318 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3319   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3320   _saved_code_length = length;
3321   memcpy(_saved_code, buffer, length);
3322 }
3323 
3324 
3325 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3326   assert(_saved_code != NULL && other->_saved_code != NULL, "code not saved");
3327 

3374 
3375       struct { double data[20]; } locs_buf;
3376       struct { double data[20]; } stubs_locs_buf;
3377       buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3378 #if defined(AARCH64) || defined(PPC64)
3379       // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3380       // in the constant pool to ensure ordering between the barrier and oops
3381       // accesses. For native_wrappers we need a constant.
3382       // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3383       // static java call that is resolved in the runtime.
3384       if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3385         buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3386       }
3387 #endif
3388       buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3389       MacroAssembler _masm(&buffer);
3390 
3391       // Fill in the signature array, for the calling-convention call.
3392       const int total_args_passed = method->size_of_parameters();
3393 
3394       BasicType stack_sig_bt[16];
3395       VMRegPair stack_regs[16];
3396       BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3397       VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3398 
3399       int i = 0;
3400       if (!method->is_static()) {  // Pass in receiver first
3401         sig_bt[i++] = T_OBJECT;
3402       }
3403       SignatureStream ss(method->signature());
3404       for (; !ss.at_return_type(); ss.next()) {
3405         sig_bt[i++] = ss.type();  // Collect remaining bits of signature
3406         if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3407           sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
3408         }
3409       }
3410       assert(i == total_args_passed, "");
3411       BasicType ret_type = ss.type();
3412 
3413       // Now get the compiled-Java arguments layout.
3414       int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3415 
3416       // Generate the compiled-to-native wrapper code
3417       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3418 
3419       if (nm != NULL) {
3420         {
3421           MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
3422           if (nm->make_in_use()) {
3423             method->set_code(method, nm);
3424           }
3425         }
3426 
3427         DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
3428         if (directive->PrintAssemblyOption) {
3429           nm->print_code();
3430         }
3431         DirectivesStack::release(directive);

3628       st->print("Adapter for signature: ");
3629       a->print_adapter_on(st);
3630       return true;
3631     } else {
3632       return false; // keep looking
3633     }
3634   };
3635   assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3636   _adapter_handler_table.iterate(findblob);
3637   assert(found, "Should have found handler");
3638 }
3639 
3640 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3641   st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3642   if (get_i2c_entry() != NULL) {
3643     st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3644   }
3645   if (get_c2i_entry() != NULL) {
3646     st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3647   }
3648   if (get_c2i_inline_entry() != NULL) {
3649     st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3650   }
3651   if (get_c2i_inline_ro_entry() != NULL) {
3652     st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3653   }
3654   if (get_c2i_unverified_entry() != NULL) {
3655     st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3656   }
3657   if (get_c2i_unverified_inline_entry() != NULL) {
3658     st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3659   }
3660   if (get_c2i_no_clinit_check_entry() != NULL) {
3661     st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3662   }
3663   st->cr();
3664 }
3665 
3666 #ifndef PRODUCT
3667 
3668 void AdapterHandlerLibrary::print_statistics() {
3669   print_table_statistics();
3670 }
3671 
3672 #endif /* PRODUCT */
3673 
3674 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3675   assert(current == JavaThread::current(), "pre-condition");
3676   StackOverflow* overflow_state = current->stack_overflow_state();
3677   overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3678   overflow_state->set_reserved_stack_activation(current->stack_base());

3727         event.set_method(method);
3728         event.commit();
3729       }
3730     }
3731   }
3732   return activation;
3733 }
3734 
3735 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3736   // After any safepoint, just before going back to compiled code,
3737   // we inform the GC that we will be doing initializing writes to
3738   // this object in the future without emitting card-marks, so
3739   // GC may take any compensating steps.
3740 
3741   oop new_obj = current->vm_result();
3742   if (new_obj == NULL) return;
3743 
3744   BarrierSet *bs = BarrierSet::barrier_set();
3745   bs->on_slowpath_allocation_exit(current, new_obj);
3746 }
3747 
3748 // We are at a compiled code to interpreter call. We need backing
3749 // buffers for all inline type arguments. Allocate an object array to
3750 // hold them (convenient because once we're done with it we don't have
3751 // to worry about freeing it).
3752 oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS) {
3753   assert(InlineTypePassFieldsAsArgs, "no reason to call this");
3754   ResourceMark rm;
3755 
3756   int nb_slots = 0;
3757   InstanceKlass* holder = callee->method_holder();
3758   allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
3759   if (allocate_receiver) {
3760     nb_slots++;
3761   }
3762   int arg_num = callee->is_static() ? 0 : 1;
3763   for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3764     BasicType bt = ss.type();
3765     if ((bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) && callee->is_scalarized_arg(arg_num)) {
3766       nb_slots++;
3767     }
3768     if (bt != T_VOID) {
3769       arg_num++;
3770     }
3771   }
3772   objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
3773   objArrayHandle array(THREAD, array_oop);
3774   arg_num = callee->is_static() ? 0 : 1;
3775   int i = 0;
3776   if (allocate_receiver) {
3777     InlineKlass* vk = InlineKlass::cast(holder);
3778     oop res = vk->allocate_instance(CHECK_NULL);
3779     array->obj_at_put(i++, res);
3780   }
3781   for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3782     BasicType bt = ss.type();
3783     if ((bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) && callee->is_scalarized_arg(arg_num)) {
3784       InlineKlass* vk = ss.as_inline_klass(holder);
3785       assert(vk != NULL, "Unexpected klass");
3786       oop res = vk->allocate_instance(CHECK_NULL);
3787       array->obj_at_put(i++, res);
3788     }
3789     if (bt != T_VOID) {
3790       arg_num++;
3791     }
3792   }
3793   return array();
3794 }
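// Editorial example (hypothetical method): for a static
// void m(MyValue a, Object o, MyValue b) where both MyValue arguments are
// scalarized, the two passes over the signature agree on nb_slots == 2 and
// the returned array holds one freshly allocated buffer per scalarized
// argument, in argument order.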
3795 
3796 JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
3797   methodHandle callee(current, callee_method);
3798   oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, CHECK);
3799   current->set_vm_result(array);
3800   current->set_vm_result_2(callee()); // TODO: required to keep callee live?
3801 JRT_END
3802 
3803 // We're returning from an interpreted method: load each field into a
3804 // register following the calling convention
3805 JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
3806 {
3807   assert(res->klass()->is_inline_klass(), "only inline types here");
3808   ResourceMark rm;
3809   RegisterMap reg_map(current,
3810                       RegisterMap::UpdateMap::include,
3811                       RegisterMap::ProcessFrames::include,
3812                       RegisterMap::WalkContinuation::skip);
3813   frame stubFrame = current->last_frame();
3814   frame callerFrame = stubFrame.sender(&reg_map);
3815   assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
3816 
3817   InlineKlass* vk = InlineKlass::cast(res->klass());
3818 
3819   const Array<SigEntry>* sig_vk = vk->extended_sig();
3820   const Array<VMRegPair>* regs = vk->return_regs();
3821 
3822   if (regs == NULL) {
3823     // The fields of the inline klass don't fit in registers, bail out
3824     return;
3825   }
3826 
3827   int j = 1;
3828   for (int i = 0; i < sig_vk->length(); i++) {
3829     BasicType bt = sig_vk->at(i)._bt;
3830     if (bt == T_PRIMITIVE_OBJECT) {
3831       continue;
3832     }
3833     if (bt == T_VOID) {
3834       if (sig_vk->at(i-1)._bt == T_LONG ||
3835           sig_vk->at(i-1)._bt == T_DOUBLE) {
3836         j++;
3837       }
3838       continue;
3839     }
3840     int off = sig_vk->at(i)._offset;
3841     assert(off > 0, "offset in object should be positive");
3842     VMRegPair pair = regs->at(j);
3843     address loc = reg_map.location(pair.first(), nullptr);
3844     switch(bt) {
3845     case T_BOOLEAN:
3846       *(jboolean*)loc = res->bool_field(off);
3847       break;
3848     case T_CHAR:
3849       *(jchar*)loc = res->char_field(off);
3850       break;
3851     case T_BYTE:
3852       *(jbyte*)loc = res->byte_field(off);
3853       break;
3854     case T_SHORT:
3855       *(jshort*)loc = res->short_field(off);
3856       break;
3857     case T_INT: {
3858       *(jint*)loc = res->int_field(off);
3859       break;
3860     }
3861     case T_LONG:
3862 #ifdef _LP64
3863       *(intptr_t*)loc = res->long_field(off);
3864 #else
3865       Unimplemented();
3866 #endif
3867       break;
3868     case T_OBJECT:
3869     case T_ARRAY: {
3870       *(oop*)loc = res->obj_field(off);
3871       break;
3872     }
3873     case T_FLOAT:
3874       *(jfloat*)loc = res->float_field(off);
3875       break;
3876     case T_DOUBLE:
3877       *(jdouble*)loc = res->double_field(off);
3878       break;
3879     default:
3880       ShouldNotReachHere();
3881     }
3882     j++;
3883   }
3884   assert(j == regs->length(), "missed a field?");
3885 
3886 #ifdef ASSERT
3887   VMRegPair pair = regs->at(0);
3888   address loc = reg_map.location(pair.first(), nullptr);
3889   assert(*(oopDesc**)loc == res, "overwritten object");
3890 #endif
3891 
3892   current->set_vm_result(res);
3893 }
3894 JRT_END
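// Editorial walk-through of the copy loop above (hypothetical field layout):
// for an inline klass with fields (long l, int i), the extended signature is
// roughly [T_PRIMITIVE_OBJECT, T_LONG, T_VOID, T_INT]. regs->at(0) carries
// the buffered oop itself (the assert checks it is left untouched), j starts
// at 1 for the first field, and the T_VOID trailing the long advances j so
// it stays aligned with the regs array.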
3895 
3896 // We've returned to an interpreted method, the interpreter needs a
3897 // reference to an inline type instance. Allocate it and initialize it
3898 // from the fields' values in registers.
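// Editorial note (inferred from the code below): the low bit of res acts as
// a tag. Compiled code returning with fields in registers passes
// (intptr_t)InlineKlass* | 1; otherwise res is a plain oop (or a pointer
// into the buffer area) and is installed as the result directly.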
3899 JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
3900 {
3901   ResourceMark rm;
3902   RegisterMap reg_map(current,
3903                       RegisterMap::UpdateMap::include,
3904                       RegisterMap::ProcessFrames::include,
3905                       RegisterMap::WalkContinuation::skip);
3906   frame stubFrame = current->last_frame();
3907   frame callerFrame = stubFrame.sender(&reg_map);
3908 
3909 #ifdef ASSERT
3910   InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
3911 #endif
3912 
3913   if (!is_set_nth_bit(res, 0)) {
3914     // We're not returning with inline type fields in registers (the
3915     // calling convention didn't allow it for this inline klass)
3916     assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
3917     current->set_vm_result((oopDesc*)res);
3918     assert(verif_vk == NULL, "broken calling convention");
3919     return;
3920   }
3921 
3922   clear_nth_bit(res, 0);
3923   InlineKlass* vk = (InlineKlass*)res;
3924   assert(verif_vk == vk, "broken calling convention");
3925   assert(Metaspace::contains((void*)res), "should be klass");
3926 
3927   // Allocate handles for every oop field so they are safe in case of
3928   // a safepoint when allocating
3929   GrowableArray<Handle> handles;
3930   vk->save_oop_fields(reg_map, handles);
3931 
3932   // It's unsafe to safepoint until we are here
3933   JRT_BLOCK;
3934   {
3935     JavaThread* THREAD = current;
3936     oop vt = vk->realloc_result(reg_map, handles, CHECK);
3937     current->set_vm_result(vt);
3938   }
3939   JRT_BLOCK_END;
3940 }
3941 JRT_END
3942 
< prev index next >