// Webrev print view of: src/hotspot/share/runtime/sharedRuntime.cpp
// (navigation chrome "< prev index next >" / "Print this page" removed)

  30 #include "classfile/javaClasses.inline.hpp"
  31 #include "classfile/stringTable.hpp"
  32 #include "classfile/vmClasses.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/aotCodeCache.hpp"
  35 #include "code/codeCache.hpp"
  36 #include "code/compiledIC.hpp"
  37 #include "code/nmethod.inline.hpp"
  38 #include "code/scopeDesc.hpp"
  39 #include "code/vtableStubs.hpp"
  40 #include "compiler/abstractCompiler.hpp"
  41 #include "compiler/compileBroker.hpp"
  42 #include "compiler/disassembler.hpp"
  43 #include "gc/shared/barrierSet.hpp"
  44 #include "gc/shared/collectedHeap.hpp"
  45 #include "interpreter/interpreter.hpp"
  46 #include "interpreter/interpreterRuntime.hpp"
  47 #include "jfr/jfrEvents.hpp"
  48 #include "jvm.h"
  49 #include "logging/log.hpp"

  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "metaprogramming/primitiveConversions.hpp"



  53 #include "oops/klass.hpp"
  54 #include "oops/method.inline.hpp"
  55 #include "oops/objArrayKlass.hpp"

  56 #include "oops/oop.inline.hpp"
  57 #include "prims/forte.hpp"
  58 #include "prims/jvmtiExport.hpp"
  59 #include "prims/jvmtiThreadState.hpp"
  60 #include "prims/methodHandles.hpp"
  61 #include "prims/nativeLookup.hpp"
  62 #include "runtime/arguments.hpp"
  63 #include "runtime/atomicAccess.hpp"
  64 #include "runtime/basicLock.inline.hpp"
  65 #include "runtime/frame.inline.hpp"
  66 #include "runtime/handles.inline.hpp"
  67 #include "runtime/init.hpp"
  68 #include "runtime/interfaceSupport.inline.hpp"
  69 #include "runtime/java.hpp"
  70 #include "runtime/javaCalls.hpp"
  71 #include "runtime/jniHandles.inline.hpp"
  72 #include "runtime/osThread.hpp"
  73 #include "runtime/perfData.hpp"
  74 #include "runtime/sharedRuntime.hpp"

  75 #include "runtime/stackWatermarkSet.hpp"
  76 #include "runtime/stubRoutines.hpp"
  77 #include "runtime/synchronizer.hpp"
  78 #include "runtime/timerTrace.hpp"
  79 #include "runtime/vframe.inline.hpp"
  80 #include "runtime/vframeArray.hpp"
  81 #include "runtime/vm_version.hpp"
  82 #include "utilities/copy.hpp"
  83 #include "utilities/dtrace.hpp"
  84 #include "utilities/events.hpp"
  85 #include "utilities/exceptions.hpp"
  86 #include "utilities/globalDefinitions.hpp"
  87 #include "utilities/hashTable.hpp"
  88 #include "utilities/macros.hpp"
  89 #include "utilities/xmlstream.hpp"
  90 #ifdef COMPILER1
  91 #include "c1/c1_Runtime1.hpp"
  92 #endif
  93 #ifdef COMPILER2
  94 #include "opto/runtime.hpp"

// Finds the receiver and the resolved callee (via 'callinfo') for a call
// currently in progress, i.e., arguments have been pushed on the stack
// but the callee has not been invoked yet.  The caller frame must be compiled.
//
// On return, 'bc' holds the effective invoke bytecode (adjusted for attached
// methods / signature-polymorphic intrinsics) and 'callinfo' the resolved
// target.  Returns the receiver Handle (null when the call has no receiver);
// throws, returning a null handle, on resolution failure.
Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
                                              CallInfo& callinfo, TRAPS) {
  Handle receiver;
  Handle nullHandle;  // create a handy null handle for exception returns
  JavaThread* current = THREAD;

  assert(!vfst.at_end(), "Java frame must exist");

  // Find caller and bci from vframe
  methodHandle caller(current, vfst.method());
  int          bci   = vfst.bci();

  // Calls made from the Continuation.enter intrinsic have no invoke bytecode
  // to inspect; they resolve through a dedicated LinkResolver entry point.
  if (caller->is_continuation_enter_intrinsic()) {
    bc = Bytecodes::_invokestatic;
    LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
    return receiver;
  }

  Bytecode_invoke bytecode(caller, bci);
  int bytecode_index = bytecode.index();
  bc = bytecode.invoke_code();

  methodHandle attached_method(current, extract_attached_method(vfst));
  if (attached_method.not_null()) {
    Method* callee = bytecode.static_target(CHECK_NH);
    vmIntrinsics::ID id = callee->intrinsic_id();
    // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
    // it attaches statically resolved method to the call site.
    if (MethodHandles::is_signature_polymorphic(id) &&
        MethodHandles::is_signature_polymorphic_intrinsic(id)) {
      bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);

      // Adjust invocation mode according to the attached method.
      switch (bc) {
        case Bytecodes::_invokevirtual:
          if (attached_method->method_holder()->is_interface()) {
            bc = Bytecodes::_invokeinterface;
          }
          break;
        case Bytecodes::_invokeinterface:
          if (!attached_method->method_holder()->is_interface()) {
            bc = Bytecodes::_invokevirtual;
          }
          break;
        case Bytecodes::_invokehandle:
          if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
            bc = attached_method->is_static() ? Bytecodes::_invokestatic
                                              : Bytecodes::_invokevirtual;
          }
          break;
        default:
          break;
      }
    }
  }

  assert(bc != Bytecodes::_illegal, "not initialized");

  // invokehandle receivers are handled via the resolved MethodHandle chain,
  // so only the remaining invoke modes carry an explicit stack receiver here.
  bool has_receiver = bc != Bytecodes::_invokestatic &&
                      bc != Bytecodes::_invokedynamic &&
                      bc != Bytecodes::_invokehandle;

  // Find receiver for non-static call
  if (has_receiver) {
    // This register map must be updated since we need to find the receiver for
    // compiled frames. The receiver might be in a register.
    RegisterMap reg_map2(current,
                         RegisterMap::UpdateMap::include,
                         RegisterMap::ProcessFrames::include,
                         RegisterMap::WalkContinuation::skip);
    frame stubFrame   = current->last_frame();
    // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);

    if (attached_method.is_null()) {
      Method* callee = bytecode.static_target(CHECK_NH);
      if (callee == nullptr) {
        THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
      }
    }

    // Retrieve from a compiled argument list
    receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
    assert(oopDesc::is_oop_or_null(receiver()), "");

    if (receiver.is_null()) {
      THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
    }
  }

  // Resolve method
  if (attached_method.not_null()) {
    // Parameterized by attached method.
    LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
  } else {
    // Parameterized by bytecode.
    constantPoolHandle constants(current, caller->constants());
    LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
  }

#ifdef ASSERT
  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
  if (has_receiver) {
    assert(receiver.not_null(), "should have thrown exception");
    Klass* receiver_klass = receiver->klass();
    Klass* rk = nullptr;
    if (attached_method.not_null()) {
      // In case there's resolved method attached, use its holder during the check.
      rk = attached_method->method_holder();
    } else {
      // Klass is already loaded.
      constantPoolHandle constants(current, caller->constants());
      rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
    }
    Klass* static_receiver_klass = rk;
    assert(receiver_klass->is_subtype_of(static_receiver_klass),
           "actual receiver must be subclass of static receiver klass");
    if (receiver_klass->is_instance_klass()) {
      if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
        tty->print_cr("ERROR: Klass not yet initialized!!");
        receiver_klass->print();
      }
      assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
    }
  }
#endif

  return receiver;
}
1344 
// Finds the method being called at the current call site.  The callee is
// located either through the topmost Java frames (normal case) or, when no
// Java activation exists since the last JavaCall, through the JavaCallWrapper
// of the entry frame.  Returns the callee as a methodHandle.
methodHandle SharedRuntime::find_callee_method(TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);
  // We need first to check if any Java activations (compiled, interpreted)
  // exist on the stack since last JavaCall.  If not, we need
  // to get the target method from the JavaCall wrapper.
  vframeStream vfst(current, true);  // Do not skip any javaCalls
  methodHandle callee_method;
  if (vfst.at_end()) {
    // No Java frames were found on stack since we did the JavaCall.
    // Hence the stack can only contain an entry_frame.  We need to
    // find the target method from the stub frame.
    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::skip,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame fr = current->last_frame();
    assert(fr.is_runtime_frame(), "must be a runtimeStub");
    fr = fr.sender(&reg_map);
    assert(fr.is_entry_frame(), "must be");
    // fr is now pointing to the entry frame.
    callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
  } else {
    // A Java frame exists: resolve the callee from the invoke bytecode there.
    Bytecodes::Code bc;
    CallInfo callinfo;
    find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
    callee_method = methodHandle(current, callinfo.selected_method());
  }
  assert(callee_method()->is_method(), "must be");
  return callee_method;
}
1376 
// Resolves a call.
// Determines the callee for the call site at the compiled caller's pc and
// patches the site so later invocations dispatch without re-entering the
// runtime: the inline cache for non-optimized virtual calls, the direct
// call site otherwise.  Returns the resolved callee method.
methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);
  RegisterMap cbl_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame caller_frame = current->last_frame().sender(&cbl_map);

  CodeBlob* caller_cb = caller_frame.cb();
  guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
  nmethod* caller_nm = caller_cb->as_nmethod();

  // determine call info & receiver
  // note: a) receiver is null for static calls
  //       b) an exception is thrown if receiver is null for non-static calls
  CallInfo call_info;
  Bytecodes::Code invoke_code = Bytecodes::_illegal;
  Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));

  // No safepoint from here on: the resolved state and the patched call site
  // must stay consistent.
  NoSafepointVerifier nsv;

  methodHandle callee_method(current, call_info.selected_method());

  assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
         (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
         (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
         (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
         ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");

  assert(!caller_nm->is_unloading(), "It should not be unloading");

#ifndef PRODUCT
  // tracing/debugging/statistics
  uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
                 (is_virtual) ? (&_resolve_virtual_ctr) :
                                (&_resolve_static_ctr);
  AtomicAccess::inc(addr);

  if (TraceCallFixup) {
    ResourceMark rm(current);
    tty->print("resolving %s%s (%s) call to",
               (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
               Bytecodes::name(invoke_code));
    callee_method->print_short_name(tty);
    tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
                  p2i(caller_frame.pc()), p2i(callee_method->code()));
  }
#endif

  if (invoke_code == Bytecodes::_invokestatic) {
    assert(callee_method->method_holder()->is_initialized() ||
           callee_method->method_holder()->is_reentrant_initialization(current),
           "invalid class initialization state for invoke_static");
    if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
      // In order to keep class initialization check, do not patch call
      // site for static call when the class is not fully initialized.
      // Proper check is enforced by call site re-resolution on every invocation.
      //
      // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
      // explicit class initialization check is put in nmethod entry (VEP).
      assert(callee_method->method_holder()->is_linked(), "must be");
      return callee_method;
    }
  }

  // JSR 292 key invariant:
  // If the resolved method is a MethodHandle invoke target, the call
  // site must be a MethodHandle call site, because the lambda form might tail-call
  // leaving the stack in a state unknown to either caller or callee

  // Compute entry points. The computation of the entry points is independent of
  // patching the call.

  // Make sure the callee nmethod does not get deoptimized and removed before
  // we are done patching the code.

  CompiledICLocker ml(caller_nm);
  if (is_virtual && !is_optimized) {
    CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
    inline_cache->update(&call_info, receiver->klass());
  } else {
    // Callsite is a direct call - set it to the destination method
    CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
    callsite->set(callee_method);
  }

  return callee_method;
}
1469 
// Inline caches exist only in compiled code.
// Runtime entry for an inline-cache miss: re-resolves the call, updates the
// inline cache via handle_ic_miss_helper, stashes the Method* in TLS, and
// returns the code entry point at which to resume the call.
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
#ifdef ASSERT
  // The caller of the IC-miss stub must be a compiled frame.
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = current->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
#endif /* ASSERT */

  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
    // Return Method* through TLS
    current->set_vm_result_metadata(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  return get_resolved_entry(current, callee_method);
JRT_END
1492 
1493 
1494 // Handle call site that has been made non-entrant
1495 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1496   // 6243940 We might end up in here if the callee is deoptimized
1497   // as we race to call it.  We don't want to take a safepoint if
1498   // the caller was interpreted because the caller frame will look
1499   // interpreted to the stack walkers and arguments are now
1500   // "compiled" so it is much better to make this transition
1501   // invisible to the stack walking code. The i2c path will
1502   // place the callee method in the callee_target. It is stashed
1503   // there because if we try and find the callee by normal means a
1504   // safepoint is possible and have trouble gc'ing the compiled args.
1505   RegisterMap reg_map(current,
1506                       RegisterMap::UpdateMap::skip,
1507                       RegisterMap::ProcessFrames::include,
1508                       RegisterMap::WalkContinuation::skip);
1509   frame stub_frame = current->last_frame();
1510   assert(stub_frame.is_runtime_frame(), "sanity check");
1511   frame caller_frame = stub_frame.sender(&reg_map);
1512 
1513   if (caller_frame.is_interpreted_frame() ||
1514       caller_frame.is_entry_frame() ||
1515       caller_frame.is_upcall_stub_frame()) {
1516     Method* callee = current->callee_target();
1517     guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1518     current->set_vm_result_metadata(callee);
1519     current->set_callee_target(nullptr);
1520     if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1521       // Bypass class initialization checks in c2i when caller is in native.
1522       // JNI calls to static methods don't have class initialization checks.
1523       // Fast class initialization checks are present in c2i adapters and call into
1524       // SharedRuntime::handle_wrong_method() on the slow path.
1525       //
1526       // JVM upcalls may land here as well, but there's a proper check present in
1527       // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1528       // so bypassing it in c2i adapter is benign.
1529       return callee->get_c2i_no_clinit_check_entry();
1530     } else {
1531       return callee->get_c2i_entry();




1532     }
1533   }
1534 
1535   // Must be compiled to compiled path which is safe to stackwalk
1536   methodHandle callee_method;



1537   JRT_BLOCK
1538     // Force resolving of caller (if we called from compiled frame)
1539     callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1540     current->set_vm_result_metadata(callee_method());
1541   JRT_BLOCK_END
1542   // return compiled code entry point after potential safepoints
1543   return get_resolved_entry(current, callee_method);
1544 JRT_END
1545 
// Handle abstract method call.
// Builds a verbose AbstractMethodError (including the actual receiver klass
// when available) and returns the entry point that raises/forwards it.
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
  // Verbose error message for AbstractMethodError.
  // Get the called method from the invoke bytecode.
  vframeStream vfst(current, true);
  assert(!vfst.at_end(), "Java frame must exist");
  methodHandle caller(current, vfst.method());
  Bytecode_invoke invoke(caller, vfst.bci());
  DEBUG_ONLY( invoke.verify(); )

  // Find the compiled caller frame.
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stubFrame = current->last_frame();
  assert(stubFrame.is_runtime_frame(), "must be");
  frame callerFrame = stubFrame.sender(&reg_map);
  assert(callerFrame.is_compiled_frame(), "must be");

  // Install exception and return forward entry.
  address res = SharedRuntime::throw_AbstractMethodError_entry();
  JRT_BLOCK
    methodHandle callee(current, invoke.static_target(current));
    if (!callee.is_null()) {
      oop recv = callerFrame.retrieve_receiver(&reg_map);
      Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
      // The detailed error is pending after this call; forward it.
      res = StubRoutines::forward_exception_entry();
      LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
    }
  JRT_BLOCK_END
  return res;
JRT_END
1579 
1580 // return verified_code_entry if interp_only_mode is not set for the current thread;
1581 // otherwise return c2i entry.
1582 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method) {
1583   if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1584     // In interp_only_mode we need to go to the interpreted entry
1585     // The c2i won't patch in this mode -- see fixup_callers_callsite
1586     return callee_method->get_c2i_entry();




















1587   }
1588   assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1589   return callee_method->verified_code_entry();
1590 }
1591 
1592 // resolve a static call and patch code
1593 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1594   methodHandle callee_method;

1595   bool enter_special = false;
1596   JRT_BLOCK
1597     callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1598     current->set_vm_result_metadata(callee_method());
1599   JRT_BLOCK_END
1600   // return compiled code entry point after potential safepoints
1601   return get_resolved_entry(current, callee_method);
1602 JRT_END
1603 
// resolve virtual call and update inline cache to monomorphic
// Runtime entry for an unresolved virtual call: resolves through the inline
// cache (is_virtual == true, is_optimized == false), stashes the Method* in
// TLS, and returns the code entry point to resume at.
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
    current->set_vm_result_metadata(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  return get_resolved_entry(current, callee_method);
JRT_END
1614 
// Resolve a virtual call that can be statically bound (e.g., always
// monomorphic, so it has no inline cache).  Patch code to resolved target.
// (is_virtual == true, is_optimized == true); stashes the Method* in TLS and
// returns the code entry point to resume at.
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
    current->set_vm_result_metadata(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  return get_resolved_entry(current, callee_method);
JRT_END
1627 
// Handles an inline-cache miss: re-resolves the call and transitions the
// inline cache (possibly to megamorphic).  Called with an IC-miss runtime
// stub on top of the stack; returns the resolved callee.
methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);
  CallInfo call_info;
  Bytecodes::Code bc;

  // receiver is null for static calls. An exception is thrown for null
  // receivers for non-static calls
  Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));

  methodHandle callee_method(current, call_info.selected_method());

#ifndef PRODUCT
  AtomicAccess::inc(&_ic_miss_ctr);

  // Statistics & Tracing
  if (TraceCallFixup) {
    ResourceMark rm(current);
    tty->print("IC miss (%s) call to", Bytecodes::name(bc));
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
  }

  if (ICMissHistogram) {
    MutexLocker m(VMStatistic_lock);
    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::skip,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame f = current->last_frame().real_sender(&reg_map);// skip runtime stub
    // produce statistics under the lock
    trace_ic_miss(f.pc());
  }
#endif

  // install an event collector so that when a vtable stub is created the
  // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
  // event can't be posted when the stub is created as locks are held
  // - instead the event will be deferred until the event collector goes
  // out of scope.
  JvmtiDynamicCodeEventCollector event_collector;

  // Update inline cache to megamorphic. Skip update if we are called from interpreted.
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame caller_frame = current->last_frame().sender(&reg_map);
  CodeBlob* cb = caller_frame.cb();
  nmethod* caller_nm = cb->as_nmethod();

  // Transition the IC under the IC lock; receiver is non-null here (a null
  // receiver would have thrown in find_callee_info above).
  CompiledICLocker ml(caller_nm);
  CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
  inline_cache->update(&call_info, receiver()->klass());

  return callee_method;
}
1685 
//
// Resets a call-site in compiled code so it will get resolved again.
// This routine handles both virtual call sites, optimized virtual call
// sites, and static call sites. Typically used to change a call site's
// destination from compiled to interpreted.
//
methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = current->last_frame();
  assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
  frame caller = stub_frame.sender(&reg_map);

  // Do nothing if the frame isn't a live compiled frame.
  // nmethod could be deoptimized by the time we get here
  // so no update to the caller is needed.

  if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
      (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) {

    address pc = caller.pc();

    nmethod* caller_nm = CodeCache::find_nmethod(pc);
    assert(caller_nm != nullptr, "did not find caller nmethod");

    // Default call_addr is the location of the "basic" call.
    // Determine the address of the call we are reresolving. With
    // Inline Caches we will always find a recognizable call.
    // With Inline Caches disabled we may or may not find a
    // recognizable call. We will always find a call for static
    // calls and for optimized virtual calls. For vanilla virtual
    // calls it depends on the state of the UseInlineCaches switch.
    //
    // With Inline Caches disabled we can get here for a virtual call
    // for two reasons:
    //   1 - calling an abstract method. The vtable for abstract methods
    //       will run us thru handle_wrong_method and we will eventually
    //       end up in the interpreter to throw the ame.
    //   2 - a racing deoptimization. We could be doing a vanilla vtable
    //       call and between the time we fetch the entry address and
    //       we jump to it the target gets deoptimized. Similar to 1
    //       we will wind up in the interpreter (thru a c2i with c2).
    //
    CompiledICLocker ml(caller_nm);
    address call_addr = caller_nm->call_instruction_address(pc);

    if (call_addr != nullptr) {
      // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
      // bytes back in the instruction stream so we must also check for reloc info.
      RelocIterator iter(caller_nm, call_addr, call_addr+1);
      bool ret = iter.next(); // Get item
      if (ret) {
        switch (iter.type()) {
          case relocInfo::static_call_type:
          case relocInfo::opt_virtual_call_type: {
            // Direct call site: reset to the resolve stub.
            CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
            cdc->set_to_clean();
            break;
          }

          case relocInfo::virtual_call_type: {
            // compiled, dispatched call (which used to call an interpreted method)
            CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
            inline_cache->set_to_clean();
            break;
          }
          default:
            break;
        }
      }
    }
  }

  // Resolve the callee now that the site (if any) has been cleaned.
  methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));

#ifndef PRODUCT
  AtomicAccess::inc(&_wrong_method_ctr);

  if (TraceCallFixup) {
    ResourceMark rm(current);
    tty->print("handle_wrong_method reresolving call to");
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
  }
#endif

  return callee_method;
}
1779 
1780 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1781   // The faulting unsafe accesses should be changed to throw the error
1782   // synchronously instead. Meanwhile the faulting instruction will be
1783   // skipped over (effectively turning it into a no-op) and an
1784   // asynchronous exception will be raised which the thread will
1785   // handle at a later point. If the instruction is a load it will
1786   // return garbage.
1787 
1788   // Request an async exception.
1789   thread->set_pending_unsafe_access_error();
1790 
1791   // Return address of next instruction to execute.

1957   msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
1958 
1959   char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
1960   if (message == nullptr) {
1961     // Shouldn't happen, but don't cause even more problems if it does
1962     message = const_cast<char*>(caster_klass->external_name());
1963   } else {
1964     jio_snprintf(message,
1965                  msglen,
1966                  "class %s cannot be cast to class %s (%s%s%s)",
1967                  caster_name,
1968                  target_name,
1969                  caster_klass_description,
1970                  klass_separator,
1971                  target_klass_description
1972                  );
1973   }
1974   return message;
1975 }
1976 















1977 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
1978   (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
1979 JRT_END
1980 
1981 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
1982   if (!SafepointSynchronize::is_synchronizing()) {
1983     // Only try quick_enter() if we're not trying to reach a safepoint
1984     // so that the calling thread reaches the safepoint more quickly.
1985     if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
1986       return;
1987     }
1988   }
1989   // NO_ASYNC required because an async exception on the state transition destructor
1990   // would leave you with the lock held and it would never be released.
1991   // The normal monitorenter NullPointerException is thrown without acquiring a lock
1992   // and the model is that an exception implies the method failed.
1993   JRT_BLOCK_NO_ASYNC
1994   Handle h_obj(THREAD, obj);
1995   ObjectSynchronizer::enter(h_obj, lock, current);
1996   assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");

2190   tty->print_cr("Note 1: counter updates are not MT-safe.");
2191   tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2192   tty->print_cr("        %% in nested categories are relative to their category");
2193   tty->print_cr("        (and thus add up to more than 100%% with inlining)");
2194   tty->cr();
2195 
2196   MethodArityHistogram h;
2197 }
2198 #endif
2199 
#ifndef PRODUCT
// Debug-build statistics for the adapter table lookups below.
// NOTE(review): presumably updated by the lookup path of the adapter
// table that follows — confirm against the full file.
static int _lookups; // number of calls to lookup
static int _equals;  // number of buckets checked with matching hash
static int _archived_hits; // number of successful lookups in archived table
static int _runtime_hits;  // number of successful lookups in runtime table
#endif
2206 
// A simple wrapper class around the calling convention information
// that allows sharing of adapters for the same calling convention.
class AdapterFingerPrint : public MetaspaceObj {
 private:
  enum {
    _basic_type_bits = 4,                                  // nibble per (remapped) BasicType
    _basic_type_mask = right_n_bits(_basic_type_bits),
    _basic_types_per_int = BitsPerInt / _basic_type_bits,  // 8 entries per 32-bit int
  };
  // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
  // For now, 4 bits per components (plus T_VOID gaps after double/long) is not excessive.

  int _length; // number of ints of packed signature data stored after this object

  // The packed signature data lives immediately after the object itself
  // (allocated in one chunk by operator new below).
  static int data_offset() { return sizeof(AdapterFingerPrint); }
  int* data_pointer() {
    return (int*)((address)this + data_offset());
  }
2225 
  // Private constructor. Use allocate() to get an instance.
  AdapterFingerPrint(int total_args_passed, BasicType* sig_bt, int len) {
    int* data = data_pointer();
    // Pack the BasicTypes with 8 per int
    assert(len == length(total_args_passed), "sanity");
    _length = len;
    int sig_index = 0;
    for (int index = 0; index < _length; index++) {
      int value = 0;
      // Each argument is remapped by adapter_encoding() to a 4-bit value and
      // appended into the low bits, shifting earlier arguments up; so within
      // an int earlier arguments occupy the higher-order nibbles, and the
      // unused high nibbles of a final, partially-filled int remain zero.
      for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
        int bt = adapter_encoding(sig_bt[sig_index++]);
        assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
        value = (value << _basic_type_bits) | bt;
      }
      data[index] = value;
    }
  }
2243 
  // Call deallocate instead. Instances are raw-allocated (operator new below)
  // and released with FreeHeap() in deallocate(), so the destructor must
  // never run.
  ~AdapterFingerPrint() {
    ShouldNotCallThis();
  }
2248 
2249   static int length(int total_args) {
2250     return (total_args + (_basic_types_per_int-1)) / _basic_types_per_int;
2251   }
2252 
2253   static int compute_size_in_words(int len) {
2254     return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(int)));
2255   }
2256 
2257   // Remap BasicTypes that are handled equivalently by the adapters.
2258   // These are correct for the current system but someday it might be
2259   // necessary to make this mapping platform dependent.
2260   static int adapter_encoding(BasicType in) {
2261     switch (in) {
2262       case T_BOOLEAN:
2263       case T_BYTE:
2264       case T_SHORT:
2265       case T_CHAR:
2266         // There are all promoted to T_INT in the calling convention
2267         return T_INT;
2268 
2269       case T_OBJECT:
2270       case T_ARRAY:
2271         // In other words, we assume that any register good enough for
2272         // an int or long is good enough for a managed pointer.
2273 #ifdef _LP64
2274         return T_LONG;
2275 #else
2276         return T_INT;
2277 #endif
2278 
2279       case T_INT:
2280       case T_LONG:
2281       case T_FLOAT:
2282       case T_DOUBLE:
2283       case T_VOID:
2284         return in;
2285 
2286       default:
2287         ShouldNotReachHere();
2288         return T_CONFLICT;
2289     }
2290   }
2291 
  // Sized placement-style allocator: 'size' is the compiler-computed object
  // size, 'fp_size' additionally covers the trailing packed signature data
  // (see allocate()). The whole chunk is zeroed so unused nibbles read as 0.
  void* operator new(size_t size, size_t fp_size) throw() {
    assert(fp_size >= size, "sanity check");
    void* p = AllocateHeap(fp_size, mtCode);
    memset(p, 0, fp_size);
    return p;
  }
2298 

  // Applies 'function' to each packed 4-bit argument value, in signature
  // order. A zero nibble means no argument was packed there (padding in a
  // partially-filled int), so zeros are skipped.
  template<typename Function>
  void iterate_args(Function function) {
    for (int i = 0; i < length(); i++) {
      unsigned val = (unsigned)value(i);
      // args are packed so that first/lower arguments are in the highest
      // bits of each int value, so iterate from highest to the lowest
      for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
        unsigned v = (val >> j) & _basic_type_mask;
        if (v == 0) {
          continue;
        }
        function(v);
      }
    }
  }
2314 
2315  public:
2316   static AdapterFingerPrint* allocate(int total_args_passed, BasicType* sig_bt) {
2317     int len = length(total_args_passed);
2318     int size_in_bytes = BytesPerWord * compute_size_in_words(len);
2319     AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(total_args_passed, sig_bt, len);
2320     assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
2321     return afp;
2322   }
2323 
  // Releases an instance obtained from allocate(); the destructor never runs.
  static void deallocate(AdapterFingerPrint* fp) {
    FreeHeap(fp);
  }

  // Returns the index-th int of packed signature data.
  int value(int index) {
    int* data = data_pointer();
    return data[index];
  }

  // Number of ints of packed signature data.
  int length() {
    return _length;
  }
2336 
  // Hash over the packed signature words. Must be consistent with equals():
  // equal fingerprints yield equal hashes.
  unsigned int compute_hash() {
    int hash = 0;
    for (int i = 0; i < length(); i++) {
      int v = value(i);
      // Shift-xor mixing, plus a small additive constant (+3) to improve
      // the spread of the resulting hash values.
      hash = ((hash << 8) ^ v ^ (hash >> 5)) + 3;
    }
    return (unsigned int)hash;
  }
2346 
  // Hex rendering of the packed signature words, e.g. "0xb1a4b".
  // NOTE(review): the returned string is presumed resource-allocated by
  // stringStream — callers should hold a ResourceMark.
  const char* as_string() {
    stringStream st;
    st.print("0x");
    for (int i = 0; i < length(); i++) {
      st.print("%x", value(i));
    }
    return st.as_string();
  }
2355 
  // Human-readable argument string, e.g. "IJL". Longs and managed pointers
  // share a packed encoding (see adapter_encoding()), so a packed T_LONG is
  // disambiguated by what follows it: a T_VOID filler slot means a real long
  // ("J"), anything else (or end of args) means an object ("L").
  const char* as_basic_args_string() {
    stringStream st;
    bool long_prev = false;
    iterate_args([&] (int arg) {
      if (long_prev) {
        long_prev = false;
        if (arg == T_VOID) {
          st.print("J");
        } else {
          st.print("L");
        }
      }
      switch (arg) {
        case T_INT:    st.print("I");    break;
        case T_LONG:   long_prev = true; break;
        case T_FLOAT:  st.print("F");    break;
        case T_DOUBLE: st.print("D");    break;
        case T_VOID:   break; // filler slots (after long/double) print nothing
        default: ShouldNotReachHere();
      }
    });
    if (long_prev) {
      // Trailing T_LONG with no filler slot: it was a managed pointer.
      st.print("L");
    }
    return st.as_string();
  }
2382 
  // Reconstructs a resource-allocated BasicType array from the packed
  // fingerprint; 'nargs' receives the element count. A packed T_LONG is
  // expanded back to a real long (followed by its T_VOID filler) if the next
  // entry is T_VOID, otherwise to T_OBJECT — the pointer kinds are not
  // distinguishable after packing.
  BasicType* as_basic_type(int& nargs) {
    nargs = 0;
    GrowableArray<BasicType> btarray;
    bool long_prev = false;

    iterate_args([&] (int arg) {
      if (long_prev) {
        long_prev = false;
        if (arg == T_VOID) {
          btarray.append(T_LONG);
        } else {
          btarray.append(T_OBJECT); // it could be T_ARRAY; it shouldn't matter
        }
      }
      switch (arg) {
        case T_INT: // fallthrough
        case T_FLOAT: // fallthrough
        case T_DOUBLE:
        case T_VOID:
          btarray.append((BasicType)arg);
          break;
        case T_LONG:
          long_prev = true;
          break;
        default: ShouldNotReachHere();
      }
    });

    if (long_prev) {
      btarray.append(T_OBJECT);
    }

    nargs = btarray.length();
    BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nargs);
    int index = 0;
    GrowableArrayIterator<BasicType> iter = btarray.begin();
    while (iter != btarray.end()) {
      sig_bt[index++] = *iter;
      ++iter;
    }
    assert(index == btarray.length(), "sanity check");
#ifdef ASSERT
    {
      // Round-trip check: re-packing the reconstructed signature must yield
      // a fingerprint equal to this one.
      AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(nargs, sig_bt);
      assert(this->equals(compare_fp), "sanity check");
      AdapterFingerPrint::deallocate(compare_fp);
    }
#endif
    return sig_bt;
  }
2433 
  // Two fingerprints are equal iff their packed data words match exactly.
  bool equals(AdapterFingerPrint* other) {
    if (other->_length != _length) {
      return false;
    } else {
      for (int i = 0; i < _length; i++) {
        if (value(i) != other->value(i)) {
          return false;
        }
      }
    }
    return true;
  }
2446 
  // methods required by virtue of being a MetaspaceObj
  void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
  int size() const { return compute_size_in_words(_length); }
  MetaspaceObj::Type type() const { return AdapterFingerPrintType; }

  // Hashtable support for AdapterHandlerTable: static wrappers that forward
  // to the instance methods above.
  static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
    NOT_PRODUCT(_equals++);
    return fp1->equals(fp2);
  }

  static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
    return fp->compute_hash();
  }

#if INCLUDE_CDS
// Equality adapter for the compact hashtable; the length argument required
// by the OffsetCompactHashtable API is unused here.
static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
  return AdapterFingerPrint::equals(entry->fingerprint(), fp);
}

// Read-only table of adapters stored in the AOT cache, keyed by fingerprint.
class ArchivedAdapterTable : public OffsetCompactHashtable<
  AdapterFingerPrint*,
  AdapterHandlerEntry*,
  adapter_fp_equals_compact_hashtable_entry> {};
#endif // INCLUDE_CDS
2472 
// A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
                  AnyObj::C_HEAP, mtCode,
                  AdapterFingerPrint::compute_hash,
                  AdapterFingerPrint::equals>;
// Runtime table of adapters; guarded by AdapterHandlerLibrary_lock for writes.
static AdapterHandlerTable* _adapter_handler_table;
static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2480 
2481 // Find a entry with the same fingerprint if it exists
2482 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(int total_args_passed, BasicType* sig_bt) {
2483   NOT_PRODUCT(_lookups++);
2484   assert_lock_strong(AdapterHandlerLibrary_lock);
2485   AdapterFingerPrint* fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2486   AdapterHandlerEntry* entry = nullptr;
2487 #if INCLUDE_CDS
2488   // if we are building the archive then the archived adapter table is
2489   // not valid and we need to use the ones added to the runtime table
2490   if (AOTCodeCache::is_using_adapter()) {
2491     // Search archived table first. It is read-only table so can be searched without lock
2492     entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2493 #ifndef PRODUCT
2494     if (entry != nullptr) {
2495       _archived_hits++;
2496     }
2497 #endif
2498   }
2499 #endif // INCLUDE_CDS
2500   if (entry == nullptr) {
2501     assert_lock_strong(AdapterHandlerLibrary_lock);
2502     AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2503     if (entry_p != nullptr) {
2504       entry = *entry_p;
2505       assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",

2522   TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2523   ts.print(tty, "AdapterHandlerTable");
2524   tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2525                 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2526   int total_hits = _archived_hits + _runtime_hits;
2527   tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2528                 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2529 }
2530 #endif
2531 
2532 // ---------------------------------------------------------------------------
2533 // Implementation of AdapterHandlerLibrary
2534 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2535 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2536 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2537 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2538 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2539 #if INCLUDE_CDS
2540 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2541 #endif // INCLUDE_CDS
2542 static const int AdapterHandlerLibrary_size = 16*K;
2543 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2544 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2545 
// Returns the shared temporary BufferBlob used while generating adapter code;
// initialize() must have run first.
BufferBlob* AdapterHandlerLibrary::buffer_blob() {
  assert(_buffer != nullptr, "should be initialized");
  return _buffer;
}
2550 
2551 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2552   if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2553     AdapterBlob* adapter_blob = entry->adapter_blob();
2554     char blob_id[256];
2555     jio_snprintf(blob_id,
2556                  sizeof(blob_id),
2557                  "%s(%s)",
2558                  adapter_blob->name(),
2559                  entry->fingerprint()->as_string());
2560     if (Forte::is_enabled()) {
2561       Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2562     }

// Sets up the runtime adapter table and temporary code buffer, then either
// links adapters from the AOT cache or generates the simple (trivial
// signature) adapters up front.
void AdapterHandlerLibrary::initialize() {
  {
    ResourceMark rm;
    _adapter_handler_table = new (mtCode) AdapterHandlerTable();
    _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
  }

#if INCLUDE_CDS
  // Link adapters in AOT Cache to their code in AOT Code Cache
  if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
    link_aot_adapters();
    lookup_simple_adapters();
    return;
  }
#endif // INCLUDE_CDS

  ResourceMark rm;
  {
    MutexLocker mu(AdapterHandlerLibrary_lock);

    _no_arg_handler = create_adapter(0, nullptr);

    BasicType obj_args[] = { T_OBJECT };
    _obj_arg_handler = create_adapter(1, obj_args);

    BasicType int_args[] = { T_INT };
    _int_arg_handler = create_adapter(1, int_args);

    BasicType obj_int_args[] = { T_OBJECT, T_INT };
    _obj_int_arg_handler = create_adapter(2, obj_int_args);

    BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
    _obj_obj_arg_handler = create_adapter(2, obj_obj_args);

    // we should always get an entry back but we don't have any
    // associated blob on Zero
    assert(_no_arg_handler != nullptr &&
           _obj_arg_handler != nullptr &&
           _int_arg_handler != nullptr &&
           _obj_int_arg_handler != nullptr &&
           _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
  }

  // Outside of the lock
#ifndef ZERO
  // no blobs to register when we are on Zero
  post_adapter_creation(_no_arg_handler);
  post_adapter_creation(_obj_arg_handler);
  post_adapter_creation(_int_arg_handler);
  post_adapter_creation(_obj_int_arg_handler);
  post_adapter_creation(_obj_obj_arg_handler);
#endif // ZERO
}
2623 
// Allocates a fresh AdapterHandlerEntry with a unique, monotonically
// increasing id (the first id handed out is 1, so 0 never appears).
AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
  uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
  assert(id > 0, "we can never overflow because AOT cache cannot contain more than 2^32 methods");
  return AdapterHandlerEntry::allocate(id, fingerprint);
}
2629 
// Fast-path mapping of trivial method signatures onto the pre-built
// handlers; returns nullptr if the signature is not one of the simple
// shapes, in which case the caller falls back to the full lookup.
AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
  int total_args_passed = method->size_of_parameters(); // All args on stack
  if (total_args_passed == 0) {
    return _no_arg_handler;
  } else if (total_args_passed == 1) {
    if (!method->is_static()) {
      // The single argument is the receiver.
      return _obj_arg_handler;
    }
    // char_at(1) is the descriptor character of the first declared
    // parameter (index 0 is the opening '(' of the method descriptor).
    switch (method->signature()->char_at(1)) {
      case JVM_SIGNATURE_CLASS:
      case JVM_SIGNATURE_ARRAY:
        return _obj_arg_handler;
      case JVM_SIGNATURE_INT:
      case JVM_SIGNATURE_BOOLEAN:
      case JVM_SIGNATURE_CHAR:
      case JVM_SIGNATURE_BYTE:
      case JVM_SIGNATURE_SHORT:
        // Sub-int types are promoted to int by the calling convention.
        return _int_arg_handler;
    }
  } else if (total_args_passed == 2 &&
             !method->is_static()) {
    // Receiver plus one declared parameter.
    switch (method->signature()->char_at(1)) {
      case JVM_SIGNATURE_CLASS:
      case JVM_SIGNATURE_ARRAY:
        return _obj_obj_arg_handler;
      case JVM_SIGNATURE_INT:
      case JVM_SIGNATURE_BOOLEAN:
      case JVM_SIGNATURE_CHAR:
      case JVM_SIGNATURE_BYTE:
      case JVM_SIGNATURE_SHORT:
        return _obj_int_arg_handler;
    }
  }
  return nullptr;
}
2665 
// Walks a method signature and fills in a BasicType array with the
// calling-convention view of the arguments: the receiver (if any) first,
// then each parameter, with a T_VOID filler slot after each long/double
// since those occupy two Java slots.
class AdapterSignatureIterator : public SignatureIterator {
 private:
  BasicType stack_sig_bt[16]; // inline storage, used for short signatures
  BasicType* sig_bt;          // points at stack_sig_bt or a resource array
  int index;                  // next slot to fill; equals total slots when done

 public:
  AdapterSignatureIterator(Symbol* signature,
                           fingerprint_t fingerprint,
                           bool is_static,
                           int total_args_passed) :
    SignatureIterator(signature, fingerprint),
    index(0)
  {
    sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
    if (!is_static) { // Pass in receiver first
      sig_bt[index++] = T_OBJECT;
    }
    do_parameters_on(this);
  }

  // The filled-in array; valid for the lifetime of this iterator (or the
  // enclosing ResourceMark for long signatures).
  BasicType* basic_types() {
    return sig_bt;
  }

#ifdef ASSERT
  // Number of slots written; should equal the method's size_of_parameters().
  int slots() {
    return index;
  }
#endif

 private:

  friend class SignatureIterator;  // so do_parameters_on can call do_type
  void do_type(BasicType type) {
    sig_bt[index++] = type;
    if (type == T_LONG || type == T_DOUBLE) {
      sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots
    }
  }
};
2707 



































































































































































2708 
// Printable names for the adapter entry points, indexed to match the
// AdapterBlob entry order (I2C, C2I, C2I_Unverified, C2I_No_Clinit_Check;
// see address_to_offset()).
const char* AdapterHandlerEntry::_entry_names[] = {
  "i2c", "c2i", "c2i_unverified", "c2i_no_clinit_check"
};
2712 
#ifdef ASSERT
// Verifies that a cached adapter entry's saved code is identical to the code
// a freshly generated (transient) adapter produces for the same signature,
// i.e. that sharing the cached entry is sound.
void AdapterHandlerLibrary::verify_adapter_sharing(int total_args_passed, BasicType* sig_bt, AdapterHandlerEntry* cached_entry) {
  // we can only check for the same code if there is any
#ifndef ZERO
  // A transient adapter saves its code (for compare_code) but gets no blob.
  AdapterHandlerEntry* comparison_entry = create_adapter(total_args_passed, sig_bt, true);
  assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
  assert(comparison_entry->compare_code(cached_entry), "code must match");
  // Release the one just created
  AdapterHandlerEntry::deallocate(comparison_entry);
#endif // ZERO
}
#endif // ASSERT
2725 
// Returns the adapter entry for 'method', creating (and registering) one if
// no entry with the same signature fingerprint exists yet.
AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
  assert(!method->is_abstract(), "abstract methods do not have adapters");
  // Use customized signature handler.  Need to lock around updates to
  // the _adapter_handler_table (it is not safe for concurrent readers
  // and a single writer: this could be fixed if it becomes a
  // problem).

  // Fast-path for trivial adapters
  AdapterHandlerEntry* entry = get_simple_adapter(method);
  if (entry != nullptr) {
    return entry;
  }

  ResourceMark rm;
  bool new_entry = false;

  // Fill in the signature array, for the calling-convention call.
  int total_args_passed = method->size_of_parameters(); // All args on stack

  AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
                              method->is_static(), total_args_passed);
  assert(si.slots() == total_args_passed, "");
  BasicType* sig_bt = si.basic_types();
  {
    MutexLocker mu(AdapterHandlerLibrary_lock);

    // Lookup method signature's fingerprint
    entry = lookup(total_args_passed, sig_bt);

    if (entry != nullptr) {
#ifndef ZERO
      assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
#endif
#ifdef ASSERT
      if (!entry->in_aot_cache() && VerifyAdapterSharing) {
        verify_adapter_sharing(total_args_passed, sig_bt, entry);
      }
#endif
    } else {
      entry = create_adapter(total_args_passed, sig_bt);
      if (entry != nullptr) {
        new_entry = true;
      }
    }
  }

  // Outside of the lock
  // Observer notification happens after the lock is released (same pattern
  // as initialize()).
  if (new_entry) {
    post_adapter_creation(entry);
  }
  return entry;
}
2778 
2779 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
2780   ResourceMark rm;
2781   const char* name = AdapterHandlerLibrary::name(handler);
2782   const uint32_t id = AdapterHandlerLibrary::id(handler);
2783 
2784   CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
2785   if (blob != nullptr) {

2800   }
2801   insts_size = adapter_blob->code_size();
2802   st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
2803                 handler->fingerprint()->as_basic_args_string(),
2804                 handler->fingerprint()->as_string(), insts_size);
2805   st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
2806   if (Verbose || PrintStubCode) {
2807     address first_pc = adapter_blob->content_begin();
2808     if (first_pc != nullptr) {
2809       Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
2810       st->cr();
2811     }
2812   }
2813 }
2814 #endif // PRODUCT
2815 
2816 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
2817                                               int entry_offset[AdapterBlob::ENTRY_COUNT]) {
2818   entry_offset[AdapterBlob::I2C] = 0;
2819   entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];


2820   entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];

2821   if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
2822     entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
2823   } else {
2824     entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
2825   }
2826 }
2827 
// Generates the i2c/c2i adapter code for 'handler' and installs it in a new
// AdapterBlob (no code is generated on Zero). Returns false if the CodeCache
// is full. Transient entries (used by VerifyAdapterSharing) only save their
// code for comparison and get no blob.
bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
                                                  int total_args_passed,
                                                  BasicType* sig_bt,
                                                  bool is_transient) {
  if (log_is_enabled(Info, perf, class, link)) {
    ClassLoader::perf_method_adapters_count()->inc();
  }

#ifndef ZERO
  BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
  CodeBuffer buffer(buf);
  short buffer_locs[20];
  buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
                                         sizeof(buffer_locs)/sizeof(relocInfo));
  MacroAssembler masm(&buffer);
  VMRegPair stack_regs[16];
  // Small signatures use the stack array; larger ones go to the resource area.
  VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);

  // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
  int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
  address entry_address[AdapterBlob::ENTRY_COUNT];
  SharedRuntime::generate_i2c2i_adapters(&masm,
                                         total_args_passed,
                                         comp_args_on_stack,
                                         sig_bt,
                                         regs,
                                         entry_address);
  // On zero there is no code to save and no need to create a blob and
  // or relocate the handler.
  int entry_offset[AdapterBlob::ENTRY_COUNT];
  address_to_offset(entry_address, entry_offset);
#ifdef ASSERT
  if (VerifyAdapterSharing) {
    // Keep a copy of the pre-relocation code for compare_code().
    handler->save_code(buf->code_begin(), buffer.insts_size());
    if (is_transient) {
      return true;
    }
  }
#endif
  AdapterBlob* adapter_blob = AdapterBlob::create(&buffer, entry_offset);
  if (adapter_blob == nullptr) {
    // CodeCache is full, disable compilation
    // Ought to log this but compile log is only per compile thread
    // and we're some non descript Java thread.
    return false;
  }
  handler->set_adapter_blob(adapter_blob);
  if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
    // try to save generated code
    const char* name = AdapterHandlerLibrary::name(handler);
    const uint32_t id = AdapterHandlerLibrary::id(handler);
    bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
    assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
  }
#endif // ZERO

#ifndef PRODUCT
  // debugging support
  if (PrintAdapterHandlers || PrintStubCode) {
    print_adapter_handler_info(tty, handler);
  }
#endif

  return true;
}
2893 
// Creates a new AdapterHandlerEntry (fingerprint, id, and generated code)
// for the given signature. Returns nullptr if code generation fails
// (CodeCache full). Non-transient entries are registered in the runtime
// table, which requires the caller to hold AdapterHandlerLibrary_lock.
AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(int total_args_passed,
                                                           BasicType* sig_bt,
                                                           bool is_transient) {
  AdapterFingerPrint* fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
  AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
  if (!generate_adapter_code(handler, total_args_passed, sig_bt, is_transient)) {
    // Code generation failed; release the entry (which owns fp).
    AdapterHandlerEntry::deallocate(handler);
    return nullptr;
  }
  if (!is_transient) {
    assert_lock_strong(AdapterHandlerLibrary_lock);
    _adapter_handler_table->put(fp, handler);
  }
  return handler;
}
2909 
#if INCLUDE_CDS
// Strips process-specific state before this entry is written to the AOT
// cache; link() re-establishes the blob and linked state at runtime.
void AdapterHandlerEntry::remove_unshareable_info() {
#ifdef ASSERT
   _saved_code = nullptr;
   _saved_code_length = 0;
#endif // ASSERT
   _adapter_blob = nullptr;
   _linked = false;
}
2919 
2920 class CopyAdapterTableToArchive : StackObj {
2921 private:
2922   CompactHashtableWriter* _writer;
2923   ArchiveBuilder* _builder;
2924 public:
2925   CopyAdapterTableToArchive(CompactHashtableWriter* writer) : _writer(writer),
2926                                                              _builder(ArchiveBuilder::current())
2927   {}
2928 
2929   bool do_entry(AdapterFingerPrint* fp, AdapterHandlerEntry* entry) {
2930     LogStreamHandle(Trace, aot) lsh;
2931     if (ArchiveBuilder::current()->has_been_archived((address)entry)) {
2932       assert(ArchiveBuilder::current()->has_been_archived((address)fp), "must be");
2933       AdapterFingerPrint* buffered_fp = ArchiveBuilder::current()->get_buffered_addr(fp);
2934       assert(buffered_fp != nullptr,"sanity check");
2935       AdapterHandlerEntry* buffered_entry = ArchiveBuilder::current()->get_buffered_addr(entry);
2936       assert(buffered_entry != nullptr,"sanity check");
2937 

2977   }
2978 #endif
2979 }
2980 
// This method is used during production run to link archived adapters (stored in AOT Cache)
// to their code in AOT Code Cache
void AdapterHandlerEntry::link() {
  ResourceMark rm;
  assert(_fingerprint != nullptr, "_fingerprint must not be null");
  bool generate_code = false;
  // Generate code only if AOTCodeCache is not available, or
  // caching adapters is disabled, or we fail to link
  // the AdapterHandlerEntry to its code in the AOTCodeCache
  if (AOTCodeCache::is_using_adapter()) {
    AdapterHandlerLibrary::link_aot_adapter_handler(this);
    // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
    if (_adapter_blob == nullptr) {
      log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
      generate_code = true;
    }
  } else {
    generate_code = true;
  }
  if (generate_code) {
    int nargs;
    // Reconstruct the signature from the fingerprint and generate fresh code.
    BasicType* bt = _fingerprint->as_basic_type(nargs);
    if (!AdapterHandlerLibrary::generate_adapter_code(this, nargs, bt, /* is_transient */ false)) {
      // Don't throw exceptions during VM initialization because java.lang.* classes
      // might not have been initialized, causing problems when constructing the
      // Java exception object.
      vm_exit_during_initialization("Out of space in CodeCache for adapters");
    }
  }
  if (_adapter_blob != nullptr) {
    post_adapter_creation(this);
  }
  assert(_linked, "AdapterHandlerEntry must now be linked");
}
3015 
// Links every adapter in the archived table to its AOT-cached code (or
// regenerates the code when linking fails), then seeds _id_counter past the
// largest archived id.
void AdapterHandlerLibrary::link_aot_adapters() {
  uint max_id = 0;
  assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
  /* It is possible that some adapters generated in assembly phase are not stored in the cache.
   * That implies adapter ids of the adapters in the cache may not be contiguous.
   * If the size of the _aot_adapter_handler_table is used to initialize _id_counter, then it may
   * result in collision of adapter ids between AOT stored handlers and runtime generated handlers.
   * To avoid such situation, initialize the _id_counter with the largest adapter id among the AOT stored handlers.
   */
  _aot_adapter_handler_table.iterate_all([&](AdapterHandlerEntry* entry) {
    assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
    entry->link();
    max_id = MAX2(max_id, entry->id());
  });
  // Set adapter id to the maximum id found in the AOTCache
  assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
  _id_counter = max_id;
}
3034 
// This method is called during production run to lookup simple adapters
// in the archived adapter handler table
void AdapterHandlerLibrary::lookup_simple_adapters() {
  assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");

  MutexLocker mu(AdapterHandlerLibrary_lock);
  _no_arg_handler = lookup(0, nullptr);

  BasicType obj_args[] = { T_OBJECT };
  _obj_arg_handler = lookup(1, obj_args);

  BasicType int_args[] = { T_INT };
  _int_arg_handler = lookup(1, int_args);

  BasicType obj_int_args[] = { T_OBJECT, T_INT };
  _obj_int_arg_handler = lookup(2, obj_int_args);

  BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
  _obj_obj_arg_handler = lookup(2, obj_obj_args);

  // The simple adapters must all be present and already linked by
  // link_aot_adapters(), which runs before this method (see initialize()).
  assert(_no_arg_handler != nullptr &&
         _obj_arg_handler != nullptr &&
         _int_arg_handler != nullptr &&
         _obj_int_arg_handler != nullptr &&
         _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
  assert(_no_arg_handler->is_linked() &&
         _obj_arg_handler->is_linked() &&
         _int_arg_handler->is_linked() &&
         _obj_int_arg_handler->is_linked() &&
         _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
}
3066 #endif // INCLUDE_CDS
3067 
3068 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3069   LogStreamHandle(Trace, aot) lsh;
3070   if (lsh.is_enabled()) {
3071     lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3072     lsh.cr();
3073   }
3074   it->push(&_fingerprint);
3075 }
3076 
3077 AdapterHandlerEntry::~AdapterHandlerEntry() {
3078   if (_fingerprint != nullptr) {
3079     AdapterFingerPrint::deallocate(_fingerprint);
3080     _fingerprint = nullptr;
3081   }






3082 #ifdef ASSERT
3083   FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3084 #endif
3085   FreeHeap(this);
3086 }
3087 
3088 
3089 #ifdef ASSERT
3090 // Capture the code before relocation so that it can be compared
3091 // against other versions.  If the code is captured after relocation
3092 // then relative instructions won't be equivalent.
3093 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3094   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3095   _saved_code_length = length;
3096   memcpy(_saved_code, buffer, length);
3097 }
3098 
3099 
3100 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3101   assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");

3149 
3150       struct { double data[20]; } locs_buf;
3151       struct { double data[20]; } stubs_locs_buf;
3152       buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3153 #if defined(AARCH64) || defined(PPC64)
3154       // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3155       // in the constant pool to ensure ordering between the barrier and oops
3156       // accesses. For native_wrappers we need a constant.
3157       // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3158       // static java call that is resolved in the runtime.
3159       if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3160         buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3161       }
3162 #endif
3163       buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3164       MacroAssembler _masm(&buffer);
3165 
3166       // Fill in the signature array, for the calling-convention call.
3167       const int total_args_passed = method->size_of_parameters();
3168 

3169       VMRegPair stack_regs[16];

3170       VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3171 
3172       AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
3173                               method->is_static(), total_args_passed);
3174       BasicType* sig_bt = si.basic_types();
3175       assert(si.slots() == total_args_passed, "");
3176       BasicType ret_type = si.return_type();








3177 
3178       // Now get the compiled-Java arguments layout.
3179       SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3180 
3181       // Generate the compiled-to-native wrapper code
3182       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3183 
3184       if (nm != nullptr) {
3185         {
3186           MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3187           if (nm->make_in_use()) {
3188             method->set_code(method, nm);
3189           }
3190         }
3191 
3192         DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3193         if (directive->PrintAssemblyOption) {
3194           nm->print_code();
3195         }
3196         DirectivesStack::release(directive);

3404       if (b == handler->adapter_blob()) {
3405         found = true;
3406         st->print("Adapter for signature: ");
3407         handler->print_adapter_on(st);
3408         return false; // abort iteration
3409       } else {
3410         return true; // keep looking
3411       }
3412     };
3413     assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3414     _adapter_handler_table->iterate(findblob_runtime_table);
3415   }
3416   assert(found, "Should have found handler");
3417 }
3418 
3419 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3420   st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3421   if (adapter_blob() != nullptr) {
3422     st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3423     st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3424     st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));



3425     if (get_c2i_no_clinit_check_entry() != nullptr) {
3426       st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3427     }
3428   }
3429   st->cr();
3430 }
3431 
3432 #ifndef PRODUCT
3433 
// Non-product diagnostics: dump statistics about the adapter handler tables.
void AdapterHandlerLibrary::print_statistics() {
  // All interesting data lives in the handler tables; delegate to their printer.
  print_table_statistics();
}
3437 
3438 #endif /* PRODUCT */
3439 
3440 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3441   assert(current == JavaThread::current(), "pre-condition");
3442   StackOverflow* overflow_state = current->stack_overflow_state();
3443   overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3444   overflow_state->set_reserved_stack_activation(current->stack_base());

3491         event.set_method(method);
3492         event.commit();
3493       }
3494     }
3495   }
3496   return activation;
3497 }
3498 
3499 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3500   // After any safepoint, just before going back to compiled code,
3501   // we inform the GC that we will be doing initializing writes to
3502   // this object in the future without emitting card-marks, so
3503   // GC may take any compensating steps.
3504 
3505   oop new_obj = current->vm_result_oop();
3506   if (new_obj == nullptr) return;
3507 
3508   BarrierSet *bs = BarrierSet::barrier_set();
3509   bs->on_slowpath_allocation_exit(current, new_obj);
3510 }











































































































































































































































































  30 #include "classfile/javaClasses.inline.hpp"
  31 #include "classfile/stringTable.hpp"
  32 #include "classfile/vmClasses.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/aotCodeCache.hpp"
  35 #include "code/codeCache.hpp"
  36 #include "code/compiledIC.hpp"
  37 #include "code/nmethod.inline.hpp"
  38 #include "code/scopeDesc.hpp"
  39 #include "code/vtableStubs.hpp"
  40 #include "compiler/abstractCompiler.hpp"
  41 #include "compiler/compileBroker.hpp"
  42 #include "compiler/disassembler.hpp"
  43 #include "gc/shared/barrierSet.hpp"
  44 #include "gc/shared/collectedHeap.hpp"
  45 #include "interpreter/interpreter.hpp"
  46 #include "interpreter/interpreterRuntime.hpp"
  47 #include "jfr/jfrEvents.hpp"
  48 #include "jvm.h"
  49 #include "logging/log.hpp"
  50 #include "memory/oopFactory.hpp"
  51 #include "memory/resourceArea.hpp"
  52 #include "memory/universe.hpp"
  53 #include "metaprogramming/primitiveConversions.hpp"
  54 #include "oops/access.hpp"
  55 #include "oops/fieldStreams.inline.hpp"
  56 #include "oops/inlineKlass.inline.hpp"
  57 #include "oops/klass.hpp"
  58 #include "oops/method.inline.hpp"
  59 #include "oops/objArrayKlass.hpp"
  60 #include "oops/objArrayOop.inline.hpp"
  61 #include "oops/oop.inline.hpp"
  62 #include "prims/forte.hpp"
  63 #include "prims/jvmtiExport.hpp"
  64 #include "prims/jvmtiThreadState.hpp"
  65 #include "prims/methodHandles.hpp"
  66 #include "prims/nativeLookup.hpp"
  67 #include "runtime/arguments.hpp"
  68 #include "runtime/atomicAccess.hpp"
  69 #include "runtime/basicLock.inline.hpp"
  70 #include "runtime/frame.inline.hpp"
  71 #include "runtime/handles.inline.hpp"
  72 #include "runtime/init.hpp"
  73 #include "runtime/interfaceSupport.inline.hpp"
  74 #include "runtime/java.hpp"
  75 #include "runtime/javaCalls.hpp"
  76 #include "runtime/jniHandles.inline.hpp"
  77 #include "runtime/osThread.hpp"
  78 #include "runtime/perfData.hpp"
  79 #include "runtime/sharedRuntime.hpp"
  80 #include "runtime/signature.hpp"
  81 #include "runtime/stackWatermarkSet.hpp"
  82 #include "runtime/stubRoutines.hpp"
  83 #include "runtime/synchronizer.hpp"
  84 #include "runtime/timerTrace.hpp"
  85 #include "runtime/vframe.inline.hpp"
  86 #include "runtime/vframeArray.hpp"
  87 #include "runtime/vm_version.hpp"
  88 #include "utilities/copy.hpp"
  89 #include "utilities/dtrace.hpp"
  90 #include "utilities/events.hpp"
  91 #include "utilities/exceptions.hpp"
  92 #include "utilities/globalDefinitions.hpp"
  93 #include "utilities/hashTable.hpp"
  94 #include "utilities/macros.hpp"
  95 #include "utilities/xmlstream.hpp"
  96 #ifdef COMPILER1
  97 #include "c1/c1_Runtime1.hpp"
  98 #endif
  99 #ifdef COMPILER2
 100 #include "opto/runtime.hpp"

// for a call currently in progress, i.e., arguments have been pushed on the stack
// but the callee has not been invoked yet.  Caller frame must be compiled.
Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
                                              CallInfo& callinfo, TRAPS) {
  // Out-parameters: 'bc' receives the (possibly adjusted) invoke bytecode and
  // 'callinfo' the resolved call. The receiver Handle is returned; it stays
  // null for calls that carry no receiver.
  Handle receiver;
  Handle nullHandle;  // create a handy null handle for exception returns
  JavaThread* current = THREAD;

  assert(!vfst.at_end(), "Java frame must exist");

  // Find caller and bci from vframe
  methodHandle caller(current, vfst.method());
  int          bci   = vfst.bci();

  // Continuation.enterSpecial is resolved through a dedicated path and
  // needs no receiver lookup here.
  if (caller->is_continuation_enter_intrinsic()) {
    bc = Bytecodes::_invokestatic;
    LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
    return receiver;
  }

  // The substitutability test (acmp at this bci) piggybacks on static call
  // resolution; the attached method is ValueObjectMethods::isSubstitutable
  // (verified by the ASSERT block below).
  Bytecodes::Code code = caller->java_code_at(bci);
  if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
    bc = Bytecodes::_invokestatic;
    methodHandle attached_method(THREAD, extract_attached_method(vfst));
    assert(attached_method.not_null(), "must have attached method");
    vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
    LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
#ifdef ASSERT
    Symbol* subst_method_name = vmSymbols::isSubstitutable_name();
    Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(subst_method_name, vmSymbols::object_object_boolean_signature());
    assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
#endif
    return receiver;
  }

  Bytecode_invoke bytecode(caller, bci);
  int bytecode_index = bytecode.index();
  bc = bytecode.invoke_code();

  methodHandle attached_method(current, extract_attached_method(vfst));
  if (attached_method.not_null()) {
    Method* callee = bytecode.static_target(CHECK_NH);
    vmIntrinsics::ID id = callee->intrinsic_id();
    // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual
    // call, it attaches the statically resolved method to the call site.
    if (MethodHandles::is_signature_polymorphic(id) &&
        MethodHandles::is_signature_polymorphic_intrinsic(id)) {
      bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);

      // Adjust invocation mode according to the attached method.
      switch (bc) {
        case Bytecodes::_invokevirtual:
          if (attached_method->method_holder()->is_interface()) {
            bc = Bytecodes::_invokeinterface;
          }
          break;
        case Bytecodes::_invokeinterface:
          if (!attached_method->method_holder()->is_interface()) {
            bc = Bytecodes::_invokevirtual;
          }
          break;
        case Bytecodes::_invokehandle:
          if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
            bc = attached_method->is_static() ? Bytecodes::_invokestatic
                                              : Bytecodes::_invokevirtual;
          }
          break;
        default:
          break;
      }
    } else {
      // A non-signature-polymorphic attached method is only valid here for
      // scalarized-argument calls.
      assert(attached_method->has_scalarized_args(), "invalid use of attached method");
      if (!attached_method->method_holder()->is_inline_klass()) {
        // Ignore the attached method in this case to not confuse the code below
        attached_method = methodHandle(current, nullptr);
      }
    }
  }

  assert(bc != Bytecodes::_illegal, "not initialized");

  bool has_receiver = bc != Bytecodes::_invokestatic &&
                      bc != Bytecodes::_invokedynamic &&
                      bc != Bytecodes::_invokehandle;
  bool check_null_and_abstract = true;

  // Find receiver for non-static call
  if (has_receiver) {
    // This register map must be updated since we need to find the receiver for
    // compiled frames. The receiver might be in a register.
    RegisterMap reg_map2(current,
                         RegisterMap::UpdateMap::include,
                         RegisterMap::ProcessFrames::include,
                         RegisterMap::WalkContinuation::skip);
    frame stubFrame   = current->last_frame();
    // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);

    Method* callee = attached_method();
    if (callee == nullptr) {
      callee = bytecode.static_target(CHECK_NH);
      if (callee == nullptr) {
        THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
      }
    }
    bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->as_nmethod()->is_compiled_by_c1();
    if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
      // If the receiver is an inline type that is passed as fields, no oop is available.
      // Resolve the call without receiver null checking.
      assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
      assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
      if (bc == Bytecodes::_invokeinterface) {
        bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
      }
      check_null_and_abstract = false;
    } else {
      // Retrieve the receiver from the compiled argument list.
      receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
      assert(oopDesc::is_oop_or_null(receiver()), "");
      if (receiver.is_null()) {
        THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
      }
    }
  }

  // Resolve method
  if (attached_method.not_null()) {
    // Parameterized by attached method.
    LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
  } else {
    // Parameterized by bytecode.
    constantPoolHandle constants(current, caller->constants());
    LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
  }

#ifdef ASSERT
  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
  if (has_receiver && check_null_and_abstract) {
    assert(receiver.not_null(), "should have thrown exception");
    Klass* receiver_klass = receiver->klass();
    Klass* rk = nullptr;
    if (attached_method.not_null()) {
      // In case there's a resolved method attached, use its holder during the check.
      rk = attached_method->method_holder();
    } else {
      // Klass is already loaded.
      constantPoolHandle constants(current, caller->constants());
      rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
    }
    Klass* static_receiver_klass = rk;
    assert(receiver_klass->is_subtype_of(static_receiver_klass),
           "actual receiver must be subclass of static receiver klass");
    if (receiver_klass->is_instance_klass()) {
      if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
        tty->print_cr("ERROR: Klass not yet initialized!!");
        receiver_klass->print();
      }
      assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
    }
  }
#endif

  return receiver;
}
1384 
1385 methodHandle SharedRuntime::find_callee_method(bool& caller_does_not_scalarize, TRAPS) {
1386   JavaThread* current = THREAD;
1387   ResourceMark rm(current);
1388   // We need first to check if any Java activations (compiled, interpreted)
1389   // exist on the stack since last JavaCall.  If not, we need
1390   // to get the target method from the JavaCall wrapper.
1391   vframeStream vfst(current, true);  // Do not skip any javaCalls
1392   methodHandle callee_method;
1393   if (vfst.at_end()) {
1394     // No Java frames were found on stack since we did the JavaCall.
1395     // Hence the stack can only contain an entry_frame.  We need to
1396     // find the target method from the stub frame.
1397     RegisterMap reg_map(current,
1398                         RegisterMap::UpdateMap::skip,
1399                         RegisterMap::ProcessFrames::include,
1400                         RegisterMap::WalkContinuation::skip);
1401     frame fr = current->last_frame();
1402     assert(fr.is_runtime_frame(), "must be a runtimeStub");
1403     fr = fr.sender(&reg_map);
1404     assert(fr.is_entry_frame(), "must be");
1405     // fr is now pointing to the entry frame.
1406     callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1407   } else {
1408     Bytecodes::Code bc;
1409     CallInfo callinfo;
1410     find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1411     // Calls via mismatching methods are always non-scalarized
1412     if (callinfo.resolved_method()->mismatch()) {
1413       caller_does_not_scalarize = true;
1414     }
1415     callee_method = methodHandle(current, callinfo.selected_method());
1416   }
1417   assert(callee_method()->is_method(), "must be");
1418   return callee_method;
1419 }
1420 
// Resolves a call. Sets 'caller_does_not_scalarize' when the caller nmethod
// is C1-compiled or the call goes through a mismatching method (such calls
// are always non-scalarized). Patches the call site (inline cache or direct
// call) and returns the selected callee method.
methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_does_not_scalarize, TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);
  RegisterMap cbl_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  // The sender of the current frame is the compiled caller whose call site
  // we are about to patch (guaranteed to be an nmethod just below).
  frame caller_frame = current->last_frame().sender(&cbl_map);

  CodeBlob* caller_cb = caller_frame.cb();
  guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
  nmethod* caller_nm = caller_cb->as_nmethod();

  // determine call info & receiver
  // note: a) receiver is null for static calls
  //       b) an exception is thrown if receiver is null for non-static calls
  CallInfo call_info;
  Bytecodes::Code invoke_code = Bytecodes::_illegal;
  Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));

  // Disallow safepoints from here on (checked in debug builds) so the caller
  // nmethod and its call site stay stable while we patch below.
  NoSafepointVerifier nsv;

  methodHandle callee_method(current, call_info.selected_method());
  // Calls via mismatching methods are always non-scalarized
  bool mismatch = is_optimized ? call_info.selected_method()->mismatch() : call_info.resolved_method()->mismatch();
  if (caller_nm->is_compiled_by_c1() || mismatch) {
    caller_does_not_scalarize = true;
  }

  assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
         (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
         (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
         (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
         ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");

  assert(!caller_nm->is_unloading(), "It should not be unloading");

#ifndef PRODUCT
  // tracing/debugging/statistics
  uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
                 (is_virtual) ? (&_resolve_virtual_ctr) :
                                (&_resolve_static_ctr);
  AtomicAccess::inc(addr);

  if (TraceCallFixup) {
    ResourceMark rm(current);
    tty->print("resolving %s%s (%s) %s call to",
               (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
               Bytecodes::name(invoke_code), (caller_does_not_scalarize) ? "non-scalar" : "");
    callee_method->print_short_name(tty);
    tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
                  p2i(caller_frame.pc()), p2i(callee_method->code()));
  }
#endif

  if (invoke_code == Bytecodes::_invokestatic) {
    assert(callee_method->method_holder()->is_initialized() ||
           callee_method->method_holder()->is_reentrant_initialization(current),
           "invalid class initialization state for invoke_static");
    if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
      // In order to keep the class initialization check, do not patch the call
      // site for a static call when the class is not fully initialized.
      // The proper check is enforced by call site re-resolution on every invocation.
      //
      // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
      // an explicit class initialization check is put in the nmethod entry (VEP).
      assert(callee_method->method_holder()->is_linked(), "must be");
      return callee_method;
    }
  }


  // JSR 292 key invariant:
  // If the resolved method is a MethodHandle invoke target, the call
  // site must be a MethodHandle call site, because the lambda form might tail-call
  // leaving the stack in a state unknown to either caller or callee

  // Compute entry points. The computation of the entry points is independent of
  // patching the call.

  // Make sure the callee nmethod does not get deoptimized and removed before
  // we are done patching the code.


  CompiledICLocker ml(caller_nm);
  if (is_virtual && !is_optimized) {
    // Non-optimized virtual call site: update the inline cache.
    CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
    inline_cache->update(&call_info, receiver->klass(), caller_does_not_scalarize);
  } else {
    // Callsite is a direct call - set it to the destination method
    CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
    callsite->set(callee_method, caller_does_not_scalarize);
  }

  return callee_method;
}
1518 
// Inline caches exist only in compiled code
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
#ifdef ASSERT
  // Sanity check: an IC miss must come from a compiled caller through the
  // runtime stub — never from interpreted, entry, or upcall-stub frames.
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = current->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
#endif /* ASSERT */

  methodHandle callee_method;
  bool caller_does_not_scalarize = false;
  JRT_BLOCK
    callee_method = SharedRuntime::handle_ic_miss_helper(caller_does_not_scalarize, CHECK_NULL);
    // Return Method* through TLS
    current->set_vm_result_metadata(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  return get_resolved_entry(current, callee_method, false, false, caller_does_not_scalarize);
JRT_END
1542 
1543 
1544 // Handle call site that has been made non-entrant
1545 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1546   // 6243940 We might end up in here if the callee is deoptimized
1547   // as we race to call it.  We don't want to take a safepoint if
1548   // the caller was interpreted because the caller frame will look
1549   // interpreted to the stack walkers and arguments are now
1550   // "compiled" so it is much better to make this transition
1551   // invisible to the stack walking code. The i2c path will
1552   // place the callee method in the callee_target. It is stashed
1553   // there because if we try and find the callee by normal means a
1554   // safepoint is possible and have trouble gc'ing the compiled args.
1555   RegisterMap reg_map(current,
1556                       RegisterMap::UpdateMap::skip,
1557                       RegisterMap::ProcessFrames::include,
1558                       RegisterMap::WalkContinuation::skip);
1559   frame stub_frame = current->last_frame();
1560   assert(stub_frame.is_runtime_frame(), "sanity check");
1561   frame caller_frame = stub_frame.sender(&reg_map);
1562 
1563   if (caller_frame.is_interpreted_frame() ||
1564       caller_frame.is_entry_frame() ||
1565       caller_frame.is_upcall_stub_frame()) {
1566     Method* callee = current->callee_target();
1567     guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1568     current->set_vm_result_metadata(callee);
1569     current->set_callee_target(nullptr);
1570     if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1571       // Bypass class initialization checks in c2i when caller is in native.
1572       // JNI calls to static methods don't have class initialization checks.
1573       // Fast class initialization checks are present in c2i adapters and call into
1574       // SharedRuntime::handle_wrong_method() on the slow path.
1575       //
1576       // JVM upcalls may land here as well, but there's a proper check present in
1577       // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1578       // so bypassing it in c2i adapter is benign.
1579       return callee->get_c2i_no_clinit_check_entry();
1580     } else {
1581       if (caller_frame.is_interpreted_frame()) {
1582         return callee->get_c2i_inline_entry();
1583       } else {
1584         return callee->get_c2i_entry();
1585       }
1586     }
1587   }
1588 
1589   // Must be compiled to compiled path which is safe to stackwalk
1590   methodHandle callee_method;
1591   bool is_static_call = false;
1592   bool is_optimized = false;
1593   bool caller_does_not_scalarize = false;
1594   JRT_BLOCK
1595     // Force resolving of caller (if we called from compiled frame)
1596     callee_method = SharedRuntime::reresolve_call_site(is_optimized, caller_does_not_scalarize, CHECK_NULL);
1597     current->set_vm_result_metadata(callee_method());
1598   JRT_BLOCK_END
1599   // return compiled code entry point after potential safepoints
1600   return get_resolved_entry(current, callee_method, callee_method->is_static(), is_optimized, caller_does_not_scalarize);
1601 JRT_END
1602 
// Handle abstract method call
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
  // Verbose error message for AbstractMethodError.
  // Get the called method from the invoke bytecode.
  vframeStream vfst(current, true);
  assert(!vfst.at_end(), "Java frame must exist");
  methodHandle caller(current, vfst.method());
  Bytecode_invoke invoke(caller, vfst.bci());
  DEBUG_ONLY( invoke.verify(); )

  // Find the compiled caller frame.
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stubFrame = current->last_frame();
  assert(stubFrame.is_runtime_frame(), "must be");
  frame callerFrame = stubFrame.sender(&reg_map);
  assert(callerFrame.is_compiled_frame(), "must be");

  // Install exception and return forward entry.
  // Default: throw a plain AbstractMethodError if we cannot resolve the callee.
  address res = SharedRuntime::throw_AbstractMethodError_entry();
  JRT_BLOCK
    methodHandle callee(current, invoke.static_target(current));
    if (!callee.is_null()) {
      // We know the callee: build the detailed error (with receiver klass)
      // and return through the exception-forwarding stub instead.
      oop recv = callerFrame.retrieve_receiver(&reg_map);
      Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
      res = StubRoutines::forward_exception_entry();
      LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
    }
  JRT_BLOCK_END
  return res;
JRT_END
1636 
1637 // return verified_code_entry if interp_only_mode is not set for the current thread;
1638 // otherwise return c2i entry.
1639 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method,
1640                                           bool is_static_call, bool is_optimized, bool caller_does_not_scalarize) {
1641   bool is_interp_only_mode = (StressCallingConvention && (os::random() % (1 << 10)) == 0) || current->is_interp_only_mode();
1642   // In interp_only_mode we need to go to the interpreted entry
1643   // The c2i won't patch in this mode -- see fixup_callers_callsite
1644   bool go_to_interpreter = is_interp_only_mode && !callee_method->is_special_native_intrinsic();
1645 
1646   if (caller_does_not_scalarize) {
1647     if (go_to_interpreter) {
1648       return callee_method->get_c2i_inline_entry();
1649     }
1650     assert(callee_method->verified_inline_code_entry() != nullptr, "Jump to zero!");
1651     return callee_method->verified_inline_code_entry();
1652   } else if (is_static_call || is_optimized) {
1653     if (go_to_interpreter) {
1654       return callee_method->get_c2i_entry();
1655     }
1656     assert(callee_method->verified_code_entry() != nullptr, "Jump to zero!");
1657     return callee_method->verified_code_entry();
1658   } else {
1659     if (go_to_interpreter) {
1660       return callee_method->get_c2i_inline_ro_entry();
1661     }
1662     assert(callee_method->verified_inline_ro_code_entry() != nullptr, "Jump to zero!");
1663     return callee_method->verified_inline_ro_code_entry();
1664   }


1665 }
1666 
1667 // resolve a static call and patch code
1668 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1669   methodHandle callee_method;
1670   bool caller_does_not_scalarize = false;
1671   bool enter_special = false;
1672   JRT_BLOCK
1673     callee_method = SharedRuntime::resolve_helper(false, false, caller_does_not_scalarize, CHECK_NULL);
1674     current->set_vm_result_metadata(callee_method());
1675   JRT_BLOCK_END
1676   // return compiled code entry point after potential safepoints
1677   return get_resolved_entry(current, callee_method, true, false, caller_does_not_scalarize);
1678 JRT_END
1679 
// resolve virtual call and update inline cache to monomorphic
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
  methodHandle callee_method;
  // Set by resolve_helper when the caller cannot pass inline type args scalarized.
  bool caller_does_not_scalarize = false;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(true, false, caller_does_not_scalarize, CHECK_NULL);
    // Record the callee so it stays reachable across the safepoint below.
    current->set_vm_result_metadata(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  return get_resolved_entry(current, callee_method, false, false, caller_does_not_scalarize);
JRT_END
1691 
1692 
// Resolve a virtual call that can be statically bound (e.g., always
// monomorphic, so it has no inline cache).  Patch code to resolved target.
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
  methodHandle callee_method;
  // Set by resolve_helper when the caller cannot pass inline type args scalarized.
  bool caller_does_not_scalarize = false;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(true, true, caller_does_not_scalarize, CHECK_NULL);
    // Record the callee so it stays reachable across the safepoint below.
    current->set_vm_result_metadata(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  return get_resolved_entry(current, callee_method, false, true, caller_does_not_scalarize);
JRT_END
1705 
// Handles an inline cache miss: re-resolves the call, updates the inline
// cache (possibly to megamorphic) and returns the selected callee.
// 'caller_does_not_scalarize' is an out parameter: set to true when the
// caller nmethod is C1-compiled or the resolved method is a mismatch method,
// since such calls always pass inline type args as oops.
methodHandle SharedRuntime::handle_ic_miss_helper(bool& caller_does_not_scalarize, TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);
  CallInfo call_info;
  Bytecodes::Code bc;

  // receiver is null for static calls. An exception is thrown for null
  // receivers for non-static calls
  Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));

  methodHandle callee_method(current, call_info.selected_method());

#ifndef PRODUCT
  AtomicAccess::inc(&_ic_miss_ctr);

  // Statistics & Tracing
  if (TraceCallFixup) {
    ResourceMark rm(current);
    tty->print("IC miss (%s) %s call to", Bytecodes::name(bc), (caller_does_not_scalarize) ? "non-scalar" : "");
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
  }

  if (ICMissHistogram) {
    MutexLocker m(VMStatistic_lock);
    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::skip,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame f = current->last_frame().real_sender(&reg_map);// skip runtime stub
    // produce statistics under the lock
    trace_ic_miss(f.pc());
  }
#endif

  // install an event collector so that when a vtable stub is created the
  // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
  // event can't be posted when the stub is created as locks are held
  // - instead the event will be deferred until the event collector goes
  // out of scope.
  JvmtiDynamicCodeEventCollector event_collector;

  // Update inline cache to megamorphic. Skip update if we are called from interpreted.
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  // Walk past the runtime stub frame to the compiled caller frame.
  frame caller_frame = current->last_frame().sender(&reg_map);
  CodeBlob* cb = caller_frame.cb();
  nmethod* caller_nm = cb->as_nmethod();
  // Calls via mismatching methods are always non-scalarized
  if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
    caller_does_not_scalarize = true;
  }

  // The IC must only be mutated while holding the CompiledICLocker.
  CompiledICLocker ml(caller_nm);
  CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
  inline_cache->update(&call_info, receiver()->klass(), caller_does_not_scalarize);

  return callee_method;
}
1767 
1768 //
1769 // Resets a call-site in compiled code so it will get resolved again.
1770 // This routines handles both virtual call sites, optimized virtual call
1771 // sites, and static call sites. Typically used to change a call sites
1772 // destination from compiled to interpreted.
1773 //
1774 methodHandle SharedRuntime::reresolve_call_site(bool& is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1775   JavaThread* current = THREAD;
1776   ResourceMark rm(current);
1777   RegisterMap reg_map(current,
1778                       RegisterMap::UpdateMap::skip,
1779                       RegisterMap::ProcessFrames::include,
1780                       RegisterMap::WalkContinuation::skip);
1781   frame stub_frame = current->last_frame();
1782   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1783   frame caller = stub_frame.sender(&reg_map);
1784   if (caller.is_compiled_frame()) {
1785     caller_does_not_scalarize = caller.cb()->as_nmethod()->is_compiled_by_c1();
1786   }
1787   assert(!caller.is_interpreted_frame(), "must be compiled");
1788 
1789   // If the frame isn't a live compiled frame (i.e. deoptimized by the time we get here), no IC clearing must be done
1790   // for the caller. However, when the caller is C2 compiled and the callee a C1 or C2 compiled method, then we still
1791   // need to figure out whether it was an optimized virtual call with an inline type receiver. Otherwise, we end up
1792   // using the wrong method entry point and accidentally skip the buffering of the receiver.
1793   methodHandle callee_method = find_callee_method(caller_does_not_scalarize, CHECK_(methodHandle()));
1794   const bool caller_is_compiled_and_not_deoptimized = caller.is_compiled_frame() && !caller.is_deoptimized_frame();
1795   const bool caller_is_continuation_enter_intrinsic =
1796     caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic();
1797   const bool do_IC_clearing = caller_is_compiled_and_not_deoptimized || caller_is_continuation_enter_intrinsic;
1798 
1799   const bool callee_compiled_with_scalarized_receiver = callee_method->has_compiled_code() &&
1800                                                         !callee_method()->is_static() &&
1801                                                         callee_method()->is_scalarized_arg(0);
1802   const bool compute_is_optimized = !caller_does_not_scalarize && callee_compiled_with_scalarized_receiver;
1803 
1804   if (do_IC_clearing || compute_is_optimized) {
1805     address pc = caller.pc();
1806 
1807     nmethod* caller_nm = CodeCache::find_nmethod(pc);
1808     assert(caller_nm != nullptr, "did not find caller nmethod");
1809 
1810     // Default call_addr is the location of the "basic" call.
1811     // Determine the address of the call we a reresolving. With
1812     // Inline Caches we will always find a recognizable call.
1813     // With Inline Caches disabled we may or may not find a
1814     // recognizable call. We will always find a call for static
1815     // calls and for optimized virtual calls. For vanilla virtual
1816     // calls it depends on the state of the UseInlineCaches switch.
1817     //
1818     // With Inline Caches disabled we can get here for a virtual call
1819     // for two reasons:
1820     //   1 - calling an abstract method. The vtable for abstract methods
1821     //       will run us thru handle_wrong_method and we will eventually
1822     //       end up in the interpreter to throw the ame.
1823     //   2 - a racing deoptimization. We could be doing a vanilla vtable
1824     //       call and between the time we fetch the entry address and
1825     //       we jump to it the target gets deoptimized. Similar to 1
1826     //       we will wind up in the interprter (thru a c2i with c2).
1827     //
1828     CompiledICLocker ml(caller_nm);
1829     address call_addr = caller_nm->call_instruction_address(pc);
1830 
1831     if (call_addr != nullptr) {
1832       // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1833       // bytes back in the instruction stream so we must also check for reloc info.
1834       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1835       bool ret = iter.next(); // Get item
1836       if (ret) {
1837         is_optimized = false;
1838         switch (iter.type()) {
1839           case relocInfo::static_call_type:
1840             assert(callee_method->is_static(), "must be");
1841           case relocInfo::opt_virtual_call_type: {
1842             is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
1843             if (do_IC_clearing) {
1844               CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1845               cdc->set_to_clean();
1846             }
1847             break;
1848           }
1849 
1850           case relocInfo::virtual_call_type: {
1851             if (do_IC_clearing) {
1852               // compiled, dispatched call (which used to call an interpreted method)
1853               CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1854               inline_cache->set_to_clean();
1855             }
1856             break;
1857           }
1858           default:
1859             break;
1860         }
1861       }
1862     }
1863   }
1864 



1865 #ifndef PRODUCT
1866   AtomicAccess::inc(&_wrong_method_ctr);
1867 
1868   if (TraceCallFixup) {
1869     ResourceMark rm(current);
1870     tty->print("handle_wrong_method reresolving %s call to", (caller_does_not_scalarize) ? "non-scalar" : "");
1871     callee_method->print_short_name(tty);
1872     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1873   }
1874 #endif
1875 
1876   return callee_method;
1877 }
1878 
1879 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1880   // The faulting unsafe accesses should be changed to throw the error
1881   // synchronously instead. Meanwhile the faulting instruction will be
1882   // skipped over (effectively turning it into a no-op) and an
1883   // asynchronous exception will be raised which the thread will
1884   // handle at a later point. If the instruction is a load it will
1885   // return garbage.
1886 
1887   // Request an async exception.
1888   thread->set_pending_unsafe_access_error();
1889 
1890   // Return address of next instruction to execute.

2056   msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
2057 
2058   char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
2059   if (message == nullptr) {
2060     // Shouldn't happen, but don't cause even more problems if it does
2061     message = const_cast<char*>(caster_klass->external_name());
2062   } else {
2063     jio_snprintf(message,
2064                  msglen,
2065                  "class %s cannot be cast to class %s (%s%s%s)",
2066                  caster_name,
2067                  target_name,
2068                  caster_klass_description,
2069                  klass_separator,
2070                  target_klass_description
2071                  );
2072   }
2073   return message;
2074 }
2075 
2076 char* SharedRuntime::generate_identity_exception_message(JavaThread* current, Klass* klass) {
2077   assert(klass->is_inline_klass(), "Must be a concrete value class");
2078   const char* desc = "Cannot synchronize on an instance of value class ";
2079   const char* className = klass->external_name();
2080   size_t msglen = strlen(desc) + strlen(className) + 1;
2081   char* message = NEW_RESOURCE_ARRAY(char, msglen);
2082   if (nullptr == message) {
2083     // Out of memory: can't create detailed error message
2084     message = const_cast<char*>(klass->external_name());
2085   } else {
2086     jio_snprintf(message, msglen, "%s%s", desc, className);
2087   }
2088   return message;
2089 }
2090 
// Re-arms the current thread's stack guard pages via its stack-overflow
// state (leaf call: no safepoint, no handle marks).
JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
  (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
JRT_END
2094 
2095 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2096   if (!SafepointSynchronize::is_synchronizing()) {
2097     // Only try quick_enter() if we're not trying to reach a safepoint
2098     // so that the calling thread reaches the safepoint more quickly.
2099     if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
2100       return;
2101     }
2102   }
2103   // NO_ASYNC required because an async exception on the state transition destructor
2104   // would leave you with the lock held and it would never be released.
2105   // The normal monitorenter NullPointerException is thrown without acquiring a lock
2106   // and the model is that an exception implies the method failed.
2107   JRT_BLOCK_NO_ASYNC
2108   Handle h_obj(THREAD, obj);
2109   ObjectSynchronizer::enter(h_obj, lock, current);
2110   assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");

2304   tty->print_cr("Note 1: counter updates are not MT-safe.");
2305   tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2306   tty->print_cr("        %% in nested categories are relative to their category");
2307   tty->print_cr("        (and thus add up to more than 100%% with inlining)");
2308   tty->cr();
2309 
2310   MethodArityHistogram h;
2311 }
2312 #endif
2313 
#ifndef PRODUCT
// AdapterHandlerTable lookup statistics (non-product builds only).
static int _lookups; // number of calls to lookup
static int _equals;  // number of buckets checked with matching hash
static int _archived_hits; // number of successful lookups in archived table
static int _runtime_hits;  // number of successful lookups in runtime table
#endif
2320 
2321 // A simple wrapper class around the calling convention information
2322 // that allows sharing of adapters for the same calling convention.
2323 class AdapterFingerPrint : public MetaspaceObj {
2324 public:
  // One signature element of a fingerprint: the argument's BasicType plus,
  // for fields flattened in the calling convention, the field's offset.
  class Element {
  private:
    // The highest byte is the type of the argument. The remaining bytes contain the offset of the
    // field if it is flattened in the calling convention, -1 otherwise.
    juint _payload;

    static constexpr int offset_bit_width = 24;
    static constexpr juint offset_bit_mask = (1 << offset_bit_width) - 1;
  public:
    // Packs the type into the top byte and the offset into the low 24 bits;
    // an offset of -1 is stored as all-ones (offset_bit_mask).
    Element(BasicType bt, int offset) : _payload((static_cast<juint>(bt) << offset_bit_width) | (juint(offset) & offset_bit_mask)) {
      assert(offset >= -1 && offset < jint(offset_bit_mask), "invalid offset %d", offset);
    }

    BasicType bt() const {
      return static_cast<BasicType>(_payload >> offset_bit_width);
    }

    // Returns -1 for arguments that are not flattened fields.
    int offset() const {
      juint res = _payload & offset_bit_mask;
      return res == offset_bit_mask ? -1 : res;
    }

    // The packed payload doubles as the element's hash value.
    juint hash() const {
      return _payload;
    }

    bool operator!=(const Element& other) const {
      return _payload != other._payload;
    }
  };


2355 
private:
  const bool _has_ro_adapter;  // whether the fingerprint includes the ro adapter variant
  const int _length;           // number of Elements stored after this header

  // The Element array is placed immediately after the fixed-size object
  // (over-allocated by operator new below).
  static int data_offset() { return sizeof(AdapterFingerPrint); }
  Element* data_pointer() {
    return reinterpret_cast<Element*>(reinterpret_cast<address>(this) + data_offset());
  }

  const Element& element_at(int index) {
    assert(index < length(), "index %d out of bounds for length %d", index, length());
    Element* data = data_pointer();
    return data[index];
  }
2370 
  // Private constructor. Use allocate() to get an instance.
  // Walks the extended signature and packs one Element per entry into the
  // trailing data area. Entries inside a scalarized inline type argument
  // (delimited by T_METADATA ... T_VOID markers) keep their exact type and
  // field offset; all other entries are widened via adapter_encoding().
  AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter)
    : _has_ro_adapter(has_ro_adapter), _length(total_args_passed_in_sig(sig)) {
    Element* data = data_pointer();
    BasicType prev_bt = T_ILLEGAL;
    int vt_count = 0;  // current inline type nesting depth

    for (int index = 0; index < _length; index++) {
      const SigEntry& sig_entry = sig->at(index);
      BasicType bt = sig_entry._bt;
      if (bt == T_METADATA) {
        // Found start of inline type in signature
        assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
        vt_count++;
      } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
        // Found end of inline type in signature. A T_VOID following a
        // T_LONG/T_DOUBLE is just the second half of a two-slot value, not
        // an end-of-inline-type delimiter.
        assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
        vt_count--;
        assert(vt_count >= 0, "invalid vt_count");
      } else if (vt_count == 0) {
        // Widen fields that are not part of a scalarized inline type argument
        assert(sig_entry._offset == -1, "invalid offset for argument that is not a flattened field %d", sig_entry._offset);
        bt = adapter_encoding(bt);
      }

      ::new(&data[index]) Element(bt, sig_entry._offset);
      prev_bt = bt;
    }
    assert(vt_count == 0, "invalid vt_count");
  }
2400 
  // Call deallocate instead
  ~AdapterFingerPrint() {
    ShouldNotCallThis();
  }

  // Number of Elements the fingerprint stores; zero for a null signature.
  static int total_args_passed_in_sig(const GrowableArray<SigEntry>* sig) {
    return (sig != nullptr) ? sig->length() : 0;
  }

  // Heap words needed for the header plus the trailing Element array.
  static int compute_size_in_words(int len) {
    return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(Element)));
  }
2413 
2414   // Remap BasicTypes that are handled equivalently by the adapters.
2415   // These are correct for the current system but someday it might be
2416   // necessary to make this mapping platform dependent.
2417   static BasicType adapter_encoding(BasicType in) {
2418     switch (in) {
2419       case T_BOOLEAN:
2420       case T_BYTE:
2421       case T_SHORT:
2422       case T_CHAR:
2423         // They are all promoted to T_INT in the calling convention
2424         return T_INT;
2425 
2426       case T_OBJECT:
2427       case T_ARRAY:
2428         // In other words, we assume that any register good enough for
2429         // an int or long is good enough for a managed pointer.
2430 #ifdef _LP64
2431         return T_LONG;
2432 #else
2433         return T_INT;
2434 #endif
2435 
2436       case T_INT:
2437       case T_LONG:
2438       case T_FLOAT:
2439       case T_DOUBLE:
2440       case T_VOID:
2441         return in;
2442 
2443       default:
2444         ShouldNotReachHere();
2445         return T_CONFLICT;
2446     }
2447   }
2448 
  // Custom allocator: over-allocates (fp_size >= sizeof(AdapterFingerPrint))
  // so the Element array can live directly after the object, and
  // zero-initializes the whole block.
  void* operator new(size_t size, size_t fp_size) throw() {
    assert(fp_size >= size, "sanity check");
    void* p = AllocateHeap(fp_size, mtCode);
    memset(p, 0, fp_size);
    return p;
  }
2455 
public:
  // Applies 'function' to each packed Element, in signature order.
  template<typename Function>
  void iterate_args(Function function) {
    for (int i = 0; i < length(); i++) {
      function(element_at(i));
    }
  }
2463 
  // Factory: computes the required size for 'sig' and constructs the
  // fingerprint in a single over-allocated heap block (see operator new).
  static AdapterFingerPrint* allocate(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
    int len = total_args_passed_in_sig(sig);
    int size_in_bytes = BytesPerWord * compute_size_in_words(len);
    AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(sig, has_ro_adapter);
    assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
    return afp;
  }
2471 
  // Counterpart of allocate(); the destructor must not be invoked directly.
  static void deallocate(AdapterFingerPrint* fp) {
    FreeHeap(fp);
  }

  bool has_ro_adapter() const {
    return _has_ro_adapter;
  }

  int length() const {
    return _length;
  }
2483 
2484   unsigned int compute_hash() {
2485     int hash = 0;
2486     for (int i = 0; i < length(); i++) {
2487       const Element& v = element_at(i);
2488       //Add arithmetic operation to the hash, like +3 to improve hashing
2489       hash = ((hash << 8) ^ v.hash() ^ (hash >> 5)) + 3;
2490     }
2491     return (unsigned int)hash;
2492   }
2493 
2494   const char* as_string() {
2495     stringStream st;
2496     st.print("{");
2497     if (_has_ro_adapter) {
2498       st.print("has_ro_adapter");
2499     } else {
2500       st.print("no_ro_adapter");
2501     }
2502     for (int i = 0; i < length(); i++) {
2503       st.print(", ");
2504       const Element& elem = element_at(i);
2505       st.print("{%s, %d}", type2name(elem.bt()), elem.offset());
2506     }
2507     st.print("}");
2508     return st.as_string();
2509   }
2510 
  // Renders the argument types as a compact one-character-per-arg string.
  // A T_LONG element is ambiguous: a real Java long is followed by its
  // T_VOID half-slot (printed as "J"), while a reference that
  // adapter_encoding() widened to T_LONG on 64-bit has no trailing T_VOID
  // (printed as "L") -- hence the one-element lookahead via 'long_prev'.
  const char* as_basic_args_string() {
    stringStream st;
    bool long_prev = false;  // pending T_LONG whose meaning depends on the next element
    iterate_args([&] (const Element& arg) {
      if (long_prev) {
        long_prev = false;
        if (arg.bt() == T_VOID) {
          st.print("J");
        } else {
          st.print("L");
        }
      }
      if (arg.bt() == T_LONG) {
        long_prev = true;
      } else if (arg.bt() != T_VOID) {
        st.print("%c", type2char(arg.bt()));
      }
    });
    // A trailing T_LONG with no successor must be a widened reference.
    if (long_prev) {
      st.print("L");
    }
    return st.as_string();
  }
2534 



















































2535   bool equals(AdapterFingerPrint* other) {
2536     if (other->_has_ro_adapter != _has_ro_adapter) {
2537       return false;
2538     } else if (other->_length != _length) {
2539       return false;
2540     } else {
2541       for (int i = 0; i < _length; i++) {
2542         if (element_at(i) != other->element_at(i)) {
2543           return false;
2544         }
2545       }
2546     }
2547     return true;
2548   }
2549 
  // methods required by virtue of being a MetaspaceObj
  void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
  int size() const { return compute_size_in_words(_length); }
  MetaspaceObj::Type type() const { return AdapterFingerPrintType; }

  // Static wrappers so AdapterFingerPrint* can be used directly as a
  // hashtable key (see AdapterHandlerTable below).
  static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
    NOT_PRODUCT(_equals++);
    return fp1->equals(fp2);
  }

  static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
    return fp->compute_hash();
  }

#if INCLUDE_CDS
// Compact-hashtable equality adapter: compares a stored entry's fingerprint
// against the probe fingerprint (the length parameter is unused).
static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
  return AdapterFingerPrint::equals(entry->fingerprint(), fp);
}

// Read-only table of adapter entries stored in the AOT (CDS) archive.
class ArchivedAdapterTable : public OffsetCompactHashtable<
  AdapterFingerPrint*,
  AdapterHandlerEntry*,
  adapter_fp_equals_compact_hashtable_entry> {};
#endif // INCLUDE_CDS
2575 
// A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries.
// This is the runtime-populated table; accesses assert that
// AdapterHandlerLibrary_lock is held (see lookup below).
using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
                  AnyObj::C_HEAP, mtCode,
                  AdapterFingerPrint::compute_hash,
                  AdapterFingerPrint::equals>;
static AdapterHandlerTable* _adapter_handler_table;
static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2583 
2584 // Find a entry with the same fingerprint if it exists
2585 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter) {
2586   NOT_PRODUCT(_lookups++);
2587   assert_lock_strong(AdapterHandlerLibrary_lock);
2588   AdapterFingerPrint* fp = AdapterFingerPrint::allocate(sig, has_ro_adapter);
2589   AdapterHandlerEntry* entry = nullptr;
2590 #if INCLUDE_CDS
2591   // if we are building the archive then the archived adapter table is
2592   // not valid and we need to use the ones added to the runtime table
2593   if (AOTCodeCache::is_using_adapter()) {
2594     // Search archived table first. It is read-only table so can be searched without lock
2595     entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2596 #ifndef PRODUCT
2597     if (entry != nullptr) {
2598       _archived_hits++;
2599     }
2600 #endif
2601   }
2602 #endif // INCLUDE_CDS
2603   if (entry == nullptr) {
2604     assert_lock_strong(AdapterHandlerLibrary_lock);
2605     AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2606     if (entry_p != nullptr) {
2607       entry = *entry_p;
2608       assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",

2625   TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2626   ts.print(tty, "AdapterHandlerTable");
2627   tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2628                 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2629   int total_hits = _archived_hits + _runtime_hits;
2630   tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2631                 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2632 }
2633 #endif
2634 
2635 // ---------------------------------------------------------------------------
2636 // Implementation of AdapterHandlerLibrary
2637 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2638 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2639 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2640 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2641 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2642 #if INCLUDE_CDS
2643 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2644 #endif // INCLUDE_CDS
2645 static const int AdapterHandlerLibrary_size = 48*K;
2646 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2647 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2648 
// Returns the shared scratch BufferBlob for adapter generation;
// initialize() must have run first.
BufferBlob* AdapterHandlerLibrary::buffer_blob() {
  assert(_buffer != nullptr, "should be initialized");
  return _buffer;
}
2653 
2654 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2655   if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2656     AdapterBlob* adapter_blob = entry->adapter_blob();
2657     char blob_id[256];
2658     jio_snprintf(blob_id,
2659                  sizeof(blob_id),
2660                  "%s(%s)",
2661                  adapter_blob->name(),
2662                  entry->fingerprint()->as_string());
2663     if (Forte::is_enabled()) {
2664       Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2665     }

// One-time setup of the adapter machinery: creates the runtime adapter
// table and the scratch buffer, then either links the AOT-archived adapters
// or pre-generates adapters for the most common simple signatures.
void AdapterHandlerLibrary::initialize() {
  {
    ResourceMark rm;
    _adapter_handler_table = new (mtCode) AdapterHandlerTable();
    _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
  }

#if INCLUDE_CDS
  // Link adapters in AOT Cache to their code in AOT Code Cache
  if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
    link_aot_adapters();
    lookup_simple_adapters();
    return;
  }
#endif // INCLUDE_CDS

  ResourceMark rm;
  {
    MutexLocker mu(AdapterHandlerLibrary_lock);

    // Pre-generate the handlers returned by get_simple_adapter():
    // (), (I), (L), receiver+(I) and receiver+(L).
    CompiledEntrySignature no_args;
    no_args.compute_calling_conventions();
    _no_arg_handler = create_adapter(no_args, true);

    CompiledEntrySignature obj_args;
    SigEntry::add_entry(obj_args.sig(), T_OBJECT);
    obj_args.compute_calling_conventions();
    _obj_arg_handler = create_adapter(obj_args, true);

    CompiledEntrySignature int_args;
    SigEntry::add_entry(int_args.sig(), T_INT);
    int_args.compute_calling_conventions();
    _int_arg_handler = create_adapter(int_args, true);

    CompiledEntrySignature obj_int_args;
    SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
    SigEntry::add_entry(obj_int_args.sig(), T_INT);
    obj_int_args.compute_calling_conventions();
    _obj_int_arg_handler = create_adapter(obj_int_args, true);

    CompiledEntrySignature obj_obj_args;
    SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
    SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
    obj_obj_args.compute_calling_conventions();
    _obj_obj_arg_handler = create_adapter(obj_obj_args, true);

    // we should always get an entry back but we don't have any
    // associated blob on Zero
    assert(_no_arg_handler != nullptr &&
           _obj_arg_handler != nullptr &&
           _int_arg_handler != nullptr &&
           _obj_int_arg_handler != nullptr &&
           _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
  }

  // Outside of the lock
#ifndef ZERO
  // no blobs to register when we are on Zero
  post_adapter_creation(_no_arg_handler);
  post_adapter_creation(_obj_arg_handler);
  post_adapter_creation(_int_arg_handler);
  post_adapter_creation(_obj_int_arg_handler);
  post_adapter_creation(_obj_obj_arg_handler);
#endif // ZERO
}
2738 
// Allocates a fresh AdapterHandlerEntry with a unique, non-zero id drawn
// from the atomically incremented _id_counter.
AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
  uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
  assert(id > 0, "we can never overflow because AOT cache cannot contain more than 2^32 methods");
  return AdapterHandlerEntry::allocate(id, fingerprint);
}
2744 
// Returns one of the pre-generated adapters when the method's signature has
// a simple shape (no args; a single int-like or object arg; receiver plus
// one such arg), or nullptr when a custom adapter must be built. Any method
// that might take a scalarized inline type argument (inline-klass receiver,
// or a value-class object arg under InlineTypePassFieldsAsArgs) returns
// nullptr so the full path computes the extended signature.
AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
  int total_args_passed = method->size_of_parameters(); // All args on stack
  if (total_args_passed == 0) {
    return _no_arg_handler;
  } else if (total_args_passed == 1) {
    if (!method->is_static()) {
      if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
        // Receiver may be passed as scalarized fields
        return nullptr;
      }
      return _obj_arg_handler;
    }
    switch (method->signature()->char_at(1)) {
      case JVM_SIGNATURE_CLASS: {
        if (InlineTypePassFieldsAsArgs) {
          SignatureStream ss(method->signature());
          InlineKlass* vk = ss.as_inline_klass(method->method_holder());
          if (vk != nullptr) {
            // Argument is a value object and may be scalarized
            return nullptr;
          }
        }
        return _obj_arg_handler;
      }
      case JVM_SIGNATURE_ARRAY:
        return _obj_arg_handler;
      case JVM_SIGNATURE_INT:
      case JVM_SIGNATURE_BOOLEAN:
      case JVM_SIGNATURE_CHAR:
      case JVM_SIGNATURE_BYTE:
      case JVM_SIGNATURE_SHORT:
        // All sub-int types share the int-argument adapter
        return _int_arg_handler;
    }
  } else if (total_args_passed == 2 &&
             !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
    switch (method->signature()->char_at(1)) {
      case JVM_SIGNATURE_CLASS: {
        if (InlineTypePassFieldsAsArgs) {
          SignatureStream ss(method->signature());
          InlineKlass* vk = ss.as_inline_klass(method->method_holder());
          if (vk != nullptr) {
            // Argument is a value object and may be scalarized
            return nullptr;
          }
        }
        return _obj_obj_arg_handler;
      }
      case JVM_SIGNATURE_ARRAY:
        return _obj_obj_arg_handler;
      case JVM_SIGNATURE_INT:
      case JVM_SIGNATURE_BOOLEAN:
      case JVM_SIGNATURE_CHAR:
      case JVM_SIGNATURE_BYTE:
      case JVM_SIGNATURE_SHORT:
        return _obj_int_arg_handler;
    }
  }
  // No pre-generated adapter fits this signature.
  return nullptr;
}
2801 
// Constructs an empty CompiledEntrySignature for 'method' (may be null when
// the signatures are filled in later, e.g. initialize_from_fingerprint()).
// Pre-sizes the three signature arrays to the method's parameter count:
//   _sig       -- non-scalarized signature,
//   _sig_cc    -- scalarized compiled calling convention,
//   _sig_cc_ro -- like _sig_cc but with the receiver kept as an object.
2802 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2803   _method(method), _num_inline_args(0), _has_inline_recv(false),
2804   _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2805   _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2806   _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2807   _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2808   _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2809   _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2810 }
2811 
2812 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2813 // or the same entry for VEP and VIEP(RO).
// Returns which existing entry point VIEP(RO) can alias:
//   Verified_Entry           -- share with VEP,
//   Verified_Inline_Entry    -- share with VIEP,
//   Verified_Inline_Entry_RO -- no sharing, VIEP(RO) needs its own entry.
2814 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2815   if (!has_scalarized_args()) {
2816     // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2817     return CodeOffsets::Verified_Entry;
2818   }
2819   if (_method->is_static()) {
2820     // Static methods don't need VIEP(RO)
2821     return CodeOffsets::Verified_Entry;
2822   }
2823 
2824   if (has_inline_recv()) {
2825     if (num_inline_args() == 1) {
2826       // Share same entry for VIEP and VIEP(RO).
2827       // This is quite common: we have an instance method in an InlineKlass that has
2828       // no inline type args other than <this>.
2829       return CodeOffsets::Verified_Inline_Entry;
2830     } else {
2831       assert(num_inline_args() > 1, "must be");
2832       // No sharing:
2833       //   VIEP(RO) -- <this> is passed as object
2834       //   VEP      -- <this> is passed as fields
2835       return CodeOffsets::Verified_Inline_Entry_RO;
2836     }

2837   }
2838 
2839   // <this> is not an inline type (the static case already returned above)
2840   if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2841     // No sharing:
2842     // Some arguments are passed on the stack, and we have inserted reserved entries
2843     // into the VEP, but we never insert reserved entries into the VIEP(RO).
2844     return CodeOffsets::Verified_Inline_Entry_RO;
2845   } else {
2846     // Share same entry for VEP and VIEP(RO).
2847     return CodeOffsets::Verified_Entry;
2848   }
2849 }
2850 
2851 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
// The result is computed once and cached in _supers. Used by
// compute_calling_conventions() to detect calling-convention mismatches.
2852 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2853   if (_supers != nullptr) {
2854     return _supers;
2855   }
2856   _supers = new GrowableArray<Method*>();
2857   // Skip private, static, and <init> methods
2858   if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2859     return _supers;
2860   }
2861   Symbol* name = _method->name();
2862   Symbol* signature = _method->signature();
2863   const Klass* holder = _method->method_holder()->super();
   // NOTE(review): assumes the direct super class is non-null here (would crash
   // for a holder without a super, i.e. j.l.Object) -- presumably callers only
   // reach this for methods with inline-type args; confirm.
2864   Symbol* holder_name = holder->name();
2865   ThreadInVMfromUnknown tiv;
2866   JavaThread* current = JavaThread::current();
2867   HandleMark hm(current);
2868   Handle loader(current, _method->method_holder()->class_loader());
2869 
2870   // Walk up the class hierarchy and search for super methods
   // The walk stops at the first class in the chain that has no method with
   // this name/signature.
2871   while (holder != nullptr) {
2872     Method* super_method = holder->lookup_method(name, signature);
2873     if (super_method == nullptr) {
2874       break;
2875     }
     // Only accessible (overridable) methods count: package-private supers
     // must be in the same package. NOTE(review): the package check uses the
     // name of the direct super class ('holder_name'), not the current class.
2876     if (!super_method->is_static() && !super_method->is_private() &&
2877         (!super_method->is_package_private() ||
2878          super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
2879       _supers->push(super_method);
2880     }
2881     holder = super_method->method_holder()->super();
2882   }
2883   // Search interfaces for super methods
2884   Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
2885   for (int i = 0; i < interfaces->length(); ++i) {
2886     Method* m = interfaces->at(i)->lookup_method(name, signature);
2887     if (m != nullptr && !m->is_static() && m->is_public()) {
2888       _supers->push(m);
2889     }
2890   }
2891   return _supers;
2892 }
2893 
2894 // Iterate over arguments and compute scalarized and non-scalarized signatures
// Fills in:
//   _sig       -- every argument as declared (inline types stay T_OBJECT),
//   _sig_cc    -- eligible inline-type args expanded into their fields,
//   _sig_cc_ro -- like _sig_cc but the receiver stays a single T_OBJECT,
// plus the matching VMRegPair arrays and stack-slot counts, and the
// c1/c2 stack-repair flags.
// 'init' == true scalarizes every eligible inline-type arg unconditionally;
// otherwise only args already marked via Method::is_scalarized_arg().
2895 void CompiledEntrySignature::compute_calling_conventions(bool init) {
2896   bool has_scalarized = false;
2897   if (_method != nullptr) {
2898     InstanceKlass* holder = _method->method_holder();
2899     int arg_num = 0;
2900     if (!_method->is_static()) {
2901       // We shouldn't scalarize 'this' in a value class constructor
2902       if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() && !_method->is_object_constructor() &&
2903           (init || _method->is_scalarized_arg(arg_num))) {
2904         _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
2905         _sig_cc->insert_before(1, SigEntry(T_OBJECT, 0, nullptr, false, true)); // buffer argument
2906         has_scalarized = true;
2907         _has_inline_recv = true;
2908         _num_inline_args++;
2909       } else {
2910         SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
2911       }
       // The receiver always stays an object in _sig and _sig_cc_ro.
2912       SigEntry::add_entry(_sig, T_OBJECT, holder->name());
2913       SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
2914       arg_num++;
2915     }
2916     for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
2917       BasicType bt = ss.type();
2918       if (bt == T_OBJECT) {
2919         InlineKlass* vk = ss.as_inline_klass(holder);
2920         if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
2921           // Check for a calling convention mismatch with super method(s)
2922           bool scalar_super = false;
2923           bool non_scalar_super = false;
2924           GrowableArray<Method*>* supers = get_supers();
2925           for (int i = 0; i < supers->length(); ++i) {
2926             Method* super_method = supers->at(i);
2927             if (super_method->is_scalarized_arg(arg_num)) {
2928               scalar_super = true;
2929             } else {
2930               non_scalar_super = true;
2931             }
2932           }
2933 #ifdef ASSERT
2934           // Randomly enable below code paths for stress testing
2935           bool stress = init && StressCallingConvention;
2936           if (stress && (os::random() & 1) == 1) {
2937             non_scalar_super = true;
2938             if ((os::random() & 1) == 1) {
2939               scalar_super = true;
2940             }
2941           }
2942 #endif
2943           if (non_scalar_super) {
2944             // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
2945             if (scalar_super) {
2946               // Found non-scalar *and* scalar super methods. We can't handle both.
2947               // Mark the scalar method as mismatch and re-compile call sites to use non-scalarized calling convention.
2948               for (int i = 0; i < supers->length(); ++i) {
2949                 Method* super_method = supers->at(i);
2950                 if (super_method->is_scalarized_arg(arg_num) DEBUG_ONLY(|| (stress && (os::random() & 1) == 1))) {
2951                   JavaThread* thread = JavaThread::current();
2952                   HandleMark hm(thread);
2953                   methodHandle mh(thread, super_method);
2954                   DeoptimizationScope deopt_scope;
2955                   {
2956                     // Keep the lock scope minimal. Prevent interference with other
2957                     // dependency checks by setting mismatch and marking within the lock.
2958                     MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
2959                     super_method->set_mismatch();
2960                     CodeCache::mark_for_deoptimization(&deopt_scope, mh());
2961                   }
                   // Deoptimize outside the Compile_lock.
2962                   deopt_scope.deoptimize_marked();
2963                 }
2964               }
2965             }
2966             // Fall back to non-scalarized calling convention
2967             SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2968             SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2969           } else {
2970             _num_inline_args++;
2971             has_scalarized = true;
2972             int last = _sig_cc->length();
2973             int last_ro = _sig_cc_ro->length();
2974             _sig_cc->appendAll(vk->extended_sig());
2975             _sig_cc_ro->appendAll(vk->extended_sig());
2976             // buffer argument
2977             _sig_cc->insert_before(last + 1, SigEntry(T_OBJECT, 0, nullptr, false, true));
2978             _sig_cc_ro->insert_before(last_ro + 1, SigEntry(T_OBJECT, 0, nullptr, false, true));
2979             // Insert InlineTypeNode::NullMarker field right after T_METADATA delimiter
2980             _sig_cc->insert_before(last + 2, SigEntry(T_BOOLEAN, -1, nullptr, true, false));
2981             _sig_cc_ro->insert_before(last_ro + 2, SigEntry(T_BOOLEAN, -1, nullptr, true, false));
2982           }
2983         } else {
2984           SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2985           SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2986         }
2987         bt = T_OBJECT;
2988       } else {
2989         SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
2990         SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
2991       }
2992       SigEntry::add_entry(_sig, bt, ss.as_symbol());
2993       if (bt != T_VOID) {
2994         arg_num++;
2995       }
2996     }
2997   }
2998 
2999   // Compute the non-scalarized calling convention
3000   _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3001   _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3002 
3003   // Compute the scalarized calling conventions if there are scalarized inline types in the signature
   // Note: has_scalarized implies _method != nullptr (only set inside the
   // null-guard above), so the is_native() deref is safe.
3004   if (has_scalarized && !_method->is_native()) {
3005     _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3006     _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3007 
3008     _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3009     _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3010 
3011     _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3012     _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3013 
3014     // Upper bound on stack arguments to avoid hitting the argument limit and
3015     // bailing out of compilation ("unsupported incoming calling sequence").
3016     // TODO we need a reasonable limit (flag?) here
3017     if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 75) {
3018       return; // Success
3019     }
3020   }

3021 
3022   // No scalarized args
   // Reached when nothing was scalarized OR the scalarized signature exceeded
   // the stack-arg limit above: alias all scalarized views to the
   // non-scalarized ones so callers see a single convention.
3023   _sig_cc = _sig;
3024   _regs_cc = _regs;
3025   _args_on_stack_cc = _args_on_stack;
3026 
3027   _sig_cc_ro = _sig;
3028   _regs_cc_ro = _regs;
3029   _args_on_stack_cc_ro = _args_on_stack;
3030 }
3031 
// Reconstructs the three signatures and calling conventions from a compact
// AdapterFingerPrint (used when restoring adapters, e.g. from the AOT cache).
// Fingerprint element encoding, as consumed below:
//   T_METADATA                         -- start of a scalarized inline type
//   T_VOID (not following LONG/DOUBLE) -- end of the current inline type
//   T_LONG                             -- either a real long (when followed by
//                                         T_VOID) or an object reference;
//                                         disambiguated via 'long_prev' delay.
3032 void CompiledEntrySignature::initialize_from_fingerprint(AdapterFingerPrint* fingerprint) {
3033   _has_inline_recv = fingerprint->has_ro_adapter();
3034 
   // value_object_count: current nesting depth of scalarized inline types.
   // skipping_inline_recv: true while consuming the receiver's scalarized
   // fields, which are omitted from _sig_cc_ro (receiver added as one object).
3035   int value_object_count = 0;
3036   BasicType prev_bt = T_ILLEGAL;
3037   bool has_scalarized_arguments = false;
3038   bool long_prev = false;
3039   int long_prev_offset = -1;
3040   bool skipping_inline_recv = false;
3041   bool receiver_handled = false;
3042 
3043   fingerprint->iterate_args([&] (const AdapterFingerPrint::Element& arg) {
3044     BasicType bt = arg.bt();
3045     int offset = arg.offset();
3046 
     // Resolve a deferred T_LONG from the previous element: a following T_VOID
     // means it really was a long, anything else means it encoded an object.
3047     if (long_prev) {
3048       long_prev = false;
3049       BasicType bt_to_add;
3050       if (bt == T_VOID) {
3051         bt_to_add = T_LONG;
3052       } else {
3053         bt_to_add = T_OBJECT;
3054       }
       // Only top-level args (not fields of a scalarized value) go into _sig.
3055       if (value_object_count == 0) {
3056         SigEntry::add_entry(_sig, bt_to_add);
3057       }
3058       assert(long_prev_offset != 0, "no buffer argument here");
3059       SigEntry::add_entry(_sig_cc, bt_to_add, nullptr, long_prev_offset);
3060       if (!skipping_inline_recv) {
3061         SigEntry::add_entry(_sig_cc_ro, bt_to_add, nullptr, long_prev_offset);
3062       }
3063     }
3064 
3065     switch (bt) {
3066       case T_VOID:
3067         if (prev_bt != T_LONG && prev_bt != T_DOUBLE) {
           // End-of-inline-type delimiter (not the half of a long/double).
3068           assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
3069           value_object_count--;
3070           SigEntry::add_entry(_sig_cc, T_VOID, nullptr, offset);
3071           if (!skipping_inline_recv) {
3072             SigEntry::add_entry(_sig_cc_ro, T_VOID, nullptr, offset);
3073           } else if (value_object_count == 0) {
             // Finished skipping the scalarized receiver's fields.
3074             skipping_inline_recv = false;
3075           }
3076           assert(value_object_count >= 0, "invalid value object count");
3077         } else {
3078           // Nothing to add for _sig: We already added an addition T_VOID in add_entry() when adding T_LONG or T_DOUBLE.
3079         }
3080         break;
3081       case T_INT:
3082       case T_FLOAT:
3083       case T_DOUBLE:
3084         if (value_object_count == 0) {
3085           SigEntry::add_entry(_sig, bt);
3086         }
3087         SigEntry::add_entry(_sig_cc, bt, nullptr, offset);
3088         if (!skipping_inline_recv) {
3089           SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset);
3090         }
3091         break;
3092       case T_LONG:
         // Defer: meaning depends on the next element (see 'long_prev' above).
3093         long_prev = true;
3094         long_prev_offset = offset;
3095         break;
3096       case T_BOOLEAN:
3097       case T_CHAR:
3098       case T_BYTE:
3099       case T_SHORT:
3100       case T_OBJECT:
3101       case T_ARRAY:
         // These tags can only appear as fields inside a scalarized inline
         // type; top-level objects are encoded as T_LONG (handled above).
3102         assert(value_object_count > 0, "must be value object field");
3103         assert(offset != 0 || (bt == T_OBJECT && prev_bt == T_METADATA), "buffer input expected here");
3104         SigEntry::add_entry(_sig_cc, bt, nullptr, offset, offset == -1, offset == 0);
3105         if (!skipping_inline_recv) {
3106           SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset, offset == -1, offset == 0);
3107         }
3108         break;
3109       case T_METADATA:
         // Start of a scalarized inline-type argument.
3110         assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
3111         if (value_object_count == 0) {
3112           SigEntry::add_entry(_sig, T_OBJECT);
3113         }
3114         SigEntry::add_entry(_sig_cc, T_METADATA, nullptr, offset);
3115         if (!skipping_inline_recv) {
           // For the RO signature, the (first, top-level) inline receiver is
           // represented as a single T_OBJECT and its fields are skipped.
3116           if (!receiver_handled && _has_inline_recv && value_object_count == 0) {
3117             SigEntry::add_entry(_sig_cc_ro, T_OBJECT);
3118             skipping_inline_recv = true;
3119             receiver_handled = true;
3120           } else {
3121             SigEntry::add_entry(_sig_cc_ro, T_METADATA, nullptr, offset);
3122           }
3123         }
3124         value_object_count++;
3125         has_scalarized_arguments = true;
3126         break;
3127       default: {
3128         fatal("Unexpected BasicType: %s", basictype_to_str(bt));
3129       }
3130     }
3131     prev_bt = bt;
3132   });
3133 
3134   if (long_prev) {
3135     // If previous bt was T_LONG and we reached the end of the signature, we know that it must be a T_OBJECT.
3136     SigEntry::add_entry(_sig, T_OBJECT);
3137     SigEntry::add_entry(_sig_cc, T_OBJECT);
3138     SigEntry::add_entry(_sig_cc_ro, T_OBJECT);
3139   }
3140   assert(value_object_count == 0, "invalid value object count");
3141 
3142 #ifdef ASSERT
3143   if (_has_inline_recv) {
3144     // In RO signatures, inline receivers must be represented as a single T_OBJECT
3145     assert(_sig_cc_ro->length() >= 1, "sig_cc_ro must include receiver");
3146     assert(_sig_cc_ro->at(0)._bt == T_OBJECT,
3147            "sig_cc_ro must represent inline receiver as T_OBJECT");
3148     assert(_sig_cc_ro->length() <= _sig_cc->length(),
3149            "sig_cc_ro must not be longer than sig_cc");
3150   }
3151 #endif
3152 
   // Compute register assignments, mirroring compute_calling_conventions().
3153   _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3154   _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3155 
3156   // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3157   if (has_scalarized_arguments) {
3158     _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3159     _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3160 
3161     _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3162     _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3163 
3164     _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3165     _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3166   } else {
3167     // No scalarized args
3168     _sig_cc = _sig;
3169     _regs_cc = _regs;
3170     _args_on_stack_cc = _args_on_stack;
3171 
3172     _sig_cc_ro = _sig;
3173     _regs_cc_ro = _regs;
3174     _args_on_stack_cc_ro = _args_on_stack;
3175   }
3176 
3177 #ifdef ASSERT
   // Round-trip check: re-deriving the fingerprint from the reconstructed
   // signature must yield the original.
3178   {
3179     AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(_sig_cc, _has_inline_recv);
3180     assert(fingerprint->equals(compare_fp), "%s - %s", fingerprint->as_string(), compare_fp->as_string());
3181     AdapterFingerPrint::deallocate(compare_fp);
3182   }
3183 #endif
3184 }
3185 
// Printable names for AdapterHandlerEntry entry points (logging/diagnostics).
// NOTE(review): presumably indexed by an entry-kind constant declared in the
// header -- keep the order in sync with that declaration.
3186 const char* AdapterHandlerEntry::_entry_names[] = {
3187   "i2c", "c2i", "c2i_unverified", "c2i_no_clinit_check"
3188 };
3189 
3190 #ifdef ASSERT
// Debug-only: regenerates the adapter for 'ces' as a transient entry (no code
// blob, not inserted into the table) and asserts that the generated code is
// identical to 'cached_entry' -- validating that sharing by fingerprint is safe.
3191 void AdapterHandlerLibrary::verify_adapter_sharing(CompiledEntrySignature& ces, AdapterHandlerEntry* cached_entry) {
3192   // we can only check for the same code if there is any
3193 #ifndef ZERO
3194   AdapterHandlerEntry* comparison_entry = create_adapter(ces, false, true); // allocate_code_blob=false, is_transient=true
3195   assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
3196   assert(comparison_entry->compare_code(cached_entry), "code must match");
3197   // Release the one just created
3198   AdapterHandlerEntry::deallocate(comparison_entry);
3199 # endif // ZERO
3200 }
3201 #endif /* ASSERT */
3202 
// Returns the adapter for 'method', creating and registering it if needed.
// Tries the shared simple handlers first; otherwise computes the calling
// conventions, records scalarization flags on the Method, and looks up or
// creates a full adapter under AdapterHandlerLibrary_lock.
3203 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
3204   assert(!method->is_abstract() || InlineTypePassFieldsAsArgs, "abstract methods do not have adapters");
3205   // Use customized signature handler.  Need to lock around updates to
3206   // the _adapter_handler_table (it is not safe for concurrent readers
3207   // and a single writer: this could be fixed if it becomes a
3208   // problem).
3209 
3210   // Fast-path for trivial adapters
3211   AdapterHandlerEntry* entry = get_simple_adapter(method);
3212   if (entry != nullptr) {
3213     return entry;
3214   }
3215 
3216   ResourceMark rm;
3217   bool new_entry = false;
3218 
3219   CompiledEntrySignature ces(method());
3220   ces.compute_calling_conventions();
   // Record the scalarization properties on the Method so compiled code
   // generation agrees with the adapter's calling convention.
3221   if (ces.has_scalarized_args()) {
3222     if (!method->has_scalarized_args()) {
3223       method->set_has_scalarized_args();
3224     }
3225     if (ces.c1_needs_stack_repair()) {
3226       method->set_c1_needs_stack_repair();
3227     }
3228     if (ces.c2_needs_stack_repair() && !method->c2_needs_stack_repair()) {
3229       method->set_c2_needs_stack_repair();
3230     }
3231   }
3232 




3233   {
3234     MutexLocker mu(AdapterHandlerLibrary_lock);
3235 
3236     // Lookup method signature's fingerprint
3237     entry = lookup(ces.sig_cc(), ces.has_inline_recv());
3238 
3239     if (entry != nullptr) {
3240 #ifndef ZERO
3241       assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
3242 #endif
3243 #ifdef ASSERT
3244       if (!entry->in_aot_cache() && VerifyAdapterSharing) {
3245         verify_adapter_sharing(ces, entry);
3246       }
3247 #endif
3248     } else {
3249       entry = create_adapter(ces, /* allocate_code_blob */ true);
3250       if (entry != nullptr) {
3251         new_entry = true;
3252       }
3253     }
3254   }
3255 
3256   // Outside of the lock
   // Notify profilers/JVMTI etc. about the freshly created adapter without
   // holding AdapterHandlerLibrary_lock.
3257   if (new_entry) {
3258     post_adapter_creation(entry);
3259   }
3260   return entry;
3261 }
3262 
3263 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
3264   ResourceMark rm;
3265   const char* name = AdapterHandlerLibrary::name(handler);
3266   const uint32_t id = AdapterHandlerLibrary::id(handler);
3267 
3268   CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
3269   if (blob != nullptr) {

3284   }
3285   insts_size = adapter_blob->code_size();
3286   st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
3287                 handler->fingerprint()->as_basic_args_string(),
3288                 handler->fingerprint()->as_string(), insts_size);
3289   st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
3290   if (Verbose || PrintStubCode) {
3291     address first_pc = adapter_blob->content_begin();
3292     if (first_pc != nullptr) {
3293       Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
3294       st->cr();
3295     }
3296   }
3297 }
3298 #endif // PRODUCT
3299 
// Converts absolute adapter entry addresses into offsets relative to the I2C
// entry (index AdapterBlob::I2C, offset 0). Offsets survive blob relocation,
// unlike raw addresses. A missing C2I_No_Clinit_Check entry (nullptr) is
// encoded as the sentinel -1.
3300 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
3301                                               int entry_offset[AdapterBlob::ENTRY_COUNT]) {
3302   entry_offset[AdapterBlob::I2C] = 0;
3303   entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];
3304   entry_offset[AdapterBlob::C2I_Inline] = entry_address[AdapterBlob::C2I_Inline] - entry_address[AdapterBlob::I2C];
3305   entry_offset[AdapterBlob::C2I_Inline_RO] = entry_address[AdapterBlob::C2I_Inline_RO] - entry_address[AdapterBlob::I2C];
3306   entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];
3307   entry_offset[AdapterBlob::C2I_Unverified_Inline] = entry_address[AdapterBlob::C2I_Unverified_Inline] - entry_address[AdapterBlob::I2C];
3308   if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
3309     entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
3310   } else {
3311     entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
3312   }
3313 }
3314 
// Generates the i2c/c2i adapter code for 'handler' from the signatures in
// 'ces'. Returns false only when the CodeCache has no room for the blob.
// 'allocate_code_blob' controls whether a permanent AdapterBlob is created;
// 'is_transient' entries (used only for VerifyAdapterSharing comparison) keep
// just the saved code and are never stored in the AOT code cache.
3315 bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
3316                                                   CompiledEntrySignature& ces,
3317                                                   bool allocate_code_blob,
3318                                                   bool is_transient) {
3319   if (log_is_enabled(Info, perf, class, link)) {
3320     ClassLoader::perf_method_adapters_count()->inc();
3321   }
3322 
3323 #ifndef ZERO
3324   AdapterBlob* adapter_blob = nullptr;
3325   BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3326   CodeBuffer buffer(buf);
3327   short buffer_locs[20];
3328   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3329                                          sizeof(buffer_locs)/sizeof(relocInfo));
3330   MacroAssembler masm(&buffer);
3331   address entry_address[AdapterBlob::ENTRY_COUNT];

3332 
3333   // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage


3334   SharedRuntime::generate_i2c2i_adapters(&masm,
3335                                          ces.args_on_stack(),
3336                                          ces.sig(),
3337                                          ces.regs(),
3338                                          ces.sig_cc(),
3339                                          ces.regs_cc(),
3340                                          ces.sig_cc_ro(),
3341                                          ces.regs_cc_ro(),
3342                                          entry_address,
3343                                          adapter_blob,
3344                                          allocate_code_blob);
3345 
3346   if (ces.has_scalarized_args()) {
3347     // Save a C heap allocated version of the scalarized signature and store it in the adapter
     // (the 'ces' copies are resource-allocated and die with the ResourceMark).
3348     GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3349     heap_sig->appendAll(ces.sig_cc());
3350     handler->set_sig_cc(heap_sig);
3351     heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro()->length(), mtInternal);
3352     heap_sig->appendAll(ces.sig_cc_ro());
3353     handler->set_sig_cc_ro(heap_sig);
3354   }
3355   // On zero there is no code to save and no need to create a blob and
3356   // or relocate the handler.
3357   int entry_offset[AdapterBlob::ENTRY_COUNT];
3358   address_to_offset(entry_address, entry_offset);
3359 #ifdef ASSERT
3360   if (VerifyAdapterSharing) {
     // Keep a copy of the generated instructions for later code comparison.
3361     handler->save_code(buf->code_begin(), buffer.insts_size());
3362     if (is_transient) {
       // Comparison-only entry: no blob, no AOT store -- done.
3363       return true;
3364     }
3365   }
3366 #endif

3367   if (adapter_blob == nullptr) {
3368     // CodeCache is full, disable compilation
3369     // Ought to log this but compile log is only per compile thread
3370     // and we're some non descript Java thread.
3371     return false;
3372   }
3373   handler->set_adapter_blob(adapter_blob);
3374   if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
3375     // try to save generated code
3376     const char* name = AdapterHandlerLibrary::name(handler);
3377     const uint32_t id = AdapterHandlerLibrary::id(handler);
3378     bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
3379     assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
3380   }
3381 #endif // ZERO
3382 
3383 #ifndef PRODUCT
3384   // debugging support
3385   if (PrintAdapterHandlers || PrintStubCode) {
3386     print_adapter_handler_info(tty, handler);
3387   }
3388 #endif
3389 
3390   return true;
3391 }
3392 
// Creates a new AdapterHandlerEntry for the signature described by 'ces',
// generates its code, and (unless 'is_transient') registers it in
// _adapter_handler_table under AdapterHandlerLibrary_lock.
// Returns nullptr if code generation fails (CodeCache full).
3393 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(CompiledEntrySignature& ces,
3394                                                            bool allocate_code_blob,
3395                                                            bool is_transient) {
3396   AdapterFingerPrint* fp = AdapterFingerPrint::allocate(ces.sig_cc(), ces.has_inline_recv());
3397 #ifdef ASSERT
3398   // Verify that we can successfully restore the compiled entry signature object.
3399   CompiledEntrySignature ces_verify;
3400   ces_verify.initialize_from_fingerprint(fp);
3401 #endif
   // NOTE(review): 'fp' is handed to the entry here; presumably
   // AdapterHandlerEntry::deallocate() releases it on the failure path -- confirm.
3402   AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
3403   if (!generate_adapter_code(handler, ces, allocate_code_blob, is_transient)) {
3404     AdapterHandlerEntry::deallocate(handler);
3405     return nullptr;
3406   }
3407   if (!is_transient) {
3408     assert_lock_strong(AdapterHandlerLibrary_lock);
3409     _adapter_handler_table->put(fp, handler);
3410   }
3411   return handler;
3412 }
3413 
3414 #if INCLUDE_CDS
// Clears runtime-only state before this entry is written to the AOT/CDS
// archive: saved comparison code, the code blob, the linked flag, and the
// C-heap scalarized signatures. These are re-established at production time
// (see AdapterHandlerEntry::link()).
3415 void AdapterHandlerEntry::remove_unshareable_info() {
3416 #ifdef ASSERT
3417    _saved_code = nullptr;
3418    _saved_code_length = 0;
3419 #endif // ASSERT
3420    _adapter_blob = nullptr;
3421    _linked = false;
3422    _sig_cc = nullptr;
3423    _sig_cc_ro = nullptr;
3424 }
3425 
3426 class CopyAdapterTableToArchive : StackObj {
3427 private:
3428   CompactHashtableWriter* _writer;
3429   ArchiveBuilder* _builder;
3430 public:
3431   CopyAdapterTableToArchive(CompactHashtableWriter* writer) : _writer(writer),
3432                                                              _builder(ArchiveBuilder::current())
3433   {}
3434 
3435   bool do_entry(AdapterFingerPrint* fp, AdapterHandlerEntry* entry) {
3436     LogStreamHandle(Trace, aot) lsh;
3437     if (ArchiveBuilder::current()->has_been_archived((address)entry)) {
3438       assert(ArchiveBuilder::current()->has_been_archived((address)fp), "must be");
3439       AdapterFingerPrint* buffered_fp = ArchiveBuilder::current()->get_buffered_addr(fp);
3440       assert(buffered_fp != nullptr,"sanity check");
3441       AdapterHandlerEntry* buffered_entry = ArchiveBuilder::current()->get_buffered_addr(entry);
3442       assert(buffered_entry != nullptr,"sanity check");
3443 

3483   }
3484 #endif
3485 }
3486 
3487 // This method is used during production run to link archived adapters (stored in AOT Cache)
3488 // to their code in AOT Code Cache
// If the AOT code cannot be used (cache unavailable, caching disabled, or the
// lookup fails), the adapter code is regenerated from the fingerprint instead.
3489 void AdapterHandlerEntry::link() {
3490   ResourceMark rm;
3491   assert(_fingerprint != nullptr, "_fingerprint must not be null");
3492   bool generate_code = false;
3493   // Generate code only if AOTCodeCache is not available, or
3494   // caching adapters is disabled, or we fail to link
3495   // the AdapterHandlerEntry to its code in the AOTCodeCache
3496   if (AOTCodeCache::is_using_adapter()) {
3497     AdapterHandlerLibrary::link_aot_adapter_handler(this);
3498     // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
3499     if (_adapter_blob == nullptr) {
3500       log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
3501       generate_code = true;
3502     }
3503 
3504     if (get_sig_cc() == nullptr) {
3505       // Calling conventions have to be regenerated at runtime and are accessed through method adapters,
3506       // which are archived in the AOT code cache. If the adapters are not regenerated, the
3507       // calling conventions should be regenerated here.
3508       CompiledEntrySignature ces;
3509       ces.initialize_from_fingerprint(_fingerprint);
3510       if (ces.has_scalarized_args()) {
3511         // Save a C heap allocated version of the scalarized signature and store it in the adapter
3512         GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3513         heap_sig->appendAll(ces.sig_cc());
3514         set_sig_cc(heap_sig);
3515         heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro()->length(), mtInternal);
3516         heap_sig->appendAll(ces.sig_cc_ro());
3517         set_sig_cc_ro(heap_sig);
3518       }
3519     }
3520   } else {
3521     generate_code = true;
3522   }
3523   if (generate_code) {
     // Fallback: rebuild the signatures from the fingerprint and generate
     // fresh adapter code (allocate_code_blob=true, is_transient=false).
3524     CompiledEntrySignature ces;
3525     ces.initialize_from_fingerprint(_fingerprint);
3526     if (!AdapterHandlerLibrary::generate_adapter_code(this, ces, true, false)) {
3527       // Don't throw exceptions during VM initialization because java.lang.* classes
3528       // might not have been initialized, causing problems when constructing the
3529       // Java exception object.
3530       vm_exit_during_initialization("Out of space in CodeCache for adapters");
3531     }
3532   }
3533   if (_adapter_blob != nullptr) {
3534     post_adapter_creation(this);
3535   }
3536   assert(_linked, "AdapterHandlerEntry must now be linked");
3537 }
3538 
// Links every archived adapter handler to its AOT code (or regenerates it)
// and seeds _id_counter past the largest archived id so that ids of adapters
// created later at runtime cannot collide with archived ones.
3539 void AdapterHandlerLibrary::link_aot_adapters() {
3540   uint max_id = 0;
3541   assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
3542   /* It is possible that some adapters generated in assembly phase are not stored in the cache.
3543    * That implies adapter ids of the adapters in the cache may not be contiguous.
3544    * If the size of the _aot_adapter_handler_table is used to initialize _id_counter, then it may
3545    * result in collision of adapter ids between AOT stored handlers and runtime generated handlers.
3546    * To avoid such situation, initialize the _id_counter with the largest adapter id among the AOT stored handlers.
3547    */
3548   _aot_adapter_handler_table.iterate_all([&](AdapterHandlerEntry* entry) {
3549     assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
3550     entry->link();
3551     max_id = MAX2(max_id, entry->id());
3552   });
3553   // Set adapter id to the maximum id found in the AOTCache
3554   assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
3555   _id_counter = max_id;
3556 }
3557 
3558 // This method is called during production run to lookup simple adapters
3559 // in the archived adapter handler table
3560 void AdapterHandlerLibrary::lookup_simple_adapters() {
3561   assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");
3562 
3563   MutexLocker mu(AdapterHandlerLibrary_lock);
3564   ResourceMark rm;
3565   CompiledEntrySignature no_args;
3566   no_args.compute_calling_conventions();
3567   _no_arg_handler = lookup(no_args.sig_cc(), no_args.has_inline_recv());
3568 
3569   CompiledEntrySignature obj_args;
3570   SigEntry::add_entry(obj_args.sig(), T_OBJECT);
3571   obj_args.compute_calling_conventions();
3572   _obj_arg_handler = lookup(obj_args.sig_cc(), obj_args.has_inline_recv());
3573 
3574   CompiledEntrySignature int_args;
3575   SigEntry::add_entry(int_args.sig(), T_INT);
3576   int_args.compute_calling_conventions();
3577   _int_arg_handler = lookup(int_args.sig_cc(), int_args.has_inline_recv());
3578 
3579   CompiledEntrySignature obj_int_args;
3580   SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
3581   SigEntry::add_entry(obj_int_args.sig(), T_INT);
3582   obj_int_args.compute_calling_conventions();
3583   _obj_int_arg_handler = lookup(obj_int_args.sig_cc(), obj_int_args.has_inline_recv());
3584 
3585   CompiledEntrySignature obj_obj_args;
3586   SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3587   SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3588   obj_obj_args.compute_calling_conventions();
3589   _obj_obj_arg_handler = lookup(obj_obj_args.sig_cc(), obj_obj_args.has_inline_recv());
3590 
3591   assert(_no_arg_handler != nullptr &&
3592          _obj_arg_handler != nullptr &&
3593          _int_arg_handler != nullptr &&
3594          _obj_int_arg_handler != nullptr &&
3595          _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
3596   assert(_no_arg_handler->is_linked() &&
3597          _obj_arg_handler->is_linked() &&
3598          _int_arg_handler->is_linked() &&
3599          _obj_int_arg_handler->is_linked() &&
3600          _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
3601 }
3602 #endif // INCLUDE_CDS
3603 
3604 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3605   LogStreamHandle(Trace, aot) lsh;
3606   if (lsh.is_enabled()) {
3607     lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3608     lsh.cr();
3609   }
3610   it->push(&_fingerprint);
3611 }
3612 
// Releases all C heap allocated state owned by the entry: the fingerprint,
// the saved scalarized signatures (if any), the debug-only saved code copy,
// and finally the entry itself.
AdapterHandlerEntry::~AdapterHandlerEntry() {
  if (_fingerprint != nullptr) {
    AdapterFingerPrint::deallocate(_fingerprint);
    _fingerprint = nullptr;
  }
  // Scalarized calling convention signatures are only set for adapters with
  // scalarized args (see the C heap copies made when the entry is created/linked).
  if (_sig_cc != nullptr) {
    delete _sig_cc;
  }
  if (_sig_cc_ro != nullptr) {
    delete _sig_cc_ro;
  }
#ifdef ASSERT
  // Debug-only copy of the pre-relocation adapter code, captured by save_code().
  FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
#endif
  FreeHeap(this);
}
3629 
3630 
3631 #ifdef ASSERT
3632 // Capture the code before relocation so that it can be compared
3633 // against other versions.  If the code is captured after relocation
3634 // then relative instructions won't be equivalent.
3635 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3636   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3637   _saved_code_length = length;
3638   memcpy(_saved_code, buffer, length);
3639 }
3640 
3641 
3642 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3643   assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");

3691 
3692       struct { double data[20]; } locs_buf;
3693       struct { double data[20]; } stubs_locs_buf;
3694       buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3695 #if defined(AARCH64) || defined(PPC64)
3696       // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3697       // in the constant pool to ensure ordering between the barrier and oops
3698       // accesses. For native_wrappers we need a constant.
3699       // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3700       // static java call that is resolved in the runtime.
3701       if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3702         buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3703       }
3704 #endif
3705       buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3706       MacroAssembler _masm(&buffer);
3707 
3708       // Fill in the signature array, for the calling-convention call.
3709       const int total_args_passed = method->size_of_parameters();
3710 
3711       BasicType stack_sig_bt[16];
3712       VMRegPair stack_regs[16];
3713       BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3714       VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3715 
3716       int i = 0;
3717       if (!method->is_static()) {  // Pass in receiver first
3718         sig_bt[i++] = T_OBJECT;
3719       }
3720       SignatureStream ss(method->signature());
3721       for (; !ss.at_return_type(); ss.next()) {
3722         sig_bt[i++] = ss.type();  // Collect remaining bits of signature
3723         if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3724           sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
3725         }
3726       }
3727       assert(i == total_args_passed, "");
3728       BasicType ret_type = ss.type();
3729 
3730       // Now get the compiled-Java arguments layout.
3731       SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3732 
3733       // Generate the compiled-to-native wrapper code
3734       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3735 
3736       if (nm != nullptr) {
3737         {
3738           MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3739           if (nm->make_in_use()) {
3740             method->set_code(method, nm);
3741           }
3742         }
3743 
3744         DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3745         if (directive->PrintAssemblyOption) {
3746           nm->print_code();
3747         }
3748         DirectivesStack::release(directive);

3956       if (b == handler->adapter_blob()) {
3957         found = true;
3958         st->print("Adapter for signature: ");
3959         handler->print_adapter_on(st);
3960         return false; // abort iteration
3961       } else {
3962         return true; // keep looking
3963       }
3964     };
3965     assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3966     _adapter_handler_table->iterate(findblob_runtime_table);
3967   }
3968   assert(found, "Should have found handler");
3969 }
3970 
3971 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3972   st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3973   if (adapter_blob() != nullptr) {
3974     st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3975     st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3976     st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3977     st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3978     st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3979     st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3980     if (get_c2i_no_clinit_check_entry() != nullptr) {
3981       st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3982     }
3983   }
3984   st->cr();
3985 }
3986 
3987 #ifndef PRODUCT
3988 
// Non-product only: dump the adapter handler table statistics.
void AdapterHandlerLibrary::print_statistics() {
  print_table_statistics();
}
3992 
3993 #endif /* PRODUCT */
3994 
3995 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3996   assert(current == JavaThread::current(), "pre-condition");
3997   StackOverflow* overflow_state = current->stack_overflow_state();
3998   overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3999   overflow_state->set_reserved_stack_activation(current->stack_base());

4046         event.set_method(method);
4047         event.commit();
4048       }
4049     }
4050   }
4051   return activation;
4052 }
4053 
4054 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
4055   // After any safepoint, just before going back to compiled code,
4056   // we inform the GC that we will be doing initializing writes to
4057   // this object in the future without emitting card-marks, so
4058   // GC may take any compensating steps.
4059 
4060   oop new_obj = current->vm_result_oop();
4061   if (new_obj == nullptr) return;
4062 
4063   BarrierSet *bs = BarrierSet::barrier_set();
4064   bs->on_slowpath_allocation_exit(current, new_obj);
4065 }
4066 
4067 // We are at a compiled code to interpreter call. We need backing
4068 // buffers for all inline type arguments. Allocate an object array to
4069 // hold them (convenient because once we're done with it we don't have
4070 // to worry about freeing it).
// Allocates heap buffers for the scalarized inline type arguments of 'callee'
// whose caller-supplied buffer is null, returning them in an object array
// (null if no allocation was needed). 'allocate_receiver' selects the
// calling convention with a scalarized receiver; 'from_c1' accounts for the
// extra frame a C1 caller has on the stack.
oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, bool from_c1, TRAPS) {
  assert(InlineTypePassFieldsAsArgs, "no reason to call this");
  ResourceMark rm;

  // Retrieve arguments passed at the call
  RegisterMap reg_map2(THREAD,
                       RegisterMap::UpdateMap::include,
                       RegisterMap::ProcessFrames::include,
                       RegisterMap::WalkContinuation::skip);
  frame stubFrame = THREAD->last_frame();
  frame callerFrame = stubFrame.sender(&reg_map2);
  if (from_c1) {
    // Skip the additional frame present when called from C1.
    callerFrame = callerFrame.sender(&reg_map2);
  }
  int arg_size;
  // Pick the scalarized signature matching the entry taken: sig_cc when the
  // receiver is scalarized too, sig_cc_ro otherwise.
  const GrowableArray<SigEntry>* sig = allocate_receiver ? callee->adapter()->get_sig_cc() : callee->adapter()->get_sig_cc_ro();
  assert(sig != nullptr, "sig should never be null");
  TempNewSymbol tmp_sig = SigEntry::create_symbol(sig);
  VMRegPair* reg_pairs = find_callee_arguments(tmp_sig, false, false, &arg_size);

  // First pass: count the number of buffer slots that may be needed — one per
  // scalarized argument, plus one for the receiver if it is scalarized.
  int nb_slots = 0;
  InstanceKlass* holder = callee->method_holder();
  allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
  if (allocate_receiver) {
    nb_slots++;
  }
  int arg_num = callee->is_static() ? 0 : 1;
  for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
    BasicType bt = ss.type();
    if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
      nb_slots++;
    }
    if (bt != T_VOID) {
      arg_num++;
    }
  }
  // Second pass: walk the Java signature in lockstep with the scalarized
  // signature and allocate an instance for every scalarized argument whose
  // buffer passed at the call is null.
  objArrayOop array_oop = nullptr;
  objArrayHandle array;
  arg_num = callee->is_static() ? 0 : 1;
  int i = 0;          // next slot to fill in the result array
  uint pos = 0;       // current index into the scalarized signature
  uint depth = 0;     // nesting level of scalarized value markers
  uint ignored = 0;   // signature entries that have no matching register pair
  if (allocate_receiver) {
    assert(sig->at(pos)._bt == T_METADATA, "scalarized value expected");
    pos++;
    ignored++;
    depth++;
    assert(sig->at(pos)._bt == T_OBJECT, "buffer argument");
    // The receiver's buffer is always in the first register pair.
    uint reg_pos = 0;
    assert(reg_pos < (uint)arg_size, "");
    VMRegPair reg_pair = reg_pairs[reg_pos];
    oop* buffer = callerFrame.oopmapreg_to_oop_location(reg_pair.first(), &reg_map2);
    instanceHandle h_buffer(THREAD, (instanceOop)*buffer);
    InlineKlass* vk = InlineKlass::cast(holder);
    if (h_buffer.not_null()) {
      assert(h_buffer->klass() == vk, "buffer not of expected class");
    } else {
      // Only allocate if buffer passed at the call is null
      if (array_oop == nullptr) {
        // Lazily allocate the holder array; the handle keeps it safe across
        // the allocations below, which can safepoint.
        array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
        array = objArrayHandle(THREAD, array_oop);
      }
      oop res = vk->allocate_instance(CHECK_NULL);
      array->obj_at_put(i, res);
    }
    i++;
  }
  for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
    BasicType bt = ss.type();
    if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
      // Advance 'pos' through the scalarized signature to this argument's
      // slot. T_METADATA entries open a scalarized value and a bare T_VOID
      // (i.e. not the second slot of a long/double) closes one; both kinds
      // are counted in 'ignored' since they consume no register pair.
      // NOTE(review): the depth == 1 stop condition relies on the invariant
      // maintained across iterations — confirm against CompiledEntrySignature.
      while (true) {
        BasicType bt = sig->at(pos)._bt;
        if (bt == T_METADATA) {
          depth++;
          ignored++;
          if (depth == 1) {
            break;
          }
        } else if (bt == T_VOID && sig->at(pos - 1)._bt != T_LONG && sig->at(pos - 1)._bt != T_DOUBLE) {
          ignored++;
          depth--;
        }
        pos++;
      }
      pos++;
      assert(sig->at(pos)._bt == T_OBJECT, "buffer argument expected");
      // Marker entries carry no register, so subtract them to index reg_pairs.
      uint reg_pos = pos - ignored;
      assert(reg_pos < (uint)arg_size, "out of bound register?");
      VMRegPair reg_pair = reg_pairs[reg_pos];
      oop* buffer = callerFrame.oopmapreg_to_oop_location(reg_pair.first(), &reg_map2);
      instanceHandle h_buffer(THREAD, (instanceOop)*buffer);
      InlineKlass* vk = ss.as_inline_klass(holder);
      assert(vk != nullptr, "Unexpected klass");
      if (h_buffer.not_null()) {
        assert(h_buffer->klass() == vk, "buffer not of expected class");
      } else {
        // Only allocate if buffer passed at the call is null
        if (array_oop == nullptr) {
          array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
          array = objArrayHandle(THREAD, array_oop);
        }
        oop res = vk->allocate_instance(CHECK_NULL);
        array->obj_at_put(i, res);
      }
      i++;
    }
    if (bt != T_VOID) {
      arg_num++;
    }
  }
  return array();
}
4184 
// Runtime entry that allocates backing buffers for a callee's scalarized
// inline type arguments (non-C1 variant: from_c1 is false). The resulting
// array is handed back through the thread's vm_result slots.
JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
  methodHandle callee(current, callee_method);
  oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, false, CHECK);
  current->set_vm_result_oop(array);
  current->set_vm_result_metadata(callee()); // TODO: required to keep callee live?
JRT_END
4191 
4192 // We're returning from an interpreted method: load each field into a
4193 // register following the calling convention
// Scatter the fields of the inline type instance 'res' into the caller's
// saved return registers, following the klass's return calling convention.
// Bails out (leaving 'res' buffered) when the klass has no register-based
// return convention.
JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
{
  assert(res->klass()->is_inline_klass(), "only inline types here");
  ResourceMark rm;
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stubFrame = current->last_frame();
  frame callerFrame = stubFrame.sender(&reg_map);
  assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");

  InlineKlass* vk = InlineKlass::cast(res->klass());

  const Array<SigEntry>* sig_vk = vk->extended_sig();
  const Array<VMRegPair>* regs = vk->return_regs();

  if (regs == nullptr) {
    // The fields of the inline klass don't fit in registers, bail out
    return;
  }

  // regs->at(0) holds the oop itself (verified in the ASSERT block below),
  // so field register pairs start at index 1.
  int j = 1;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      // Marker for a nested scalarized value; no register assigned.
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        // Second slot of a long/double still consumes a register pair entry.
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first(), nullptr);
    guarantee(loc != nullptr, "bad register save location");
    // Copy the field value from the buffered instance into the register
    // save slot, using the store width matching the field type.
    switch(bt) {
    case T_BOOLEAN:
      *(jboolean*)loc = res->bool_field(off);
      break;
    case T_CHAR:
      *(jchar*)loc = res->char_field(off);
      break;
    case T_BYTE:
      *(jbyte*)loc = res->byte_field(off);
      break;
    case T_SHORT:
      *(jshort*)loc = res->short_field(off);
      break;
    case T_INT: {
      *(jint*)loc = res->int_field(off);
      break;
    }
    case T_LONG:
#ifdef _LP64
      *(intptr_t*)loc = res->long_field(off);
#else
      Unimplemented();
#endif
      break;
    case T_OBJECT:
    case T_ARRAY: {
      *(oop*)loc = res->obj_field(off);
      break;
    }
    case T_FLOAT:
      *(jfloat*)loc = res->float_field(off);
      break;
    case T_DOUBLE:
      *(jdouble*)loc = res->double_field(off);
      break;
    default:
      ShouldNotReachHere();
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");

#ifdef ASSERT
  // The first return register must still hold the oop itself.
  VMRegPair pair = regs->at(0);
  address loc = reg_map.location(pair.first(), nullptr);
  assert(*(oopDesc**)loc == res, "overwritten object");
#endif

  current->set_vm_result_oop(res);
}
JRT_END
4285 
4286 // We've returned to an interpreted method, the interpreter needs a
4287 // reference to an inline type instance. Allocate it and initialize it
4288 // from field's values in registers.
// 'res' is tagged in its low bit: clear means it is already an oop (or a
// pointer into the buffer area) to hand back directly; set means the fields
// came back in registers and 'res' (minus the tag) is the InlineKlass*,
// so a buffered instance must be allocated and filled from the registers.
JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
{
  ResourceMark rm;
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stubFrame = current->last_frame();
  frame callerFrame = stubFrame.sender(&reg_map);

#ifdef ASSERT
  InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
#endif

  if (!is_set_nth_bit(res, 0)) {
    // We're not returning with inline type fields in registers (the
    // calling convention didn't allow it for this inline klass)
    assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
    current->set_vm_result_oop((oopDesc*)res);
    assert(verif_vk == nullptr, "broken calling convention");
    return;
  }

  // Strip the tag bit to recover the klass pointer.
  clear_nth_bit(res, 0);
  InlineKlass* vk = (InlineKlass*)res;
  assert(verif_vk == vk, "broken calling convention");
  assert(Metaspace::contains((void*)res), "should be klass");

  // Allocate handles for every oop field so they are safe in case of
  // a safepoint when allocating
  GrowableArray<Handle> handles;
  vk->save_oop_fields(reg_map, handles);

  // It's unsafe to safepoint until we are here
  JRT_BLOCK;
  {
    JavaThread* THREAD = current;
    // Rebuild a buffered instance from the saved register/handle values.
    oop vt = vk->realloc_result(reg_map, handles, CHECK);
    current->set_vm_result_oop(vt);
  }
  JRT_BLOCK_END;
}
JRT_END
< prev index next >