28 #include "classfile/javaClasses.inline.hpp"
29 #include "classfile/stringTable.hpp"
30 #include "classfile/vmClasses.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/aotCodeCache.hpp"
33 #include "code/codeCache.hpp"
34 #include "code/compiledIC.hpp"
35 #include "code/nmethod.inline.hpp"
36 #include "code/scopeDesc.hpp"
37 #include "code/vtableStubs.hpp"
38 #include "compiler/abstractCompiler.hpp"
39 #include "compiler/compileBroker.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "gc/shared/barrierSet.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "interpreter/interpreterRuntime.hpp"
45 #include "jfr/jfrEvents.hpp"
46 #include "jvm.h"
47 #include "logging/log.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "memory/universe.hpp"
50 #include "metaprogramming/primitiveConversions.hpp"
51 #include "oops/klass.hpp"
52 #include "oops/method.inline.hpp"
53 #include "oops/objArrayKlass.hpp"
54 #include "oops/oop.inline.hpp"
55 #include "prims/forte.hpp"
56 #include "prims/jvmtiExport.hpp"
57 #include "prims/jvmtiThreadState.hpp"
58 #include "prims/methodHandles.hpp"
59 #include "prims/nativeLookup.hpp"
60 #include "runtime/arguments.hpp"
61 #include "runtime/atomicAccess.hpp"
62 #include "runtime/basicLock.inline.hpp"
63 #include "runtime/frame.inline.hpp"
64 #include "runtime/handles.inline.hpp"
65 #include "runtime/init.hpp"
66 #include "runtime/interfaceSupport.inline.hpp"
67 #include "runtime/java.hpp"
68 #include "runtime/javaCalls.hpp"
69 #include "runtime/jniHandles.inline.hpp"
70 #include "runtime/osThread.hpp"
71 #include "runtime/perfData.hpp"
72 #include "runtime/sharedRuntime.hpp"
73 #include "runtime/stackWatermarkSet.hpp"
74 #include "runtime/stubRoutines.hpp"
75 #include "runtime/synchronizer.inline.hpp"
76 #include "runtime/timerTrace.hpp"
77 #include "runtime/vframe.inline.hpp"
78 #include "runtime/vframeArray.hpp"
79 #include "runtime/vm_version.hpp"
80 #include "utilities/copy.hpp"
81 #include "utilities/dtrace.hpp"
82 #include "utilities/events.hpp"
83 #include "utilities/globalDefinitions.hpp"
84 #include "utilities/hashTable.hpp"
85 #include "utilities/macros.hpp"
86 #include "utilities/xmlstream.hpp"
87 #ifdef COMPILER1
88 #include "c1/c1_Runtime1.hpp"
89 #endif
90 #if INCLUDE_JFR
91 #include "jfr/jfr.inline.hpp"
92 #endif
1226 // for a call currently in progress, i.e., arguments have been pushed on the stack
1227 // but the callee has not been invoked yet. The caller frame must be compiled.
1228 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1229 CallInfo& callinfo, TRAPS) {
1230 Handle receiver;
1231 Handle nullHandle; // create a handy null handle for exception returns
1232 JavaThread* current = THREAD;
1233
1234 assert(!vfst.at_end(), "Java frame must exist");
1235
1236 // Find caller and bci from vframe
1237 methodHandle caller(current, vfst.method());
1238 int bci = vfst.bci();
1239
1240 if (caller->is_continuation_enter_intrinsic()) {
1241 bc = Bytecodes::_invokestatic;
1242 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1243 return receiver;
1244 }
1245
1246 Bytecode_invoke bytecode(caller, bci);
1247 int bytecode_index = bytecode.index();
1248 bc = bytecode.invoke_code();
1249
1250 methodHandle attached_method(current, extract_attached_method(vfst));
1251 if (attached_method.not_null()) {
1252 Method* callee = bytecode.static_target(CHECK_NH);
1253 vmIntrinsics::ID id = callee->intrinsic_id();
1254     // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual call,
1255     // it attaches the statically resolved method to the call site.
1256 if (MethodHandles::is_signature_polymorphic(id) &&
1257 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1258 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1259
1260 // Adjust invocation mode according to the attached method.
1261 switch (bc) {
1262 case Bytecodes::_invokevirtual:
1263 if (attached_method->method_holder()->is_interface()) {
1264 bc = Bytecodes::_invokeinterface;
1265 }
1266 break;
1267 case Bytecodes::_invokeinterface:
1268 if (!attached_method->method_holder()->is_interface()) {
1269 bc = Bytecodes::_invokevirtual;
1270 }
1271 break;
1272 case Bytecodes::_invokehandle:
1273 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1274 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1275 : Bytecodes::_invokevirtual;
1276 }
1277 break;
1278 default:
1279 break;
1280 }
1281 }
1282 }
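  // For example (a sketch of the adjustment above): a linkToVirtual intrinsic
  // first decodes as _invokevirtual, but when the attached (statically
  // resolved) method is declared in an interface, the call is re-classified
  // as _invokeinterface so the resolution below selects the right dispatch path.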
1283
1284 assert(bc != Bytecodes::_illegal, "not initialized");
1285
1286 bool has_receiver = bc != Bytecodes::_invokestatic &&
1287 bc != Bytecodes::_invokedynamic &&
1288 bc != Bytecodes::_invokehandle;
1289
1290 // Find receiver for non-static call
1291 if (has_receiver) {
1292     // This register map must be updated since we need to find the receiver for
1293     // compiled frames. The receiver might be in a register.
1294 RegisterMap reg_map2(current,
1295 RegisterMap::UpdateMap::include,
1296 RegisterMap::ProcessFrames::include,
1297 RegisterMap::WalkContinuation::skip);
1298 frame stubFrame = current->last_frame();
1299 // Caller-frame is a compiled frame
1300     frame callerFrame = stubFrame.sender(&reg_map2);
1301
1302 if (attached_method.is_null()) {
1303 Method* callee = bytecode.static_target(CHECK_NH);
1304 if (callee == nullptr) {
1305 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1306 }
1307 }
1308
1309 // Retrieve from a compiled argument list
1310     receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1311 assert(oopDesc::is_oop_or_null(receiver()), "");
1312
1313 if (receiver.is_null()) {
1314 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1315 }
1316 }
1317
1318 // Resolve method
1319 if (attached_method.not_null()) {
1320 // Parameterized by attached method.
1321 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1322 } else {
1323 // Parameterized by bytecode.
1324 constantPoolHandle constants(current, caller->constants());
1325 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1326 }
1327
1328 #ifdef ASSERT
1329 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1330 if (has_receiver) {
1331 assert(receiver.not_null(), "should have thrown exception");
1332 Klass* receiver_klass = receiver->klass();
1333 Klass* rk = nullptr;
1334 if (attached_method.not_null()) {
1335 // In case there's resolved method attached, use its holder during the check.
1336 rk = attached_method->method_holder();
1337 } else {
1338 // Klass is already loaded.
1339 constantPoolHandle constants(current, caller->constants());
1340 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1341 }
1342 Klass* static_receiver_klass = rk;
1343 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1344 "actual receiver must be subclass of static receiver klass");
1345 if (receiver_klass->is_instance_klass()) {
1346 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1347 tty->print_cr("ERROR: Klass not yet initialized!!");
1348 receiver_klass->print();
1349 }
1350 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1351 }
1352 }
1353 #endif
1354
1355 return receiver;
1356 }
1357
1358 methodHandle SharedRuntime::find_callee_method(TRAPS) {
1359 JavaThread* current = THREAD;
1360 ResourceMark rm(current);
1361   // We first need to check if any Java activations (compiled, interpreted)
1362 // exist on the stack since last JavaCall. If not, we need
1363 // to get the target method from the JavaCall wrapper.
1364 vframeStream vfst(current, true); // Do not skip any javaCalls
1365 methodHandle callee_method;
1366 if (vfst.at_end()) {
1367 // No Java frames were found on stack since we did the JavaCall.
1368 // Hence the stack can only contain an entry_frame. We need to
1369 // find the target method from the stub frame.
1370 RegisterMap reg_map(current,
1371 RegisterMap::UpdateMap::skip,
1372 RegisterMap::ProcessFrames::include,
1373 RegisterMap::WalkContinuation::skip);
1374 frame fr = current->last_frame();
1375 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1376     fr = fr.sender(&reg_map);
1377 assert(fr.is_entry_frame(), "must be");
1378 // fr is now pointing to the entry frame.
1379 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1380 } else {
1381 Bytecodes::Code bc;
1382 CallInfo callinfo;
1383 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1384 callee_method = methodHandle(current, callinfo.selected_method());
1385 }
1386 assert(callee_method()->is_method(), "must be");
1387 return callee_method;
1388 }
1389
1390 // Resolves a call.
1391 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
1392 JavaThread* current = THREAD;
1393 ResourceMark rm(current);
1394 RegisterMap cbl_map(current,
1395 RegisterMap::UpdateMap::skip,
1396 RegisterMap::ProcessFrames::include,
1397 RegisterMap::WalkContinuation::skip);
1398 frame caller_frame = current->last_frame().sender(&cbl_map);
1399
1400 CodeBlob* caller_cb = caller_frame.cb();
1401 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1402 nmethod* caller_nm = caller_cb->as_nmethod();
1403
1404 // determine call info & receiver
1405 // note: a) receiver is null for static calls
1406 // b) an exception is thrown if receiver is null for non-static calls
1407 CallInfo call_info;
1408 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1409 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1410
1411 NoSafepointVerifier nsv;
1412
1413 methodHandle callee_method(current, call_info.selected_method());
1414
1415 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1416 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1417 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1418 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1419 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1420
1421 assert(!caller_nm->is_unloading(), "It should not be unloading");
1422
1423 #ifndef PRODUCT
1424 // tracing/debugging/statistics
1425 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1426 (is_virtual) ? (&_resolve_virtual_ctr) :
1427 (&_resolve_static_ctr);
1428 AtomicAccess::inc(addr);
1429
1430 if (TraceCallFixup) {
1431 ResourceMark rm(current);
1432 tty->print("resolving %s%s (%s) call to",
1433 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1434 Bytecodes::name(invoke_code));
1435 callee_method->print_short_name(tty);
1436 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1437 p2i(caller_frame.pc()), p2i(callee_method->code()));
1438 }
1439 #endif
1440
1441 if (invoke_code == Bytecodes::_invokestatic) {
1442 assert(callee_method->method_holder()->is_initialized() ||
1443 callee_method->method_holder()->is_reentrant_initialization(current),
1444 "invalid class initialization state for invoke_static");
1445 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1446 // In order to keep class initialization check, do not patch call
1447 // site for static call when the class is not fully initialized.
1448 // Proper check is enforced by call site re-resolution on every invocation.
1449 //
1450 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1451 // explicit class initialization check is put in nmethod entry (VEP).
1452 assert(callee_method->method_holder()->is_linked(), "must be");
1453 return callee_method;
1454 }
1455 }
1456
1457
1458 // JSR 292 key invariant:
1459 // If the resolved method is a MethodHandle invoke target, the call
1460 // site must be a MethodHandle call site, because the lambda form might tail-call
1461 // leaving the stack in a state unknown to either caller or callee
1462
1463 // Compute entry points. The computation of the entry points is independent of
1464 // patching the call.
1465
1466 // Make sure the callee nmethod does not get deoptimized and removed before
1467 // we are done patching the code.
1468
1469
1470 CompiledICLocker ml(caller_nm);
1471 if (is_virtual && !is_optimized) {
1472 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1473 inline_cache->update(&call_info, receiver->klass());
1474 } else {
1475 // Callsite is a direct call - set it to the destination method
1476 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1477 callsite->set(callee_method);
1478 }
1479
1480 return callee_method;
1481 }
1482
1483 // Inline caches exist only in compiled code
1484 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1485 #ifdef ASSERT
1486 RegisterMap reg_map(current,
1487 RegisterMap::UpdateMap::skip,
1488 RegisterMap::ProcessFrames::include,
1489 RegisterMap::WalkContinuation::skip);
1490 frame stub_frame = current->last_frame();
1491 assert(stub_frame.is_runtime_frame(), "sanity check");
1492   frame caller_frame = stub_frame.sender(&reg_map);
1493 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1494 #endif /* ASSERT */
1495
1496 methodHandle callee_method;
1497 JRT_BLOCK
1498 callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1499 // Return Method* through TLS
1500 current->set_vm_result_metadata(callee_method());
1501 JRT_BLOCK_END
1502 // return compiled code entry point after potential safepoints
1503 return get_resolved_entry(current, callee_method);
1504 JRT_END
1505
1506
1507 // Handle call site that has been made non-entrant
1508 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1509 // 6243940 We might end up in here if the callee is deoptimized
1510 // as we race to call it. We don't want to take a safepoint if
1511 // the caller was interpreted because the caller frame will look
1512 // interpreted to the stack walkers and arguments are now
1513 // "compiled" so it is much better to make this transition
1514 // invisible to the stack walking code. The i2c path will
1515 // place the callee method in the callee_target. It is stashed
1516   // there because if we tried to find the callee by normal means a
1517   // safepoint would be possible and we would have trouble gc'ing the compiled args.
1518 RegisterMap reg_map(current,
1519 RegisterMap::UpdateMap::skip,
1520 RegisterMap::ProcessFrames::include,
1521 RegisterMap::WalkContinuation::skip);
1522 frame stub_frame = current->last_frame();
1523 assert(stub_frame.is_runtime_frame(), "sanity check");
1524   frame caller_frame = stub_frame.sender(&reg_map);
1525
1526 if (caller_frame.is_interpreted_frame() ||
1527 caller_frame.is_entry_frame() ||
1528 caller_frame.is_upcall_stub_frame()) {
1529 Method* callee = current->callee_target();
1530 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1531 current->set_vm_result_metadata(callee);
1532 current->set_callee_target(nullptr);
1533 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1534 // Bypass class initialization checks in c2i when caller is in native.
1535 // JNI calls to static methods don't have class initialization checks.
1536 // Fast class initialization checks are present in c2i adapters and call into
1537 // SharedRuntime::handle_wrong_method() on the slow path.
1538 //
1539 // JVM upcalls may land here as well, but there's a proper check present in
1540 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1541 // so bypassing it in c2i adapter is benign.
1542 return callee->get_c2i_no_clinit_check_entry();
1543 } else {
1544 return callee->get_c2i_entry();
1545 }
1546 }
1547
1548   // Must be the compiled-to-compiled path, which is safe to stack walk
1549 methodHandle callee_method;
1550 JRT_BLOCK
1551     // Force resolving of caller (if we were called from a compiled frame)
1552 callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1553 current->set_vm_result_metadata(callee_method());
1554 JRT_BLOCK_END
1555 // return compiled code entry point after potential safepoints
1556 return get_resolved_entry(current, callee_method);
1557 JRT_END
1558
1559 // Handle abstract method call
1560 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1561 // Verbose error message for AbstractMethodError.
1562 // Get the called method from the invoke bytecode.
1563 vframeStream vfst(current, true);
1564 assert(!vfst.at_end(), "Java frame must exist");
1565 methodHandle caller(current, vfst.method());
1566 Bytecode_invoke invoke(caller, vfst.bci());
1567 DEBUG_ONLY( invoke.verify(); )
1568
1569 // Find the compiled caller frame.
1570 RegisterMap reg_map(current,
1571 RegisterMap::UpdateMap::include,
1572 RegisterMap::ProcessFrames::include,
1573 RegisterMap::WalkContinuation::skip);
1574 frame stubFrame = current->last_frame();
1575 assert(stubFrame.is_runtime_frame(), "must be");
1576   frame callerFrame = stubFrame.sender(&reg_map);
1577 assert(callerFrame.is_compiled_frame(), "must be");
1578
1579 // Install exception and return forward entry.
1580 address res = SharedRuntime::throw_AbstractMethodError_entry();
1581 JRT_BLOCK
1582 methodHandle callee(current, invoke.static_target(current));
1583 if (!callee.is_null()) {
1584       oop recv = callerFrame.retrieve_receiver(&reg_map);
1585 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1586 res = StubRoutines::forward_exception_entry();
1587 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1588 }
1589 JRT_BLOCK_END
1590 return res;
1591 JRT_END
1592
1593 // return verified_code_entry if interp_only_mode is not set for the current thread;
1594 // otherwise return c2i entry.
1595 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method) {
1596 if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1597 // In interp_only_mode we need to go to the interpreted entry
1598 // The c2i won't patch in this mode -- see fixup_callers_callsite
1599 return callee_method->get_c2i_entry();
1600 }
1601 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1602 return callee_method->verified_code_entry();
1603 }
1604
1605 // resolve a static call and patch code
1606 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current))
1607 methodHandle callee_method;
1608 bool enter_special = false;
1609 JRT_BLOCK
1610 callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1611 current->set_vm_result_metadata(callee_method());
1612 JRT_BLOCK_END
1613 // return compiled code entry point after potential safepoints
1614 return get_resolved_entry(current, callee_method);
1615 JRT_END
1616
1617 // resolve virtual call and update inline cache to monomorphic
1618 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1619 methodHandle callee_method;
1620 JRT_BLOCK
1621 callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
1622 current->set_vm_result_metadata(callee_method());
1623 JRT_BLOCK_END
1624 // return compiled code entry point after potential safepoints
1625 return get_resolved_entry(current, callee_method);
1626 JRT_END
1627
1628
1629 // Resolve a virtual call that can be statically bound (e.g., always
1630 // monomorphic, so it has no inline cache). Patch code to resolved target.
1631 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1632 methodHandle callee_method;
1633 JRT_BLOCK
1634 callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
1635 current->set_vm_result_metadata(callee_method());
1636 JRT_BLOCK_END
1637 // return compiled code entry point after potential safepoints
1638 return get_resolved_entry(current, callee_method);
1639 JRT_END
1640
1641 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1642 JavaThread* current = THREAD;
1643 ResourceMark rm(current);
1644 CallInfo call_info;
1645 Bytecodes::Code bc;
1646
1647 // receiver is null for static calls. An exception is thrown for null
1648 // receivers for non-static calls
1649 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1650
1651 methodHandle callee_method(current, call_info.selected_method());
1652
1653 #ifndef PRODUCT
1654 AtomicAccess::inc(&_ic_miss_ctr);
1655
1656 // Statistics & Tracing
1657 if (TraceCallFixup) {
1658 ResourceMark rm(current);
1659 tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1660 callee_method->print_short_name(tty);
1661 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1662 }
1663
1664 if (ICMissHistogram) {
1665 MutexLocker m(VMStatistic_lock);
1666 RegisterMap reg_map(current,
1667 RegisterMap::UpdateMap::skip,
1668 RegisterMap::ProcessFrames::include,
1669 RegisterMap::WalkContinuation::skip);
1670     frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1671 // produce statistics under the lock
1672 trace_ic_miss(f.pc());
1673 }
1674 #endif
1675
1676 // install an event collector so that when a vtable stub is created the
1677 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1678 // event can't be posted when the stub is created as locks are held
1679 // - instead the event will be deferred until the event collector goes
1680 // out of scope.
1681 JvmtiDynamicCodeEventCollector event_collector;
1682
1683 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1684 RegisterMap reg_map(current,
1685 RegisterMap::UpdateMap::skip,
1686 RegisterMap::ProcessFrames::include,
1687 RegisterMap::WalkContinuation::skip);
1688   frame caller_frame = current->last_frame().sender(&reg_map);
1689 CodeBlob* cb = caller_frame.cb();
1690 nmethod* caller_nm = cb->as_nmethod();
1691
1692 CompiledICLocker ml(caller_nm);
1693 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1694 inline_cache->update(&call_info, receiver()->klass());
1695
1696 return callee_method;
1697 }
1698
1699 //
1700 // Resets a call-site in compiled code so it will get resolved again.
1701 // This routine handles virtual call sites, optimized virtual call
1702 // sites, and static call sites. Typically used to change a call site's
1703 // destination from compiled to interpreted.
1704 //
1705 methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
1706 JavaThread* current = THREAD;
1707 ResourceMark rm(current);
1708 RegisterMap reg_map(current,
1709 RegisterMap::UpdateMap::skip,
1710 RegisterMap::ProcessFrames::include,
1711 RegisterMap::WalkContinuation::skip);
1712 frame stub_frame = current->last_frame();
1713 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1714   frame caller = stub_frame.sender(&reg_map);
1715
1716 // Do nothing if the frame isn't a live compiled frame.
1717 // nmethod could be deoptimized by the time we get here
1718 // so no update to the caller is needed.
1719
1720 if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1721 (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) {
1722
1723 address pc = caller.pc();
1724
1725 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1726 assert(caller_nm != nullptr, "did not find caller nmethod");
1727
1728 // Default call_addr is the location of the "basic" call.
1729     // Determine the address of the call we are re-resolving. With
1730 // Inline Caches we will always find a recognizable call.
1731 // With Inline Caches disabled we may or may not find a
1732 // recognizable call. We will always find a call for static
1733 // calls and for optimized virtual calls. For vanilla virtual
1734 // calls it depends on the state of the UseInlineCaches switch.
1735 //
1736 // With Inline Caches disabled we can get here for a virtual call
1737 // for two reasons:
1738 // 1 - calling an abstract method. The vtable for abstract methods
1739     //       will run us through handle_wrong_method and we will eventually
1740     //       end up in the interpreter to throw the AbstractMethodError.
1741 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1742 // call and between the time we fetch the entry address and
1743 // we jump to it the target gets deoptimized. Similar to 1
1744     //       we will wind up in the interpreter (through a c2i with c2).
1745 //
1746 CompiledICLocker ml(caller_nm);
1747 address call_addr = caller_nm->call_instruction_address(pc);
1748
1749 if (call_addr != nullptr) {
1750 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1751 // bytes back in the instruction stream so we must also check for reloc info.
1752 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1753 bool ret = iter.next(); // Get item
1754 if (ret) {
1755 switch (iter.type()) {
1756 case relocInfo::static_call_type:
1757 case relocInfo::opt_virtual_call_type: {
1758 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1759 cdc->set_to_clean();
1760 break;
1761 }
1762
1763 case relocInfo::virtual_call_type: {
1764 // compiled, dispatched call (which used to call an interpreted method)
1765 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1766 inline_cache->set_to_clean();
1767 break;
1768 }
1769 default:
1770 break;
1771 }
1772 }
1773 }
1774 }
1775
1776 methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1777
1778
1779 #ifndef PRODUCT
1780 AtomicAccess::inc(&_wrong_method_ctr);
1781
1782 if (TraceCallFixup) {
1783 ResourceMark rm(current);
1784 tty->print("handle_wrong_method reresolving call to");
1785 callee_method->print_short_name(tty);
1786 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1787 }
1788 #endif
1789
1790 return callee_method;
1791 }
1792
1793 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1794 // The faulting unsafe accesses should be changed to throw the error
1795 // synchronously instead. Meanwhile the faulting instruction will be
1796 // skipped over (effectively turning it into a no-op) and an
1797 // asynchronous exception will be raised which the thread will
1798 // handle at a later point. If the instruction is a load it will
1799 // return garbage.
1800
1801 // Request an async exception.
1802 thread->set_pending_unsafe_access_error();
1803
1804 // Return address of next instruction to execute.
1970 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
1971
1972 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
1973 if (message == nullptr) {
1974 // Shouldn't happen, but don't cause even more problems if it does
1975 message = const_cast<char*>(caster_klass->external_name());
1976 } else {
1977 jio_snprintf(message,
1978 msglen,
1979 "class %s cannot be cast to class %s (%s%s%s)",
1980 caster_name,
1981 target_name,
1982 caster_klass_description,
1983 klass_separator,
1984 target_klass_description
1985 );
1986 }
1987 return message;
1988 }
1989
1990 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
1991 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
1992 JRT_END
1993
1994 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
1995 if (!SafepointSynchronize::is_synchronizing()) {
1996 // Only try quick_enter() if we're not trying to reach a safepoint
1997 // so that the calling thread reaches the safepoint more quickly.
1998 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
1999 return;
2000 }
2001 }
2002 // NO_ASYNC required because an async exception on the state transition destructor
2003 // would leave you with the lock held and it would never be released.
2004 // The normal monitorenter NullPointerException is thrown without acquiring a lock
2005 // and the model is that an exception implies the method failed.
2006 JRT_BLOCK_NO_ASYNC
2007 Handle h_obj(THREAD, obj);
2008 ObjectSynchronizer::enter(h_obj, lock, current);
2009 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2203 tty->print_cr("Note 1: counter updates are not MT-safe.");
2204 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2205 tty->print_cr(" %% in nested categories are relative to their category");
2206 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2207 tty->cr();
2208
2209 MethodArityHistogram h;
2210 }
2211 #endif
2212
2213 #ifndef PRODUCT
2214 static int _lookups; // number of calls to lookup
2215 static int _equals; // number of buckets checked with matching hash
2216 static int _archived_hits; // number of successful lookups in archived table
2217 static int _runtime_hits; // number of successful lookups in runtime table
2218 #endif
2219
2220 // A simple wrapper class around the calling convention information
2221 // that allows sharing of adapters for the same calling convention.
2222 class AdapterFingerPrint : public MetaspaceObj {
2223 private:
2224 enum {
2225 _basic_type_bits = 4,
2226 _basic_type_mask = right_n_bits(_basic_type_bits),
2227 _basic_types_per_int = BitsPerInt / _basic_type_bits,
2228 };
2229 // TO DO: Consider integrating this with a more global scheme for compressing signatures.
2230   // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
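  // A worked example (a sketch, assuming the usual BasicType enum values
  // T_INT=10, T_LONG=11, T_VOID=14): for an instance method void m(int, long)
  // the incoming array, receiver included and with the T_VOID slot that
  // follows a long, is { T_OBJECT, T_INT, T_LONG, T_VOID }. adapter_encoding()
  // below maps T_OBJECT to T_LONG on LP64, so the packed nibbles are
  // 0xB, 0xA, 0xB, 0xE and the fingerprint is the single data word 0xBABE.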
2231
2232 int _length;
2233
2234 static int data_offset() { return sizeof(AdapterFingerPrint); }
2235 int* data_pointer() {
2236 return (int*)((address)this + data_offset());
2237 }
2238
2239   // Private constructor. Use allocate() to get an instance.
2240 AdapterFingerPrint(int total_args_passed, BasicType* sig_bt, int len) {
2241 int* data = data_pointer();
2242 // Pack the BasicTypes with 8 per int
2243 assert(len == length(total_args_passed), "sanity");
2244 _length = len;
2245 int sig_index = 0;
2246 for (int index = 0; index < _length; index++) {
2247 int value = 0;
2248 for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
2249 int bt = adapter_encoding(sig_bt[sig_index++]);
2250 assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2251 value = (value << _basic_type_bits) | bt;
2252 }
2253 data[index] = value;
2254 }
2255 }
2256
2257 // Call deallocate instead
2258 ~AdapterFingerPrint() {
2259 ShouldNotCallThis();
2260 }
2261
2262 static int length(int total_args) {
2263 return (total_args + (_basic_types_per_int-1)) / _basic_types_per_int;
2264 }
2265
2266 static int compute_size_in_words(int len) {
2267 return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(int)));
2268 }
2269
2270 // Remap BasicTypes that are handled equivalently by the adapters.
2271 // These are correct for the current system but someday it might be
2272 // necessary to make this mapping platform dependent.
2273 static int adapter_encoding(BasicType in) {
2274 switch (in) {
2275 case T_BOOLEAN:
2276 case T_BYTE:
2277 case T_SHORT:
2278 case T_CHAR:
2279       // These are all promoted to T_INT in the calling convention
2280 return T_INT;
2281
2282 case T_OBJECT:
2283 case T_ARRAY:
2284 // In other words, we assume that any register good enough for
2285 // an int or long is good enough for a managed pointer.
2286 #ifdef _LP64
2287 return T_LONG;
2288 #else
2289 return T_INT;
2290 #endif
2291
2292 case T_INT:
2293 case T_LONG:
2294 case T_FLOAT:
2295 case T_DOUBLE:
2296 case T_VOID:
2297 return in;
2298
2299 default:
2300 ShouldNotReachHere();
2301 return T_CONFLICT;
2302 }
2303 }
2304
2305 void* operator new(size_t size, size_t fp_size) throw() {
2306 assert(fp_size >= size, "sanity check");
2307 void* p = AllocateHeap(fp_size, mtCode);
2308 memset(p, 0, fp_size);
2309 return p;
2310 }
2311
2312 template<typename Function>
2313 void iterate_args(Function function) {
2314 for (int i = 0; i < length(); i++) {
2315 unsigned val = (unsigned)value(i);
2316 // args are packed so that first/lower arguments are in the highest
2317 // bits of each int value, so iterate from highest to the lowest
2318 for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2319 unsigned v = (val >> j) & _basic_type_mask;
2320 if (v == 0) {
2321 continue;
2322 }
2323 function(v);
2324 }
2325 }
2326 }
2327
2328 public:
2329 static AdapterFingerPrint* allocate(int total_args_passed, BasicType* sig_bt) {
2330 int len = length(total_args_passed);
2331 int size_in_bytes = BytesPerWord * compute_size_in_words(len);
2332 AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(total_args_passed, sig_bt, len);
2333 assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
2334 return afp;
2335 }
2336
2337 static void deallocate(AdapterFingerPrint* fp) {
2338 FreeHeap(fp);
2339 }
2340
2341 int value(int index) {
2342 int* data = data_pointer();
2343 return data[index];
2344 }
2345
2346 int length() {
2347 return _length;
2348 }
2349
2350 unsigned int compute_hash() {
2351 int hash = 0;
2352 for (int i = 0; i < length(); i++) {
2353 int v = value(i);
2354       // Add an arithmetic operation to the hash (like +3) to improve hashing
2355 hash = ((hash << 8) ^ v ^ (hash >> 5)) + 3;
2356 }
2357 return (unsigned int)hash;
2358 }
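  // For a one-word fingerprint this reduces to value(0) + 3; for longer
  // fingerprints the shift-and-xor mixing lets every word affect the result.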
2359
2360 const char* as_string() {
2361 stringStream st;
2362 st.print("0x");
2363 for (int i = 0; i < length(); i++) {
2364 st.print("%x", value(i));
2365 }
2366 return st.as_string();
2367 }
2368
2369 const char* as_basic_args_string() {
2370 stringStream st;
2371 bool long_prev = false;
2372 iterate_args([&] (int arg) {
2373 if (long_prev) {
2374 long_prev = false;
2375 if (arg == T_VOID) {
2376 st.print("J");
2377 } else {
2378 st.print("L");
2379 }
2380 }
2381 switch (arg) {
2382 case T_INT: st.print("I"); break;
2383 case T_LONG: long_prev = true; break;
2384 case T_FLOAT: st.print("F"); break;
2385 case T_DOUBLE: st.print("D"); break;
2386 case T_VOID: break;
2387 default: ShouldNotReachHere();
2388 }
2389 });
2390 if (long_prev) {
2391 st.print("L");
2392 }
2393 return st.as_string();
2394 }
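  // Continuing the 0xBABE sketch from above: iterate_args() yields the
  // nibbles 11, 10, 11, 14. The leading T_LONG is followed by a non-void, so
  // it decodes as an object ("L"), then T_INT prints "I", and the trailing
  // T_LONG/T_VOID pair decodes as a true long ("J"), giving "LIJ".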
2395
2396 BasicType* as_basic_type(int& nargs) {
2397 nargs = 0;
2398 GrowableArray<BasicType> btarray;
2399 bool long_prev = false;
2400
2401 iterate_args([&] (int arg) {
2402 if (long_prev) {
2403 long_prev = false;
2404 if (arg == T_VOID) {
2405 btarray.append(T_LONG);
2406 } else {
2407 btarray.append(T_OBJECT); // it could be T_ARRAY; it shouldn't matter
2408 }
2409 }
2410 switch (arg) {
2411 case T_INT: // fallthrough
2412 case T_FLOAT: // fallthrough
2413 case T_DOUBLE:
2414 case T_VOID:
2415 btarray.append((BasicType)arg);
2416 break;
2417 case T_LONG:
2418 long_prev = true;
2419 break;
2420 default: ShouldNotReachHere();
2421 }
2422 });
2423
2424 if (long_prev) {
2425 btarray.append(T_OBJECT);
2426 }
2427
2428 nargs = btarray.length();
2429 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nargs);
2430 int index = 0;
2431 GrowableArrayIterator<BasicType> iter = btarray.begin();
2432 while (iter != btarray.end()) {
2433 sig_bt[index++] = *iter;
2434 ++iter;
2435 }
2436 assert(index == btarray.length(), "sanity check");
2437 #ifdef ASSERT
2438 {
2439 AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(nargs, sig_bt);
2440 assert(this->equals(compare_fp), "sanity check");
2441 AdapterFingerPrint::deallocate(compare_fp);
2442 }
2443 #endif
2444 return sig_bt;
2445 }
2446
2447 bool equals(AdapterFingerPrint* other) {
2448 if (other->_length != _length) {
2449 return false;
2450 } else {
2451 for (int i = 0; i < _length; i++) {
2452 if (value(i) != other->value(i)) {
2453 return false;
2454 }
2455 }
2456 }
2457 return true;
2458 }
2459
2460 // methods required by virtue of being a MetaspaceObj
2461 void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
2462 int size() const { return compute_size_in_words(_length); }
2463 MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2464
2465 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2466 NOT_PRODUCT(_equals++);
2467 return fp1->equals(fp2);
2468 }
2469
2470 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2471 return fp->compute_hash();
2472   }
2473 };
2475 #if INCLUDE_CDS
2476 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2477 return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2478 }
2479
2480 class ArchivedAdapterTable : public OffsetCompactHashtable<
2481 AdapterFingerPrint*,
2482 AdapterHandlerEntry*,
2483 adapter_fp_equals_compact_hashtable_entry> {};
2484 #endif // INCLUDE_CDS
2485
2486 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2487 using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2488 AnyObj::C_HEAP, mtCode,
2489 AdapterFingerPrint::compute_hash,
2490 AdapterFingerPrint::equals>;
2491 static AdapterHandlerTable* _adapter_handler_table;
2492 static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2493
2494 // Find an entry with the same fingerprint if it exists
2495 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(int total_args_passed, BasicType* sig_bt) {
2496 NOT_PRODUCT(_lookups++);
2497 assert_lock_strong(AdapterHandlerLibrary_lock);
2498 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2499 AdapterHandlerEntry* entry = nullptr;
2500 #if INCLUDE_CDS
2501   // If we are building the archive then the archived adapter table is
2502   // not valid and we need to use the entries added to the runtime table.
2503 if (AOTCodeCache::is_using_adapter()) {
2504     // Search the archived table first. It is a read-only table, so it can be searched without a lock.
2505 entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2506 #ifndef PRODUCT
2507 if (entry != nullptr) {
2508 _archived_hits++;
2509 }
2510 #endif
2511 }
2512 #endif // INCLUDE_CDS
2513 if (entry == nullptr) {
2514 assert_lock_strong(AdapterHandlerLibrary_lock);
2515 AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2516 if (entry_p != nullptr) {
2517 entry = *entry_p;
2518 assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2535 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2536 ts.print(tty, "AdapterHandlerTable");
2537 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2538 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2539 int total_hits = _archived_hits + _runtime_hits;
2540 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2541 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2542 }
2543 #endif
2544
2545 // ---------------------------------------------------------------------------
2546 // Implementation of AdapterHandlerLibrary
2547 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2548 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2549 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2550 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2551 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2552 #if INCLUDE_CDS
2553 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2554 #endif // INCLUDE_CDS
2555 static const int AdapterHandlerLibrary_size = 16*K;
2556 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2557 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2558
2559 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2560 assert(_buffer != nullptr, "should be initialized");
2561 return _buffer;
2562 }
2563
2564 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2565 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2566 AdapterBlob* adapter_blob = entry->adapter_blob();
2567 char blob_id[256];
2568 jio_snprintf(blob_id,
2569 sizeof(blob_id),
2570 "%s(%s)",
2571 adapter_blob->name(),
2572 entry->fingerprint()->as_string());
2573 if (Forte::is_enabled()) {
2574 Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2575 }
2583 void AdapterHandlerLibrary::initialize() {
2584 {
2585 ResourceMark rm;
2586 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2587 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2588 }
2589
2590 #if INCLUDE_CDS
2591 // Link adapters in AOT Cache to their code in AOT Code Cache
2592 if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
2593 link_aot_adapters();
2594 lookup_simple_adapters();
2595 return;
2596 }
2597 #endif // INCLUDE_CDS
2598
2599 ResourceMark rm;
2600 {
2601 MutexLocker mu(AdapterHandlerLibrary_lock);
2602
2603 _no_arg_handler = create_adapter(0, nullptr);
2604
2605 BasicType obj_args[] = { T_OBJECT };
2606 _obj_arg_handler = create_adapter(1, obj_args);
2607
2608 BasicType int_args[] = { T_INT };
2609 _int_arg_handler = create_adapter(1, int_args);
2610
2611 BasicType obj_int_args[] = { T_OBJECT, T_INT };
2612 _obj_int_arg_handler = create_adapter(2, obj_int_args);
2613
2614 BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
2615 _obj_obj_arg_handler = create_adapter(2, obj_obj_args);
2616
2617 // we should always get an entry back but we don't have any
2618 // associated blob on Zero
2619 assert(_no_arg_handler != nullptr &&
2620 _obj_arg_handler != nullptr &&
2621 _int_arg_handler != nullptr &&
2622 _obj_int_arg_handler != nullptr &&
2623 _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
2624 }
2625
2626 // Outside of the lock
2627 #ifndef ZERO
2628 // no blobs to register when we are on Zero
2629 post_adapter_creation(_no_arg_handler);
2630 post_adapter_creation(_obj_arg_handler);
2631 post_adapter_creation(_int_arg_handler);
2632 post_adapter_creation(_obj_int_arg_handler);
2633 post_adapter_creation(_obj_obj_arg_handler);
2634 #endif // !ZERO
2635 }
2636
2637 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
2638 uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
2639 assert(id > 0, "we can never overflow because AOT cache cannot contain more than 2^32 methods");
2640 return AdapterHandlerEntry::allocate(id, fingerprint);
2641 }
2642
2643 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2644 int total_args_passed = method->size_of_parameters(); // All args on stack
2645 if (total_args_passed == 0) {
2646 return _no_arg_handler;
2647 } else if (total_args_passed == 1) {
2648 if (!method->is_static()) {
2649 return _obj_arg_handler;
2650 }
2651 switch (method->signature()->char_at(1)) {
2652 case JVM_SIGNATURE_CLASS:
2653 case JVM_SIGNATURE_ARRAY:
2654 return _obj_arg_handler;
2655 case JVM_SIGNATURE_INT:
2656 case JVM_SIGNATURE_BOOLEAN:
2657 case JVM_SIGNATURE_CHAR:
2658 case JVM_SIGNATURE_BYTE:
2659 case JVM_SIGNATURE_SHORT:
2660 return _int_arg_handler;
2661 }
2662 } else if (total_args_passed == 2 &&
2663 !method->is_static()) {
2664 switch (method->signature()->char_at(1)) {
2665 case JVM_SIGNATURE_CLASS:
2666 case JVM_SIGNATURE_ARRAY:
2667 return _obj_obj_arg_handler;
2668 case JVM_SIGNATURE_INT:
2669 case JVM_SIGNATURE_BOOLEAN:
2670 case JVM_SIGNATURE_CHAR:
2671 case JVM_SIGNATURE_BYTE:
2672 case JVM_SIGNATURE_SHORT:
2673 return _obj_int_arg_handler;
2674 }
2675 }
2676 return nullptr;
2677 }
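// For illustration (a sketch): a virtual int size() passes only the receiver,
// so it maps to _obj_arg_handler; a static int abs(int) has signature "(I)I",
// so char_at(1) == 'I' and it maps to _int_arg_handler. Anything else returns
// nullptr and goes through the full fingerprint lookup in get_adapter().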
2678
2679 class AdapterSignatureIterator : public SignatureIterator {
2680 private:
2681 BasicType stack_sig_bt[16];
2682 BasicType* sig_bt;
2683 int index;
2684
2685 public:
2686 AdapterSignatureIterator(Symbol* signature,
2687 fingerprint_t fingerprint,
2688 bool is_static,
2689 int total_args_passed) :
2690 SignatureIterator(signature, fingerprint),
2691 index(0)
2692 {
2693 sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2694 if (!is_static) { // Pass in receiver first
2695 sig_bt[index++] = T_OBJECT;
2696 }
2697 do_parameters_on(this);
2698 }
2699
2700 BasicType* basic_types() {
2701 return sig_bt;
2702 }
2703
2704 #ifdef ASSERT
2705 int slots() {
2706 return index;
2707 }
2708 #endif
2709
2710 private:
2711
2712 friend class SignatureIterator; // so do_parameters_on can call do_type
2713 void do_type(BasicType type) {
2714 sig_bt[index++] = type;
2715 if (type == T_LONG || type == T_DOUBLE) {
2716 sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots
2717 }
2718 }
2719 };
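// Example (a sketch): for an instance method void m(double, int) the iterator
// produces { T_OBJECT, T_DOUBLE, T_VOID, T_INT } -- receiver first, then a
// T_VOID filler for the second Java slot of the double -- so slots() == 4,
// matching the method's size_of_parameters().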
2720
2721
2722 const char* AdapterHandlerEntry::_entry_names[] = {
2723 "i2c", "c2i", "c2i_unverified", "c2i_no_clinit_check"
2724 };
2725
2726 #ifdef ASSERT
2727 void AdapterHandlerLibrary::verify_adapter_sharing(int total_args_passed, BasicType* sig_bt, AdapterHandlerEntry* cached_entry) {
2728 // we can only check for the same code if there is any
2729 #ifndef ZERO
2730 AdapterHandlerEntry* comparison_entry = create_adapter(total_args_passed, sig_bt, true);
2731 assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
2732 assert(comparison_entry->compare_code(cached_entry), "code must match");
2733 // Release the one just created
2734 AdapterHandlerEntry::deallocate(comparison_entry);
2735 #endif // !ZERO
2736 }
2737 #endif /* ASSERT */
2738
2739 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2740 assert(!method->is_abstract(), "abstract methods do not have adapters");
2741 // Use customized signature handler. Need to lock around updates to
2742 // the _adapter_handler_table (it is not safe for concurrent readers
2743 // and a single writer: this could be fixed if it becomes a
2744 // problem).
2745
2746 // Fast-path for trivial adapters
2747 AdapterHandlerEntry* entry = get_simple_adapter(method);
2748 if (entry != nullptr) {
2749 return entry;
2750 }
2751
2752 ResourceMark rm;
2753 bool new_entry = false;
2754
2755 // Fill in the signature array, for the calling-convention call.
2756 int total_args_passed = method->size_of_parameters(); // All args on stack
2757
2758 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2759 method->is_static(), total_args_passed);
2760 assert(si.slots() == total_args_passed, "");
2761 BasicType* sig_bt = si.basic_types();
2762 {
2763 MutexLocker mu(AdapterHandlerLibrary_lock);
2764
2765 // Lookup method signature's fingerprint
2766 entry = lookup(total_args_passed, sig_bt);
2767
2768 if (entry != nullptr) {
2769 #ifndef ZERO
2770 assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
2771 #endif
2772 #ifdef ASSERT
2773 if (!entry->in_aot_cache() && VerifyAdapterSharing) {
2774 verify_adapter_sharing(total_args_passed, sig_bt, entry);
2775 }
2776 #endif
2777 } else {
2778 entry = create_adapter(total_args_passed, sig_bt);
2779 if (entry != nullptr) {
2780 new_entry = true;
2781 }
2782 }
2783 }
2784
2785 // Outside of the lock
2786 if (new_entry) {
2787 post_adapter_creation(entry);
2788 }
2789 return entry;
2790 }
2791
2792 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
2793 ResourceMark rm;
2794 const char* name = AdapterHandlerLibrary::name(handler);
2795 const uint32_t id = AdapterHandlerLibrary::id(handler);
2796
2797 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
2798 if (blob != nullptr) {
2813 }
2814 insts_size = adapter_blob->code_size();
2815 st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
2816 handler->fingerprint()->as_basic_args_string(),
2817 handler->fingerprint()->as_string(), insts_size);
2818 st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
2819 if (Verbose || PrintStubCode) {
2820 address first_pc = adapter_blob->content_begin();
2821 if (first_pc != nullptr) {
2822 Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
2823 st->cr();
2824 }
2825 }
2826 }
2827 #endif // PRODUCT
2828
2829 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
2830 int entry_offset[AdapterBlob::ENTRY_COUNT]) {
2831 entry_offset[AdapterBlob::I2C] = 0;
2832 entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];
2833 entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];
2834 if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
2835 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
2836 } else {
2837 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
2838 }
2839 }
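// For example (a sketch with made-up offsets): if the c2i entry was generated
// 0x40 bytes after the i2c entry and c2i_unverified 0x30 bytes after it, the
// stored offsets are { 0, 0x40, 0x30, -1 } when no clinit-check entry was
// emitted. Storing i2c-relative offsets instead of absolute addresses keeps
// the entries valid if the blob is relocated (e.g. via the AOT code cache).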
2840
2841 bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
2842 int total_args_passed,
2843 BasicType* sig_bt,
2844 bool is_transient) {
2845 if (log_is_enabled(Info, perf, class, link)) {
2846 ClassLoader::perf_method_adapters_count()->inc();
2847 }
2848
2849 #ifndef ZERO
2850 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2851 CodeBuffer buffer(buf);
2852 short buffer_locs[20];
2853 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2854 sizeof(buffer_locs)/sizeof(relocInfo));
2855 MacroAssembler masm(&buffer);
2856 VMRegPair stack_regs[16];
2857 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2858
2859 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2860 int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2861 address entry_address[AdapterBlob::ENTRY_COUNT];
2862 SharedRuntime::generate_i2c2i_adapters(&masm,
2863 total_args_passed,
2864 comp_args_on_stack,
2865 sig_bt,
2866 regs,
2867 entry_address);
2868   // On Zero there is no code to save and no need to create a blob
2869   // or relocate the handler.
2870 int entry_offset[AdapterBlob::ENTRY_COUNT];
2871 address_to_offset(entry_address, entry_offset);
2872 #ifdef ASSERT
2873 if (VerifyAdapterSharing) {
2874 handler->save_code(buf->code_begin(), buffer.insts_size());
2875 if (is_transient) {
2876 return true;
2877 }
2878 }
2879 #endif
2880 AdapterBlob* adapter_blob = AdapterBlob::create(&buffer, entry_offset);
2881 if (adapter_blob == nullptr) {
2882 // CodeCache is full, disable compilation
2883     // Ought to log this but the compile log is only per compile thread
2884     // and we're some nondescript Java thread.
2885 return false;
2886 }
2887 handler->set_adapter_blob(adapter_blob);
2888 if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
2889 // try to save generated code
2890 const char* name = AdapterHandlerLibrary::name(handler);
2891 const uint32_t id = AdapterHandlerLibrary::id(handler);
2892 bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
2893 assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
2894 }
2895 #endif // ZERO
2896
2897 #ifndef PRODUCT
2898 // debugging support
2899 if (PrintAdapterHandlers || PrintStubCode) {
2900 print_adapter_handler_info(tty, handler);
2901 }
2902 #endif
2903
2904 return true;
2905 }
2906
2907 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(int total_args_passed,
2908 BasicType* sig_bt,
2909 bool is_transient) {
2910 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2911 AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
2912 if (!generate_adapter_code(handler, total_args_passed, sig_bt, is_transient)) {
2913 AdapterHandlerEntry::deallocate(handler);
2914 return nullptr;
2915 }
2916 if (!is_transient) {
2917 assert_lock_strong(AdapterHandlerLibrary_lock);
2918 _adapter_handler_table->put(fp, handler);
2919 }
2920 return handler;
2921 }
2922
2923 #if INCLUDE_CDS
2924 void AdapterHandlerEntry::remove_unshareable_info() {
2925 #ifdef ASSERT
2926 _saved_code = nullptr;
2927 _saved_code_length = 0;
2928 #endif // ASSERT
2929 _adapter_blob = nullptr;
2930 _linked = false;
2931 }
2932
2995 // This method is used during a production run to link archived adapters (stored in the AOT Cache)
2996 // to their code in the AOT Code Cache.
2997 void AdapterHandlerEntry::link() {
2998 ResourceMark rm;
2999 assert(_fingerprint != nullptr, "_fingerprint must not be null");
3000 bool generate_code = false;
3001 // Generate code only if AOTCodeCache is not available, or
3002 // caching adapters is disabled, or we fail to link
3003 // the AdapterHandlerEntry to its code in the AOTCodeCache
3004 if (AOTCodeCache::is_using_adapter()) {
3005 AdapterHandlerLibrary::link_aot_adapter_handler(this);
3006 // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
3007 if (_adapter_blob == nullptr) {
3008 log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
3009 generate_code = true;
3010 }
3011 } else {
3012 generate_code = true;
3013 }
3014 if (generate_code) {
3015 int nargs;
3016 BasicType* bt = _fingerprint->as_basic_type(nargs);
3017 if (!AdapterHandlerLibrary::generate_adapter_code(this, nargs, bt, /* is_transient */ false)) {
3018 // Don't throw exceptions during VM initialization because java.lang.* classes
3019 // might not have been initialized, causing problems when constructing the
3020 // Java exception object.
3021 vm_exit_during_initialization("Out of space in CodeCache for adapters");
3022 }
3023 }
3024 if (_adapter_blob != nullptr) {
3025 post_adapter_creation(this);
3026 }
3027 assert(_linked, "AdapterHandlerEntry must now be linked");
3028 }
3029
3030 void AdapterHandlerLibrary::link_aot_adapters() {
3031 uint max_id = 0;
3032 assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
3033 /* It is possible that some adapters generated in assembly phase are not stored in the cache.
3034 * That implies adapter ids of the adapters in the cache may not be contiguous.
3035 * If the size of the _aot_adapter_handler_table is used to initialize _id_counter, then it may
3036 * result in collision of adapter ids between AOT stored handlers and runtime generated handlers.
3037  * To avoid such a situation, initialize the _id_counter with the largest adapter id among the AOT-stored handlers.
3038 */
3039 _aot_adapter_handler_table.iterate([&](AdapterHandlerEntry* entry) {
3040 assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
3041 entry->link();
3042 max_id = MAX2(max_id, entry->id());
3043 });
3044 // Set adapter id to the maximum id found in the AOTCache
3045 assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
3046 _id_counter = max_id;
3047 }
3048
3049 // This method is called during a production run to look up simple adapters
3050 // in the archived adapter handler table.
3051 void AdapterHandlerLibrary::lookup_simple_adapters() {
3052 assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");
3053
3054 MutexLocker mu(AdapterHandlerLibrary_lock);
3055 _no_arg_handler = lookup(0, nullptr);
3056
3057 BasicType obj_args[] = { T_OBJECT };
3058 _obj_arg_handler = lookup(1, obj_args);
3059
3060 BasicType int_args[] = { T_INT };
3061 _int_arg_handler = lookup(1, int_args);
3062
3063 BasicType obj_int_args[] = { T_OBJECT, T_INT };
3064 _obj_int_arg_handler = lookup(2, obj_int_args);
3065
3066 BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
3067 _obj_obj_arg_handler = lookup(2, obj_obj_args);
3068
3069 assert(_no_arg_handler != nullptr &&
3070 _obj_arg_handler != nullptr &&
3071 _int_arg_handler != nullptr &&
3072 _obj_int_arg_handler != nullptr &&
3073 _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
3074 assert(_no_arg_handler->is_linked() &&
3075 _obj_arg_handler->is_linked() &&
3076 _int_arg_handler->is_linked() &&
3077 _obj_int_arg_handler->is_linked() &&
3078 _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
3079 }
3080 #endif // INCLUDE_CDS
3081
3082 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3083 LogStreamHandle(Trace, aot) lsh;
3084 if (lsh.is_enabled()) {
3085 lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3086 lsh.cr();
3087 }
3088 it->push(&_fingerprint);
3089 }
3090
3091 AdapterHandlerEntry::~AdapterHandlerEntry() {
3092 if (_fingerprint != nullptr) {
3093 AdapterFingerPrint::deallocate(_fingerprint);
3094 _fingerprint = nullptr;
3095 }
3096 #ifdef ASSERT
3097 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3098 #endif
3099 FreeHeap(this);
3100 }
3101
3102
3103 #ifdef ASSERT
3104 // Capture the code before relocation so that it can be compared
3105 // against other versions. If the code is captured after relocation
3106 // then relative instructions won't be equivalent.
3107 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3108 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3109 _saved_code_length = length;
3110 memcpy(_saved_code, buffer, length);
3111 }
3112
3113
3114 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3115 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3163
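// Stack-allocated scratch space for relocation records. Declaring the
// buffers as arrays of doubles keeps them naturally (8-byte) aligned for
// the relocInfo records that initialize_shared_locs() places in them.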
3164 struct { double data[20]; } locs_buf;
3165 struct { double data[20]; } stubs_locs_buf;
3166 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3167 #if defined(AARCH64) || defined(PPC64)
3168 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3169 // in the constant pool to ensure ordering between the barrier and oops
3170 // accesses. For native_wrappers we need a constant.
3171 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3172 // static java call that is resolved in the runtime.
3173 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3174 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3175 }
3176 #endif
3177 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3178 MacroAssembler _masm(&buffer);
3179
3180 // Fill in the signature array, for the calling-convention call.
3181 const int total_args_passed = method->size_of_parameters();
3182
3183 VMRegPair stack_regs[16];
3184 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3185
3186 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
3187 method->is_static(), total_args_passed);
3188 BasicType* sig_bt = si.basic_types();
3189 assert(si.slots() == total_args_passed, "");
3190 BasicType ret_type = si.return_type();
3191
3192 // Now get the compiled-Java arguments layout.
3193 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3194
3195 // Generate the compiled-to-native wrapper code
3196 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3197
3198 if (nm != nullptr) {
3199 {
3200 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3201 if (nm->make_in_use()) {
3202 method->set_code(method, nm);
3203 }
3204 }
3205
3206 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3207 if (directive->PrintAssemblyOption) {
3208 nm->print_code();
3209 }
3210 DirectivesStack::release(directive);
3438 if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3439 found = true;
3440 st->print("Adapter for signature: ");
3441 a->print_adapter_on(st);
3442 return true;
3443 } else {
3444 return false; // keep looking
3445 }
3446 };
3447 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3448 _adapter_handler_table->iterate(findblob_runtime_table);
3449 }
3450 assert(found, "Should have found handler");
3451 }
3452
3453 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3454 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3455 if (adapter_blob() != nullptr) {
3456 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3457 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3458 st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3459 if (get_c2i_no_clinit_check_entry() != nullptr) {
3460 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3461 }
3462 }
3463 st->cr();
3464 }
3465
3466 #ifndef PRODUCT
3467
3468 void AdapterHandlerLibrary::print_statistics() {
3469 print_table_statistics();
3470 }
3471
3472 #endif /* PRODUCT */
3473
3474 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3475 assert(current == JavaThread::current(), "pre-condition");
3476 StackOverflow* overflow_state = current->stack_overflow_state();
3477 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3478 overflow_state->set_reserved_stack_activation(current->stack_base());
3525 event.set_method(method);
3526 event.commit();
3527 }
3528 }
3529 }
3530 return activation;
3531 }
3532
3533 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3534 // After any safepoint, just before going back to compiled code,
3535 // we inform the GC that we will be doing initializing writes to
3536 // this object in the future without emitting card-marks, so
3537 // the GC may take any compensating steps.
3538
3539 oop new_obj = current->vm_result_oop();
3540 if (new_obj == nullptr) return;
3541
3542 BarrierSet *bs = BarrierSet::barrier_set();
3543 bs->on_slowpath_allocation_exit(current, new_obj);
3544 }
28 #include "classfile/javaClasses.inline.hpp"
29 #include "classfile/stringTable.hpp"
30 #include "classfile/vmClasses.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/aotCodeCache.hpp"
33 #include "code/codeCache.hpp"
34 #include "code/compiledIC.hpp"
35 #include "code/nmethod.inline.hpp"
36 #include "code/scopeDesc.hpp"
37 #include "code/vtableStubs.hpp"
38 #include "compiler/abstractCompiler.hpp"
39 #include "compiler/compileBroker.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "gc/shared/barrierSet.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "interpreter/interpreterRuntime.hpp"
45 #include "jfr/jfrEvents.hpp"
46 #include "jvm.h"
47 #include "logging/log.hpp"
48 #include "memory/oopFactory.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "memory/universe.hpp"
51 #include "metaprogramming/primitiveConversions.hpp"
52 #include "oops/access.hpp"
53 #include "oops/fieldStreams.inline.hpp"
54 #include "oops/inlineKlass.inline.hpp"
55 #include "oops/klass.hpp"
56 #include "oops/method.inline.hpp"
57 #include "oops/objArrayKlass.hpp"
58 #include "oops/objArrayOop.inline.hpp"
59 #include "oops/oop.inline.hpp"
60 #include "prims/forte.hpp"
61 #include "prims/jvmtiExport.hpp"
62 #include "prims/jvmtiThreadState.hpp"
63 #include "prims/methodHandles.hpp"
64 #include "prims/nativeLookup.hpp"
65 #include "runtime/arguments.hpp"
66 #include "runtime/atomicAccess.hpp"
67 #include "runtime/basicLock.inline.hpp"
68 #include "runtime/frame.inline.hpp"
69 #include "runtime/handles.inline.hpp"
70 #include "runtime/init.hpp"
71 #include "runtime/interfaceSupport.inline.hpp"
72 #include "runtime/java.hpp"
73 #include "runtime/javaCalls.hpp"
74 #include "runtime/jniHandles.inline.hpp"
75 #include "runtime/osThread.hpp"
76 #include "runtime/perfData.hpp"
77 #include "runtime/sharedRuntime.hpp"
78 #include "runtime/signature.hpp"
79 #include "runtime/stackWatermarkSet.hpp"
80 #include "runtime/stubRoutines.hpp"
81 #include "runtime/synchronizer.inline.hpp"
82 #include "runtime/timerTrace.hpp"
83 #include "runtime/vframe.inline.hpp"
84 #include "runtime/vframeArray.hpp"
85 #include "runtime/vm_version.hpp"
86 #include "utilities/copy.hpp"
87 #include "utilities/dtrace.hpp"
88 #include "utilities/events.hpp"
89 #include "utilities/globalDefinitions.hpp"
90 #include "utilities/hashTable.hpp"
91 #include "utilities/macros.hpp"
92 #include "utilities/xmlstream.hpp"
93 #ifdef COMPILER1
94 #include "c1/c1_Runtime1.hpp"
95 #endif
96 #if INCLUDE_JFR
97 #include "jfr/jfr.inline.hpp"
98 #endif
1232 // for a call currently in progress, i.e., arguments have been pushed on the stack
1233 // but the callee has not been invoked yet. The caller frame must be compiled.
1234 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1235 CallInfo& callinfo, TRAPS) {
1236 Handle receiver;
1237 Handle nullHandle; // create a handy null handle for exception returns
1238 JavaThread* current = THREAD;
1239
1240 assert(!vfst.at_end(), "Java frame must exist");
1241
1242 // Find caller and bci from vframe
1243 methodHandle caller(current, vfst.method());
1244 int bci = vfst.bci();
1245
1246 if (caller->is_continuation_enter_intrinsic()) {
1247 bc = Bytecodes::_invokestatic;
1248 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1249 return receiver;
1250 }
1251
1252 // The substitutability test implementation piggybacks on static call resolution
1253 Bytecodes::Code code = caller->java_code_at(bci);
1254 if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
1255 bc = Bytecodes::_invokestatic;
1256 methodHandle attached_method(THREAD, extract_attached_method(vfst));
1257 assert(attached_method.not_null(), "must have attached method");
1258 vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
1259 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
1260 #ifdef ASSERT
1261 Symbol* subst_method_name = UseAltSubstitutabilityMethod ? vmSymbols::isSubstitutableAlt_name() : vmSymbols::isSubstitutable_name();
1262 Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(subst_method_name, vmSymbols::object_object_boolean_signature());
1263 assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
1264 #endif
1265 return receiver;
1266 }
1267
1268 Bytecode_invoke bytecode(caller, bci);
1269 int bytecode_index = bytecode.index();
1270 bc = bytecode.invoke_code();
1271
1272 methodHandle attached_method(current, extract_attached_method(vfst));
1273 if (attached_method.not_null()) {
1274 Method* callee = bytecode.static_target(CHECK_NH);
1275 vmIntrinsics::ID id = callee->intrinsic_id();
1276 // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1277 // it attaches statically resolved method to the call site.
1278 if (MethodHandles::is_signature_polymorphic(id) &&
1279 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1280 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1281
1282 // Adjust invocation mode according to the attached method.
1283 switch (bc) {
1284 case Bytecodes::_invokevirtual:
1285 if (attached_method->method_holder()->is_interface()) {
1286 bc = Bytecodes::_invokeinterface;
1287 }
1288 break;
1289 case Bytecodes::_invokeinterface:
1290 if (!attached_method->method_holder()->is_interface()) {
1291 bc = Bytecodes::_invokevirtual;
1292 }
1293 break;
1294 case Bytecodes::_invokehandle:
1295 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1296 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1297 : Bytecodes::_invokevirtual;
1298 }
1299 break;
1300 default:
1301 break;
1302 }
1303 } else {
1304 assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1305 if (!attached_method->method_holder()->is_inline_klass()) {
1306 // Ignore the attached method in this case to avoid confusing the code below
1307 attached_method = methodHandle(current, nullptr);
1308 }
1309 }
1310 }
1311
1312 assert(bc != Bytecodes::_illegal, "not initialized");
1313
1314 bool has_receiver = bc != Bytecodes::_invokestatic &&
1315 bc != Bytecodes::_invokedynamic &&
1316 bc != Bytecodes::_invokehandle;
1317 bool check_null_and_abstract = true;
1318
1319 // Find receiver for non-static call
1320 if (has_receiver) {
1321 // This register map must be updated since we need to find the receiver for
1322 // compiled frames. The receiver might be in a register.
1323 RegisterMap reg_map2(current,
1324 RegisterMap::UpdateMap::include,
1325 RegisterMap::ProcessFrames::include,
1326 RegisterMap::WalkContinuation::skip);
1327 frame stubFrame = current->last_frame();
1328 // Caller-frame is a compiled frame
1329 frame callerFrame = stubFrame.sender(&reg_map2);
1330
1331 Method* callee = attached_method();
1332 if (callee == nullptr) {
1333 callee = bytecode.static_target(CHECK_NH);
1334 if (callee == nullptr) {
1335 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1336 }
1337 }
1338 bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->as_nmethod()->is_compiled_by_c1();
1339 if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
1340 // If the receiver is an inline type that is passed as fields, no oop is available
1341 // Resolve the call without receiver null checking.
1342 assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
1343 assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
1344 if (bc == Bytecodes::_invokeinterface) {
1345 bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
1346 }
1347 check_null_and_abstract = false;
1348 } else {
1349 // Retrieve from a compiled argument list
1350 receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1351 assert(oopDesc::is_oop_or_null(receiver()), "");
1352 if (receiver.is_null()) {
1353 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1354 }
1355 }
1356 }
1357
1358 // Resolve method
1359 if (attached_method.not_null()) {
1360 // Parameterized by attached method.
1361 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
1362 } else {
1363 // Parameterized by bytecode.
1364 constantPoolHandle constants(current, caller->constants());
1365 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1366 }
1367
1368 #ifdef ASSERT
1369 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1370 if (has_receiver && check_null_and_abstract) {
1371 assert(receiver.not_null(), "should have thrown exception");
1372 Klass* receiver_klass = receiver->klass();
1373 Klass* rk = nullptr;
1374 if (attached_method.not_null()) {
1375 // In case there's resolved method attached, use its holder during the check.
1376 rk = attached_method->method_holder();
1377 } else {
1378 // Klass is already loaded.
1379 constantPoolHandle constants(current, caller->constants());
1380 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1381 }
1382 Klass* static_receiver_klass = rk;
1383 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1384 "actual receiver must be subclass of static receiver klass");
1385 if (receiver_klass->is_instance_klass()) {
1386 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1387 tty->print_cr("ERROR: Klass not yet initialized!!");
1388 receiver_klass->print();
1389 }
1390 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1391 }
1392 }
1393 #endif
1394
1395 return receiver;
1396 }
1397
1398 methodHandle SharedRuntime::find_callee_method(bool& caller_does_not_scalarize, TRAPS) {
1399 JavaThread* current = THREAD;
1400 ResourceMark rm(current);
1401 // We first need to check whether any Java activations (compiled, interpreted)
1402 // exist on the stack since the last JavaCall. If not, we need
1403 // to get the target method from the JavaCall wrapper.
1404 vframeStream vfst(current, true); // Do not skip any javaCalls
1405 methodHandle callee_method;
1406 if (vfst.at_end()) {
1407 // No Java frames were found on stack since we did the JavaCall.
1408 // Hence the stack can only contain an entry_frame. We need to
1409 // find the target method from the stub frame.
1410 RegisterMap reg_map(current,
1411 RegisterMap::UpdateMap::skip,
1412 RegisterMap::ProcessFrames::include,
1413 RegisterMap::WalkContinuation::skip);
1414 frame fr = current->last_frame();
1415 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1416 fr = fr.sender(&reg_map);
1417 assert(fr.is_entry_frame(), "must be");
1418 // fr is now pointing to the entry frame.
1419 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1420 } else {
1421 Bytecodes::Code bc;
1422 CallInfo callinfo;
1423 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1424 // Calls via mismatching methods are always non-scalarized
1425 if (callinfo.resolved_method()->mismatch()) {
1426 caller_does_not_scalarize = true;
1427 }
1428 callee_method = methodHandle(current, callinfo.selected_method());
1429 }
1430 assert(callee_method()->is_method(), "must be");
1431 return callee_method;
1432 }
1433
1434 // Resolves a call.
1435 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1436 JavaThread* current = THREAD;
1437 ResourceMark rm(current);
1438 RegisterMap cbl_map(current,
1439 RegisterMap::UpdateMap::skip,
1440 RegisterMap::ProcessFrames::include,
1441 RegisterMap::WalkContinuation::skip);
1442 frame caller_frame = current->last_frame().sender(&cbl_map);
1443
1444 CodeBlob* caller_cb = caller_frame.cb();
1445 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1446 nmethod* caller_nm = caller_cb->as_nmethod();
1447
1448 // determine call info & receiver
1449 // note: a) receiver is null for static calls
1450 // b) an exception is thrown if receiver is null for non-static calls
1451 CallInfo call_info;
1452 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1453 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1454
1455 NoSafepointVerifier nsv;
1456
1457 methodHandle callee_method(current, call_info.selected_method());
1458 // Calls via mismatching methods are always non-scalarized
1459 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1460 caller_does_not_scalarize = true;
1461 }
1462
1463 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1464 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1465 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1466 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1467 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1468
1469 assert(!caller_nm->is_unloading(), "It should not be unloading");
1470
1471 #ifndef PRODUCT
1472 // tracing/debugging/statistics
1473 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1474 (is_virtual) ? (&_resolve_virtual_ctr) :
1475 (&_resolve_static_ctr);
1476 AtomicAccess::inc(addr);
1477
1478 if (TraceCallFixup) {
1479 ResourceMark rm(current);
1480 tty->print("resolving %s%s (%s) %s call to",
1481 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1482 Bytecodes::name(invoke_code), (caller_does_not_scalarize) ? "non-scalar" : "");
1483 callee_method->print_short_name(tty);
1484 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1485 p2i(caller_frame.pc()), p2i(callee_method->code()));
1486 }
1487 #endif
1488
1489 if (invoke_code == Bytecodes::_invokestatic) {
1490 assert(callee_method->method_holder()->is_initialized() ||
1491 callee_method->method_holder()->is_reentrant_initialization(current),
1492 "invalid class initialization state for invoke_static");
1493 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1494 // In order to keep class initialization check, do not patch call
1495 // site for static call when the class is not fully initialized.
1496 // Proper check is enforced by call site re-resolution on every invocation.
1497 //
1498 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1499 // explicit class initialization check is put in nmethod entry (VEP).
1500 assert(callee_method->method_holder()->is_linked(), "must be");
1501 return callee_method;
1502 }
1503 }
1504
1505
1506 // JSR 292 key invariant:
1507 // If the resolved method is a MethodHandle invoke target, the call
1508 // site must be a MethodHandle call site, because the lambda form might tail-call
1509 // leaving the stack in a state unknown to either caller or callee
1510
1511 // Compute entry points. The computation of the entry points is independent of
1512 // patching the call.
1513
1514 // Make sure the callee nmethod does not get deoptimized and removed before
1515 // we are done patching the code.
1516
1517
1518 CompiledICLocker ml(caller_nm);
1519 if (is_virtual && !is_optimized) {
1520 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1521 inline_cache->update(&call_info, receiver->klass(), caller_does_not_scalarize);
1522 } else {
1523 // Callsite is a direct call - set it to the destination method
1524 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1525 callsite->set(callee_method, caller_does_not_scalarize);
1526 }
1527
1528 return callee_method;
1529 }
1530
1531 // Inline caches exist only in compiled code
1532 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1533 #ifdef ASSERT
1534 RegisterMap reg_map(current,
1535 RegisterMap::UpdateMap::skip,
1536 RegisterMap::ProcessFrames::include,
1537 RegisterMap::WalkContinuation::skip);
1538 frame stub_frame = current->last_frame();
1539 assert(stub_frame.is_runtime_frame(), "sanity check");
1540 frame caller_frame = stub_frame.sender(&reg_map);
1541 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1542 #endif /* ASSERT */
1543
1544 methodHandle callee_method;
1545 bool caller_does_not_scalarize = false;
1546 JRT_BLOCK
1547 callee_method = SharedRuntime::handle_ic_miss_helper(caller_does_not_scalarize, CHECK_NULL);
1548 // Return Method* through TLS
1549 current->set_vm_result_metadata(callee_method());
1550 JRT_BLOCK_END
1551 // return compiled code entry point after potential safepoints
1552 return get_resolved_entry(current, callee_method, false, false, caller_does_not_scalarize);
1553 JRT_END
1554
1555
1556 // Handle call site that has been made non-entrant
1557 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1558 // 6243940 We might end up in here if the callee is deoptimized
1559 // as we race to call it. We don't want to take a safepoint if
1560 // the caller was interpreted, because the caller frame will look
1561 // interpreted to the stack walkers while the arguments are now
1562 // "compiled", so it is much better to make this transition
1563 // invisible to the stack walking code. The i2c path will
1564 // place the callee method in the callee_target. It is stashed
1565 // there because if we tried to find the callee by normal means, a
1566 // safepoint would be possible and we would have trouble GC'ing the compiled args.
1567 RegisterMap reg_map(current,
1568 RegisterMap::UpdateMap::skip,
1569 RegisterMap::ProcessFrames::include,
1570 RegisterMap::WalkContinuation::skip);
1571 frame stub_frame = current->last_frame();
1572 assert(stub_frame.is_runtime_frame(), "sanity check");
1573 frame caller_frame = stub_frame.sender(&reg_map);
1574
1575 if (caller_frame.is_interpreted_frame() ||
1576 caller_frame.is_entry_frame() ||
1577 caller_frame.is_upcall_stub_frame()) {
1578 Method* callee = current->callee_target();
1579 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1580 current->set_vm_result_metadata(callee);
1581 current->set_callee_target(nullptr);
1582 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1583 // Bypass class initialization checks in c2i when caller is in native.
1584 // JNI calls to static methods don't have class initialization checks.
1585 // Fast class initialization checks are present in c2i adapters and call into
1586 // SharedRuntime::handle_wrong_method() on the slow path.
1587 //
1588 // JVM upcalls may land here as well, but there's a proper check present in
1589 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1590 // so bypassing it in c2i adapter is benign.
1591 return callee->get_c2i_no_clinit_check_entry();
1592 } else {
1593 if (caller_frame.is_interpreted_frame()) {
1594 return callee->get_c2i_inline_entry();
1595 } else {
1596 return callee->get_c2i_entry();
1597 }
1598 }
1599 }
1600
1601 // Must be a compiled-to-compiled path, which is safe to stack walk
1602 methodHandle callee_method;
1603 bool is_static_call = false;
1604 bool is_optimized = false;
1605 bool caller_does_not_scalarize = false;
1606 JRT_BLOCK
1607 // Force resolving of caller (if we called from compiled frame)
1608 callee_method = SharedRuntime::reresolve_call_site(is_optimized, caller_does_not_scalarize, CHECK_NULL);
1609 current->set_vm_result_metadata(callee_method());
1610 JRT_BLOCK_END
1611 // return compiled code entry point after potential safepoints
1612 return get_resolved_entry(current, callee_method, callee_method->is_static(), is_optimized, caller_does_not_scalarize);
1613 JRT_END
1614
1615 // Handle abstract method call
1616 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1617 // Verbose error message for AbstractMethodError.
1618 // Get the called method from the invoke bytecode.
1619 vframeStream vfst(current, true);
1620 assert(!vfst.at_end(), "Java frame must exist");
1621 methodHandle caller(current, vfst.method());
1622 Bytecode_invoke invoke(caller, vfst.bci());
1623 DEBUG_ONLY( invoke.verify(); )
1624
1625 // Find the compiled caller frame.
1626 RegisterMap reg_map(current,
1627 RegisterMap::UpdateMap::include,
1628 RegisterMap::ProcessFrames::include,
1629 RegisterMap::WalkContinuation::skip);
1630 frame stubFrame = current->last_frame();
1631 assert(stubFrame.is_runtime_frame(), "must be");
1632 frame callerFrame = stubFrame.sender(&reg_map);
1633 assert(callerFrame.is_compiled_frame(), "must be");
1634
1635 // Install exception and return forward entry.
1636 address res = SharedRuntime::throw_AbstractMethodError_entry();
1637 JRT_BLOCK
1638 methodHandle callee(current, invoke.static_target(current));
1639 if (!callee.is_null()) {
1640 oop recv = callerFrame.retrieve_receiver(&reg_map);
1641 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1642 res = StubRoutines::forward_exception_entry();
1643 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1644 }
1645 JRT_BLOCK_END
1646 return res;
1647 JRT_END
1648
1649 // Return the appropriate verified code entry (scalarized or not) if interp_only_mode
1650 // is not set for the current thread; otherwise return the c2i entry.
1651 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method,
1652 bool is_static_call, bool is_optimized, bool caller_does_not_scalarize) {
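// Entry selection sketch: interp-only mode -> c2i entry; a caller that does
// not scalarize -> verified inline entry (no scalarized args); a static or
// optimized virtual call -> verified entry; otherwise -> verified inline RO
// entry (receiver passed as an oop, see c1_inline_ro_entry_type()).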
1653 if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1654 // In interp_only_mode we need to go to the interpreted entry
1655 // The c2i won't patch in this mode -- see fixup_callers_callsite
1656 return callee_method->get_c2i_entry();
1657 }
1658
1659 if (caller_does_not_scalarize) {
1660 assert(callee_method->verified_inline_code_entry() != nullptr, "Jump to zero!");
1661 return callee_method->verified_inline_code_entry();
1662 } else if (is_static_call || is_optimized) {
1663 assert(callee_method->verified_code_entry() != nullptr, "Jump to zero!");
1664 return callee_method->verified_code_entry();
1665 } else {
1666 assert(callee_method->verified_inline_ro_code_entry() != nullptr, "Jump to zero!");
1667 return callee_method->verified_inline_ro_code_entry();
1668 }
1669 }
1670
1671 // resolve a static call and patch code
1672 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current))
1673 methodHandle callee_method;
1674 bool caller_does_not_scalarize = false;
1676 JRT_BLOCK
1677 callee_method = SharedRuntime::resolve_helper(false, false, caller_does_not_scalarize, CHECK_NULL);
1678 current->set_vm_result_metadata(callee_method());
1679 JRT_BLOCK_END
1680 // return compiled code entry point after potential safepoints
1681 return get_resolved_entry(current, callee_method, true, false, caller_does_not_scalarize);
1682 JRT_END
1683
1684 // resolve virtual call and update inline cache to monomorphic
1685 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1686 methodHandle callee_method;
1687 bool caller_does_not_scalarize = false;
1688 JRT_BLOCK
1689 callee_method = SharedRuntime::resolve_helper(true, false, caller_does_not_scalarize, CHECK_NULL);
1690 current->set_vm_result_metadata(callee_method());
1691 JRT_BLOCK_END
1692 // return compiled code entry point after potential safepoints
1693 return get_resolved_entry(current, callee_method, false, false, caller_does_not_scalarize);
1694 JRT_END
1695
1696
1697 // Resolve a virtual call that can be statically bound (e.g., always
1698 // monomorphic, so it has no inline cache). Patch code to resolved target.
1699 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1700 methodHandle callee_method;
1701 bool caller_does_not_scalarize = false;
1702 JRT_BLOCK
1703 callee_method = SharedRuntime::resolve_helper(true, true, caller_does_not_scalarize, CHECK_NULL);
1704 current->set_vm_result_metadata(callee_method());
1705 JRT_BLOCK_END
1706 // return compiled code entry point after potential safepoints
1707 return get_resolved_entry(current, callee_method, false, true, caller_does_not_scalarize);
1708 JRT_END
1709
1710
1711
1712 methodHandle SharedRuntime::handle_ic_miss_helper(bool& caller_does_not_scalarize, TRAPS) {
1713 JavaThread* current = THREAD;
1714 ResourceMark rm(current);
1715 CallInfo call_info;
1716 Bytecodes::Code bc;
1717
1718 // receiver is null for static calls. An exception is thrown for null
1719 // receivers of non-static calls
1720 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1721
1722 methodHandle callee_method(current, call_info.selected_method());
1723
1724 #ifndef PRODUCT
1725 AtomicAccess::inc(&_ic_miss_ctr);
1726
1727 // Statistics & Tracing
1728 if (TraceCallFixup) {
1729 ResourceMark rm(current);
1730 tty->print("IC miss (%s) %s call to", Bytecodes::name(bc), (caller_does_not_scalarize) ? "non-scalar" : "");
1731 callee_method->print_short_name(tty);
1732 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1733 }
1734
1735 if (ICMissHistogram) {
1736 MutexLocker m(VMStatistic_lock);
1737 RegisterMap reg_map(current,
1738 RegisterMap::UpdateMap::skip,
1739 RegisterMap::ProcessFrames::include,
1740 RegisterMap::WalkContinuation::skip);
1741 frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1742 // produce statistics under the lock
1743 trace_ic_miss(f.pc());
1744 }
1745 #endif
1746
1747 // install an event collector so that when a vtable stub is created the
1748 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1749 // event can't be posted when the stub is created as locks are held
1750 // - instead the event will be deferred until the event collector goes
1751 // out of scope.
1752 JvmtiDynamicCodeEventCollector event_collector;
1753
1754 // Update inline cache to megamorphic. Skip the update if we are called from the interpreter.
1755 RegisterMap reg_map(current,
1756 RegisterMap::UpdateMap::skip,
1757 RegisterMap::ProcessFrames::include,
1758 RegisterMap::WalkContinuation::skip);
1759 frame caller_frame = current->last_frame().sender(&reg_map);
1760 CodeBlob* cb = caller_frame.cb();
1761 nmethod* caller_nm = cb->as_nmethod();
1762 // Calls via mismatching methods are always non-scalarized
1763 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1764 caller_does_not_scalarize = true;
1765 }
1766
1767 CompiledICLocker ml(caller_nm);
1768 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1769 inline_cache->update(&call_info, receiver()->klass(), caller_does_not_scalarize);
1770
1771 return callee_method;
1772 }
1773
1774 //
1775 // Resets a call-site in compiled code so it will get resolved again.
1776 // This routine handles virtual call sites, optimized virtual call
1777 // sites, and static call sites. Typically used to change a call site's
1778 // destination from compiled to interpreted.
1779 //
1780 methodHandle SharedRuntime::reresolve_call_site(bool& is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1781 JavaThread* current = THREAD;
1782 ResourceMark rm(current);
1783 RegisterMap reg_map(current,
1784 RegisterMap::UpdateMap::skip,
1785 RegisterMap::ProcessFrames::include,
1786 RegisterMap::WalkContinuation::skip);
1787 frame stub_frame = current->last_frame();
1788 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1789 frame caller = stub_frame.sender(&reg_map);
1790 if (caller.is_compiled_frame()) {
1791 caller_does_not_scalarize = caller.cb()->as_nmethod()->is_compiled_by_c1();
1792 }
1793 assert(!caller.is_interpreted_frame(), "must be compiled");
1794
1795 // If the frame isn't a live compiled frame (i.e., deoptimized by the time we get here), no IC clearing needs to be done
1796 // for the caller. However, when the caller is C2 compiled and the callee a C1 or C2 compiled method, then we still
1797 // need to figure out whether it was an optimized virtual call with an inline type receiver. Otherwise, we end up
1798 // using the wrong method entry point and accidentally skip the buffering of the receiver.
1799 methodHandle callee_method = find_callee_method(caller_does_not_scalarize, CHECK_(methodHandle()));
1800 const bool caller_is_compiled_and_not_deoptimized = caller.is_compiled_frame() && !caller.is_deoptimized_frame();
1801 const bool caller_is_continuation_enter_intrinsic =
1802 caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic();
1803 const bool do_IC_clearing = caller_is_compiled_and_not_deoptimized || caller_is_continuation_enter_intrinsic;
1804
1805 const bool callee_compiled_with_scalarized_receiver = callee_method->has_compiled_code() &&
1806 !callee_method()->is_static() &&
1807 callee_method()->is_scalarized_arg(0);
1808 const bool compute_is_optimized = !caller_does_not_scalarize && callee_compiled_with_scalarized_receiver;
1809
1810 if (do_IC_clearing || compute_is_optimized) {
1811 address pc = caller.pc();
1812
1813 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1814 assert(caller_nm != nullptr, "did not find caller nmethod");
1815
1816 // Default call_addr is the location of the "basic" call.
1817 // Determine the address of the call we are reresolving. With
1818 // Inline Caches we will always find a recognizable call.
1819 // With Inline Caches disabled we may or may not find a
1820 // recognizable call. We will always find a call for static
1821 // calls and for optimized virtual calls. For vanilla virtual
1822 // calls it depends on the state of the UseInlineCaches switch.
1823 //
1824 // With Inline Caches disabled we can get here for a virtual call
1825 // for two reasons:
1826 // 1 - calling an abstract method. The vtable for abstract methods
1827 // will run us through handle_wrong_method and we will eventually
1828 // end up in the interpreter to throw the AME (AbstractMethodError).
1829 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1830 // call and between the time we fetch the entry address and
1831 // the time we jump to it, the target gets deoptimized. Similar to 1,
1832 // we will wind up in the interpreter (through a c2i with c2).
1833 //
1834 CompiledICLocker ml(caller_nm);
1835 address call_addr = caller_nm->call_instruction_address(pc);
1836
1837 if (call_addr != nullptr) {
1838 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1839 // bytes back in the instruction stream, so we must also check for reloc info.
1840 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1841 bool ret = iter.next(); // Get item
1842 if (ret) {
1843 is_optimized = false;
1844 switch (iter.type()) {
1845 case relocInfo::static_call_type:
1846 assert(callee_method->is_static(), "must be");
1847 case relocInfo::opt_virtual_call_type: {
1848 is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
1849 if (do_IC_clearing) {
1850 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1851 cdc->set_to_clean();
1852 }
1853 break;
1854 }
1855 case relocInfo::virtual_call_type: {
1856 if (do_IC_clearing) {
1857 // compiled, dispatched call (which used to call an interpreted method)
1858 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1859 inline_cache->set_to_clean();
1860 }
1861 break;
1862 }
1863 default:
1864 break;
1865 }
1866 }
1867 }
1868 }
1869
1870 #ifndef PRODUCT
1871 AtomicAccess::inc(&_wrong_method_ctr);
1872
1873 if (TraceCallFixup) {
1874 ResourceMark rm(current);
1875 tty->print("handle_wrong_method reresolving %s call to", (caller_does_not_scalarize) ? "non-scalar" : "");
1876 callee_method->print_short_name(tty);
1877 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1878 }
1879 #endif
1880
1881 return callee_method;
1882 }
1883
1884 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1885 // The faulting unsafe accesses should be changed to throw the error
1886 // synchronously instead. Meanwhile the faulting instruction will be
1887 // skipped over (effectively turning it into a no-op) and an
1888 // asynchronous exception will be raised which the thread will
1889 // handle at a later point. If the instruction is a load it will
1890 // return garbage.
1891
1892 // Request an async exception.
1893 thread->set_pending_unsafe_access_error();
1894
1895 // Return address of next instruction to execute.
2061 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
2062
2063 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
2064 if (message == nullptr) {
2065 // Shouldn't happen, but don't cause even more problems if it does
2066 message = const_cast<char*>(caster_klass->external_name());
2067 } else {
2068 jio_snprintf(message,
2069 msglen,
2070 "class %s cannot be cast to class %s (%s%s%s)",
2071 caster_name,
2072 target_name,
2073 caster_klass_description,
2074 klass_separator,
2075 target_klass_description
2076 );
2077 }
2078 return message;
2079 }
2080
2081 char* SharedRuntime::generate_identity_exception_message(JavaThread* current, Klass* klass) {
2082 assert(klass->is_inline_klass(), "Must be a concrete value class");
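// For a hypothetical value class MyValue, the resulting message reads
// "Cannot synchronize on an instance of value class MyValue".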
2083 const char* desc = "Cannot synchronize on an instance of value class ";
2084 const char* className = klass->external_name();
2085 size_t msglen = strlen(desc) + strlen(className) + 1;
2086 char* message = NEW_RESOURCE_ARRAY(char, msglen);
2087 if (message == nullptr) {
2088 // Out of memory: can't create detailed error message
2089 message = const_cast<char*>(klass->external_name());
2090 } else {
2091 jio_snprintf(message, msglen, "%s%s", desc, className);
2092 }
2093 return message;
2094 }
2095
2096 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
2097 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
2098 JRT_END
2099
2100 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2101 if (!SafepointSynchronize::is_synchronizing()) {
2102 // Only try quick_enter() if we're not trying to reach a safepoint
2103 // so that the calling thread reaches the safepoint more quickly.
2104 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
2105 return;
2106 }
2107 }
2108 // NO_ASYNC required because an async exception on the state transition destructor
2109 // would leave you with the lock held and it would never be released.
2110 // The normal monitorenter NullPointerException is thrown without acquiring a lock
2111 // and the model is that an exception implies the method failed.
2112 JRT_BLOCK_NO_ASYNC
2113 Handle h_obj(THREAD, obj);
2114 ObjectSynchronizer::enter(h_obj, lock, current);
2115 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2309 tty->print_cr("Note 1: counter updates are not MT-safe.");
2310 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2311 tty->print_cr(" %% in nested categories are relative to their category");
2312 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2313 tty->cr();
2314
2315 MethodArityHistogram h;
2316 }
2317 #endif
2318
2319 #ifndef PRODUCT
2320 static int _lookups; // number of calls to lookup
2321 static int _equals; // number of buckets checked with matching hash
2322 static int _archived_hits; // number of successful lookups in archived table
2323 static int _runtime_hits; // number of successful lookups in runtime table
2324 #endif
2325
2326 // A simple wrapper class around the calling convention information
2327 // that allows sharing of adapters for the same calling convention.
2328 class AdapterFingerPrint : public MetaspaceObj {
2329 public:
2330 class Element {
2331 private:
2332 // The highest byte is the type of the argument. The remaining bytes contain the offset of the
2333 // field if it is flattened in the calling convention, -1 otherwise.
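// For example, Element(T_INT, -1) stores (T_INT << 24) | 0xFFFFFF, from which
// offset() recovers -1, while Element(T_OBJECT, 8) stores (T_OBJECT << 24) | 8.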
2334 juint _payload;
2335
2336 static constexpr int offset_bit_width = 24;
2337 static constexpr juint offset_bit_mask = (1 << offset_bit_width) - 1;
2338 public:
2339 Element(BasicType bt, int offset) : _payload((static_cast<juint>(bt) << offset_bit_width) | (juint(offset) & offset_bit_mask)) {
2340 assert(offset >= -1 && offset < jint(offset_bit_mask), "invalid offset %d", offset);
2341 }
2342
2343 BasicType bt() const {
2344 return static_cast<BasicType>(_payload >> offset_bit_width);
2345 }
2346
2347 int offset() const {
2348 juint res = _payload & offset_bit_mask;
2349 return res == offset_bit_mask ? -1 : res;
2350 }
2351
2352 juint hash() const {
2353 return _payload;
2354 }
2355
2356 bool operator!=(const Element& other) const {
2357 return _payload != other._payload;
2358 }
2359 };
2360
2361 private:
2362 const bool _has_ro_adapter;
2363 const int _length;
2364
2365 static int data_offset() { return sizeof(AdapterFingerPrint); }
2366 Element* data_pointer() {
2367 return reinterpret_cast<Element*>(reinterpret_cast<address>(this) + data_offset());
2368 }
2369
2370 const Element& element_at(int index) {
2371 assert(index < length(), "index %d out of bounds for length %d", index, length());
2372 Element* data = data_pointer();
2373 return data[index];
2374 }
2375
2376 // Private constructor. Use allocate() to get an instance.
2377 AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter)
2378 : _has_ro_adapter(has_ro_adapter), _length(total_args_passed_in_sig(sig)) {
2379 Element* data = data_pointer();
2380 BasicType prev_bt = T_ILLEGAL;
2381 int vt_count = 0;
2382 for (int index = 0; index < _length; index++) {
2383 const SigEntry& sig_entry = sig->at(index);
2384 BasicType bt = sig_entry._bt;
2385 if (bt == T_METADATA) {
2386 // Found start of inline type in signature
2387 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2388 vt_count++;
2389 } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2390 // Found end of inline type in signature
2391 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2392 vt_count--;
2393 assert(vt_count >= 0, "invalid vt_count");
2394 } else if (vt_count == 0) {
2395 // Widen fields that are not part of a scalarized inline type argument
2396 assert(sig_entry._offset == -1, "invalid offset for argument that is not a flattened field %d", sig_entry._offset);
2397 bt = adapter_encoding(bt);
2398 }
2399
2400 ::new(&data[index]) Element(bt, sig_entry._offset);
2401 prev_bt = bt;
2402 }
2403 assert(vt_count == 0, "invalid vt_count");
2404 }
2405
2406 // Call deallocate instead
2407 ~AdapterFingerPrint() {
2408 ShouldNotCallThis();
2409 }
2410
2411 static int total_args_passed_in_sig(const GrowableArray<SigEntry>* sig) {
2412 return (sig != nullptr) ? sig->length() : 0;
2413 }
2414
2415 static int compute_size_in_words(int len) {
2416 return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(Element)));
2417 }
2418
2419 // Remap BasicTypes that are handled equivalently by the adapters.
2420 // These are correct for the current system but someday it might be
2421 // necessary to make this mapping platform dependent.
2422 static BasicType adapter_encoding(BasicType in) {
2423 switch (in) {
2424 case T_BOOLEAN:
2425 case T_BYTE:
2426 case T_SHORT:
2427 case T_CHAR:
2428 // They are all promoted to T_INT in the calling convention
2429 return T_INT;
2430
2431 case T_OBJECT:
2432 case T_ARRAY:
2433 // In other words, we assume that any register good enough for
2434 // an int or long is good enough for a managed pointer.
2435 #ifdef _LP64
2436 return T_LONG;
2437 #else
2438 return T_INT;
2439 #endif
2440
2441 case T_INT:
2442 case T_LONG:
2443 case T_FLOAT:
2444 case T_DOUBLE:
2445 case T_VOID:
2446 return in;
2447
2448 default:
2449 ShouldNotReachHere();
2450 return T_CONFLICT;
2451 }
2452 }
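// As a consequence, on a 64-bit VM the signatures (boolean, Object) and
// (short, long[]) produce the same fingerprint: T_BOOLEAN and T_SHORT both
// widen to T_INT, while T_OBJECT and T_ARRAY both encode as T_LONG.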
2453
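// Sized placement new: fp_size, computed by allocate() from the argument
// count, covers both the fixed header and the trailing Element array.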
2454 void* operator new(size_t size, size_t fp_size) throw() {
2455 assert(fp_size >= size, "sanity check");
2456 void* p = AllocateHeap(fp_size, mtCode);
2457 memset(p, 0, fp_size);
2458 return p;
2459 }
2460
2461 public:
2462 template<typename Function>
2463 void iterate_args(Function function) {
2464 for (int i = 0; i < length(); i++) {
2465 function(element_at(i));
2466 }
2467 }
2468
2469 static AdapterFingerPrint* allocate(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2470 int len = total_args_passed_in_sig(sig);
2471 int size_in_bytes = BytesPerWord * compute_size_in_words(len);
2472 AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(sig, has_ro_adapter);
2473 assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
2474 return afp;
2475 }
2476
2477 static void deallocate(AdapterFingerPrint* fp) {
2478 FreeHeap(fp);
2479 }
2480
2481 bool has_ro_adapter() const {
2482 return _has_ro_adapter;
2483 }
2484
2485 int length() const {
2486 return _length;
2487 }
2488
2489 unsigned int compute_hash() {
2490 int hash = 0;
2491 for (int i = 0; i < length(); i++) {
2492 const Element& v = element_at(i);
2493 // Add an arithmetic operation to the hash, like +3, to improve the distribution
2494 hash = ((hash << 8) ^ v.hash() ^ (hash >> 5)) + 3;
2495 }
2496 return (unsigned int)hash;
2497 }
2498
2499 const char* as_string() {
2500 stringStream st;
2501 st.print("{");
2502 if (_has_ro_adapter) {
2503 st.print("has_ro_adapter");
2504 } else {
2505 st.print("no_ro_adapter");
2506 }
2507 for (int i = 0; i < length(); i++) {
2508 st.print(", ");
2509 const Element& elem = element_at(i);
2510 st.print("{%s, %d}", type2name(elem.bt()), elem.offset());
2511 }
2512 st.print("}");
2513 return st.as_string();
2514 }
2515
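// Renders the argument list as a compact JVM-style descriptor string. Since
// adapter_encoding() maps oops to T_LONG on 64-bit while a true long is
// followed by its T_VOID half slot, a T_LONG/T_VOID pair prints "J" and a
// bare T_LONG prints "L" (a reference).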
2516 const char* as_basic_args_string() {
2517 stringStream st;
2518 bool long_prev = false;
2519 iterate_args([&] (const Element& arg) {
2520 if (long_prev) {
2521 long_prev = false;
2522 if (arg.bt() == T_VOID) {
2523 st.print("J");
2524 } else {
2525 st.print("L");
2526 }
2527 }
2528 if (arg.bt() == T_LONG) {
2529 long_prev = true;
2530 } else if (arg.bt() != T_VOID) {
2531 st.print("%c", type2char(arg.bt()));
2532 }
2533 });
2534 if (long_prev) {
2535 st.print("L");
2536 }
2537 return st.as_string();
2538 }
2539
2540 bool equals(AdapterFingerPrint* other) {
2541 if (other->_has_ro_adapter != _has_ro_adapter) {
2542 return false;
2543 } else if (other->_length != _length) {
2544 return false;
2545 } else {
2546 for (int i = 0; i < _length; i++) {
2547 if (element_at(i) != other->element_at(i)) {
2548 return false;
2549 }
2550 }
2551 }
2552 return true;
2553 }
2554
2555 // methods required by virtue of being a MetaspaceObj
2556 void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
2557 int size() const { return compute_size_in_words(_length); }
2558 MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2559
2560 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2561 NOT_PRODUCT(_equals++);
2562 return fp1->equals(fp2);
2563 }
2564
2565 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2566 return fp->compute_hash();
2567 }
2570 #if INCLUDE_CDS
2571 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2572 return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2573 }
2574
2575 class ArchivedAdapterTable : public OffsetCompactHashtable<
2576 AdapterFingerPrint*,
2577 AdapterHandlerEntry*,
2578 adapter_fp_equals_compact_hashtable_entry> {};
2579 #endif // INCLUDE_CDS
2580
2581 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2582 using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2583 AnyObj::C_HEAP, mtCode,
2584 AdapterFingerPrint::compute_hash,
2585 AdapterFingerPrint::equals>;
2586 static AdapterHandlerTable* _adapter_handler_table;
2587 static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2588
2589 // Find an entry with the same fingerprint, if it exists
2590 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter) {
2591 NOT_PRODUCT(_lookups++);
2592 assert_lock_strong(AdapterHandlerLibrary_lock);
2593 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(sig, has_ro_adapter);
2594 AdapterHandlerEntry* entry = nullptr;
2595 #if INCLUDE_CDS
2596 // If we are building the archive, the archived adapter table is not
2597 // valid and we need to use the entries added to the runtime table
2598 if (AOTCodeCache::is_using_adapter()) {
2599 // Search the archived table first. It is a read-only table, so it can be searched without a lock
2600 entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2601 #ifndef PRODUCT
2602 if (entry != nullptr) {
2603 _archived_hits++;
2604 }
2605 #endif
2606 }
2607 #endif // INCLUDE_CDS
2608 if (entry == nullptr) {
2609 assert_lock_strong(AdapterHandlerLibrary_lock);
2610 AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2611 if (entry_p != nullptr) {
2612 entry = *entry_p;
2613 assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2630 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2631 ts.print(tty, "AdapterHandlerTable");
2632 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2633 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2634 int total_hits = _archived_hits + _runtime_hits;
2635 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2636 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2637 }
2638 #endif
2639
2640 // ---------------------------------------------------------------------------
2641 // Implementation of AdapterHandlerLibrary
2642 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2643 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2644 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2645 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2646 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2647 #if INCLUDE_CDS
2648 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2649 #endif // INCLUDE_CDS
2650 static const int AdapterHandlerLibrary_size = 48*K;
2651 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2652 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2653
2654 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2655 assert(_buffer != nullptr, "should be initialized");
2656 return _buffer;
2657 }
2658
2659 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2660 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2661 AdapterBlob* adapter_blob = entry->adapter_blob();
2662 char blob_id[256];
2663 jio_snprintf(blob_id,
2664 sizeof(blob_id),
2665 "%s(%s)",
2666 adapter_blob->name(),
2667 entry->fingerprint()->as_string());
2668 if (Forte::is_enabled()) {
2669 Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2670 }
2678 void AdapterHandlerLibrary::initialize() {
2679 {
2680 ResourceMark rm;
2681 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2682 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2683 }
2684
2685 #if INCLUDE_CDS
2686 // Link adapters in AOT Cache to their code in AOT Code Cache
2687 if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
2688 link_aot_adapters();
2689 lookup_simple_adapters();
2690 return;
2691 }
2692 #endif // INCLUDE_CDS
2693
2694 ResourceMark rm;
2695 {
2696 MutexLocker mu(AdapterHandlerLibrary_lock);
2697
2698 CompiledEntrySignature no_args;
2699 no_args.compute_calling_conventions();
2700 _no_arg_handler = create_adapter(no_args, true);
2701
2702 CompiledEntrySignature obj_args;
2703 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
2704 obj_args.compute_calling_conventions();
2705 _obj_arg_handler = create_adapter(obj_args, true);
2706
2707 CompiledEntrySignature int_args;
2708 SigEntry::add_entry(int_args.sig(), T_INT);
2709 int_args.compute_calling_conventions();
2710 _int_arg_handler = create_adapter(int_args, true);
2711
2712 CompiledEntrySignature obj_int_args;
2713 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
2714 SigEntry::add_entry(obj_int_args.sig(), T_INT);
2715 obj_int_args.compute_calling_conventions();
2716 _obj_int_arg_handler = create_adapter(obj_int_args, true);
2717
2718 CompiledEntrySignature obj_obj_args;
2719 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2720 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2721 obj_obj_args.compute_calling_conventions();
2722 _obj_obj_arg_handler = create_adapter(obj_obj_args, true);
2723
2724 // we should always get an entry back but we don't have any
2725 // associated blob on Zero
2726 assert(_no_arg_handler != nullptr &&
2727 _obj_arg_handler != nullptr &&
2728 _int_arg_handler != nullptr &&
2729 _obj_int_arg_handler != nullptr &&
2730 _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
2731 }
2732
2733 // Outside of the lock
2734 #ifndef ZERO
2735 // no blobs to register when we are on Zero
2736 post_adapter_creation(_no_arg_handler);
2737 post_adapter_creation(_obj_arg_handler);
2738 post_adapter_creation(_int_arg_handler);
2739 post_adapter_creation(_obj_int_arg_handler);
2740 post_adapter_creation(_obj_obj_arg_handler);
2741 #endif // ZERO
2742 }
2743
2744 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
2745 uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
2746 assert(id > 0, "we can never overflow because the AOT cache cannot contain more than 2^32 methods");
2747 return AdapterHandlerEntry::allocate(id, fingerprint);
2748 }
2749
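// Returns a pre-generated adapter for methods with at most two simple
// arguments, or nullptr if the general lookup path must be used. For
// example (the method names are illustrative only), a virtual "int size()"
// with its implicit receiver maps to _obj_arg_handler, while a
// "static int abs(int)" maps to _int_arg_handler.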
2750 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2751 int total_args_passed = method->size_of_parameters(); // All args on stack
2752 if (total_args_passed == 0) {
2753 return _no_arg_handler;
2754 } else if (total_args_passed == 1) {
2755 if (!method->is_static()) {
2756 if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2757 return nullptr;
2758 }
2759 return _obj_arg_handler;
2760 }
2761 switch (method->signature()->char_at(1)) {
2762 case JVM_SIGNATURE_CLASS: {
2763 if (InlineTypePassFieldsAsArgs) {
2764 SignatureStream ss(method->signature());
2765 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2766 if (vk != nullptr) {
2767 return nullptr;
2768 }
2769 }
2770 return _obj_arg_handler;
2771 }
2772 case JVM_SIGNATURE_ARRAY:
2773 return _obj_arg_handler;
2774 case JVM_SIGNATURE_INT:
2775 case JVM_SIGNATURE_BOOLEAN:
2776 case JVM_SIGNATURE_CHAR:
2777 case JVM_SIGNATURE_BYTE:
2778 case JVM_SIGNATURE_SHORT:
2779 return _int_arg_handler;
2780 }
2781 } else if (total_args_passed == 2 &&
2782 !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2783 switch (method->signature()->char_at(1)) {
2784 case JVM_SIGNATURE_CLASS: {
2785 if (InlineTypePassFieldsAsArgs) {
2786 SignatureStream ss(method->signature());
2787 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2788 if (vk != nullptr) {
2789 return nullptr;
2790 }
2791 }
2792 return _obj_obj_arg_handler;
2793 }
2794 case JVM_SIGNATURE_ARRAY:
2795 return _obj_obj_arg_handler;
2796 case JVM_SIGNATURE_INT:
2797 case JVM_SIGNATURE_BOOLEAN:
2798 case JVM_SIGNATURE_CHAR:
2799 case JVM_SIGNATURE_BYTE:
2800 case JVM_SIGNATURE_SHORT:
2801 return _obj_int_arg_handler;
2802 }
2803 }
2804 return nullptr;
2805 }
2806
2807 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2808 _method(method), _num_inline_args(0), _has_inline_recv(false),
2809 _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2810 _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2811 _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2812 _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2813 _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2814 _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2815 }
2816
2817 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2818 // or the same entry for VEP and VIEP(RO).
2819 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2820 if (!has_scalarized_args()) {
2821 // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2822 return CodeOffsets::Verified_Entry;
2823 }
2824 if (_method->is_static()) {
2825 // Static methods don't need VIEP(RO)
2826 return CodeOffsets::Verified_Entry;
2827 }
2828
2829 if (has_inline_recv()) {
2830 if (num_inline_args() == 1) {
2831 // Share same entry for VIEP and VIEP(RO).
2832 // This is quite common: we have an instance method in an InlineKlass that has
2833 // no inline type args other than <this>.
2834 return CodeOffsets::Verified_Inline_Entry;
2835 } else {
2836 assert(num_inline_args() > 1, "must be");
2837 // No sharing:
2838 // VIEP(RO) -- <this> is passed as object
2839 // VEP -- <this> is passed as fields
2840 return CodeOffsets::Verified_Inline_Entry_RO;
2841 }
2842 }
2843
2844   // Non-static method where <this> is not an inline type (static methods returned above)
2845 if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2846 // No sharing:
2847 // Some arguments are passed on the stack, and we have inserted reserved entries
2848 // into the VEP, but we never insert reserved entries into the VIEP(RO).
2849 return CodeOffsets::Verified_Inline_Entry_RO;
2850 } else {
2851 // Share same entry for VEP and VIEP(RO).
2852 return CodeOffsets::Verified_Entry;
2853 }
2854 }
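// Summary of the sharing decisions above:
//
//   no scalarized args                    -> Verified_Entry           (VEP == VIEP == VIEP(RO))
//   static method                         -> Verified_Entry           (no VIEP(RO) needed)
//   inline <this>, only inline type arg   -> Verified_Inline_Entry    (VIEP == VIEP(RO))
//   inline <this>, more inline type args  -> Verified_Inline_Entry_RO (no sharing)
//   no inline <this>, stack sizes differ  -> Verified_Inline_Entry_RO (no sharing)
//   no inline <this>, stack sizes match   -> Verified_Entry           (VEP == VIEP(RO))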
2855
2856 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
2857 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2858 if (_supers != nullptr) {
2859 return _supers;
2860 }
2861 _supers = new GrowableArray<Method*>();
2862 // Skip private, static, and <init> methods
2863 if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2864 return _supers;
2865 }
2866 Symbol* name = _method->name();
2867 Symbol* signature = _method->signature();
2868 const Klass* holder = _method->method_holder()->super();
2869 Symbol* holder_name = holder->name();
2870 ThreadInVMfromUnknown tiv;
2871 JavaThread* current = JavaThread::current();
2872 HandleMark hm(current);
2873 Handle loader(current, _method->method_holder()->class_loader());
2874
2875 // Walk up the class hierarchy and search for super methods
2876 while (holder != nullptr) {
2877 Method* super_method = holder->lookup_method(name, signature);
2878 if (super_method == nullptr) {
2879 break;
2880 }
2881 if (!super_method->is_static() && !super_method->is_private() &&
2882 (!super_method->is_package_private() ||
2883 super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
2884 _supers->push(super_method);
2885 }
2886 holder = super_method->method_holder()->super();
2887 }
2888 // Search interfaces for super methods
2889 Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
2890 for (int i = 0; i < interfaces->length(); ++i) {
2891 Method* m = interfaces->at(i)->lookup_method(name, signature);
2892 if (m != nullptr && !m->is_static() && m->is_public()) {
2893 _supers->push(m);
2894 }
2895 }
2896 return _supers;
2897 }
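// Example (a sketch): given
//   class A           { void m(MyValue v) { ... } }
//   class B extends A { void m(MyValue v) { ... } }
//   class C extends B { void m(MyValue v) { ... } }
// get_supers() for C::m walks B, then A, and returns [B::m, A::m], plus any
// matching public methods from C's transitive interfaces. The class walk
// stops at the first level where no method with the same name and signature
// is found.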
2898
2899 // Iterate over arguments and compute scalarized and non-scalarized signatures
2900 void CompiledEntrySignature::compute_calling_conventions(bool init) {
2901 bool has_scalarized = false;
2902 if (_method != nullptr) {
2903 InstanceKlass* holder = _method->method_holder();
2904 int arg_num = 0;
2905 if (!_method->is_static()) {
2906 // We shouldn't scalarize 'this' in a value class constructor
2907 if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() && !_method->is_object_constructor() &&
2908 (init || _method->is_scalarized_arg(arg_num))) {
2909 _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
2910 has_scalarized = true;
2911 _has_inline_recv = true;
2912 _num_inline_args++;
2913 } else {
2914 SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
2915 }
2916 SigEntry::add_entry(_sig, T_OBJECT, holder->name());
2917 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
2918 arg_num++;
2919 }
2920 for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
2921 BasicType bt = ss.type();
2922 if (bt == T_OBJECT) {
2923 InlineKlass* vk = ss.as_inline_klass(holder);
2924 if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
2925 // Check for a calling convention mismatch with super method(s)
2926 bool scalar_super = false;
2927 bool non_scalar_super = false;
2928 GrowableArray<Method*>* supers = get_supers();
2929 for (int i = 0; i < supers->length(); ++i) {
2930 Method* super_method = supers->at(i);
2931 if (super_method->is_scalarized_arg(arg_num)) {
2932 scalar_super = true;
2933 } else {
2934 non_scalar_super = true;
2935 }
2936 }
2937 #ifdef ASSERT
2938 // Randomly enable below code paths for stress testing
2939 bool stress = init && StressCallingConvention;
2940 if (stress && (os::random() & 1) == 1) {
2941 non_scalar_super = true;
2942 if ((os::random() & 1) == 1) {
2943 scalar_super = true;
2944 }
2945 }
2946 #endif
2947 if (non_scalar_super) {
2948 // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
2949 if (scalar_super) {
2950 // Found non-scalar *and* scalar super methods. We can't handle both.
2951 // Mark the scalar method as mismatch and re-compile call sites to use non-scalarized calling convention.
2952 for (int i = 0; i < supers->length(); ++i) {
2953 Method* super_method = supers->at(i);
2954 if (super_method->is_scalarized_arg(arg_num) DEBUG_ONLY(|| (stress && (os::random() & 1) == 1))) {
2955 super_method->set_mismatch();
2956 MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
2957 JavaThread* thread = JavaThread::current();
2958 HandleMark hm(thread);
2959 methodHandle mh(thread, super_method);
2960 DeoptimizationScope deopt_scope;
2961 CodeCache::mark_for_deoptimization(&deopt_scope, mh());
2962 deopt_scope.deoptimize_marked();
2963 }
2964 }
2965 }
2966 // Fall back to non-scalarized calling convention
2967 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2968 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2969 } else {
2970 _num_inline_args++;
2971 has_scalarized = true;
2972 int last = _sig_cc->length();
2973 int last_ro = _sig_cc_ro->length();
2974 _sig_cc->appendAll(vk->extended_sig());
2975 _sig_cc_ro->appendAll(vk->extended_sig());
2976 if (bt == T_OBJECT) {
2977             // Nullable inline type argument: insert an InlineTypeNode::NullMarker field right after the T_METADATA delimiter
2978 _sig_cc->insert_before(last+1, SigEntry(T_BOOLEAN, -1, nullptr, true));
2979 _sig_cc_ro->insert_before(last_ro+1, SigEntry(T_BOOLEAN, -1, nullptr, true));
2980 }
2981 }
2982 } else {
2983 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2984 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2985 }
2986 bt = T_OBJECT;
2987 } else {
2988 SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
2989 SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
2990 }
2991 SigEntry::add_entry(_sig, bt, ss.as_symbol());
2992 if (bt != T_VOID) {
2993 arg_num++;
2994 }
2995 }
2996 }
2997
2998 // Compute the non-scalarized calling convention
2999 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3000 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3001
3002 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3003 if (has_scalarized && !_method->is_native()) {
3004 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3005 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3006
3007 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3008 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3009
3010 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3011 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3012
3013 // Upper bound on stack arguments to avoid hitting the argument limit and
3014 // bailing out of compilation ("unsupported incoming calling sequence").
3015 // TODO we need a reasonable limit (flag?) here
3016 if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 60) {
3017 return; // Success
3018 }
3019 }
3020
3021 // No scalarized args
3022 _sig_cc = _sig;
3023 _regs_cc = _regs;
3024 _args_on_stack_cc = _args_on_stack;
3025
3026 _sig_cc_ro = _sig;
3027 _regs_cc_ro = _regs;
3028 _args_on_stack_cc_ro = _args_on_stack;
3029 }
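// Worked sketch (the exact layout is defined by InlineKlass::extended_sig(),
// so the entries below are only indicative): for
//   void m(MyValue v, int x)   // MyValue declares fields (int a, long b)
// with a scalarizable, nullable 'v', roughly:
//   _sig    = [ T_OBJECT(recv), T_OBJECT(v), T_INT(x) ]
//   _sig_cc = [ T_OBJECT(recv), T_METADATA, T_BOOLEAN(null marker),
//               T_INT(a), T_LONG(b), T_VOID, T_VOID(end), T_INT(x) ]
// The null marker is inserted right after the T_METADATA delimiter, and
// stack repair is flagged whenever the scalarized and non-scalarized
// conventions consume a different number of stack slots.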
3030
3031 void CompiledEntrySignature::initialize_from_fingerprint(AdapterFingerPrint* fingerprint) {
3032 _has_inline_recv = fingerprint->has_ro_adapter();
3033
3034 int value_object_count = 0;
3035 BasicType prev_bt = T_ILLEGAL;
3036 bool has_scalarized_arguments = false;
3037 bool long_prev = false;
3038 int long_prev_offset = -1;
3039
3040 fingerprint->iterate_args([&] (const AdapterFingerPrint::Element& arg) {
3041 BasicType bt = arg.bt();
3042 int offset = arg.offset();
3043
3044 if (long_prev) {
3045 long_prev = false;
3046 BasicType bt_to_add;
3047 if (bt == T_VOID) {
3048 bt_to_add = T_LONG;
3049 } else {
3050 bt_to_add = T_OBJECT;
3051 }
3052 if (value_object_count == 0) {
3053 SigEntry::add_entry(_sig, bt_to_add);
3054 }
3055 SigEntry::add_entry(_sig_cc, bt_to_add, nullptr, long_prev_offset);
3056 SigEntry::add_entry(_sig_cc_ro, bt_to_add, nullptr, long_prev_offset);
3057 }
3058
3059 switch (bt) {
3060 case T_VOID:
3061 if (prev_bt != T_LONG && prev_bt != T_DOUBLE) {
3062 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
3063 value_object_count--;
3064 SigEntry::add_entry(_sig_cc, T_VOID, nullptr, offset);
3065 SigEntry::add_entry(_sig_cc_ro, T_VOID, nullptr, offset);
3066 assert(value_object_count >= 0, "invalid value object count");
3067 } else {
3068           // Nothing to add for _sig: we already added an additional T_VOID in add_entry() when adding T_LONG or T_DOUBLE.
3069 }
3070 break;
3071 case T_INT:
3072 case T_FLOAT:
3073 case T_DOUBLE:
3074 if (value_object_count == 0) {
3075 SigEntry::add_entry(_sig, bt);
3076 }
3077 SigEntry::add_entry(_sig_cc, bt, nullptr, offset);
3078 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset);
3079 break;
3080 case T_LONG:
3081 long_prev = true;
3082 long_prev_offset = offset;
3083 break;
3084 case T_BOOLEAN:
3085 case T_CHAR:
3086 case T_BYTE:
3087 case T_SHORT:
3088 case T_OBJECT:
3089 case T_ARRAY:
3090 assert(value_object_count > 0, "must be value object field");
3091 SigEntry::add_entry(_sig_cc, bt, nullptr, offset);
3092 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset);
3093 break;
3094 case T_METADATA:
3095 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
3096 if (value_object_count == 0) {
3097 SigEntry::add_entry(_sig, T_OBJECT);
3098 }
3099 SigEntry::add_entry(_sig_cc, T_METADATA, nullptr, offset);
3100 SigEntry::add_entry(_sig_cc_ro, T_METADATA, nullptr, offset);
3101 value_object_count++;
3102 has_scalarized_arguments = true;
3103 break;
3104 default: {
3105 fatal("Unexpected BasicType: %s", basictype_to_str(bt));
3106 }
3107 }
3108 prev_bt = bt;
3109 });
3110
3111 if (long_prev) {
3112 // If previous bt was T_LONG and we reached the end of the signature, we know that it must be a T_OBJECT.
3113 SigEntry::add_entry(_sig, T_OBJECT);
3114 SigEntry::add_entry(_sig_cc, T_OBJECT);
3115 SigEntry::add_entry(_sig_cc_ro, T_OBJECT);
3116 }
3117 assert(value_object_count == 0, "invalid value object count");
3118
3119 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3120 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3121
3122 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3123 if (has_scalarized_arguments) {
3124 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3125 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3126
3127 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3128 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3129
3130 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3131 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3132 } else {
3133 // No scalarized args
3134 _sig_cc = _sig;
3135 _regs_cc = _regs;
3136 _args_on_stack_cc = _args_on_stack;
3137
3138 _sig_cc_ro = _sig;
3139 _regs_cc_ro = _regs;
3140 _args_on_stack_cc_ro = _args_on_stack;
3141 }
3142
3143 #ifdef ASSERT
3144 {
3145 AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(_sig_cc, _has_inline_recv);
3146 assert(fingerprint->equals(compare_fp), "%s - %s", fingerprint->as_string(), compare_fp->as_string());
3147 AdapterFingerPrint::deallocate(compare_fp);
3148 }
3149 #endif
3150 }
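// Note on the long_prev handling above: the fingerprint encodes oops and
// longs with the same element type, so a T_LONG element is held back until
// the next element is seen:
//   T_LONG followed by T_VOID        -> a real long (two Java slots)
//   T_LONG followed by anything else -> an oop (T_OBJECT)
//   T_LONG as the last element       -> an oop (T_OBJECT)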
3151
3152 const char* AdapterHandlerEntry::_entry_names[] = {
3153   "i2c", "c2i", "c2i_inline", "c2i_inline_ro", "c2i_unverified", "c2i_unverified_inline", "c2i_no_clinit_check"
3154 };
3155
3156 #ifdef ASSERT
3157 void AdapterHandlerLibrary::verify_adapter_sharing(CompiledEntrySignature& ces, AdapterHandlerEntry* cached_entry) {
3158   // We can only compare the generated code if there is any (there is none on Zero).
3159 #ifndef ZERO
3160 AdapterHandlerEntry* comparison_entry = create_adapter(ces, false, true);
3161 assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
3162 assert(comparison_entry->compare_code(cached_entry), "code must match");
3163 // Release the one just created
3164 AdapterHandlerEntry::deallocate(comparison_entry);
3165 #endif // ZERO
3166 }
3167 #endif /* ASSERT */
3168
3169 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
3170 assert(!method->is_abstract(), "abstract methods do not have adapters");
3171 // Use customized signature handler. Need to lock around updates to
3172 // the _adapter_handler_table (it is not safe for concurrent readers
3173 // and a single writer: this could be fixed if it becomes a
3174 // problem).
3175
3176 // Fast-path for trivial adapters
3177 AdapterHandlerEntry* entry = get_simple_adapter(method);
3178 if (entry != nullptr) {
3179 return entry;
3180 }
3181
3182 ResourceMark rm;
3183 bool new_entry = false;
3184
3185 CompiledEntrySignature ces(method());
3186 ces.compute_calling_conventions();
3187 if (ces.has_scalarized_args()) {
3188 if (!method->has_scalarized_args()) {
3189 method->set_has_scalarized_args();
3190 }
3191 if (ces.c1_needs_stack_repair()) {
3192 method->set_c1_needs_stack_repair();
3193 }
3194 if (ces.c2_needs_stack_repair() && !method->c2_needs_stack_repair()) {
3195 method->set_c2_needs_stack_repair();
3196 }
3197 }
3198
3199 {
3200 MutexLocker mu(AdapterHandlerLibrary_lock);
3201
3202 // Lookup method signature's fingerprint
3203 entry = lookup(ces.sig_cc(), ces.has_inline_recv());
3204
3205 if (entry != nullptr) {
3206 #ifndef ZERO
3207 assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
3208 #endif
3209 #ifdef ASSERT
3210 if (!entry->in_aot_cache() && VerifyAdapterSharing) {
3211 verify_adapter_sharing(ces, entry);
3212 }
3213 #endif
3214 } else {
3215 entry = create_adapter(ces, /* allocate_code_blob */ true);
3216 if (entry != nullptr) {
3217 new_entry = true;
3218 }
3219 }
3220 }
3221
3222 // Outside of the lock
3223 if (new_entry) {
3224 post_adapter_creation(entry);
3225 }
3226 return entry;
3227 }
3228
3229 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
3230 ResourceMark rm;
3231 const char* name = AdapterHandlerLibrary::name(handler);
3232 const uint32_t id = AdapterHandlerLibrary::id(handler);
3233
3234 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
3235 if (blob != nullptr) {
3250 }
3251 insts_size = adapter_blob->code_size();
3252 st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
3253 handler->fingerprint()->as_basic_args_string(),
3254 handler->fingerprint()->as_string(), insts_size);
3255 st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
3256 if (Verbose || PrintStubCode) {
3257 address first_pc = adapter_blob->content_begin();
3258 if (first_pc != nullptr) {
3259 Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
3260 st->cr();
3261 }
3262 }
3263 }
3264 #endif // PRODUCT
3265
3266 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
3267 int entry_offset[AdapterBlob::ENTRY_COUNT]) {
3268 entry_offset[AdapterBlob::I2C] = 0;
3269 entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];
3270 entry_offset[AdapterBlob::C2I_Inline] = entry_address[AdapterBlob::C2I_Inline] - entry_address[AdapterBlob::I2C];
3271 entry_offset[AdapterBlob::C2I_Inline_RO] = entry_address[AdapterBlob::C2I_Inline_RO] - entry_address[AdapterBlob::I2C];
3272 entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];
3273 entry_offset[AdapterBlob::C2I_Unverified_Inline] = entry_address[AdapterBlob::C2I_Unverified_Inline] - entry_address[AdapterBlob::I2C];
3274 if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
3275 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
3276 } else {
3277 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
3278 }
3279 }
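// For illustration, the inverse mapping as a sketch (assuming the I2C entry
// is the blob's first instruction, which is why it is assigned offset 0
// above; 'entry_at' is a hypothetical helper, not part of this file):
#if 0
static address entry_at(AdapterBlob* blob,
                        const int entry_offset[AdapterBlob::ENTRY_COUNT],
                        int kind) {
  if (entry_offset[kind] == -1) {
    return nullptr; // not generated (only C2I_No_Clinit_Check can be missing)
  }
  return blob->code_begin() + entry_offset[kind];
}
#endif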
3280
3281 bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
3282 CompiledEntrySignature& ces,
3283 bool allocate_code_blob,
3284 bool is_transient) {
3285 if (log_is_enabled(Info, perf, class, link)) {
3286 ClassLoader::perf_method_adapters_count()->inc();
3287 }
3288
3289 #ifndef ZERO
3290 AdapterBlob* adapter_blob = nullptr;
3291 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3292 CodeBuffer buffer(buf);
3293 short buffer_locs[20];
3294 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3295 sizeof(buffer_locs)/sizeof(relocInfo));
3296 MacroAssembler masm(&buffer);
3297 address entry_address[AdapterBlob::ENTRY_COUNT];
3298
3299 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
3300 SharedRuntime::generate_i2c2i_adapters(&masm,
3301 ces.args_on_stack(),
3302 ces.sig(),
3303 ces.regs(),
3304 ces.sig_cc(),
3305 ces.regs_cc(),
3306 ces.sig_cc_ro(),
3307 ces.regs_cc_ro(),
3308 entry_address,
3309 adapter_blob,
3310 allocate_code_blob);
3311
3312 if (ces.has_scalarized_args()) {
3313 // Save a C heap allocated version of the scalarized signature and store it in the adapter
3314 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3315 heap_sig->appendAll(ces.sig_cc());
3316 handler->set_sig_cc(heap_sig);
3317 }
3318   // On Zero there is no code to save and no need to create a blob
3319   // or relocate the handler.
3320 int entry_offset[AdapterBlob::ENTRY_COUNT];
3321 address_to_offset(entry_address, entry_offset);
3322 #ifdef ASSERT
3323 if (VerifyAdapterSharing) {
3324 handler->save_code(buf->code_begin(), buffer.insts_size());
3325 if (is_transient) {
3326 return true;
3327 }
3328 }
3329 #endif
3330 if (adapter_blob == nullptr) {
3331 // CodeCache is full, disable compilation
3332     // Ought to log this but the compile log is only per compile thread
3333     // and we're some nondescript Java thread.
3334 return false;
3335 }
3336 handler->set_adapter_blob(adapter_blob);
3337 if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
3338 // try to save generated code
3339 const char* name = AdapterHandlerLibrary::name(handler);
3340 const uint32_t id = AdapterHandlerLibrary::id(handler);
3341 bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
3342 assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
3343 }
3344 #endif // ZERO
3345
3346 #ifndef PRODUCT
3347 // debugging support
3348 if (PrintAdapterHandlers || PrintStubCode) {
3349 print_adapter_handler_info(tty, handler);
3350 }
3351 #endif
3352
3353 return true;
3354 }
3355
3356 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(CompiledEntrySignature& ces,
3357 bool allocate_code_blob,
3358 bool is_transient) {
3359 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(ces.sig_cc(), ces.has_inline_recv());
3360 #ifdef ASSERT
3361 // Verify that we can successfully restore the compiled entry signature object.
3362 CompiledEntrySignature ces_verify;
3363 ces_verify.initialize_from_fingerprint(fp);
3364 #endif
3365 AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
3366 if (!generate_adapter_code(handler, ces, allocate_code_blob, is_transient)) {
3367 AdapterHandlerEntry::deallocate(handler);
3368 return nullptr;
3369 }
3370 if (!is_transient) {
3371 assert_lock_strong(AdapterHandlerLibrary_lock);
3372 _adapter_handler_table->put(fp, handler);
3373 }
3374 return handler;
3375 }
3376
3377 #if INCLUDE_CDS
3378 void AdapterHandlerEntry::remove_unshareable_info() {
3379 #ifdef ASSERT
3380 _saved_code = nullptr;
3381 _saved_code_length = 0;
3382 #endif // ASSERT
3383 _adapter_blob = nullptr;
3384 _linked = false;
3385 }
3386
3449 // This method is used during a production run to link archived adapters (stored in the AOT cache)
3450 // to their code in the AOT code cache.
3451 void AdapterHandlerEntry::link() {
3452 ResourceMark rm;
3453 assert(_fingerprint != nullptr, "_fingerprint must not be null");
3454 bool generate_code = false;
3455 // Generate code only if AOTCodeCache is not available, or
3456 // caching adapters is disabled, or we fail to link
3457 // the AdapterHandlerEntry to its code in the AOTCodeCache
3458 if (AOTCodeCache::is_using_adapter()) {
3459 AdapterHandlerLibrary::link_aot_adapter_handler(this);
3460 // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
3461 if (_adapter_blob == nullptr) {
3462 log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
3463 generate_code = true;
3464 }
3465 } else {
3466 generate_code = true;
3467 }
3468 if (generate_code) {
3469 CompiledEntrySignature ces;
3470 ces.initialize_from_fingerprint(_fingerprint);
3471 if (!AdapterHandlerLibrary::generate_adapter_code(this, ces, true, false)) {
3472 // Don't throw exceptions during VM initialization because java.lang.* classes
3473 // might not have been initialized, causing problems when constructing the
3474 // Java exception object.
3475 vm_exit_during_initialization("Out of space in CodeCache for adapters");
3476 }
3477 }
3478 if (_adapter_blob != nullptr) {
3479 post_adapter_creation(this);
3480 }
3481 assert(_linked, "AdapterHandlerEntry must now be linked");
3482 }
3483
3484 void AdapterHandlerLibrary::link_aot_adapters() {
3485 uint max_id = 0;
3486 assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
3487   /* It is possible that some adapters generated during the assembly phase are not stored in the cache.
3488    * That implies the adapter ids of the adapters in the cache may not be contiguous.
3489    * If the size of the _aot_adapter_handler_table were used to initialize _id_counter, it could
3490    * result in a collision of adapter ids between AOT-stored handlers and runtime-generated handlers.
3491    * To avoid such a situation, initialize _id_counter with the largest adapter id among the AOT-stored handlers.
3492    */
3493 _aot_adapter_handler_table.iterate([&](AdapterHandlerEntry* entry) {
3494 assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
3495 entry->link();
3496 max_id = MAX2(max_id, entry->id());
3497 });
3498 // Set adapter id to the maximum id found in the AOTCache
3499 assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
3500 _id_counter = max_id;
3501 }
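// Example: if the AOT cache holds handlers with ids {1, 3, 7} (ids need not
// be contiguous, as noted above), _id_counter becomes 7 and the first
// runtime-created handler from new_entry() gets id 8, so AOT and runtime
// ids cannot collide.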
3502
3503 // This method is called during a production run to look up simple adapters
3504 // in the archived adapter handler table.
3505 void AdapterHandlerLibrary::lookup_simple_adapters() {
3506 assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");
3507
3508 MutexLocker mu(AdapterHandlerLibrary_lock);
3509 ResourceMark rm;
3510 CompiledEntrySignature no_args;
3511 no_args.compute_calling_conventions();
3512 _no_arg_handler = lookup(no_args.sig_cc(), no_args.has_inline_recv());
3513
3514 CompiledEntrySignature obj_args;
3515 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
3516 obj_args.compute_calling_conventions();
3517 _obj_arg_handler = lookup(obj_args.sig_cc(), obj_args.has_inline_recv());
3518
3519 CompiledEntrySignature int_args;
3520 SigEntry::add_entry(int_args.sig(), T_INT);
3521 int_args.compute_calling_conventions();
3522 _int_arg_handler = lookup(int_args.sig_cc(), int_args.has_inline_recv());
3523
3524 CompiledEntrySignature obj_int_args;
3525 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
3526 SigEntry::add_entry(obj_int_args.sig(), T_INT);
3527 obj_int_args.compute_calling_conventions();
3528 _obj_int_arg_handler = lookup(obj_int_args.sig_cc(), obj_int_args.has_inline_recv());
3529
3530 CompiledEntrySignature obj_obj_args;
3531 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3532 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3533 obj_obj_args.compute_calling_conventions();
3534 _obj_obj_arg_handler = lookup(obj_obj_args.sig_cc(), obj_obj_args.has_inline_recv());
3535
3536 assert(_no_arg_handler != nullptr &&
3537 _obj_arg_handler != nullptr &&
3538 _int_arg_handler != nullptr &&
3539 _obj_int_arg_handler != nullptr &&
3540 _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
3541 assert(_no_arg_handler->is_linked() &&
3542 _obj_arg_handler->is_linked() &&
3543 _int_arg_handler->is_linked() &&
3544 _obj_int_arg_handler->is_linked() &&
3545 _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
3546 }
3547 #endif // INCLUDE_CDS
3548
3549 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3550 LogStreamHandle(Trace, aot) lsh;
3551 if (lsh.is_enabled()) {
3552 lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3553 lsh.cr();
3554 }
3555 it->push(&_fingerprint);
3556 }
3557
3558 AdapterHandlerEntry::~AdapterHandlerEntry() {
3559 if (_fingerprint != nullptr) {
3560 AdapterFingerPrint::deallocate(_fingerprint);
3561 _fingerprint = nullptr;
3562 }
3563 if (_sig_cc != nullptr) {
3564 delete _sig_cc;
3565 }
3566 #ifdef ASSERT
3567 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3568 #endif
3569 FreeHeap(this);
3570 }
3571
3572
3573 #ifdef ASSERT
3574 // Capture the code before relocation so that it can be compared
3575 // against other versions. If the code is captured after relocation
3576 // then relative instructions won't be equivalent.
3577 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3578 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3579 _saved_code_length = length;
3580 memcpy(_saved_code, buffer, length);
3581 }
3582
3583
3584 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3585 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3633
3634 struct { double data[20]; } locs_buf;
3635 struct { double data[20]; } stubs_locs_buf;
3636 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3637 #if defined(AARCH64) || defined(PPC64)
3638 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3639 // in the constant pool to ensure ordering between the barrier and oops
3640 // accesses. For native_wrappers we need a constant.
3641 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3642 // static java call that is resolved in the runtime.
3643 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3644 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3645 }
3646 #endif
3647 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3648 MacroAssembler _masm(&buffer);
3649
3650 // Fill in the signature array, for the calling-convention call.
3651 const int total_args_passed = method->size_of_parameters();
3652
3653 BasicType stack_sig_bt[16];
3654 VMRegPair stack_regs[16];
3655 BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3656 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3657
3658 int i = 0;
3659 if (!method->is_static()) { // Pass in receiver first
3660 sig_bt[i++] = T_OBJECT;
3661 }
3662 SignatureStream ss(method->signature());
3663 for (; !ss.at_return_type(); ss.next()) {
3664 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
3665 if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3666 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
3667 }
3668 }
3669 assert(i == total_args_passed, "");
3670 BasicType ret_type = ss.type();
3671
3672 // Now get the compiled-Java arguments layout.
3673 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3674
3675 // Generate the compiled-to-native wrapper code
3676 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3677
3678 if (nm != nullptr) {
3679 {
3680 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3681 if (nm->make_in_use()) {
3682 method->set_code(method, nm);
3683 }
3684 }
3685
3686 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3687 if (directive->PrintAssemblyOption) {
3688 nm->print_code();
3689 }
3690 DirectivesStack::release(directive);
3918 if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3919 found = true;
3920 st->print("Adapter for signature: ");
3921 a->print_adapter_on(st);
3922 return true;
3923 } else {
3924 return false; // keep looking
3925 }
3926 };
3927 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3928 _adapter_handler_table->iterate(findblob_runtime_table);
3929 }
3930 assert(found, "Should have found handler");
3931 }
3932
3933 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3934 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3935 if (adapter_blob() != nullptr) {
3936 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3937 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3938 st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3939 st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3940 st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3941 st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3942 if (get_c2i_no_clinit_check_entry() != nullptr) {
3943 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3944 }
3945 }
3946 st->cr();
3947 }
3948
3949 #ifndef PRODUCT
3950
3951 void AdapterHandlerLibrary::print_statistics() {
3952 print_table_statistics();
3953 }
3954
3955 #endif /* PRODUCT */
3956
3957 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3958 assert(current == JavaThread::current(), "pre-condition");
3959 StackOverflow* overflow_state = current->stack_overflow_state();
3960 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3961 overflow_state->set_reserved_stack_activation(current->stack_base());
4008 event.set_method(method);
4009 event.commit();
4010 }
4011 }
4012 }
4013 return activation;
4014 }
4015
4016 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
4017 // After any safepoint, just before going back to compiled code,
4018 // we inform the GC that we will be doing initializing writes to
4019 // this object in the future without emitting card-marks, so
4020 // GC may take any compensating steps.
4021
4022 oop new_obj = current->vm_result_oop();
4023 if (new_obj == nullptr) return;
4024
4025 BarrierSet *bs = BarrierSet::barrier_set();
4026 bs->on_slowpath_allocation_exit(current, new_obj);
4027 }
4028
4029 // We are at a call from compiled code into the interpreter. We need backing
4030 // buffers for all inline type arguments. Allocate an object array to
4031 // hold them (convenient because once we're done with it we don't have
4032 // to worry about freeing it).
4033 oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS) {
4034 assert(InlineTypePassFieldsAsArgs, "no reason to call this");
4035 ResourceMark rm;
4036
4037 int nb_slots = 0;
4038 InstanceKlass* holder = callee->method_holder();
4039 allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
4040 if (allocate_receiver) {
4041 nb_slots++;
4042 }
4043 int arg_num = callee->is_static() ? 0 : 1;
4044 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4045 BasicType bt = ss.type();
4046 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4047 nb_slots++;
4048 }
4049 if (bt != T_VOID) {
4050 arg_num++;
4051 }
4052 }
4053 objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
4054 objArrayHandle array(THREAD, array_oop);
4055 arg_num = callee->is_static() ? 0 : 1;
4056 int i = 0;
4057 if (allocate_receiver) {
4058 InlineKlass* vk = InlineKlass::cast(holder);
4059 oop res = vk->allocate_instance(CHECK_NULL);
4060 array->obj_at_put(i++, res);
4061 }
4062 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4063 BasicType bt = ss.type();
4064 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4065 InlineKlass* vk = ss.as_inline_klass(holder);
4066 assert(vk != nullptr, "Unexpected klass");
4067 oop res = vk->allocate_instance(CHECK_NULL);
4068 array->obj_at_put(i++, res);
4069 }
4070 if (bt != T_VOID) {
4071 arg_num++;
4072 }
4073 }
4074 return array();
4075 }
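// Example (a sketch): for a non-static callee
//   void m(MyValue a, int b, MyValue c)   // receiver, 'a' and 'c' scalarized
// declared in an inline klass, the counting pass yields nb_slots == 3 and
// the returned array holds one freshly allocated buffer for the receiver
// followed by one per scalarized oop argument, in argument order.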
4076
4077 JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
4078 methodHandle callee(current, callee_method);
4079 oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, CHECK);
4080 current->set_vm_result_oop(array);
4081 current->set_vm_result_metadata(callee()); // TODO: required to keep callee live?
4082 JRT_END
4083
4084 // We're returning from an interpreted method: load each field into a
4085 // register following the calling convention
4086 JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
4087 {
4088 assert(res->klass()->is_inline_klass(), "only inline types here");
4089 ResourceMark rm;
4090 RegisterMap reg_map(current,
4091 RegisterMap::UpdateMap::include,
4092 RegisterMap::ProcessFrames::include,
4093 RegisterMap::WalkContinuation::skip);
4094 frame stubFrame = current->last_frame();
4095   frame callerFrame = stubFrame.sender(&reg_map);
4096 assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
4097
4098 InlineKlass* vk = InlineKlass::cast(res->klass());
4099
4100 const Array<SigEntry>* sig_vk = vk->extended_sig();
4101 const Array<VMRegPair>* regs = vk->return_regs();
4102
4103 if (regs == nullptr) {
4104 // The fields of the inline klass don't fit in registers, bail out
4105 return;
4106 }
4107
4108 int j = 1;
4109 for (int i = 0; i < sig_vk->length(); i++) {
4110 BasicType bt = sig_vk->at(i)._bt;
4111 if (bt == T_METADATA) {
4112 continue;
4113 }
4114 if (bt == T_VOID) {
4115 if (sig_vk->at(i-1)._bt == T_LONG ||
4116 sig_vk->at(i-1)._bt == T_DOUBLE) {
4117 j++;
4118 }
4119 continue;
4120 }
4121 int off = sig_vk->at(i)._offset;
4122 assert(off > 0, "offset in object should be positive");
4123 VMRegPair pair = regs->at(j);
4124 address loc = reg_map.location(pair.first(), nullptr);
4125 switch(bt) {
4126 case T_BOOLEAN:
4127 *(jboolean*)loc = res->bool_field(off);
4128 break;
4129 case T_CHAR:
4130 *(jchar*)loc = res->char_field(off);
4131 break;
4132 case T_BYTE:
4133 *(jbyte*)loc = res->byte_field(off);
4134 break;
4135 case T_SHORT:
4136 *(jshort*)loc = res->short_field(off);
4137 break;
4138 case T_INT: {
4139 *(jint*)loc = res->int_field(off);
4140 break;
4141 }
4142 case T_LONG:
4143 #ifdef _LP64
4144 *(intptr_t*)loc = res->long_field(off);
4145 #else
4146 Unimplemented();
4147 #endif
4148 break;
4149 case T_OBJECT:
4150 case T_ARRAY: {
4151 *(oop*)loc = res->obj_field(off);
4152 break;
4153 }
4154 case T_FLOAT:
4155 *(jfloat*)loc = res->float_field(off);
4156 break;
4157 case T_DOUBLE:
4158 *(jdouble*)loc = res->double_field(off);
4159 break;
4160 default:
4161 ShouldNotReachHere();
4162 }
4163 j++;
4164 }
4165 assert(j == regs->length(), "missed a field?");
4166
4167 #ifdef ASSERT
4168 VMRegPair pair = regs->at(0);
4169 address loc = reg_map.location(pair.first(), nullptr);
4170 assert(*(oopDesc**)loc == res, "overwritten object");
4171 #endif
4172
4173 current->set_vm_result_oop(res);
4174 }
4175 JRT_END
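// Indexing note: j starts at 1 because return_regs()->at(0) holds the oop of
// the buffered value itself (checked by the assert above). Every field entry
// in the extended signature then consumes one register pair, with
// T_LONG/T_DOUBLE also advancing j past the pair of their trailing T_VOID.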
4176
4177 // We've returned to an interpreted method; the interpreter needs a
4178 // reference to an inline type instance. Allocate it and initialize it
4179 // from the field values in registers.
4180 JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
4181 {
4182 ResourceMark rm;
4183 RegisterMap reg_map(current,
4184 RegisterMap::UpdateMap::include,
4185 RegisterMap::ProcessFrames::include,
4186 RegisterMap::WalkContinuation::skip);
4187 frame stubFrame = current->last_frame();
4188   frame callerFrame = stubFrame.sender(&reg_map);
4189
4190 #ifdef ASSERT
4191 InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
4192 #endif
4193
4194 if (!is_set_nth_bit(res, 0)) {
4195 // We're not returning with inline type fields in registers (the
4196 // calling convention didn't allow it for this inline klass)
4197 assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
4198 current->set_vm_result_oop((oopDesc*)res);
4199 assert(verif_vk == nullptr, "broken calling convention");
4200 return;
4201 }
4202
4203 clear_nth_bit(res, 0);
4204 InlineKlass* vk = (InlineKlass*)res;
4205 assert(verif_vk == vk, "broken calling convention");
4206 assert(Metaspace::contains((void*)res), "should be klass");
4207
4208 // Allocate handles for every oop field so they are safe in case of
4209 // a safepoint when allocating
4210 GrowableArray<Handle> handles;
4211 vk->save_oop_fields(reg_map, handles);
4212
4213 // It's unsafe to safepoint until we are here
4214 JRT_BLOCK;
4215 {
4216 JavaThread* THREAD = current;
4217 oop vt = vk->realloc_result(reg_map, handles, CHECK);
4218 current->set_vm_result_oop(vt);
4219 }
4220 JRT_BLOCK_END;
4221 }
4222 JRT_END
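// For illustration, a sketch of the tagging convention handled above: the
// compiled callee either returns a plain oop (bit 0 clear) or, when the
// fields come back in registers, an InlineKlass* with bit 0 set:
#if 0
static void handle_tagged_result(intptr_t res) {
  if (is_set_nth_bit(res, 0)) {
    clear_nth_bit(res, 0);
    InlineKlass* vk = (InlineKlass*)res;  // metadata pointer: reallocate result
  } else {
    oop obj = cast_to_oop(res);           // already a heap-allocated object
  }
}
#endif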