28 #include "classfile/javaClasses.inline.hpp"
29 #include "classfile/stringTable.hpp"
30 #include "classfile/vmClasses.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/aotCodeCache.hpp"
33 #include "code/codeCache.hpp"
34 #include "code/compiledIC.hpp"
35 #include "code/nmethod.inline.hpp"
36 #include "code/scopeDesc.hpp"
37 #include "code/vtableStubs.hpp"
38 #include "compiler/abstractCompiler.hpp"
39 #include "compiler/compileBroker.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "gc/shared/barrierSet.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "interpreter/interpreterRuntime.hpp"
45 #include "jfr/jfrEvents.hpp"
46 #include "jvm.h"
47 #include "logging/log.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "memory/universe.hpp"
50 #include "metaprogramming/primitiveConversions.hpp"
51 #include "oops/klass.hpp"
52 #include "oops/method.inline.hpp"
53 #include "oops/objArrayKlass.hpp"
54 #include "oops/oop.inline.hpp"
55 #include "prims/forte.hpp"
56 #include "prims/jvmtiExport.hpp"
57 #include "prims/jvmtiThreadState.hpp"
58 #include "prims/methodHandles.hpp"
59 #include "prims/nativeLookup.hpp"
60 #include "runtime/arguments.hpp"
61 #include "runtime/atomicAccess.hpp"
62 #include "runtime/basicLock.inline.hpp"
63 #include "runtime/frame.inline.hpp"
64 #include "runtime/handles.inline.hpp"
65 #include "runtime/init.hpp"
66 #include "runtime/interfaceSupport.inline.hpp"
67 #include "runtime/java.hpp"
68 #include "runtime/javaCalls.hpp"
69 #include "runtime/jniHandles.inline.hpp"
70 #include "runtime/osThread.hpp"
71 #include "runtime/perfData.hpp"
72 #include "runtime/sharedRuntime.hpp"
73 #include "runtime/stackWatermarkSet.hpp"
74 #include "runtime/stubRoutines.hpp"
75 #include "runtime/synchronizer.inline.hpp"
76 #include "runtime/timerTrace.hpp"
77 #include "runtime/vframe.inline.hpp"
78 #include "runtime/vframeArray.hpp"
79 #include "runtime/vm_version.hpp"
80 #include "utilities/copy.hpp"
81 #include "utilities/dtrace.hpp"
82 #include "utilities/events.hpp"
83 #include "utilities/globalDefinitions.hpp"
84 #include "utilities/hashTable.hpp"
85 #include "utilities/macros.hpp"
86 #include "utilities/xmlstream.hpp"
87 #ifdef COMPILER1
88 #include "c1/c1_Runtime1.hpp"
89 #endif
90 #ifdef COMPILER2
91 #include "opto/runtime.hpp"
92 #endif
1234 // for a call currently in progress, i.e., arguments have been pushed on the stack
1235 // but the callee has not been invoked yet. The caller frame must be compiled.
1236 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1237 CallInfo& callinfo, TRAPS) {
1238 Handle receiver;
1239 Handle nullHandle; // create a handy null handle for exception returns
1240 JavaThread* current = THREAD;
1241
1242 assert(!vfst.at_end(), "Java frame must exist");
1243
1244 // Find caller and bci from vframe
1245 methodHandle caller(current, vfst.method());
1246 int bci = vfst.bci();
1247
1248 if (caller->is_continuation_enter_intrinsic()) {
1249 bc = Bytecodes::_invokestatic;
1250 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1251 return receiver;
1252 }
1253
1254 Bytecode_invoke bytecode(caller, bci);
1255 int bytecode_index = bytecode.index();
1256 bc = bytecode.invoke_code();
1257
1258 methodHandle attached_method(current, extract_attached_method(vfst));
1259 if (attached_method.not_null()) {
1260 Method* callee = bytecode.static_target(CHECK_NH);
1261 vmIntrinsics::ID id = callee->intrinsic_id();
1262 // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual call,
1263 // it attaches the statically resolved method to the call site.
1264 if (MethodHandles::is_signature_polymorphic(id) &&
1265 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1266 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
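// e.g. linkToStatic resolves to _invokestatic, linkToVirtual to _invokevirtual,
// and invokeBasic to _invokehandle (per signature_polymorphic_intrinsic_bytecode()).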
1267
1268 // Adjust invocation mode according to the attached method.
1269 switch (bc) {
1270 case Bytecodes::_invokevirtual:
1271 if (attached_method->method_holder()->is_interface()) {
1272 bc = Bytecodes::_invokeinterface;
1273 }
1274 break;
1275 case Bytecodes::_invokeinterface:
1276 if (!attached_method->method_holder()->is_interface()) {
1277 bc = Bytecodes::_invokevirtual;
1278 }
1279 break;
1280 case Bytecodes::_invokehandle:
1281 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1282 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1283 : Bytecodes::_invokevirtual;
1284 }
1285 break;
1286 default:
1287 break;
1288 }
1289 }
1290 }
1291
1292 assert(bc != Bytecodes::_illegal, "not initialized");
1293
1294 bool has_receiver = bc != Bytecodes::_invokestatic &&
1295 bc != Bytecodes::_invokedynamic &&
1296 bc != Bytecodes::_invokehandle;
1297
1298 // Find receiver for non-static call
1299 if (has_receiver) {
1300 // This register map must be updated since we need to find the receiver for
1301 // compiled frames. The receiver might be in a register.
1302 RegisterMap reg_map2(current,
1303 RegisterMap::UpdateMap::include,
1304 RegisterMap::ProcessFrames::include,
1305 RegisterMap::WalkContinuation::skip);
1306 frame stubFrame = current->last_frame();
1307 // Caller-frame is a compiled frame
1308 frame callerFrame = stubFrame.sender(&reg_map2);
1309
1310 if (attached_method.is_null()) {
1311 Method* callee = bytecode.static_target(CHECK_NH);
1312 if (callee == nullptr) {
1313 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1314 }
1315 }
1316
1317 // Retrieve from a compiled argument list
1318 receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1319 assert(oopDesc::is_oop_or_null(receiver()), "");
1320
1321 if (receiver.is_null()) {
1322 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1323 }
1324 }
1325
1326 // Resolve method
1327 if (attached_method.not_null()) {
1328 // Parameterized by attached method.
1329 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1330 } else {
1331 // Parameterized by bytecode.
1332 constantPoolHandle constants(current, caller->constants());
1333 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1334 }
1335
1336 #ifdef ASSERT
1337 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1338 if (has_receiver) {
1339 assert(receiver.not_null(), "should have thrown exception");
1340 Klass* receiver_klass = receiver->klass();
1341 Klass* rk = nullptr;
1342 if (attached_method.not_null()) {
1343 // In case there's a resolved method attached, use its holder during the check.
1344 rk = attached_method->method_holder();
1345 } else {
1346 // Klass is already loaded.
1347 constantPoolHandle constants(current, caller->constants());
1348 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1349 }
1350 Klass* static_receiver_klass = rk;
1351 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1352 "actual receiver must be subclass of static receiver klass");
1353 if (receiver_klass->is_instance_klass()) {
1354 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1355 tty->print_cr("ERROR: Klass not yet initialized!!");
1356 receiver_klass->print();
1357 }
1358 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1359 }
1360 }
1361 #endif
1362
1363 return receiver;
1364 }
1365
1366 methodHandle SharedRuntime::find_callee_method(TRAPS) {
1367 JavaThread* current = THREAD;
1368 ResourceMark rm(current);
1369 // We first need to check if any Java activations (compiled or interpreted)
1370 // exist on the stack since the last JavaCall. If not, we need
1371 // to get the target method from the JavaCall wrapper.
1372 vframeStream vfst(current, true); // Do not skip any javaCalls
1373 methodHandle callee_method;
1374 if (vfst.at_end()) {
1375 // No Java frames were found on stack since we did the JavaCall.
1376 // Hence the stack can only contain an entry_frame. We need to
1377 // find the target method from the stub frame.
1378 RegisterMap reg_map(current,
1379 RegisterMap::UpdateMap::skip,
1380 RegisterMap::ProcessFrames::include,
1381 RegisterMap::WalkContinuation::skip);
1382 frame fr = current->last_frame();
1383 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1384 fr = fr.sender(&reg_map);
1385 assert(fr.is_entry_frame(), "must be");
1386 // fr is now pointing to the entry frame.
1387 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1388 } else {
1389 Bytecodes::Code bc;
1390 CallInfo callinfo;
1391 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1392 callee_method = methodHandle(current, callinfo.selected_method());
1393 }
1394 assert(callee_method()->is_method(), "must be");
1395 return callee_method;
1396 }
1397
1398 // Resolves a call.
1399 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
1400 JavaThread* current = THREAD;
1401 ResourceMark rm(current);
1402 RegisterMap cbl_map(current,
1403 RegisterMap::UpdateMap::skip,
1404 RegisterMap::ProcessFrames::include,
1405 RegisterMap::WalkContinuation::skip);
1406 frame caller_frame = current->last_frame().sender(&cbl_map);
1407
1408 CodeBlob* caller_cb = caller_frame.cb();
1409 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1410 nmethod* caller_nm = caller_cb->as_nmethod();
1411
1412 // determine call info & receiver
1413 // note: a) receiver is null for static calls
1414 // b) an exception is thrown if receiver is null for non-static calls
1415 CallInfo call_info;
1416 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1417 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1418
1419 NoSafepointVerifier nsv;
1420
1421 methodHandle callee_method(current, call_info.selected_method());
1422
1423 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1424 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1425 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1426 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1427 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1428
1429 assert(!caller_nm->is_unloading(), "It should not be unloading");
1430
1431 #ifndef PRODUCT
1432 // tracing/debugging/statistics
1433 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1434 (is_virtual) ? (&_resolve_virtual_ctr) :
1435 (&_resolve_static_ctr);
1436 AtomicAccess::inc(addr);
1437
1438 if (TraceCallFixup) {
1439 ResourceMark rm(current);
1440 tty->print("resolving %s%s (%s) call to",
1441 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1442 Bytecodes::name(invoke_code));
1443 callee_method->print_short_name(tty);
1444 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1445 p2i(caller_frame.pc()), p2i(callee_method->code()));
1446 }
1447 #endif
1448
1449 if (invoke_code == Bytecodes::_invokestatic) {
1450 assert(callee_method->method_holder()->is_initialized() ||
1451 callee_method->method_holder()->is_reentrant_initialization(current),
1452 "invalid class initialization state for invoke_static");
1453 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1454 // In order to keep the class initialization check, do not patch the call
1455 // site for a static call when the class is not fully initialized.
1456 // The proper check is enforced by call site re-resolution on every invocation.
1457 //
1458 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1459 // explicit class initialization check is put in nmethod entry (VEP).
1460 assert(callee_method->method_holder()->is_linked(), "must be");
1461 return callee_method;
1462 }
1463 }
1464
1465
1466 // JSR 292 key invariant:
1467 // If the resolved method is a MethodHandle invoke target, the call
1468 // site must be a MethodHandle call site, because the lambda form might tail-call
1469 // leaving the stack in a state unknown to either caller or callee
1470
1471 // Compute entry points. The computation of the entry points is independent of
1472 // patching the call.
1473
1474 // Make sure the callee nmethod does not get deoptimized and removed before
1475 // we are done patching the code.
1476
1477
1478 CompiledICLocker ml(caller_nm);
1479 if (is_virtual && !is_optimized) {
1480 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1481 inline_cache->update(&call_info, receiver->klass());
1482 } else {
1483 // Callsite is a direct call - set it to the destination method
1484 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1485 callsite->set(callee_method);
1486 }
1487
1488 return callee_method;
1489 }
1490
1491 // Inline caches exist only in compiled code
1492 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1493 #ifdef ASSERT
1494 RegisterMap reg_map(current,
1495 RegisterMap::UpdateMap::skip,
1496 RegisterMap::ProcessFrames::include,
1497 RegisterMap::WalkContinuation::skip);
1498 frame stub_frame = current->last_frame();
1499 assert(stub_frame.is_runtime_frame(), "sanity check");
1500 frame caller_frame = stub_frame.sender(&reg_map);
1501 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1502 #endif /* ASSERT */
1503
1504 methodHandle callee_method;
1505 JRT_BLOCK
1506 callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1507 // Return Method* through TLS
1508 current->set_vm_result_metadata(callee_method());
1509 JRT_BLOCK_END
1510 // return compiled code entry point after potential safepoints
1511 return get_resolved_entry(current, callee_method);
1512 JRT_END
1513
1514
1515 // Handle call site that has been made non-entrant
1516 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1517 // 6243940 We might end up in here if the callee is deoptimized
1518 // as we race to call it. We don't want to take a safepoint if
1519 // the caller was interpreted because the caller frame will look
1520 // interpreted to the stack walkers and arguments are now
1521 // "compiled" so it is much better to make this transition
1522 // invisible to the stack walking code. The i2c path will
1523 // place the callee method in the callee_target. It is stashed
1524 // there because if we try to find the callee by normal means a
1525 // safepoint is possible and we would have trouble GC'ing the compiled args.
1526 RegisterMap reg_map(current,
1527 RegisterMap::UpdateMap::skip,
1528 RegisterMap::ProcessFrames::include,
1529 RegisterMap::WalkContinuation::skip);
1530 frame stub_frame = current->last_frame();
1531 assert(stub_frame.is_runtime_frame(), "sanity check");
1532 frame caller_frame = stub_frame.sender(&reg_map);
1533
1534 if (caller_frame.is_interpreted_frame() ||
1535 caller_frame.is_entry_frame() ||
1536 caller_frame.is_upcall_stub_frame()) {
1537 Method* callee = current->callee_target();
1538 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1539 current->set_vm_result_metadata(callee);
1540 current->set_callee_target(nullptr);
1541 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1542 // Bypass class initialization checks in c2i when caller is in native.
1543 // JNI calls to static methods don't have class initialization checks.
1544 // Fast class initialization checks are present in c2i adapters and call into
1545 // SharedRuntime::handle_wrong_method() on the slow path.
1546 //
1547 // JVM upcalls may land here as well, but there's a proper check present in
1548 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1549 // so bypassing it in c2i adapter is benign.
1550 return callee->get_c2i_no_clinit_check_entry();
1551 } else {
1552 return callee->get_c2i_entry();
1553 }
1554 }
1555
1556 // Must be compiled to compiled path which is safe to stackwalk
1557 methodHandle callee_method;
1558 JRT_BLOCK
1559 // Force resolving of caller (if we called from compiled frame)
1560 callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1561 current->set_vm_result_metadata(callee_method());
1562 JRT_BLOCK_END
1563 // return compiled code entry point after potential safepoints
1564 return get_resolved_entry(current, callee_method);
1565 JRT_END
1566
1567 // Handle abstract method call
1568 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1569 // Verbose error message for AbstractMethodError.
1570 // Get the called method from the invoke bytecode.
1571 vframeStream vfst(current, true);
1572 assert(!vfst.at_end(), "Java frame must exist");
1573 methodHandle caller(current, vfst.method());
1574 Bytecode_invoke invoke(caller, vfst.bci());
1575 DEBUG_ONLY( invoke.verify(); )
1576
1577 // Find the compiled caller frame.
1578 RegisterMap reg_map(current,
1579 RegisterMap::UpdateMap::include,
1580 RegisterMap::ProcessFrames::include,
1581 RegisterMap::WalkContinuation::skip);
1582 frame stubFrame = current->last_frame();
1583 assert(stubFrame.is_runtime_frame(), "must be");
1584 frame callerFrame = stubFrame.sender(&reg_map);
1585 assert(callerFrame.is_compiled_frame(), "must be");
1586
1587 // Install exception and return forward entry.
1588 address res = SharedRuntime::throw_AbstractMethodError_entry();
1589 JRT_BLOCK
1590 methodHandle callee(current, invoke.static_target(current));
1591 if (!callee.is_null()) {
1592 oop recv = callerFrame.retrieve_receiver(&reg_map);
1593 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1594 res = StubRoutines::forward_exception_entry();
1595 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1596 }
1597 JRT_BLOCK_END
1598 return res;
1599 JRT_END
1600
1601 // Return verified_code_entry if interp_only_mode is not set for the current thread;
1602 // otherwise return the c2i entry.
1603 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method) {
1604 if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1605 // In interp_only_mode we need to go to the interpreted entry
1606 // The c2i won't patch in this mode -- see fixup_callers_callsite
1607 return callee_method->get_c2i_entry();
1608 }
1609 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1610 return callee_method->verified_code_entry();
1611 }
1612
1613 // resolve a static call and patch code
1614 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1615 methodHandle callee_method;
1617 JRT_BLOCK
1618 callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1619 current->set_vm_result_metadata(callee_method());
1620 JRT_BLOCK_END
1621 // return compiled code entry point after potential safepoints
1622 return get_resolved_entry(current, callee_method);
1623 JRT_END
1624
1625 // resolve virtual call and update inline cache to monomorphic
1626 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1627 methodHandle callee_method;
1628 JRT_BLOCK
1629 callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
1630 current->set_vm_result_metadata(callee_method());
1631 JRT_BLOCK_END
1632 // return compiled code entry point after potential safepoints
1633 return get_resolved_entry(current, callee_method);
1634 JRT_END
1635
1636
1637 // Resolve a virtual call that can be statically bound (e.g., always
1638 // monomorphic, so it has no inline cache). Patch code to resolved target.
1639 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1640 methodHandle callee_method;
1641 JRT_BLOCK
1642 callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
1643 current->set_vm_result_metadata(callee_method());
1644 JRT_BLOCK_END
1645 // return compiled code entry point after potential safepoints
1646 return get_resolved_entry(current, callee_method);
1647 JRT_END
1648
1649 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1650 JavaThread* current = THREAD;
1651 ResourceMark rm(current);
1652 CallInfo call_info;
1653 Bytecodes::Code bc;
1654
1655 // receiver is null for static calls. An exception is thrown for null
1656 // receivers for non-static calls
1657 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1658
1659 methodHandle callee_method(current, call_info.selected_method());
1660
1661 #ifndef PRODUCT
1662 AtomicAccess::inc(&_ic_miss_ctr);
1663
1664 // Statistics & Tracing
1665 if (TraceCallFixup) {
1666 ResourceMark rm(current);
1667 tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1668 callee_method->print_short_name(tty);
1669 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1670 }
1671
1672 if (ICMissHistogram) {
1673 MutexLocker m(VMStatistic_lock);
1674 RegisterMap reg_map(current,
1675 RegisterMap::UpdateMap::skip,
1676 RegisterMap::ProcessFrames::include,
1677 RegisterMap::WalkContinuation::skip);
1678 frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1679 // produce statistics under the lock
1680 trace_ic_miss(f.pc());
1681 }
1682 #endif
1683
1684 // install an event collector so that when a vtable stub is created the
1685 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1686 // event can't be posted when the stub is created as locks are held
1687 // - instead the event will be deferred until the event collector goes
1688 // out of scope.
1689 JvmtiDynamicCodeEventCollector event_collector;
1690
1691 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1692 RegisterMap reg_map(current,
1693 RegisterMap::UpdateMap::skip,
1694 RegisterMap::ProcessFrames::include,
1695 RegisterMap::WalkContinuation::skip);
1696 frame caller_frame = current->last_frame().sender(&reg_map);
1697 CodeBlob* cb = caller_frame.cb();
1698 nmethod* caller_nm = cb->as_nmethod();
1699
1700 CompiledICLocker ml(caller_nm);
1701 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1702 inline_cache->update(&call_info, receiver()->klass());
1703
1704 return callee_method;
1705 }
1706
1707 //
1708 // Resets a call site in compiled code so it will get resolved again.
1709 // This routine handles virtual call sites, optimized virtual call
1710 // sites, and static call sites. Typically used to change a call site's
1711 // destination from compiled to interpreted.
1712 //
1713 methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
1714 JavaThread* current = THREAD;
1715 ResourceMark rm(current);
1716 RegisterMap reg_map(current,
1717 RegisterMap::UpdateMap::skip,
1718 RegisterMap::ProcessFrames::include,
1719 RegisterMap::WalkContinuation::skip);
1720 frame stub_frame = current->last_frame();
1721 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1722 frame caller = stub_frame.sender(&reg_map);
1723
1724 // Do nothing if the frame isn't a live compiled frame.
1725 // nmethod could be deoptimized by the time we get here
1726 // so no update to the caller is needed.
1727
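// Note: the continuation-enter intrinsic compiles to a native-frame nmethod
// that still contains a resolvable call site, hence the second disjunct below.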
1728 if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1729 (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) {
1730
1731 address pc = caller.pc();
1732
1733 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1734 assert(caller_nm != nullptr, "did not find caller nmethod");
1735
1736 // Default call_addr is the location of the "basic" call.
1737 // Determine the address of the call we are re-resolving. With
1738 // Inline Caches we will always find a recognizable call.
1739 // With Inline Caches disabled we may or may not find a
1740 // recognizable call. We will always find a call for static
1741 // calls and for optimized virtual calls. For vanilla virtual
1742 // calls it depends on the state of the UseInlineCaches switch.
1743 //
1744 // With Inline Caches disabled we can get here for a virtual call
1745 // for two reasons:
1746 // 1 - calling an abstract method. The vtable for abstract methods
1747 // will run us thru handle_wrong_method and we will eventually
1748 // end up in the interpreter to throw the AbstractMethodError.
1749 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1750 // call, and between the time we fetch the entry address and
1751 // the time we jump to it the target gets deoptimized. Similar to 1,
1752 // we will wind up in the interpreter (thru a c2i with c2).
1753 //
1754 CompiledICLocker ml(caller_nm);
1755 address call_addr = caller_nm->call_instruction_address(pc);
1756
1757 if (call_addr != nullptr) {
1758 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1759 // bytes back in the instruction stream so we must also check for reloc info.
1760 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1761 bool ret = iter.next(); // Get item
1762 if (ret) {
1763 switch (iter.type()) {
1764 case relocInfo::static_call_type:
1765 case relocInfo::opt_virtual_call_type: {
1766 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1767 cdc->set_to_clean();
1768 break;
1769 }
1770
1771 case relocInfo::virtual_call_type: {
1772 // compiled, dispatched call (which used to call an interpreted method)
1773 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1774 inline_cache->set_to_clean();
1775 break;
1776 }
1777 default:
1778 break;
1779 }
1780 }
1781 }
1782 }
1783
1784 methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1785
1786
1787 #ifndef PRODUCT
1788 AtomicAccess::inc(&_wrong_method_ctr);
1789
1790 if (TraceCallFixup) {
1791 ResourceMark rm(current);
1792 tty->print("handle_wrong_method reresolving call to");
1793 callee_method->print_short_name(tty);
1794 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1795 }
1796 #endif
1797
1798 return callee_method;
1799 }
1800
1801 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1802 // The faulting unsafe accesses should be changed to throw the error
1803 // synchronously instead. Meanwhile the faulting instruction will be
1804 // skipped over (effectively turning it into a no-op) and an
1805 // asynchronous exception will be raised which the thread will
1806 // handle at a later point. If the instruction is a load it will
1807 // return garbage.
1808
1809 // Request an async exception.
1810 thread->set_pending_unsafe_access_error();
1811
1812 // Return address of next instruction to execute.
1978 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
1979
1980 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
1981 if (message == nullptr) {
1982 // Shouldn't happen, but don't cause even more problems if it does
1983 message = const_cast<char*>(caster_klass->external_name());
1984 } else {
1985 jio_snprintf(message,
1986 msglen,
1987 "class %s cannot be cast to class %s (%s%s%s)",
1988 caster_name,
1989 target_name,
1990 caster_klass_description,
1991 klass_separator,
1992 target_klass_description
1993 );
1994 }
1995 return message;
1996 }
1997
1998 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
1999 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
2000 JRT_END
2001
2002 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2003 if (!SafepointSynchronize::is_synchronizing()) {
2004 // Only try quick_enter() if we're not trying to reach a safepoint
2005 // so that the calling thread reaches the safepoint more quickly.
2006 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
2007 return;
2008 }
2009 }
2010 // NO_ASYNC required because an async exception on the state transition destructor
2011 // would leave you with the lock held and it would never be released.
2012 // The normal monitorenter NullPointerException is thrown without acquiring a lock
2013 // and the model is that an exception implies the method failed.
2014 JRT_BLOCK_NO_ASYNC
2015 Handle h_obj(THREAD, obj);
2016 ObjectSynchronizer::enter(h_obj, lock, current);
2017 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2211 tty->print_cr("Note 1: counter updates are not MT-safe.");
2212 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2213 tty->print_cr(" %% in nested categories are relative to their category");
2214 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2215 tty->cr();
2216
2217 MethodArityHistogram h;
2218 }
2219 #endif
2220
2221 #ifndef PRODUCT
2222 static int _lookups; // number of calls to lookup
2223 static int _equals; // number of buckets checked with matching hash
2224 static int _archived_hits; // number of successful lookups in archived table
2225 static int _runtime_hits; // number of successful lookups in runtime table
2226 #endif
2227
2228 // A simple wrapper class around the calling convention information
2229 // that allows sharing of adapters for the same calling convention.
2230 class AdapterFingerPrint : public MetaspaceObj {
2231 private:
2232 enum {
2233 _basic_type_bits = 4,
2234 _basic_type_mask = right_n_bits(_basic_type_bits),
2235 _basic_types_per_int = BitsPerInt / _basic_type_bits,
2236 };
2237 // TO DO: Consider integrating this with a more global scheme for compressing signatures.
2238 // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2239
2240 int _length;
2241
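// The packed signature data is stored immediately after this object in the
// same allocation (see the sized operator new below); data_pointer() returns
// the start of that trailing int array.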
2242 static int data_offset() { return sizeof(AdapterFingerPrint); }
2243 int* data_pointer() {
2244 return (int*)((address)this + data_offset());
2245 }
2246
2247 // Private constructor. Use allocate() to get an instance.
2248 AdapterFingerPrint(int total_args_passed, BasicType* sig_bt, int len) {
2249 int* data = data_pointer();
2250 // Pack the BasicTypes with 8 per int
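// For example, on LP64 the signature (Object, long, int) arrives as
// [T_OBJECT, T_LONG, T_VOID, T_INT] and is packed as the nibble sequence
// [T_LONG, T_LONG, T_VOID, T_INT], with the first argument in the most
// significant of the used nibbles.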
2251 assert(len == length(total_args_passed), "sanity");
2252 _length = len;
2253 int sig_index = 0;
2254 for (int index = 0; index < _length; index++) {
2255 int value = 0;
2256 for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
2257 int bt = adapter_encoding(sig_bt[sig_index++]);
2258 assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2259 value = (value << _basic_type_bits) | bt;
2260 }
2261 data[index] = value;
2262 }
2263 }
2264
2265 // Call deallocate instead
2266 ~AdapterFingerPrint() {
2267 ShouldNotCallThis();
2268 }
2269
2270 static int length(int total_args) {
2271 return (total_args + (_basic_types_per_int-1)) / _basic_types_per_int;
2272 }
2273
2274 static int compute_size_in_words(int len) {
2275 return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(int)));
2276 }
2277
2278 // Remap BasicTypes that are handled equivalently by the adapters.
2279 // These are correct for the current system but someday it might be
2280 // necessary to make this mapping platform dependent.
2281 static int adapter_encoding(BasicType in) {
2282 switch (in) {
2283 case T_BOOLEAN:
2284 case T_BYTE:
2285 case T_SHORT:
2286 case T_CHAR:
2287 // These are all promoted to T_INT in the calling convention
2288 return T_INT;
2289
2290 case T_OBJECT:
2291 case T_ARRAY:
2292 // In other words, we assume that any register good enough for
2293 // an int or long is good enough for a managed pointer.
2294 #ifdef _LP64
2295 return T_LONG;
2296 #else
2297 return T_INT;
2298 #endif
2299
2300 case T_INT:
2301 case T_LONG:
2302 case T_FLOAT:
2303 case T_DOUBLE:
2304 case T_VOID:
2305 return in;
2306
2307 default:
2308 ShouldNotReachHere();
2309 return T_CONFLICT;
2310 }
2311 }
2312
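// Sized allocation: fp_size accounts for both the fixed header and the
// variable-length data[] array (see compute_size_in_words()).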
2313 void* operator new(size_t size, size_t fp_size) throw() {
2314 assert(fp_size >= size, "sanity check");
2315 void* p = AllocateHeap(fp_size, mtCode);
2316 memset(p, 0, fp_size);
2317 return p;
2318 }
2319
2320 template<typename Function>
2321 void iterate_args(Function function) {
2322 for (int i = 0; i < length(); i++) {
2323 unsigned val = (unsigned)value(i);
2324 // args are packed so that first/lower arguments are in the highest
2325 // bits of each int value, so iterate from highest to the lowest
2326 for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2327 unsigned v = (val >> j) & _basic_type_mask;
2328 if (v == 0) {
2329 continue;
2330 }
2331 function(v);
2332 }
2333 }
2334 }
2335
2336 public:
2337 static AdapterFingerPrint* allocate(int total_args_passed, BasicType* sig_bt) {
2338 int len = length(total_args_passed);
2339 int size_in_bytes = BytesPerWord * compute_size_in_words(len);
2340 AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(total_args_passed, sig_bt, len);
2341 assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
2342 return afp;
2343 }
2344
2345 static void deallocate(AdapterFingerPrint* fp) {
2346 FreeHeap(fp);
2347 }
2348
2349 int value(int index) {
2350 int* data = data_pointer();
2351 return data[index];
2352 }
2353
2354 int length() {
2355 return _length;
2356 }
2357
2358 unsigned int compute_hash() {
2359 int hash = 0;
2360 for (int i = 0; i < length(); i++) {
2361 int v = value(i);
2362 // Add an arithmetic operation to the hash, like +3, to improve hashing
2363 hash = ((hash << 8) ^ v ^ (hash >> 5)) + 3;
2364 }
2365 return (unsigned int)hash;
2366 }
2367
2368 const char* as_string() {
2369 stringStream st;
2370 st.print("0x");
2371 for (int i = 0; i < length(); i++) {
2372 st.print("%x", value(i));
2373 }
2374 return st.as_string();
2375 }
2376
2377 const char* as_basic_args_string() {
2378 stringStream st;
2379 bool long_prev = false;
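// T_OBJECT/T_ARRAY are encoded as T_LONG on LP64 (see adapter_encoding()), so
// a T_LONG nibble followed by T_VOID denotes a real Java long ("J") while a
// T_LONG followed by anything else denotes a reference ("L").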
2380 iterate_args([&] (int arg) {
2381 if (long_prev) {
2382 long_prev = false;
2383 if (arg == T_VOID) {
2384 st.print("J");
2385 } else {
2386 st.print("L");
2387 }
2388 }
2389 switch (arg) {
2390 case T_INT: st.print("I"); break;
2391 case T_LONG: long_prev = true; break;
2392 case T_FLOAT: st.print("F"); break;
2393 case T_DOUBLE: st.print("D"); break;
2394 case T_VOID: break;
2395 default: ShouldNotReachHere();
2396 }
2397 });
2398 if (long_prev) {
2399 st.print("L");
2400 }
2401 return st.as_string();
2402 }
2403
2404 BasicType* as_basic_type(int& nargs) {
2405 nargs = 0;
2406 GrowableArray<BasicType> btarray;
2407 bool long_prev = false;
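// Same decoding as in as_basic_args_string(): T_LONG followed by T_VOID is a
// real long; a lone T_LONG is an encoded reference.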
2408
2409 iterate_args([&] (int arg) {
2410 if (long_prev) {
2411 long_prev = false;
2412 if (arg == T_VOID) {
2413 btarray.append(T_LONG);
2414 } else {
2415 btarray.append(T_OBJECT); // it could be T_ARRAY; it shouldn't matter
2416 }
2417 }
2418 switch (arg) {
2419 case T_INT: // fallthrough
2420 case T_FLOAT: // fallthrough
2421 case T_DOUBLE:
2422 case T_VOID:
2423 btarray.append((BasicType)arg);
2424 break;
2425 case T_LONG:
2426 long_prev = true;
2427 break;
2428 default: ShouldNotReachHere();
2429 }
2430 });
2431
2432 if (long_prev) {
2433 btarray.append(T_OBJECT);
2434 }
2435
2436 nargs = btarray.length();
2437 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nargs);
2438 int index = 0;
2439 GrowableArrayIterator<BasicType> iter = btarray.begin();
2440 while (iter != btarray.end()) {
2441 sig_bt[index++] = *iter;
2442 ++iter;
2443 }
2444 assert(index == btarray.length(), "sanity check");
2445 #ifdef ASSERT
2446 {
2447 AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(nargs, sig_bt);
2448 assert(this->equals(compare_fp), "sanity check");
2449 AdapterFingerPrint::deallocate(compare_fp);
2450 }
2451 #endif
2452 return sig_bt;
2453 }
2454
2455 bool equals(AdapterFingerPrint* other) {
2456 if (other->_length != _length) {
2457 return false;
2458 } else {
2459 for (int i = 0; i < _length; i++) {
2460 if (value(i) != other->value(i)) {
2461 return false;
2462 }
2463 }
2464 }
2465 return true;
2466 }
2467
2468 // methods required by virtue of being a MetaspaceObj
2469 void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
2470 int size() const { return compute_size_in_words(_length); }
2471 MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2472
2473 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2474 NOT_PRODUCT(_equals++);
2475 return fp1->equals(fp2);
2476 }
2477
2478 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2479 return fp->compute_hash();
2480 }
2483 #if INCLUDE_CDS
2484 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2485 return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2486 }
2487
2488 class ArchivedAdapterTable : public OffsetCompactHashtable<
2489 AdapterFingerPrint*,
2490 AdapterHandlerEntry*,
2491 adapter_fp_equals_compact_hashtable_entry> {};
2492 #endif // INCLUDE_CDS
2493
2494 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2495 using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2496 AnyObj::C_HEAP, mtCode,
2497 AdapterFingerPrint::compute_hash,
2498 AdapterFingerPrint::equals>;
2499 static AdapterHandlerTable* _adapter_handler_table;
2500 static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2501
2502 // Find an entry with the same fingerprint, if it exists
2503 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(int total_args_passed, BasicType* sig_bt) {
2504 NOT_PRODUCT(_lookups++);
2505 assert_lock_strong(AdapterHandlerLibrary_lock);
2506 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2507 AdapterHandlerEntry* entry = nullptr;
2508 #if INCLUDE_CDS
2509 // if we are building the archive then the archived adapter table is
2510 // not valid and we need to use the ones added to the runtime table
2511 if (AOTCodeCache::is_using_adapter()) {
2512 // Search the archived table first. It is read-only, so it can be searched without a lock
2513 entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2514 #ifndef PRODUCT
2515 if (entry != nullptr) {
2516 _archived_hits++;
2517 }
2518 #endif
2519 }
2520 #endif // INCLUDE_CDS
2521 if (entry == nullptr) {
2522 assert_lock_strong(AdapterHandlerLibrary_lock);
2523 AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2524 if (entry_p != nullptr) {
2525 entry = *entry_p;
2526 assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2543 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2544 ts.print(tty, "AdapterHandlerTable");
2545 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2546 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2547 int total_hits = _archived_hits + _runtime_hits;
2548 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2549 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2550 }
2551 #endif
2552
2553 // ---------------------------------------------------------------------------
2554 // Implementation of AdapterHandlerLibrary
2555 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2556 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2557 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2558 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2559 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2560 #if INCLUDE_CDS
2561 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2562 #endif // INCLUDE_CDS
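// Size in bytes of the temporary BufferBlob into which adapter code is
// assembled before being copied into its final AdapterBlob.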
2563 static const int AdapterHandlerLibrary_size = 16*K;
2564 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2565 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2566
2567 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2568 assert(_buffer != nullptr, "should be initialized");
2569 return _buffer;
2570 }
2571
2572 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2573 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2574 AdapterBlob* adapter_blob = entry->adapter_blob();
2575 char blob_id[256];
2576 jio_snprintf(blob_id,
2577 sizeof(blob_id),
2578 "%s(%s)",
2579 adapter_blob->name(),
2580 entry->fingerprint()->as_string());
2581 if (Forte::is_enabled()) {
2582 Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2583 }
2591 void AdapterHandlerLibrary::initialize() {
2592 {
2593 ResourceMark rm;
2594 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2595 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2596 }
2597
2598 #if INCLUDE_CDS
2599 // Link adapters in AOT Cache to their code in AOT Code Cache
2600 if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
2601 link_aot_adapters();
2602 lookup_simple_adapters();
2603 return;
2604 }
2605 #endif // INCLUDE_CDS
2606
2607 ResourceMark rm;
2608 {
2609 MutexLocker mu(AdapterHandlerLibrary_lock);
2610
2611 _no_arg_handler = create_adapter(0, nullptr);
2612
2613 BasicType obj_args[] = { T_OBJECT };
2614 _obj_arg_handler = create_adapter(1, obj_args);
2615
2616 BasicType int_args[] = { T_INT };
2617 _int_arg_handler = create_adapter(1, int_args);
2618
2619 BasicType obj_int_args[] = { T_OBJECT, T_INT };
2620 _obj_int_arg_handler = create_adapter(2, obj_int_args);
2621
2622 BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
2623 _obj_obj_arg_handler = create_adapter(2, obj_obj_args);
2624
2625 // we should always get an entry back but we don't have any
2626 // associated blob on Zero
2627 assert(_no_arg_handler != nullptr &&
2628 _obj_arg_handler != nullptr &&
2629 _int_arg_handler != nullptr &&
2630 _obj_int_arg_handler != nullptr &&
2631 _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
2632 }
2633
2634 // Outside of the lock
2635 #ifndef ZERO
2636 // no blobs to register when we are on Zero
2637 post_adapter_creation(_no_arg_handler);
2638 post_adapter_creation(_obj_arg_handler);
2639 post_adapter_creation(_int_arg_handler);
2640 post_adapter_creation(_obj_int_arg_handler);
2641 post_adapter_creation(_obj_obj_arg_handler);
2642 #endif // ZERO
2643 }
2644
2645 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
2646 uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
2647 assert(id > 0, "we can never overflow because AOT cache cannot contain more than 2^32 methods");
2648 return AdapterHandlerEntry::allocate(id, fingerprint);
2649 }
2650
2651 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2652 int total_args_passed = method->size_of_parameters(); // All args on stack
2653 if (total_args_passed == 0) {
2654 return _no_arg_handler;
2655 } else if (total_args_passed == 1) {
2656 if (!method->is_static()) {
2657 return _obj_arg_handler;
2658 }
2659 switch (method->signature()->char_at(1)) {
2660 case JVM_SIGNATURE_CLASS:
2661 case JVM_SIGNATURE_ARRAY:
2662 return _obj_arg_handler;
2663 case JVM_SIGNATURE_INT:
2664 case JVM_SIGNATURE_BOOLEAN:
2665 case JVM_SIGNATURE_CHAR:
2666 case JVM_SIGNATURE_BYTE:
2667 case JVM_SIGNATURE_SHORT:
2668 return _int_arg_handler;
2669 }
2670 } else if (total_args_passed == 2 &&
2671 !method->is_static()) {
2672 switch (method->signature()->char_at(1)) {
2673 case JVM_SIGNATURE_CLASS:
2674 case JVM_SIGNATURE_ARRAY:
2675 return _obj_obj_arg_handler;
2676 case JVM_SIGNATURE_INT:
2677 case JVM_SIGNATURE_BOOLEAN:
2678 case JVM_SIGNATURE_CHAR:
2679 case JVM_SIGNATURE_BYTE:
2680 case JVM_SIGNATURE_SHORT:
2681 return _obj_int_arg_handler;
2682 }
2683 }
2684 return nullptr;
2685 }
2686
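// Builds the BasicType array for a method signature: prepends the implicit
// receiver for instance methods and inserts a T_VOID filler slot after each
// long/double, mirroring the two-slot Java calling convention.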
2687 class AdapterSignatureIterator : public SignatureIterator {
2688 private:
2689 BasicType stack_sig_bt[16];
2690 BasicType* sig_bt;
2691 int index;
2692
2693 public:
2694 AdapterSignatureIterator(Symbol* signature,
2695 fingerprint_t fingerprint,
2696 bool is_static,
2697 int total_args_passed) :
2698 SignatureIterator(signature, fingerprint),
2699 index(0)
2700 {
2701 sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2702 if (!is_static) { // Pass in receiver first
2703 sig_bt[index++] = T_OBJECT;
2704 }
2705 do_parameters_on(this);
2706 }
2707
2708 BasicType* basic_types() {
2709 return sig_bt;
2710 }
2711
2712 #ifdef ASSERT
2713 int slots() {
2714 return index;
2715 }
2716 #endif
2717
2718 private:
2719
2720 friend class SignatureIterator; // so do_parameters_on can call do_type
2721 void do_type(BasicType type) {
2722 sig_bt[index++] = type;
2723 if (type == T_LONG || type == T_DOUBLE) {
2724 sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots
2725 }
2726 }
2727 };
2728
2729
2730 const char* AdapterHandlerEntry::_entry_names[] = {
2731 "i2c", "c2i", "c2i_unverified", "c2i_no_clinit_check"
2732 };
2733
2734 #ifdef ASSERT
2735 void AdapterHandlerLibrary::verify_adapter_sharing(int total_args_passed, BasicType* sig_bt, AdapterHandlerEntry* cached_entry) {
2736 // we can only check for the same code if there is any
2737 #ifndef ZERO
2738 AdapterHandlerEntry* comparison_entry = create_adapter(total_args_passed, sig_bt, true);
2739 assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
2740 assert(comparison_entry->compare_code(cached_entry), "code must match");
2741 // Release the one just created
2742 AdapterHandlerEntry::deallocate(comparison_entry);
2743 #endif // ZERO
2744 }
2745 #endif /* ASSERT */
2746
2747 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2748 assert(!method->is_abstract(), "abstract methods do not have adapters");
2749 // Use customized signature handler. Need to lock around updates to
2750 // the _adapter_handler_table (it is not safe for concurrent readers
2751 // and a single writer: this could be fixed if it becomes a
2752 // problem).
2753
2754 // Fast-path for trivial adapters
2755 AdapterHandlerEntry* entry = get_simple_adapter(method);
2756 if (entry != nullptr) {
2757 return entry;
2758 }
2759
2760 ResourceMark rm;
2761 bool new_entry = false;
2762
2763 // Fill in the signature array, for the calling-convention call.
2764 int total_args_passed = method->size_of_parameters(); // All args on stack
2765
2766 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2767 method->is_static(), total_args_passed);
2768 assert(si.slots() == total_args_passed, "");
2769 BasicType* sig_bt = si.basic_types();
2770 {
2771 MutexLocker mu(AdapterHandlerLibrary_lock);
2772
2773 // Lookup method signature's fingerprint
2774 entry = lookup(total_args_passed, sig_bt);
2775
2776 if (entry != nullptr) {
2777 #ifndef ZERO
2778 assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
2779 #endif
2780 #ifdef ASSERT
2781 if (!entry->in_aot_cache() && VerifyAdapterSharing) {
2782 verify_adapter_sharing(total_args_passed, sig_bt, entry);
2783 }
2784 #endif
2785 } else {
2786 entry = create_adapter(total_args_passed, sig_bt);
2787 if (entry != nullptr) {
2788 new_entry = true;
2789 }
2790 }
2791 }
2792
2793 // Outside of the lock
2794 if (new_entry) {
2795 post_adapter_creation(entry);
2796 }
2797 return entry;
2798 }
2799
2800 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
2801 ResourceMark rm;
2802 const char* name = AdapterHandlerLibrary::name(handler);
2803 const uint32_t id = AdapterHandlerLibrary::id(handler);
2804
2805 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
2806 if (blob != nullptr) {
2821 }
2822 insts_size = adapter_blob->code_size();
2823 st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
2824 handler->fingerprint()->as_basic_args_string(),
2825 handler->fingerprint()->as_string(), insts_size);
2826 st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
2827 if (Verbose || PrintStubCode) {
2828 address first_pc = adapter_blob->content_begin();
2829 if (first_pc != nullptr) {
2830 Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
2831 st->cr();
2832 }
2833 }
2834 }
2835 #endif // PRODUCT
2836
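// Convert the absolute entry addresses produced by the adapter generator into
// offsets relative to the I2C entry (the blob's first entry). A missing
// C2I_No_Clinit_Check entry is recorded as -1.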
2837 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
2838 int entry_offset[AdapterBlob::ENTRY_COUNT]) {
2839 entry_offset[AdapterBlob::I2C] = 0;
2840 entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];
2841 entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];
2842 if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
2843 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
2844 } else {
2845 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
2846 }
2847 }
2848
2849 bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
2850 int total_args_passed,
2851 BasicType* sig_bt,
2852 bool is_transient) {
2853 if (log_is_enabled(Info, perf, class, link)) {
2854 ClassLoader::perf_method_adapters_count()->inc();
2855 }
2856
2857 #ifndef ZERO
2858 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2859 CodeBuffer buffer(buf);
2860 short buffer_locs[20];
2861 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2862 sizeof(buffer_locs)/sizeof(relocInfo));
2863 MacroAssembler masm(&buffer);
2864 VMRegPair stack_regs[16];
2865 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2866
2867 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2868 int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2869 address entry_address[AdapterBlob::ENTRY_COUNT];
2870 SharedRuntime::generate_i2c2i_adapters(&masm,
2871 total_args_passed,
2872 comp_args_on_stack,
2873 sig_bt,
2874 regs,
2875 entry_address);
2876 // On Zero there is no code to save and no need to create a blob
2877 // or relocate the handler.
2878 int entry_offset[AdapterBlob::ENTRY_COUNT];
2879 address_to_offset(entry_address, entry_offset);
2880 #ifdef ASSERT
2881 if (VerifyAdapterSharing) {
2882 handler->save_code(buf->code_begin(), buffer.insts_size());
2883 if (is_transient) {
2884 return true;
2885 }
2886 }
2887 #endif
2888 AdapterBlob* adapter_blob = AdapterBlob::create(&buffer, entry_offset);
2889 if (adapter_blob == nullptr) {
2890 // CodeCache is full, disable compilation
2891 // Ought to log this but the compile log is only per compile thread
2892 // and we're some nondescript Java thread.
2893 return false;
2894 }
2895 handler->set_adapter_blob(adapter_blob);
2896 if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
2897 // try to save generated code
2898 const char* name = AdapterHandlerLibrary::name(handler);
2899 const uint32_t id = AdapterHandlerLibrary::id(handler);
2900 bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
2901 assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
2902 }
2903 #endif // ZERO
2904
2905 #ifndef PRODUCT
2906 // debugging support
2907 if (PrintAdapterHandlers || PrintStubCode) {
2908 print_adapter_handler_info(tty, handler);
2909 }
2910 #endif
2911
2912 return true;
2913 }
2914
2915 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(int total_args_passed,
2916 BasicType* sig_bt,
2917 bool is_transient) {
2918 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2919 AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
2920 if (!generate_adapter_code(handler, total_args_passed, sig_bt, is_transient)) {
2921 AdapterHandlerEntry::deallocate(handler);
2922 return nullptr;
2923 }
2924 if (!is_transient) {
2925 assert_lock_strong(AdapterHandlerLibrary_lock);
2926 _adapter_handler_table->put(fp, handler);
2927 }
2928 return handler;
2929 }
2930
2931 #if INCLUDE_CDS
2932 void AdapterHandlerEntry::remove_unshareable_info() {
2933 #ifdef ASSERT
2934 _saved_code = nullptr;
2935 _saved_code_length = 0;
2936 #endif // ASSERT
2937 _adapter_blob = nullptr;
2938 _linked = false;
2939 }
2940
3003 // This method is used during a production run to link archived adapters (stored in the AOT Cache)
3004 // to their code in the AOT Code Cache
3005 void AdapterHandlerEntry::link() {
3006 ResourceMark rm;
3007 assert(_fingerprint != nullptr, "_fingerprint must not be null");
3008 bool generate_code = false;
3009 // Generate code only if AOTCodeCache is not available, or
3010 // caching adapters is disabled, or we fail to link
3011 // the AdapterHandlerEntry to its code in the AOTCodeCache
3012 if (AOTCodeCache::is_using_adapter()) {
3013 AdapterHandlerLibrary::link_aot_adapter_handler(this);
3014 // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
3015 if (_adapter_blob == nullptr) {
3016 log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
3017 generate_code = true;
3018 }
3019 } else {
3020 generate_code = true;
3021 }
3022 if (generate_code) {
3023 int nargs;
3024 BasicType* bt = _fingerprint->as_basic_type(nargs);
3025 if (!AdapterHandlerLibrary::generate_adapter_code(this, nargs, bt, /* is_transient */ false)) {
3026 // Don't throw exceptions during VM initialization because java.lang.* classes
3027 // might not have been initialized, causing problems when constructing the
3028 // Java exception object.
3029 vm_exit_during_initialization("Out of space in CodeCache for adapters");
3030 }
3031 }
3032 if (_adapter_blob != nullptr) {
3033 post_adapter_creation(this);
3034 }
3035 assert(_linked, "AdapterHandlerEntry must now be linked");
3036 }
3037
3038 void AdapterHandlerLibrary::link_aot_adapters() {
3039 uint max_id = 0;
3040 assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
3041 /* It is possible that some adapters generated during the assembly phase are not stored in the cache.
3042 * That implies the adapter ids of the adapters in the cache may not be contiguous.
3043 * If the size of the _aot_adapter_handler_table is used to initialize _id_counter, then it may
3044 * result in a collision of adapter ids between AOT-stored handlers and runtime-generated handlers.
3045 * To avoid such a situation, initialize the _id_counter with the largest adapter id among the AOT-stored handlers.
3046 */
3047 _aot_adapter_handler_table.iterate([&](AdapterHandlerEntry* entry) {
3048 assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
3049 entry->link();
3050 max_id = MAX2(max_id, entry->id());
3051 });
3052 // Set the adapter id counter to the maximum id found in the AOT cache
3053 assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
3054 _id_counter = max_id;
3055 }
3056
3057 // This method is called during a production run to look up simple adapters
3058 // in the archived adapter handler table
3059 void AdapterHandlerLibrary::lookup_simple_adapters() {
3060 assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");
3061
3062 MutexLocker mu(AdapterHandlerLibrary_lock);
3063 _no_arg_handler = lookup(0, nullptr);
3064
3065 BasicType obj_args[] = { T_OBJECT };
3066 _obj_arg_handler = lookup(1, obj_args);
3067
3068 BasicType int_args[] = { T_INT };
3069 _int_arg_handler = lookup(1, int_args);
3070
3071 BasicType obj_int_args[] = { T_OBJECT, T_INT };
3072 _obj_int_arg_handler = lookup(2, obj_int_args);
3073
3074 BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
3075 _obj_obj_arg_handler = lookup(2, obj_obj_args);
3076
3077 assert(_no_arg_handler != nullptr &&
3078 _obj_arg_handler != nullptr &&
3079 _int_arg_handler != nullptr &&
3080 _obj_int_arg_handler != nullptr &&
3081 _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
3082 assert(_no_arg_handler->is_linked() &&
3083 _obj_arg_handler->is_linked() &&
3084 _int_arg_handler->is_linked() &&
3085 _obj_int_arg_handler->is_linked() &&
3086 _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
3087 }
3088 #endif // INCLUDE_CDS
3089
3090 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3091 LogStreamHandle(Trace, aot) lsh;
3092 if (lsh.is_enabled()) {
3093 lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3094 lsh.cr();
3095 }
3096 it->push(&_fingerprint);
3097 }
3098
3099 AdapterHandlerEntry::~AdapterHandlerEntry() {
3100 if (_fingerprint != nullptr) {
3101 AdapterFingerPrint::deallocate(_fingerprint);
3102 _fingerprint = nullptr;
3103 }
3104 #ifdef ASSERT
3105 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3106 #endif
3107 FreeHeap(this);
3108 }
3109
3110
3111 #ifdef ASSERT
3112 // Capture the code before relocation so that it can be compared
3113 // against other versions. If the code is captured after relocation
3114 // then relative instructions won't be equivalent.
3115 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3116 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3117 _saved_code_length = length;
3118 memcpy(_saved_code, buffer, length);
3119 }
3120
3121
3122 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3123 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3171
3172 struct { double data[20]; } locs_buf;
3173 struct { double data[20]; } stubs_locs_buf;
3174 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3175 #if defined(AARCH64) || defined(PPC64)
3176 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3177 // in the constant pool to ensure ordering between the barrier and oops
3178 // accesses. For native_wrappers we need a constant.
3179 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3180 // static java call that is resolved in the runtime.
3181 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3182 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3183 }
3184 #endif
3185 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3186 MacroAssembler _masm(&buffer);
3187
3188 // Fill in the signature array, for the calling-convention call.
3189 const int total_args_passed = method->size_of_parameters();
3190
3191 VMRegPair stack_regs[16];
3192 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3193
3194 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
3195 method->is_static(), total_args_passed);
3196 BasicType* sig_bt = si.basic_types();
3197 assert(si.slots() == total_args_passed, "");
3198 BasicType ret_type = si.return_type();
3199
3200 // Now get the compiled-Java arguments layout.
3201 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3202
3203 // Generate the compiled-to-native wrapper code
3204 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3205
3206 if (nm != nullptr) {
3207 {
3208 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3209 if (nm->make_in_use()) {
3210 method->set_code(method, nm);
3211 }
3212 }
3213
3214 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3215 if (directive->PrintAssemblyOption) {
3216 nm->print_code();
3217 }
3218 DirectivesStack::release(directive);
3446 if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3447 found = true;
3448 st->print("Adapter for signature: ");
3449 a->print_adapter_on(st);
3450 return true;
3451 } else {
3452 return false; // keep looking
3453 }
3454 };
3455 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3456 _adapter_handler_table->iterate(findblob_runtime_table);
3457 }
3458 assert(found, "Should have found handler");
3459 }
3460
3461 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3462 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3463 if (adapter_blob() != nullptr) {
3464 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3465 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3466 st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3467 if (get_c2i_no_clinit_check_entry() != nullptr) {
3468 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3469 }
3470 }
3471 st->cr();
3472 }
3473
3474 #ifndef PRODUCT
3475
3476 void AdapterHandlerLibrary::print_statistics() {
3477 print_table_statistics();
3478 }
3479
3480 #endif /* PRODUCT */
3481
3482 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3483 assert(current == JavaThread::current(), "pre-condition");
3484 StackOverflow* overflow_state = current->stack_overflow_state();
3485 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3486 overflow_state->set_reserved_stack_activation(current->stack_base());
3533 event.set_method(method);
3534 event.commit();
3535 }
3536 }
3537 }
3538 return activation;
3539 }
3540
3541 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3542 // After any safepoint, just before going back to compiled code,
3543 // we inform the GC that we will be doing initializing writes to
3544 // this object in the future without emitting card-marks, so
3545 // GC may take any compensating steps.
3546
3547 oop new_obj = current->vm_result_oop();
3548 if (new_obj == nullptr) return;
3549
3550 BarrierSet *bs = BarrierSet::barrier_set();
3551 bs->on_slowpath_allocation_exit(current, new_obj);
3552 }
1240 // for a call currently in progress, i.e., arguments have been pushed on the stack
1241 // but the callee has not been invoked yet. The caller frame must be compiled.
1242 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1243 CallInfo& callinfo, TRAPS) {
1244 Handle receiver;
1245 Handle nullHandle; // create a handy null handle for exception returns
1246 JavaThread* current = THREAD;
1247
1248 assert(!vfst.at_end(), "Java frame must exist");
1249
1250 // Find caller and bci from vframe
1251 methodHandle caller(current, vfst.method());
1252 int bci = vfst.bci();
1253
1254 if (caller->is_continuation_enter_intrinsic()) {
1255 bc = Bytecodes::_invokestatic;
1256 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1257 return receiver;
1258 }
1259
1260 // Substitutability test implementation piggybacks on static call resolution
1261 Bytecodes::Code code = caller->java_code_at(bci);
1262 if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
1263 bc = Bytecodes::_invokestatic;
1264 methodHandle attached_method(THREAD, extract_attached_method(vfst));
1265 assert(attached_method.not_null(), "must have attached method");
1266 vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
1267 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
1268 #ifdef ASSERT
1269 Symbol* subst_method_name = UseAltSubstitutabilityMethod ? vmSymbols::isSubstitutableAlt_name() : vmSymbols::isSubstitutable_name();
1270 Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(subst_method_name, vmSymbols::object_object_boolean_signature());
1271 assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
1272 #endif
1273 return receiver;
1274 }
1275
1276 Bytecode_invoke bytecode(caller, bci);
1277 int bytecode_index = bytecode.index();
1278 bc = bytecode.invoke_code();
1279
1280 methodHandle attached_method(current, extract_attached_method(vfst));
1281 if (attached_method.not_null()) {
1282 Method* callee = bytecode.static_target(CHECK_NH);
1283 vmIntrinsics::ID id = callee->intrinsic_id();
1284 // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1285 // it attaches statically resolved method to the call site.
1286 if (MethodHandles::is_signature_polymorphic(id) &&
1287 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1288 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1289
1290 // Adjust invocation mode according to the attached method.
1291 switch (bc) {
1292 case Bytecodes::_invokevirtual:
1293 if (attached_method->method_holder()->is_interface()) {
1294 bc = Bytecodes::_invokeinterface;
1295 }
1296 break;
1297 case Bytecodes::_invokeinterface:
1298 if (!attached_method->method_holder()->is_interface()) {
1299 bc = Bytecodes::_invokevirtual;
1300 }
1301 break;
1302 case Bytecodes::_invokehandle:
1303 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1304 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1305 : Bytecodes::_invokevirtual;
1306 }
1307 break;
1308 default:
1309 break;
1310 }
1311 } else {
1312 assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1313 if (!attached_method->method_holder()->is_inline_klass()) {
1314 // Ignore the attached method in this case so as not to confuse the code below
1315 attached_method = methodHandle(current, nullptr);
1316 }
1317 }
1318 }
1319
1320 assert(bc != Bytecodes::_illegal, "not initialized");
1321
1322 bool has_receiver = bc != Bytecodes::_invokestatic &&
1323 bc != Bytecodes::_invokedynamic &&
1324 bc != Bytecodes::_invokehandle;
1325 bool check_null_and_abstract = true;
1326
1327 // Find receiver for non-static call
1328 if (has_receiver) {
1329 // This register map must be updated since we need to find the receiver for
1330 // compiled frames. The receiver might be in a register.
1331 RegisterMap reg_map2(current,
1332 RegisterMap::UpdateMap::include,
1333 RegisterMap::ProcessFrames::include,
1334 RegisterMap::WalkContinuation::skip);
1335 frame stubFrame = current->last_frame();
1336 // Caller-frame is a compiled frame
1337 frame callerFrame = stubFrame.sender(&reg_map2);
1338
1339 Method* callee = attached_method();
1340 if (callee == nullptr) {
1341 callee = bytecode.static_target(CHECK_NH);
1342 if (callee == nullptr) {
1343 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1344 }
1345 }
1346 bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->as_nmethod()->is_compiled_by_c1();
1347 if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
1348 // If the receiver is an inline type that is passed as fields, no oop is available.
1349 // Resolve the call without receiver null checking.
1350 assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
1351 assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
1352 if (bc == Bytecodes::_invokeinterface) {
1353 bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
1354 }
1355 check_null_and_abstract = false;
1356 } else {
1357 // Retrieve from a compiled argument list
1358 receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1359 assert(oopDesc::is_oop_or_null(receiver()), "");
1360 if (receiver.is_null()) {
1361 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1362 }
1363 }
1364 }
1365
1366 // Resolve method
1367 if (attached_method.not_null()) {
1368 // Parameterized by attached method.
1369 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
1370 } else {
1371 // Parameterized by bytecode.
1372 constantPoolHandle constants(current, caller->constants());
1373 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1374 }
1375
1376 #ifdef ASSERT
1377 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1378 if (has_receiver && check_null_and_abstract) {
1379 assert(receiver.not_null(), "should have thrown exception");
1380 Klass* receiver_klass = receiver->klass();
1381 Klass* rk = nullptr;
1382 if (attached_method.not_null()) {
1383 // In case there's resolved method attached, use its holder during the check.
1384 rk = attached_method->method_holder();
1385 } else {
1386 // Klass is already loaded.
1387 constantPoolHandle constants(current, caller->constants());
1388 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1389 }
1390 Klass* static_receiver_klass = rk;
1391 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1392 "actual receiver must be subclass of static receiver klass");
1393 if (receiver_klass->is_instance_klass()) {
1394 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1395 tty->print_cr("ERROR: Klass not yet initialized!!");
1396 receiver_klass->print();
1397 }
1398 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1399 }
1400 }
1401 #endif
1402
1403 return receiver;
1404 }
1405
1406 methodHandle SharedRuntime::find_callee_method(bool& caller_does_not_scalarize, TRAPS) {
1407 JavaThread* current = THREAD;
1408 ResourceMark rm(current);
1409 // We first need to check if any Java activations (compiled, interpreted)
1410 // exist on the stack since the last JavaCall. If not, we need
1411 // to get the target method from the JavaCall wrapper.
1412 vframeStream vfst(current, true); // Do not skip any javaCalls
1413 methodHandle callee_method;
1414 if (vfst.at_end()) {
1415 // No Java frames were found on stack since we did the JavaCall.
1416 // Hence the stack can only contain an entry_frame. We need to
1417 // find the target method from the stub frame.
1418 RegisterMap reg_map(current,
1419 RegisterMap::UpdateMap::skip,
1420 RegisterMap::ProcessFrames::include,
1421 RegisterMap::WalkContinuation::skip);
1422 frame fr = current->last_frame();
1423 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1424 fr = fr.sender(&reg_map);
1425 assert(fr.is_entry_frame(), "must be");
1426 // fr is now pointing to the entry frame.
1427 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1428 } else {
1429 Bytecodes::Code bc;
1430 CallInfo callinfo;
1431 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1432 // Calls via mismatching methods are always non-scalarized
1433 if (callinfo.resolved_method()->mismatch()) {
1434 caller_does_not_scalarize = true;
1435 }
1436 callee_method = methodHandle(current, callinfo.selected_method());
1437 }
1438 assert(callee_method()->is_method(), "must be");
1439 return callee_method;
1440 }
1441
1442 // Resolves a call.
1443 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1444 JavaThread* current = THREAD;
1445 ResourceMark rm(current);
1446 RegisterMap cbl_map(current,
1447 RegisterMap::UpdateMap::skip,
1448 RegisterMap::ProcessFrames::include,
1449 RegisterMap::WalkContinuation::skip);
1450 frame caller_frame = current->last_frame().sender(&cbl_map);
1451
1452 CodeBlob* caller_cb = caller_frame.cb();
1453 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1454 nmethod* caller_nm = caller_cb->as_nmethod();
1455
1456 // determine call info & receiver
1457 // note: a) receiver is null for static calls
1458 // b) an exception is thrown if receiver is null for non-static calls
1459 CallInfo call_info;
1460 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1461 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1462
1463 NoSafepointVerifier nsv;
1464
1465 methodHandle callee_method(current, call_info.selected_method());
1466 // Calls via mismatching methods are always non-scalarized
1467 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1468 caller_does_not_scalarize = true;
1469 }
1470
1471 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1472 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1473 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1474 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1475 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1476
1477 assert(!caller_nm->is_unloading(), "It should not be unloading");
1478
1479 #ifndef PRODUCT
1480 // tracing/debugging/statistics
1481 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1482 (is_virtual) ? (&_resolve_virtual_ctr) :
1483 (&_resolve_static_ctr);
1484 AtomicAccess::inc(addr);
1485
1486 if (TraceCallFixup) {
1487 ResourceMark rm(current);
1488 tty->print("resolving %s%s (%s) %s call to",
1489 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1490 Bytecodes::name(invoke_code), (caller_does_not_scalarize) ? "non-scalar" : "");
1491 callee_method->print_short_name(tty);
1492 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1493 p2i(caller_frame.pc()), p2i(callee_method->code()));
1494 }
1495 #endif
1496
1497 if (invoke_code == Bytecodes::_invokestatic) {
1498 assert(callee_method->method_holder()->is_initialized() ||
1499 callee_method->method_holder()->is_reentrant_initialization(current),
1500 "invalid class initialization state for invoke_static");
1501 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1502 // In order to keep class initialization check, do not patch call
1503 // site for static call when the class is not fully initialized.
1504 // Proper check is enforced by call site re-resolution on every invocation.
1505 //
1506 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1507 // explicit class initialization check is put in nmethod entry (VEP).
1508 assert(callee_method->method_holder()->is_linked(), "must be");
1509 return callee_method;
1510 }
1511 }
1512
1513
1514 // JSR 292 key invariant:
1515 // If the resolved method is a MethodHandle invoke target, the call
1516 // site must be a MethodHandle call site, because the lambda form might tail-call
1517 // leaving the stack in a state unknown to either caller or callee
1518
1519 // Compute entry points. The computation of the entry points is independent of
1520 // patching the call.
1521
1522 // Make sure the callee nmethod does not get deoptimized and removed before
1523 // we are done patching the code.
1524
1525
1526 CompiledICLocker ml(caller_nm);
1527 if (is_virtual && !is_optimized) {
1528 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1529 inline_cache->update(&call_info, receiver->klass(), caller_does_not_scalarize);
1530 } else {
1531 // Callsite is a direct call - set it to the destination method
1532 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1533 callsite->set(callee_method, caller_does_not_scalarize);
1534 }
1535
1536 return callee_method;
1537 }
1538
1539 // Inline caches exist only in compiled code
1540 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1541 #ifdef ASSERT
1542 RegisterMap reg_map(current,
1543 RegisterMap::UpdateMap::skip,
1544 RegisterMap::ProcessFrames::include,
1545 RegisterMap::WalkContinuation::skip);
1546 frame stub_frame = current->last_frame();
1547 assert(stub_frame.is_runtime_frame(), "sanity check");
1548 frame caller_frame = stub_frame.sender(&reg_map);
1549 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1550 #endif /* ASSERT */
1551
1552 methodHandle callee_method;
1553 bool caller_does_not_scalarize = false;
1554 JRT_BLOCK
1555 callee_method = SharedRuntime::handle_ic_miss_helper(caller_does_not_scalarize, CHECK_NULL);
1556 // Return Method* through TLS
1557 current->set_vm_result_metadata(callee_method());
1558 JRT_BLOCK_END
1559 // return compiled code entry point after potential safepoints
1560 return get_resolved_entry(current, callee_method, false, false, caller_does_not_scalarize);
1561 JRT_END
1562
1563
1564 // Handle call site that has been made non-entrant
1565 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1566 // 6243940 We might end up in here if the callee is deoptimized
1567 // as we race to call it. We don't want to take a safepoint if
1568 // the caller was interpreted because the caller frame will look
1569 // interpreted to the stack walkers and arguments are now
1570 // "compiled" so it is much better to make this transition
1571 // invisible to the stack walking code. The i2c path will
1572 // place the callee method in the callee_target. It is stashed
1573 // there because if we try to find the callee by normal means a
1574 // safepoint is possible and we would have trouble GC'ing the compiled args.
1575 RegisterMap reg_map(current,
1576 RegisterMap::UpdateMap::skip,
1577 RegisterMap::ProcessFrames::include,
1578 RegisterMap::WalkContinuation::skip);
1579 frame stub_frame = current->last_frame();
1580 assert(stub_frame.is_runtime_frame(), "sanity check");
1581 frame caller_frame = stub_frame.sender(&reg_map);
1582
1583 if (caller_frame.is_interpreted_frame() ||
1584 caller_frame.is_entry_frame() ||
1585 caller_frame.is_upcall_stub_frame()) {
1586 Method* callee = current->callee_target();
1587 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1588 current->set_vm_result_metadata(callee);
1589 current->set_callee_target(nullptr);
1590 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1591 // Bypass class initialization checks in c2i when caller is in native.
1592 // JNI calls to static methods don't have class initialization checks.
1593 // Fast class initialization checks are present in c2i adapters and call into
1594 // SharedRuntime::handle_wrong_method() on the slow path.
1595 //
1596 // JVM upcalls may land here as well, but there's a proper check present in
1597 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1598 // so bypassing it in c2i adapter is benign.
1599 return callee->get_c2i_no_clinit_check_entry();
1600 } else {
1601 if (caller_frame.is_interpreted_frame()) {
1602 return callee->get_c2i_inline_entry();
1603 } else {
1604 return callee->get_c2i_entry();
1605 }
1606 }
1607 }
1608
1609 // Must be a compiled-to-compiled call path, which is safe to stack walk
1610 methodHandle callee_method;
1611 bool is_static_call = false;
1612 bool is_optimized = false;
1613 bool caller_does_not_scalarize = false;
1614 JRT_BLOCK
1615 // Force resolving of caller (if we called from compiled frame)
1616 callee_method = SharedRuntime::reresolve_call_site(is_optimized, caller_does_not_scalarize, CHECK_NULL);
1617 current->set_vm_result_metadata(callee_method());
1618 JRT_BLOCK_END
1619 // return compiled code entry point after potential safepoints
1620 return get_resolved_entry(current, callee_method, callee_method->is_static(), is_optimized, caller_does_not_scalarize);
1621 JRT_END
1622
1623 // Handle abstract method call
1624 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1625 // Verbose error message for AbstractMethodError.
1626 // Get the called method from the invoke bytecode.
1627 vframeStream vfst(current, true);
1628 assert(!vfst.at_end(), "Java frame must exist");
1629 methodHandle caller(current, vfst.method());
1630 Bytecode_invoke invoke(caller, vfst.bci());
1631 DEBUG_ONLY( invoke.verify(); )
1632
1633 // Find the compiled caller frame.
1634 RegisterMap reg_map(current,
1635 RegisterMap::UpdateMap::include,
1636 RegisterMap::ProcessFrames::include,
1637 RegisterMap::WalkContinuation::skip);
1638 frame stubFrame = current->last_frame();
1639 assert(stubFrame.is_runtime_frame(), "must be");
1640 frame callerFrame = stubFrame.sender(&reg_map);
1641 assert(callerFrame.is_compiled_frame(), "must be");
1642
1643 // Install exception and return forward entry.
1644 address res = SharedRuntime::throw_AbstractMethodError_entry();
1645 JRT_BLOCK
1646 methodHandle callee(current, invoke.static_target(current));
1647 if (!callee.is_null()) {
1648 oop recv = callerFrame.retrieve_receiver(&reg_map);
1649 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1650 res = StubRoutines::forward_exception_entry();
1651 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1652 }
1653 JRT_BLOCK_END
1654 return res;
1655 JRT_END
1656
1657 // Return the verified code entry if interp_only_mode is not set for the current thread;
1658 // otherwise return the c2i entry.
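//
// For scalarized (inline type) calling conventions a method exposes several
// verified entries; the selection below is, in short:
//   caller does not scalarize        -> verified inline entry (VIEP)
//   static or optimized virtual call -> verified entry (VEP)
//   otherwise                        -> verified inline RO entry (VIEP(RO))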
1659 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method,
1660 bool is_static_call, bool is_optimized, bool caller_does_not_scalarize) {
1661 if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1662 // In interp_only_mode we need to go to the interpreted entry
1663 // The c2i won't patch in this mode -- see fixup_callers_callsite
1664 return callee_method->get_c2i_entry();
1665 }
1666
1667 if (caller_does_not_scalarize) {
1668 assert(callee_method->verified_inline_code_entry() != nullptr, "Jump to zero!");
1669 return callee_method->verified_inline_code_entry();
1670 } else if (is_static_call || is_optimized) {
1671 assert(callee_method->verified_code_entry() != nullptr, "Jump to zero!");
1672 return callee_method->verified_code_entry();
1673 } else {
1674 assert(callee_method->verified_inline_ro_code_entry() != nullptr, "Jump to zero!");
1675 return callee_method->verified_inline_ro_code_entry();
1676 }
1677 }
1678
1679 // resolve a static call and patch code
1680 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1681 methodHandle callee_method;
1682 bool caller_does_not_scalarize = false;
1683 bool enter_special = false;
1684 JRT_BLOCK
1685 callee_method = SharedRuntime::resolve_helper(false, false, caller_does_not_scalarize, CHECK_NULL);
1686 current->set_vm_result_metadata(callee_method());
1687 JRT_BLOCK_END
1688 // return compiled code entry point after potential safepoints
1689 return get_resolved_entry(current, callee_method, true, false, caller_does_not_scalarize);
1690 JRT_END
1691
1692 // resolve virtual call and update inline cache to monomorphic
1693 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1694 methodHandle callee_method;
1695 bool caller_does_not_scalarize = false;
1696 JRT_BLOCK
1697 callee_method = SharedRuntime::resolve_helper(true, false, caller_does_not_scalarize, CHECK_NULL);
1698 current->set_vm_result_metadata(callee_method());
1699 JRT_BLOCK_END
1700 // return compiled code entry point after potential safepoints
1701 return get_resolved_entry(current, callee_method, false, false, caller_does_not_scalarize);
1702 JRT_END
1703
1704
1705 // Resolve a virtual call that can be statically bound (e.g., always
1706 // monomorphic, so it has no inline cache). Patch code to resolved target.
1707 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1708 methodHandle callee_method;
1709 bool caller_does_not_scalarize = false;
1710 JRT_BLOCK
1711 callee_method = SharedRuntime::resolve_helper(true, true, caller_does_not_scalarize, CHECK_NULL);
1712 current->set_vm_result_metadata(callee_method());
1713 JRT_BLOCK_END
1714 // return compiled code entry point after potential safepoints
1715 return get_resolved_entry(current, callee_method, false, true, caller_does_not_scalarize);
1716 JRT_END
1717
1718
1719
1720 methodHandle SharedRuntime::handle_ic_miss_helper(bool& caller_does_not_scalarize, TRAPS) {
1721 JavaThread* current = THREAD;
1722 ResourceMark rm(current);
1723 CallInfo call_info;
1724 Bytecodes::Code bc;
1725
1726 // receiver is null for static calls. An exception is thrown for null
1727 // receivers for non-static calls
1728 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1729
1730 methodHandle callee_method(current, call_info.selected_method());
1731
1732 #ifndef PRODUCT
1733 AtomicAccess::inc(&_ic_miss_ctr);
1734
1735 // Statistics & Tracing
1736 if (TraceCallFixup) {
1737 ResourceMark rm(current);
1738 tty->print("IC miss (%s) %s call to", Bytecodes::name(bc), (caller_does_not_scalarize) ? "non-scalar" : "");
1739 callee_method->print_short_name(tty);
1740 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1741 }
1742
1743 if (ICMissHistogram) {
1744 MutexLocker m(VMStatistic_lock);
1745 RegisterMap reg_map(current,
1746 RegisterMap::UpdateMap::skip,
1747 RegisterMap::ProcessFrames::include,
1748 RegisterMap::WalkContinuation::skip);
1749 frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1750 // produce statistics under the lock
1751 trace_ic_miss(f.pc());
1752 }
1753 #endif
1754
1755 // install an event collector so that when a vtable stub is created the
1756 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1757 // event can't be posted when the stub is created as locks are held
1758 // - instead the event will be deferred until the event collector goes
1759 // out of scope.
1760 JvmtiDynamicCodeEventCollector event_collector;
1761
1762 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1763 RegisterMap reg_map(current,
1764 RegisterMap::UpdateMap::skip,
1765 RegisterMap::ProcessFrames::include,
1766 RegisterMap::WalkContinuation::skip);
1767 frame caller_frame = current->last_frame().sender(&reg_map);
1768 CodeBlob* cb = caller_frame.cb();
1769 nmethod* caller_nm = cb->as_nmethod();
1770 // Calls via mismatching methods are always non-scalarized
1771 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1772 caller_does_not_scalarize = true;
1773 }
1774
1775 CompiledICLocker ml(caller_nm);
1776 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1777 inline_cache->update(&call_info, receiver()->klass(), caller_does_not_scalarize);
1778
1779 return callee_method;
1780 }
1781
1782 //
1783 // Resets a call site in compiled code so it will get resolved again.
1784 // This routine handles virtual call sites, optimized virtual call
1785 // sites, and static call sites. Typically used to change a call site's
1786 // destination from compiled to interpreted.
1787 //
1788 methodHandle SharedRuntime::reresolve_call_site(bool& is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1789 JavaThread* current = THREAD;
1790 ResourceMark rm(current);
1791 RegisterMap reg_map(current,
1792 RegisterMap::UpdateMap::skip,
1793 RegisterMap::ProcessFrames::include,
1794 RegisterMap::WalkContinuation::skip);
1795 frame stub_frame = current->last_frame();
1796 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1797 frame caller = stub_frame.sender(&reg_map);
1798 if (caller.is_compiled_frame()) {
1799 caller_does_not_scalarize = caller.cb()->as_nmethod()->is_compiled_by_c1();
1800 }
1801 assert(!caller.is_interpreted_frame(), "must be compiled");
1802
1803 // If the frame isn't a live compiled frame (i.e. deoptimized by the time we get here), no IC clearing needs to be done
1804 // for the caller. However, when the caller is C2-compiled and the callee is a C1- or C2-compiled method, we still
1805 // need to figure out whether it was an optimized virtual call with an inline type receiver. Otherwise, we end up
1806 // using the wrong method entry point and accidentally skip the buffering of the receiver.
1807 methodHandle callee_method = find_callee_method(caller_does_not_scalarize, CHECK_(methodHandle()));
1808 const bool caller_is_compiled_and_not_deoptimized = caller.is_compiled_frame() && !caller.is_deoptimized_frame();
1809 const bool caller_is_continuation_enter_intrinsic =
1810 caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic();
1811 const bool do_IC_clearing = caller_is_compiled_and_not_deoptimized || caller_is_continuation_enter_intrinsic;
1812
1813 const bool callee_compiled_with_scalarized_receiver = callee_method->has_compiled_code() &&
1814 !callee_method()->is_static() &&
1815 callee_method()->is_scalarized_arg(0);
1816 const bool compute_is_optimized = !caller_does_not_scalarize && callee_compiled_with_scalarized_receiver;
1817
1818 if (do_IC_clearing || compute_is_optimized) {
1819 address pc = caller.pc();
1820
1821 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1822 assert(caller_nm != nullptr, "did not find caller nmethod");
1823
1824 // Default call_addr is the location of the "basic" call.
1825 // Determine the address of the call we are re-resolving. With
1826 // Inline Caches we will always find a recognizable call.
1827 // With Inline Caches disabled we may or may not find a
1828 // recognizable call. We will always find a call for static
1829 // calls and for optimized virtual calls. For vanilla virtual
1830 // calls it depends on the state of the UseInlineCaches switch.
1831 //
1832 // With Inline Caches disabled we can get here for a virtual call
1833 // for two reasons:
1834 // 1 - calling an abstract method. The vtable for abstract methods
1835 // will run us through handle_wrong_method and we will eventually
1836 // end up in the interpreter to throw the AbstractMethodError.
1837 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1838 // call and between the time we fetch the entry address and
1839 // we jump to it the target gets deoptimized. Similar to 1
1840 // we will wind up in the interpreter (through a c2i with c2).
1841 //
1842 CompiledICLocker ml(caller_nm);
1843 address call_addr = caller_nm->call_instruction_address(pc);
1844
1845 if (call_addr != nullptr) {
1846 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1847 // bytes back in the instruction stream so we must also check for reloc info.
1848 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1849 bool ret = iter.next(); // Get item
1850 if (ret) {
1851 is_optimized = false;
1852 switch (iter.type()) {
1853 case relocInfo::static_call_type:
1854 assert(callee_method->is_static(), "must be");
1855 case relocInfo::opt_virtual_call_type: {
1856 is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
1857 if (do_IC_clearing) {
1858 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1859 cdc->set_to_clean();
1860 }
1861 break;
1862 }
1863 case relocInfo::virtual_call_type: {
1864 if (do_IC_clearing) {
1865 // compiled, dispatched call (which used to call an interpreted method)
1866 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1867 inline_cache->set_to_clean();
1868 }
1869 break;
1870 }
1871 default:
1872 break;
1873 }
1874 }
1875 }
1876 }
1877
1878 #ifndef PRODUCT
1879 AtomicAccess::inc(&_wrong_method_ctr);
1880
1881 if (TraceCallFixup) {
1882 ResourceMark rm(current);
1883 tty->print("handle_wrong_method reresolving %s call to", (caller_does_not_scalarize) ? "non-scalar" : "");
1884 callee_method->print_short_name(tty);
1885 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1886 }
1887 #endif
1888
1889 return callee_method;
1890 }
1891
1892 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1893 // The faulting unsafe accesses should be changed to throw the error
1894 // synchronously instead. Meanwhile the faulting instruction will be
1895 // skipped over (effectively turning it into a no-op) and an
1896 // asynchronous exception will be raised which the thread will
1897 // handle at a later point. If the instruction is a load it will
1898 // return garbage.
1899
1900 // Request an async exception.
1901 thread->set_pending_unsafe_access_error();
1902
1903 // Return address of next instruction to execute.
2069 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
2070
2071 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
2072 if (message == nullptr) {
2073 // Shouldn't happen, but don't cause even more problems if it does
2074 message = const_cast<char*>(caster_klass->external_name());
2075 } else {
2076 jio_snprintf(message,
2077 msglen,
2078 "class %s cannot be cast to class %s (%s%s%s)",
2079 caster_name,
2080 target_name,
2081 caster_klass_description,
2082 klass_separator,
2083 target_klass_description
2084 );
2085 }
2086 return message;
2087 }
2088
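// Builds a message of the form (class name illustrative):
//   "Cannot synchronize on an instance of value class p.Point"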
2089 char* SharedRuntime::generate_identity_exception_message(JavaThread* current, Klass* klass) {
2090 assert(klass->is_inline_klass(), "Must be a concrete value class");
2091 const char* desc = "Cannot synchronize on an instance of value class ";
2092 const char* className = klass->external_name();
2093 size_t msglen = strlen(desc) + strlen(className) + 1;
2094 char* message = NEW_RESOURCE_ARRAY(char, msglen);
2095 if (nullptr == message) {
2096 // Out of memory: can't create detailed error message
2097 message = const_cast<char*>(klass->external_name());
2098 } else {
2099 jio_snprintf(message, msglen, "%s%s", desc, className);
2100 }
2101 return message;
2102 }
2103
2104 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
2105 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
2106 JRT_END
2107
2108 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2109 if (!SafepointSynchronize::is_synchronizing()) {
2110 // Only try quick_enter() if we're not trying to reach a safepoint
2111 // so that the calling thread reaches the safepoint more quickly.
2112 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
2113 return;
2114 }
2115 }
2116 // NO_ASYNC required because an async exception on the state transition destructor
2117 // would leave you with the lock held and it would never be released.
2118 // The normal monitorenter NullPointerException is thrown without acquiring a lock
2119 // and the model is that an exception implies the method failed.
2120 JRT_BLOCK_NO_ASYNC
2121 Handle h_obj(THREAD, obj);
2122 ObjectSynchronizer::enter(h_obj, lock, current);
2123 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2317 tty->print_cr("Note 1: counter updates are not MT-safe.");
2318 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2319 tty->print_cr(" %% in nested categories are relative to their category");
2320 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2321 tty->cr();
2322
2323 MethodArityHistogram h;
2324 }
2325 #endif
2326
2327 #ifndef PRODUCT
2328 static int _lookups; // number of calls to lookup
2329 static int _equals; // number of buckets checked with matching hash
2330 static int _archived_hits; // number of successful lookups in archived table
2331 static int _runtime_hits; // number of successful lookups in runtime table
2332 #endif
2333
2334 // A simple wrapper class around the calling convention information
2335 // that allows sharing of adapters for the same calling convention.
2336 class AdapterFingerPrint : public MetaspaceObj {
2337 public:
2338 class Element {
2339 private:
2340 // The highest byte is the type of the argument. The remaining bytes contain the offset of the
2341 // field if it is flattened in the calling convention, -1 otherwise.
2342 juint _payload;
2343
2344 static constexpr int offset_bit_width = 24;
2345 static constexpr juint offset_bit_mask = (1 << offset_bit_width) - 1;
2346 public:
2347 Element(BasicType bt, int offset) : _payload((static_cast<juint>(bt) << offset_bit_width) | (juint(offset) & offset_bit_mask)) {
2348 assert(offset >= -1 && offset < jint(offset_bit_mask), "invalid offset %d", offset);
2349 }
2350
2351 BasicType bt() const {
2352 return static_cast<BasicType>(_payload >> offset_bit_width);
2353 }
2354
2355 int offset() const {
2356 juint res = _payload & offset_bit_mask;
2357 return res == offset_bit_mask ? -1 : res;
2358 }
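// Round-trip example (values illustrative): Element(T_INT, 8) stores
// (T_INT << 24) | 8, so bt() returns T_INT and offset() returns 8. A
// non-flattened argument is constructed with offset -1, which is stored as
// the all-ones offset_bit_mask and mapped back to -1 by offset().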
2359
2360 juint hash() const {
2361 return _payload;
2362 }
2363
2364 bool operator!=(const Element& other) const {
2365 return _payload != other._payload;
2366 }
2367 };
2368
2369 private:
2370 const bool _has_ro_adapter;
2371 const int _length;
2372
2373 static int data_offset() { return sizeof(AdapterFingerPrint); }
2374 Element* data_pointer() {
2375 return reinterpret_cast<Element*>(reinterpret_cast<address>(this) + data_offset());
2376 }
2377
2378 const Element& element_at(int index) {
2379 assert(index < length(), "index %d out of bounds for length %d", index, length());
2380 Element* data = data_pointer();
2381 return data[index];
2382 }
2383
2384 // Private constructor. Use allocate() to get an instance.
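// The incoming signature brackets each scalarized inline type argument between
// a T_METADATA marker (start) and a T_VOID marker (end). For example
// (illustrative), a scalarized MyValue{int x, short y} argument appears as
//   T_METADATA, T_INT, T_SHORT, T_VOID
// Field entries between the markers keep their exact types and offsets, while
// top-level arguments outside any markers are widened via adapter_encoding().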
2385 AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter)
2386 : _has_ro_adapter(has_ro_adapter), _length(total_args_passed_in_sig(sig)) {
2387 Element* data = data_pointer();
2388 BasicType prev_bt = T_ILLEGAL;
2389 int vt_count = 0;
2390 for (int index = 0; index < _length; index++) {
2391 const SigEntry& sig_entry = sig->at(index);
2392 BasicType bt = sig_entry._bt;
2393 if (bt == T_METADATA) {
2394 // Found start of inline type in signature
2395 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2396 vt_count++;
2397 } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2398 // Found end of inline type in signature
2399 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2400 vt_count--;
2401 assert(vt_count >= 0, "invalid vt_count");
2402 } else if (vt_count == 0) {
2403 // Widen fields that are not part of a scalarized inline type argument
2404 assert(sig_entry._offset == -1, "invalid offset for argument that is not a flattened field %d", sig_entry._offset);
2405 bt = adapter_encoding(bt);
2406 }
2407
2408 ::new(&data[index]) Element(bt, sig_entry._offset);
2409 prev_bt = bt;
2410 }
2411 assert(vt_count == 0, "invalid vt_count");
2412 }
2413
2414 // Call deallocate instead
2415 ~AdapterFingerPrint() {
2416 ShouldNotCallThis();
2417 }
2418
2419 static int total_args_passed_in_sig(const GrowableArray<SigEntry>* sig) {
2420 return (sig != nullptr) ? sig->length() : 0;
2421 }
2422
2423 static int compute_size_in_words(int len) {
2424 return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(Element)));
2425 }
2426
2427 // Remap BasicTypes that are handled equivalently by the adapters.
2428 // These are correct for the current system but someday it might be
2429 // necessary to make this mapping platform-dependent.
2430 static BasicType adapter_encoding(BasicType in) {
2431 switch (in) {
2432 case T_BOOLEAN:
2433 case T_BYTE:
2434 case T_SHORT:
2435 case T_CHAR:
2436 // They are all promoted to T_INT in the calling convention
2437 return T_INT;
2438
2439 case T_OBJECT:
2440 case T_ARRAY:
2441 // In other words, we assume that any register good enough for
2442 // an int or long is good enough for a managed pointer.
2443 #ifdef _LP64
2444 return T_LONG;
2445 #else
2446 return T_INT;
2447 #endif
2448
2449 case T_INT:
2450 case T_LONG:
2451 case T_FLOAT:
2452 case T_DOUBLE:
2453 case T_VOID:
2454 return in;
2455
2456 default:
2457 ShouldNotReachHere();
2458 return T_CONFLICT;
2459 }
2460 }
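// For example, T_BOOLEAN, T_BYTE, T_SHORT and T_CHAR all encode to T_INT, and
// on 64-bit platforms T_OBJECT and T_ARRAY encode to T_LONG, so methods whose
// signatures differ only in such types can share a single adapter.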
2461
2462 void* operator new(size_t size, size_t fp_size) throw() {
2463 assert(fp_size >= size, "sanity check");
2464 void* p = AllocateHeap(fp_size, mtCode);
2465 memset(p, 0, fp_size);
2466 return p;
2467 }
2468
2469 public:
2470 template<typename Function>
2471 void iterate_args(Function function) {
2472 for (int i = 0; i < length(); i++) {
2473 function(element_at(i));
2474 }
2475 }
2476
2477 static AdapterFingerPrint* allocate(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2478 int len = total_args_passed_in_sig(sig);
2479 int size_in_bytes = BytesPerWord * compute_size_in_words(len);
2480 AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(sig, has_ro_adapter);
2481 assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
2482 return afp;
2483 }
2484
2485 static void deallocate(AdapterFingerPrint* fp) {
2486 FreeHeap(fp);
2487 }
2488
2489 bool has_ro_adapter() const {
2490 return _has_ro_adapter;
2491 }
2492
2493 int length() const {
2494 return _length;
2495 }
2496
2497 unsigned int compute_hash() {
2498 int hash = 0;
2499 for (int i = 0; i < length(); i++) {
2500 const Element& v = element_at(i);
2501 // Add an arithmetic operation to the hash, like +3, to improve hashing
2502 hash = ((hash << 8) ^ v.hash() ^ (hash >> 5)) + 3;
2503 }
2504 return (unsigned int)hash;
2505 }
2506
2507 const char* as_string() {
2508 stringStream st;
2509 st.print("{");
2510 if (_has_ro_adapter) {
2511 st.print("has_ro_adapter");
2512 } else {
2513 st.print("no_ro_adapter");
2514 }
2515 for (int i = 0; i < length(); i++) {
2516 st.print(", ");
2517 const Element& elem = element_at(i);
2518 st.print("{%s, %d}", type2name(elem.bt()), elem.offset());
2519 }
2520 st.print("}");
2521 return st.as_string();
2522 }
2523
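// Prints one JVM descriptor character per argument: a T_LONG element followed
// by its T_VOID second slot prints "J" (a true long), while a lone T_LONG (an
// oop widened by adapter_encoding) prints "L". For example (illustrative), the
// element sequence [T_LONG, T_LONG, T_VOID, T_INT] prints "LJI".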
2524 const char* as_basic_args_string() {
2525 stringStream st;
2526 bool long_prev = false;
2527 iterate_args([&] (const Element& arg) {
2528 if (long_prev) {
2529 long_prev = false;
2530 if (arg.bt() == T_VOID) {
2531 st.print("J");
2532 } else {
2533 st.print("L");
2534 }
2535 }
2536 if (arg.bt() == T_LONG) {
2537 long_prev = true;
2538 } else if (arg.bt() != T_VOID) {
2539 st.print("%c", type2char(arg.bt()));
2540 }
2541 });
2542 if (long_prev) {
2543 st.print("L");
2544 }
2545 return st.as_string();
2546 }
2547
2548 bool equals(AdapterFingerPrint* other) {
2549 if (other->_has_ro_adapter != _has_ro_adapter) {
2550 return false;
2551 } else if (other->_length != _length) {
2552 return false;
2553 } else {
2554 for (int i = 0; i < _length; i++) {
2555 if (element_at(i) != other->element_at(i)) {
2556 return false;
2557 }
2558 }
2559 }
2560 return true;
2561 }
2562
2563 // methods required by virtue of being a MetaspaceObj
2564 void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
2565 int size() const { return compute_size_in_words(_length); }
2566 MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2567
2568 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2569 NOT_PRODUCT(_equals++);
2570 return fp1->equals(fp2);
2571 }
2572
2573 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2574 return fp->compute_hash();
2575 }
2576 };
2577
2578 #if INCLUDE_CDS
2579 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2580 return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2581 }
2582
2583 class ArchivedAdapterTable : public OffsetCompactHashtable<
2584 AdapterFingerPrint*,
2585 AdapterHandlerEntry*,
2586 adapter_fp_equals_compact_hashtable_entry> {};
2587 #endif // INCLUDE_CDS
2588
2589 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2590 using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2591 AnyObj::C_HEAP, mtCode,
2592 AdapterFingerPrint::compute_hash,
2593 AdapterFingerPrint::equals>;
2594 static AdapterHandlerTable* _adapter_handler_table;
2595 static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2596
2597 // Find an entry with the same fingerprint if it exists
2598 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter) {
2599 NOT_PRODUCT(_lookups++);
2600 assert_lock_strong(AdapterHandlerLibrary_lock);
2601 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(sig, has_ro_adapter);
2602 AdapterHandlerEntry* entry = nullptr;
2603 #if INCLUDE_CDS
2604 // If we are building the archive, the archived adapter table is not valid
2605 // and we need to use the entries added to the runtime table.
2606 if (AOTCodeCache::is_using_adapter()) {
2607 // Search the archived table first. It is a read-only table, so it can be searched without a lock.
2608 entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2609 #ifndef PRODUCT
2610 if (entry != nullptr) {
2611 _archived_hits++;
2612 }
2613 #endif
2614 }
2615 #endif // INCLUDE_CDS
2616 if (entry == nullptr) {
2617 assert_lock_strong(AdapterHandlerLibrary_lock);
2618 AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2619 if (entry_p != nullptr) {
2620 entry = *entry_p;
2621 assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2638 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2639 ts.print(tty, "AdapterHandlerTable");
2640 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2641 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2642 int total_hits = _archived_hits + _runtime_hits;
2643 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2644 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2645 }
2646 #endif
2647
2648 // ---------------------------------------------------------------------------
2649 // Implementation of AdapterHandlerLibrary
2650 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2651 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2652 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2653 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2654 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2655 #if INCLUDE_CDS
2656 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2657 #endif // INCLUDE_CDS
2658 static const int AdapterHandlerLibrary_size = 48*K;
2659 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2660 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2661
2662 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2663 assert(_buffer != nullptr, "should be initialized");
2664 return _buffer;
2665 }
2666
2667 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2668 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2669 AdapterBlob* adapter_blob = entry->adapter_blob();
2670 char blob_id[256];
2671 jio_snprintf(blob_id,
2672 sizeof(blob_id),
2673 "%s(%s)",
2674 adapter_blob->name(),
2675 entry->fingerprint()->as_string());
2676 if (Forte::is_enabled()) {
2677 Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2678 }
2686 void AdapterHandlerLibrary::initialize() {
2687 {
2688 ResourceMark rm;
2689 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2690 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2691 }
2692
2693 #if INCLUDE_CDS
2694 // Link adapters in AOT Cache to their code in AOT Code Cache
2695 if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
2696 link_aot_adapters();
2697 lookup_simple_adapters();
2698 return;
2699 }
2700 #endif // INCLUDE_CDS
2701
2702 ResourceMark rm;
2703 {
2704 MutexLocker mu(AdapterHandlerLibrary_lock);
2705
2706 CompiledEntrySignature no_args;
2707 no_args.compute_calling_conventions();
2708 _no_arg_handler = create_adapter(no_args, true);
2709
2710 CompiledEntrySignature obj_args;
2711 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
2712 obj_args.compute_calling_conventions();
2713 _obj_arg_handler = create_adapter(obj_args, true);
2714
2715 CompiledEntrySignature int_args;
2716 SigEntry::add_entry(int_args.sig(), T_INT);
2717 int_args.compute_calling_conventions();
2718 _int_arg_handler = create_adapter(int_args, true);
2719
2720 CompiledEntrySignature obj_int_args;
2721 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
2722 SigEntry::add_entry(obj_int_args.sig(), T_INT);
2723 obj_int_args.compute_calling_conventions();
2724 _obj_int_arg_handler = create_adapter(obj_int_args, true);
2725
2726 CompiledEntrySignature obj_obj_args;
2727 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2728 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2729 obj_obj_args.compute_calling_conventions();
2730 _obj_obj_arg_handler = create_adapter(obj_obj_args, true);
2731
2732 // we should always get an entry back but we don't have any
2733 // associated blob on Zero
2734 assert(_no_arg_handler != nullptr &&
2735 _obj_arg_handler != nullptr &&
2736 _int_arg_handler != nullptr &&
2737 _obj_int_arg_handler != nullptr &&
2738 _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
2739 }
2740
2741 // Outside of the lock
2742 #ifndef ZERO
2743 // no blobs to register when we are on Zero
2744 post_adapter_creation(_no_arg_handler);
2745 post_adapter_creation(_obj_arg_handler);
2746 post_adapter_creation(_int_arg_handler);
2747 post_adapter_creation(_obj_int_arg_handler);
2748 post_adapter_creation(_obj_obj_arg_handler);
2749 #endif // ZERO
2750 }
2751
2752 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
2753 uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
2754 assert(id > 0, "we can never overflow because AOT cache cannot contain more than 2^32 methods");
2755 return AdapterHandlerEntry::allocate(id, fingerprint);
2756 }
2757
2758 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2759 int total_args_passed = method->size_of_parameters(); // All args on stack
2760 if (total_args_passed == 0) {
2761 return _no_arg_handler;
2762 } else if (total_args_passed == 1) {
2763 if (!method->is_static()) {
2764 if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2765 return nullptr;
2766 }
2767 return _obj_arg_handler;
2768 }
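// signature()->char_at(1) is the first character after '(' in the method
// descriptor, i.e. the first parameter's type character; e.g. 'I' in "(I)V".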
2769 switch (method->signature()->char_at(1)) {
2770 case JVM_SIGNATURE_CLASS: {
2771 if (InlineTypePassFieldsAsArgs) {
2772 SignatureStream ss(method->signature());
2773 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2774 if (vk != nullptr) {
2775 return nullptr;
2776 }
2777 }
2778 return _obj_arg_handler;
2779 }
2780 case JVM_SIGNATURE_ARRAY:
2781 return _obj_arg_handler;
2782 case JVM_SIGNATURE_INT:
2783 case JVM_SIGNATURE_BOOLEAN:
2784 case JVM_SIGNATURE_CHAR:
2785 case JVM_SIGNATURE_BYTE:
2786 case JVM_SIGNATURE_SHORT:
2787 return _int_arg_handler;
2788 }
2789 } else if (total_args_passed == 2 &&
2790 !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2791 switch (method->signature()->char_at(1)) {
2792 case JVM_SIGNATURE_CLASS: {
2793 if (InlineTypePassFieldsAsArgs) {
2794 SignatureStream ss(method->signature());
2795 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2796 if (vk != nullptr) {
2797 return nullptr;
2798 }
2799 }
2800 return _obj_obj_arg_handler;
2801 }
2802 case JVM_SIGNATURE_ARRAY:
2803 return _obj_obj_arg_handler;
2804 case JVM_SIGNATURE_INT:
2805 case JVM_SIGNATURE_BOOLEAN:
2806 case JVM_SIGNATURE_CHAR:
2807 case JVM_SIGNATURE_BYTE:
2808 case JVM_SIGNATURE_SHORT:
2809 return _obj_int_arg_handler;
2810 }
2811 }
2812 return nullptr;
2813 }
2814
2815 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2816 _method(method), _num_inline_args(0), _has_inline_recv(false),
2817 _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2818 _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2819 _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2820 _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2821 _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2822 _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2823 }
2824
2825 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2826 // or the same entry for VEP and VIEP(RO).
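     // Abbreviations used below map to the CodeOffsets entries returned here:
     //   VEP      -- CodeOffsets::Verified_Entry
     //   VIEP     -- CodeOffsets::Verified_Inline_Entry
     //   VIEP(RO) -- CodeOffsets::Verified_Inline_Entry_RO ("receiver-only": <this> is passed as an object)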
2827 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2828 if (!has_scalarized_args()) {
2829 // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2830 return CodeOffsets::Verified_Entry;
2831 }
2832 if (_method->is_static()) {
2833 // Static methods don't need VIEP(RO)
2834 return CodeOffsets::Verified_Entry;
2835 }
2836
2837 if (has_inline_recv()) {
2838 if (num_inline_args() == 1) {
2839 // Share same entry for VIEP and VIEP(RO).
2840 // This is quite common: we have an instance method in an InlineKlass that has
2841 // no inline type args other than <this>.
2842 return CodeOffsets::Verified_Inline_Entry;
2843 } else {
2844 assert(num_inline_args() > 1, "must be");
2845 // No sharing:
2846 // VIEP(RO) -- <this> is passed as object
2847 // VEP -- <this> is passed as fields
2848 return CodeOffsets::Verified_Inline_Entry_RO;
2849 }
2850 }
2851
2852   // <this> is not an inline type (static methods already returned above)
2853 if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2854 // No sharing:
2855 // Some arguments are passed on the stack, and we have inserted reserved entries
2856 // into the VEP, but we never insert reserved entries into the VIEP(RO).
2857 return CodeOffsets::Verified_Inline_Entry_RO;
2858 } else {
2859 // Share same entry for VEP and VIEP(RO).
2860 return CodeOffsets::Verified_Entry;
2861 }
2862 }
2863
2864 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
2865 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2866 if (_supers != nullptr) {
2867 return _supers;
2868 }
2869 _supers = new GrowableArray<Method*>();
2870 // Skip private, static, and <init> methods
2871 if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2872 return _supers;
2873 }
2874 Symbol* name = _method->name();
2875 Symbol* signature = _method->signature();
2876 const Klass* holder = _method->method_holder()->super();
2877 Symbol* holder_name = holder->name();
2878 ThreadInVMfromUnknown tiv;
2879 JavaThread* current = JavaThread::current();
2880 HandleMark hm(current);
2881 Handle loader(current, _method->method_holder()->class_loader());
2882
2883 // Walk up the class hierarchy and search for super methods
2884 while (holder != nullptr) {
2885 Method* super_method = holder->lookup_method(name, signature);
2886 if (super_method == nullptr) {
2887 break;
2888 }
2889 if (!super_method->is_static() && !super_method->is_private() &&
2890 (!super_method->is_package_private() ||
2891 super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
2892 _supers->push(super_method);
2893 }
2894 holder = super_method->method_holder()->super();
2895 }
2896 // Search interfaces for super methods
2897 Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
2898 for (int i = 0; i < interfaces->length(); ++i) {
2899 Method* m = interfaces->at(i)->lookup_method(name, signature);
2900 if (m != nullptr && !m->is_static() && m->is_public()) {
2901 _supers->push(m);
2902 }
2903 }
2904 return _supers;
2905 }
2906
2907 // Iterate over arguments and compute scalarized and non-scalarized signatures
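     // For illustration (hypothetical MyValue, sketch only): given
     //   static void m(MyValue v)
     // where MyValue declares fields {int x, short y} and can be passed as fields,
     // the computed signatures are roughly:
     //   _sig    = [T_OBJECT]
     //   _sig_cc = [T_METADATA, T_BOOLEAN, T_INT, T_SHORT, T_VOID]
     // with T_METADATA/T_VOID delimiting the flattened value object and T_BOOLEAN
     // being the null marker inserted for a nullable argument (see below).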
2908 void CompiledEntrySignature::compute_calling_conventions(bool init) {
2909 bool has_scalarized = false;
2910 if (_method != nullptr) {
2911 InstanceKlass* holder = _method->method_holder();
2912 int arg_num = 0;
2913 if (!_method->is_static()) {
2914 // We shouldn't scalarize 'this' in a value class constructor
2915 if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() && !_method->is_object_constructor() &&
2916 (init || _method->is_scalarized_arg(arg_num))) {
2917 _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
2918 has_scalarized = true;
2919 _has_inline_recv = true;
2920 _num_inline_args++;
2921 } else {
2922 SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
2923 }
2924 SigEntry::add_entry(_sig, T_OBJECT, holder->name());
2925 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
2926 arg_num++;
2927 }
2928 for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
2929 BasicType bt = ss.type();
2930 if (bt == T_OBJECT) {
2931 InlineKlass* vk = ss.as_inline_klass(holder);
2932 if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
2933 // Check for a calling convention mismatch with super method(s)
2934 bool scalar_super = false;
2935 bool non_scalar_super = false;
2936 GrowableArray<Method*>* supers = get_supers();
2937 for (int i = 0; i < supers->length(); ++i) {
2938 Method* super_method = supers->at(i);
2939 if (super_method->is_scalarized_arg(arg_num)) {
2940 scalar_super = true;
2941 } else {
2942 non_scalar_super = true;
2943 }
2944 }
2945 #ifdef ASSERT
2946 // Randomly enable below code paths for stress testing
2947 bool stress = init && StressCallingConvention;
2948 if (stress && (os::random() & 1) == 1) {
2949 non_scalar_super = true;
2950 if ((os::random() & 1) == 1) {
2951 scalar_super = true;
2952 }
2953 }
2954 #endif
2955 if (non_scalar_super) {
2956 // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
2957 if (scalar_super) {
2958 // Found non-scalar *and* scalar super methods. We can't handle both.
2959             // Mark the scalarized super methods as mismatched and recompile call sites to use the non-scalarized calling convention.
2960 for (int i = 0; i < supers->length(); ++i) {
2961 Method* super_method = supers->at(i);
2962 if (super_method->is_scalarized_arg(arg_num) DEBUG_ONLY(|| (stress && (os::random() & 1) == 1))) {
2963 super_method->set_mismatch();
2964 MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
2965 JavaThread* thread = JavaThread::current();
2966 HandleMark hm(thread);
2967 methodHandle mh(thread, super_method);
2968 DeoptimizationScope deopt_scope;
2969 CodeCache::mark_for_deoptimization(&deopt_scope, mh());
2970 deopt_scope.deoptimize_marked();
2971 }
2972 }
2973 }
2974 // Fall back to non-scalarized calling convention
2975 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2976 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2977 } else {
2978 _num_inline_args++;
2979 has_scalarized = true;
2980 int last = _sig_cc->length();
2981 int last_ro = _sig_cc_ro->length();
2982 _sig_cc->appendAll(vk->extended_sig());
2983 _sig_cc_ro->appendAll(vk->extended_sig());
2984 if (bt == T_OBJECT) {
2985             // Nullable inline type argument: insert an InlineTypeNode::NullMarker field right after the T_METADATA delimiter
2986 _sig_cc->insert_before(last+1, SigEntry(T_BOOLEAN, -1, nullptr, true));
2987 _sig_cc_ro->insert_before(last_ro+1, SigEntry(T_BOOLEAN, -1, nullptr, true));
2988 }
2989 }
2990 } else {
2991 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2992 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2993 }
2994 bt = T_OBJECT;
2995 } else {
2996 SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
2997 SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
2998 }
2999 SigEntry::add_entry(_sig, bt, ss.as_symbol());
3000 if (bt != T_VOID) {
3001 arg_num++;
3002 }
3003 }
3004 }
3005
3006 // Compute the non-scalarized calling convention
3007 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3008 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3009
3010 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3011 if (has_scalarized && !_method->is_native()) {
3012 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3013 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3014
3015 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3016 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3017
3018 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3019 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
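         // Stack repair is needed whenever the conventions disagree on outgoing
         // stack size: C1 repairs the stack when either scalarized convention
         // uses fewer stack slots than the non-scalarized one, C2 when the fully
         // scalarized convention uses more than either of the others.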
3020
3021 // Upper bound on stack arguments to avoid hitting the argument limit and
3022 // bailing out of compilation ("unsupported incoming calling sequence").
3023 // TODO we need a reasonable limit (flag?) here
3024 if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 60) {
3025 return; // Success
3026 }
3027 }
3028
3029   // No scalarized args, or the scalarized convention was rejected above: fall back to the non-scalarized one
3030 _sig_cc = _sig;
3031 _regs_cc = _regs;
3032 _args_on_stack_cc = _args_on_stack;
3033
3034 _sig_cc_ro = _sig;
3035 _regs_cc_ro = _regs;
3036 _args_on_stack_cc_ro = _args_on_stack;
3037 }
3038
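     // Reconstructs the signatures and calling conventions from an archived
     // fingerprint; this is the inverse of AdapterFingerPrint::allocate(_sig_cc, ...)
     // and is verified by the ASSERT block at the end. Encoding notes: T_METADATA
     // opens a scalarized value object group and a matching T_VOID closes it;
     // a T_LONG element is ambiguous on its own -- followed by T_VOID it is a
     // real long, otherwise (including at the end of the args) it encodes a
     // T_OBJECT.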
3039 void CompiledEntrySignature::initialize_from_fingerprint(AdapterFingerPrint* fingerprint) {
3040 _has_inline_recv = fingerprint->has_ro_adapter();
3041
3042 int value_object_count = 0;
3043 BasicType prev_bt = T_ILLEGAL;
3044 bool has_scalarized_arguments = false;
3045 bool long_prev = false;
3046 int long_prev_offset = -1;
3047
3048 fingerprint->iterate_args([&] (const AdapterFingerPrint::Element& arg) {
3049 BasicType bt = arg.bt();
3050 int offset = arg.offset();
3051
3052 if (long_prev) {
3053 long_prev = false;
3054 BasicType bt_to_add;
3055 if (bt == T_VOID) {
3056 bt_to_add = T_LONG;
3057 } else {
3058 bt_to_add = T_OBJECT;
3059 }
3060 if (value_object_count == 0) {
3061 SigEntry::add_entry(_sig, bt_to_add);
3062 }
3063 SigEntry::add_entry(_sig_cc, bt_to_add, nullptr, long_prev_offset);
3064 SigEntry::add_entry(_sig_cc_ro, bt_to_add, nullptr, long_prev_offset);
3065 }
3066
3067 switch (bt) {
3068 case T_VOID:
3069 if (prev_bt != T_LONG && prev_bt != T_DOUBLE) {
3070 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
3071 value_object_count--;
3072 SigEntry::add_entry(_sig_cc, T_VOID, nullptr, offset);
3073 SigEntry::add_entry(_sig_cc_ro, T_VOID, nullptr, offset);
3074 assert(value_object_count >= 0, "invalid value object count");
3075 } else {
3076           // Nothing to add for _sig: we already added an additional T_VOID in add_entry() when adding the T_LONG or T_DOUBLE.
3077 }
3078 break;
3079 case T_INT:
3080 case T_FLOAT:
3081 case T_DOUBLE:
3082 if (value_object_count == 0) {
3083 SigEntry::add_entry(_sig, bt);
3084 }
3085 SigEntry::add_entry(_sig_cc, bt, nullptr, offset);
3086 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset);
3087 break;
3088 case T_LONG:
3089 long_prev = true;
3090 long_prev_offset = offset;
3091 break;
3092 case T_BOOLEAN:
3093 case T_CHAR:
3094 case T_BYTE:
3095 case T_SHORT:
3096 case T_OBJECT:
3097 case T_ARRAY:
3098 assert(value_object_count > 0, "must be value object field");
3099 SigEntry::add_entry(_sig_cc, bt, nullptr, offset);
3100 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset);
3101 break;
3102 case T_METADATA:
3103 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
3104 if (value_object_count == 0) {
3105 SigEntry::add_entry(_sig, T_OBJECT);
3106 }
3107 SigEntry::add_entry(_sig_cc, T_METADATA, nullptr, offset);
3108 SigEntry::add_entry(_sig_cc_ro, T_METADATA, nullptr, offset);
3109 value_object_count++;
3110 has_scalarized_arguments = true;
3111 break;
3112 default: {
3113 fatal("Unexpected BasicType: %s", basictype_to_str(bt));
3114 }
3115 }
3116 prev_bt = bt;
3117 });
3118
3119 if (long_prev) {
3120 // If previous bt was T_LONG and we reached the end of the signature, we know that it must be a T_OBJECT.
3121 SigEntry::add_entry(_sig, T_OBJECT);
3122 SigEntry::add_entry(_sig_cc, T_OBJECT);
3123 SigEntry::add_entry(_sig_cc_ro, T_OBJECT);
3124 }
3125 assert(value_object_count == 0, "invalid value object count");
3126
3127 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3128 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3129
3130 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3131 if (has_scalarized_arguments) {
3132 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3133 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3134
3135 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3136 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3137
3138 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3139 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3140 } else {
3141 // No scalarized args
3142 _sig_cc = _sig;
3143 _regs_cc = _regs;
3144 _args_on_stack_cc = _args_on_stack;
3145
3146 _sig_cc_ro = _sig;
3147 _regs_cc_ro = _regs;
3148 _args_on_stack_cc_ro = _args_on_stack;
3149 }
3150
3151 #ifdef ASSERT
3152 {
3153 AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(_sig_cc, _has_inline_recv);
3154 assert(fingerprint->equals(compare_fp), "%s - %s", fingerprint->as_string(), compare_fp->as_string());
3155 AdapterFingerPrint::deallocate(compare_fp);
3156 }
3157 #endif
3158 }
3159
3160 const char* AdapterHandlerEntry::_entry_names[] = {
3161 "i2c", "c2i", "c2i_unverified", "c2i_no_clinit_check"
3162 };
3163
3164 #ifdef ASSERT
3165 void AdapterHandlerLibrary::verify_adapter_sharing(CompiledEntrySignature& ces, AdapterHandlerEntry* cached_entry) {
3166   // We can only compare saved code if any was generated (there is none on Zero)
3167 #ifndef ZERO
3168 AdapterHandlerEntry* comparison_entry = create_adapter(ces, false, true);
3169 assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
3170 assert(comparison_entry->compare_code(cached_entry), "code must match");
3171 // Release the one just created
3172 AdapterHandlerEntry::deallocate(comparison_entry);
3173 #endif // ZERO
3174 }
3175 #endif /* ASSERT */
3176
3177 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
3178 assert(!method->is_abstract(), "abstract methods do not have adapters");
3179 // Use customized signature handler. Need to lock around updates to
3180 // the _adapter_handler_table (it is not safe for concurrent readers
3181 // and a single writer: this could be fixed if it becomes a
3182 // problem).
3183
3184 // Fast-path for trivial adapters
3185 AdapterHandlerEntry* entry = get_simple_adapter(method);
3186 if (entry != nullptr) {
3187 return entry;
3188 }
3189
3190 ResourceMark rm;
3191 bool new_entry = false;
3192
3193 CompiledEntrySignature ces(method());
3194 ces.compute_calling_conventions();
3195 if (ces.has_scalarized_args()) {
3196 if (!method->has_scalarized_args()) {
3197 method->set_has_scalarized_args();
3198 }
3199 if (ces.c1_needs_stack_repair()) {
3200 method->set_c1_needs_stack_repair();
3201 }
3202 if (ces.c2_needs_stack_repair() && !method->c2_needs_stack_repair()) {
3203 method->set_c2_needs_stack_repair();
3204 }
3205 }
3206
3207 {
3208 MutexLocker mu(AdapterHandlerLibrary_lock);
3209
3210 // Lookup method signature's fingerprint
3211 entry = lookup(ces.sig_cc(), ces.has_inline_recv());
3212
3213 if (entry != nullptr) {
3214 #ifndef ZERO
3215 assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
3216 #endif
3217 #ifdef ASSERT
3218 if (!entry->in_aot_cache() && VerifyAdapterSharing) {
3219 verify_adapter_sharing(ces, entry);
3220 }
3221 #endif
3222 } else {
3223 entry = create_adapter(ces, /* allocate_code_blob */ true);
3224 if (entry != nullptr) {
3225 new_entry = true;
3226 }
3227 }
3228 }
3229
3230 // Outside of the lock
3231 if (new_entry) {
3232 post_adapter_creation(entry);
3233 }
3234 return entry;
3235 }
3236
3237 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
3238 ResourceMark rm;
3239 const char* name = AdapterHandlerLibrary::name(handler);
3240 const uint32_t id = AdapterHandlerLibrary::id(handler);
3241
3242 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
3243 if (blob != nullptr) {
3258 }
3259 insts_size = adapter_blob->code_size();
3260 st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
3261 handler->fingerprint()->as_basic_args_string(),
3262 handler->fingerprint()->as_string(), insts_size);
3263 st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
3264 if (Verbose || PrintStubCode) {
3265 address first_pc = adapter_blob->content_begin();
3266 if (first_pc != nullptr) {
3267 Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
3268 st->cr();
3269 }
3270 }
3271 }
3272 #endif // PRODUCT
3273
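     // Converts the absolute entry addresses into offsets relative to the I2C
     // entry so that they can be re-applied to a (possibly relocated) blob,
     // e.g. one loaded from the AOT code cache. A value of -1 marks an absent
     // C2I_No_Clinit_Check entry.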
3274 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
3275 int entry_offset[AdapterBlob::ENTRY_COUNT]) {
3276 entry_offset[AdapterBlob::I2C] = 0;
3277 entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];
3278 entry_offset[AdapterBlob::C2I_Inline] = entry_address[AdapterBlob::C2I_Inline] - entry_address[AdapterBlob::I2C];
3279 entry_offset[AdapterBlob::C2I_Inline_RO] = entry_address[AdapterBlob::C2I_Inline_RO] - entry_address[AdapterBlob::I2C];
3280 entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];
3281 entry_offset[AdapterBlob::C2I_Unverified_Inline] = entry_address[AdapterBlob::C2I_Unverified_Inline] - entry_address[AdapterBlob::I2C];
3282 if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
3283 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
3284 } else {
3285 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
3286 }
3287 }
3288
3289 bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
3290 CompiledEntrySignature& ces,
3291 bool allocate_code_blob,
3292 bool is_transient) {
3293 if (log_is_enabled(Info, perf, class, link)) {
3294 ClassLoader::perf_method_adapters_count()->inc();
3295 }
3296
3297 #ifndef ZERO
3298 AdapterBlob* adapter_blob = nullptr;
3299 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3300 CodeBuffer buffer(buf);
3301 short buffer_locs[20];
3302 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3303 sizeof(buffer_locs)/sizeof(relocInfo));
3304 MacroAssembler masm(&buffer);
3305 address entry_address[AdapterBlob::ENTRY_COUNT];
3306
3307 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
3308 SharedRuntime::generate_i2c2i_adapters(&masm,
3309 ces.args_on_stack(),
3310 ces.sig(),
3311 ces.regs(),
3312 ces.sig_cc(),
3313 ces.regs_cc(),
3314 ces.sig_cc_ro(),
3315 ces.regs_cc_ro(),
3316 entry_address,
3317 adapter_blob,
3318 allocate_code_blob);
3319
3320 if (ces.has_scalarized_args()) {
3321 // Save a C heap allocated version of the scalarized signature and store it in the adapter
3322 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3323 heap_sig->appendAll(ces.sig_cc());
3324 handler->set_sig_cc(heap_sig);
3325 }
3326   // On Zero there is no code to save and no need to create a blob
3327   // or relocate the handler.
3328 int entry_offset[AdapterBlob::ENTRY_COUNT];
3329 address_to_offset(entry_address, entry_offset);
3330 #ifdef ASSERT
3331 if (VerifyAdapterSharing) {
3332 handler->save_code(buf->code_begin(), buffer.insts_size());
3333 if (is_transient) {
3334 return true;
3335 }
3336 }
3337 #endif
3338 if (adapter_blob == nullptr) {
3339 // CodeCache is full, disable compilation
3340 // Ought to log this but compile log is only per compile thread
3341     // and we're some nondescript Java thread.
3342 return false;
3343 }
3344 handler->set_adapter_blob(adapter_blob);
3345 if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
3346 // try to save generated code
3347 const char* name = AdapterHandlerLibrary::name(handler);
3348 const uint32_t id = AdapterHandlerLibrary::id(handler);
3349 bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
3350 assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
3351 }
3352 #endif // ZERO
3353
3354 #ifndef PRODUCT
3355 // debugging support
3356 if (PrintAdapterHandlers || PrintStubCode) {
3357 print_adapter_handler_info(tty, handler);
3358 }
3359 #endif
3360
3361 return true;
3362 }
3363
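     // Creates (and, unless is_transient, caches) an adapter for the given
     // signature. A transient adapter is only used for code comparison under
     // VerifyAdapterSharing: it is never inserted into _adapter_handler_table
     // and must be deallocated by the caller.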
3364 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(CompiledEntrySignature& ces,
3365 bool allocate_code_blob,
3366 bool is_transient) {
3367 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(ces.sig_cc(), ces.has_inline_recv());
3368 #ifdef ASSERT
3369 // Verify that we can successfully restore the compiled entry signature object.
3370 CompiledEntrySignature ces_verify;
3371 ces_verify.initialize_from_fingerprint(fp);
3372 #endif
3373 AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
3374 if (!generate_adapter_code(handler, ces, allocate_code_blob, is_transient)) {
3375 AdapterHandlerEntry::deallocate(handler);
3376 return nullptr;
3377 }
3378 if (!is_transient) {
3379 assert_lock_strong(AdapterHandlerLibrary_lock);
3380 _adapter_handler_table->put(fp, handler);
3381 }
3382 return handler;
3383 }
3384
3385 #if INCLUDE_CDS
3386 void AdapterHandlerEntry::remove_unshareable_info() {
3387 #ifdef ASSERT
3388 _saved_code = nullptr;
3389 _saved_code_length = 0;
3390 #endif // ASSERT
3391 _adapter_blob = nullptr;
3392 _linked = false;
3393 }
3394
3457 // This method is used during a production run to link archived adapters (stored in the AOT cache)
3458 // to their code in the AOT code cache.
3459 void AdapterHandlerEntry::link() {
3460 ResourceMark rm;
3461 assert(_fingerprint != nullptr, "_fingerprint must not be null");
3462 bool generate_code = false;
3463 // Generate code only if AOTCodeCache is not available, or
3464 // caching adapters is disabled, or we fail to link
3465 // the AdapterHandlerEntry to its code in the AOTCodeCache
3466 if (AOTCodeCache::is_using_adapter()) {
3467 AdapterHandlerLibrary::link_aot_adapter_handler(this);
3468 // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
3469 if (_adapter_blob == nullptr) {
3470 log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
3471 generate_code = true;
3472 }
3473 } else {
3474 generate_code = true;
3475 }
3476 if (generate_code) {
3477 CompiledEntrySignature ces;
3478 ces.initialize_from_fingerprint(_fingerprint);
3479 if (!AdapterHandlerLibrary::generate_adapter_code(this, ces, true, false)) {
3480 // Don't throw exceptions during VM initialization because java.lang.* classes
3481 // might not have been initialized, causing problems when constructing the
3482 // Java exception object.
3483 vm_exit_during_initialization("Out of space in CodeCache for adapters");
3484 }
3485 }
3486 if (_adapter_blob != nullptr) {
3487 post_adapter_creation(this);
3488 }
3489 assert(_linked, "AdapterHandlerEntry must now be linked");
3490 }
3491
3492 void AdapterHandlerLibrary::link_aot_adapters() {
3493 uint max_id = 0;
3494 assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
3495 /* It is possible that some adapters generated in assembly phase are not stored in the cache.
3496 * That implies adapter ids of the adapters in the cache may not be contiguous.
3497 * If the size of the _aot_adapter_handler_table is used to initialize _id_counter, then it may
3498 * result in collision of adapter ids between AOT stored handlers and runtime generated handlers.
3499  * To avoid such a situation, initialize _id_counter with the largest adapter id among the AOT-stored handlers.
3500 */
3501 _aot_adapter_handler_table.iterate([&](AdapterHandlerEntry* entry) {
3502 assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
3503 entry->link();
3504 max_id = MAX2(max_id, entry->id());
3505 });
3506   // Set the adapter id counter to the maximum id found in the AOT cache
3507 assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
3508 _id_counter = max_id;
3509 }
3510
3511 // This method is called during a production run to look up the simple adapters
3512 // in the archived adapter handler table.
3513 void AdapterHandlerLibrary::lookup_simple_adapters() {
3514 assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");
3515
3516 MutexLocker mu(AdapterHandlerLibrary_lock);
3517 ResourceMark rm;
3518 CompiledEntrySignature no_args;
3519 no_args.compute_calling_conventions();
3520 _no_arg_handler = lookup(no_args.sig_cc(), no_args.has_inline_recv());
3521
3522 CompiledEntrySignature obj_args;
3523 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
3524 obj_args.compute_calling_conventions();
3525 _obj_arg_handler = lookup(obj_args.sig_cc(), obj_args.has_inline_recv());
3526
3527 CompiledEntrySignature int_args;
3528 SigEntry::add_entry(int_args.sig(), T_INT);
3529 int_args.compute_calling_conventions();
3530 _int_arg_handler = lookup(int_args.sig_cc(), int_args.has_inline_recv());
3531
3532 CompiledEntrySignature obj_int_args;
3533 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
3534 SigEntry::add_entry(obj_int_args.sig(), T_INT);
3535 obj_int_args.compute_calling_conventions();
3536 _obj_int_arg_handler = lookup(obj_int_args.sig_cc(), obj_int_args.has_inline_recv());
3537
3538 CompiledEntrySignature obj_obj_args;
3539 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3540 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3541 obj_obj_args.compute_calling_conventions();
3542 _obj_obj_arg_handler = lookup(obj_obj_args.sig_cc(), obj_obj_args.has_inline_recv());
3543
3544 assert(_no_arg_handler != nullptr &&
3545 _obj_arg_handler != nullptr &&
3546 _int_arg_handler != nullptr &&
3547 _obj_int_arg_handler != nullptr &&
3548 _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
3549 assert(_no_arg_handler->is_linked() &&
3550 _obj_arg_handler->is_linked() &&
3551 _int_arg_handler->is_linked() &&
3552 _obj_int_arg_handler->is_linked() &&
3553 _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
3554 }
3555 #endif // INCLUDE_CDS
3556
3557 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3558 LogStreamHandle(Trace, aot) lsh;
3559 if (lsh.is_enabled()) {
3560 lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3561 lsh.cr();
3562 }
3563 it->push(&_fingerprint);
3564 }
3565
3566 AdapterHandlerEntry::~AdapterHandlerEntry() {
3567 if (_fingerprint != nullptr) {
3568 AdapterFingerPrint::deallocate(_fingerprint);
3569 _fingerprint = nullptr;
3570 }
3571 if (_sig_cc != nullptr) {
3572 delete _sig_cc;
3573 }
3574 #ifdef ASSERT
3575 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3576 #endif
3577 FreeHeap(this);
3578 }
3579
3580
3581 #ifdef ASSERT
3582 // Capture the code before relocation so that it can be compared
3583 // against other versions. If the code is captured after relocation
3584 // then relative instructions won't be equivalent.
3585 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3586 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3587 _saved_code_length = length;
3588 memcpy(_saved_code, buffer, length);
3589 }
3590
3591
3592 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3593 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3641
3642 struct { double data[20]; } locs_buf;
3643 struct { double data[20]; } stubs_locs_buf;
3644 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3645 #if defined(AARCH64) || defined(PPC64)
3646 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3647 // in the constant pool to ensure ordering between the barrier and oops
3648 // accesses. For native_wrappers we need a constant.
3649 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3650 // static java call that is resolved in the runtime.
3651 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3652 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3653 }
3654 #endif
3655 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3656 MacroAssembler _masm(&buffer);
3657
3658 // Fill in the signature array, for the calling-convention call.
3659 const int total_args_passed = method->size_of_parameters();
3660
3661 BasicType stack_sig_bt[16];
3662 VMRegPair stack_regs[16];
3663 BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3664 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3665
3666 int i = 0;
3667 if (!method->is_static()) { // Pass in receiver first
3668 sig_bt[i++] = T_OBJECT;
3669 }
3670 SignatureStream ss(method->signature());
3671 for (; !ss.at_return_type(); ss.next()) {
3672 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
3673 if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3674 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
3675 }
3676 }
3677 assert(i == total_args_passed, "");
3678 BasicType ret_type = ss.type();
3679
3680 // Now get the compiled-Java arguments layout.
3681 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3682
3683 // Generate the compiled-to-native wrapper code
3684 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3685
3686 if (nm != nullptr) {
3687 {
3688 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3689 if (nm->make_in_use()) {
3690 method->set_code(method, nm);
3691 }
3692 }
3693
3694 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3695 if (directive->PrintAssemblyOption) {
3696 nm->print_code();
3697 }
3698 DirectivesStack::release(directive);
3926 if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3927 found = true;
3928 st->print("Adapter for signature: ");
3929 a->print_adapter_on(st);
3930 return true;
3931 } else {
3932 return false; // keep looking
3933 }
3934 };
3935 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3936 _adapter_handler_table->iterate(findblob_runtime_table);
3937 }
3938 assert(found, "Should have found handler");
3939 }
3940
3941 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3942 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3943 if (adapter_blob() != nullptr) {
3944 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3945 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3946 st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3947 st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3948 st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3949 st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3950 if (get_c2i_no_clinit_check_entry() != nullptr) {
3951 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3952 }
3953 }
3954 st->cr();
3955 }
3956
3957 #ifndef PRODUCT
3958
3959 void AdapterHandlerLibrary::print_statistics() {
3960 print_table_statistics();
3961 }
3962
3963 #endif /* PRODUCT */
3964
3965 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3966 assert(current == JavaThread::current(), "pre-condition");
3967 StackOverflow* overflow_state = current->stack_overflow_state();
3968 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3969 overflow_state->set_reserved_stack_activation(current->stack_base());
4016 event.set_method(method);
4017 event.commit();
4018 }
4019 }
4020 }
4021 return activation;
4022 }
4023
4024 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
4025 // After any safepoint, just before going back to compiled code,
4026 // we inform the GC that we will be doing initializing writes to
4027 // this object in the future without emitting card-marks, so
4028 // GC may take any compensating steps.
4029
4030 oop new_obj = current->vm_result_oop();
4031 if (new_obj == nullptr) return;
4032
4033 BarrierSet *bs = BarrierSet::barrier_set();
4034 bs->on_slowpath_allocation_exit(current, new_obj);
4035 }
4036
4037 // We are at a compiled-code-to-interpreter call. We need backing
4038 // buffers for all inline type arguments. Allocate an object array to
4039 // hold them (convenient because once we're done with it we don't have
4040 // to worry about freeing it).
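     // Two passes over the signature: the first counts the slots needed, the
     // second allocates one instance per scalarized argument, in declaration
     // order (receiver first when it is scalarized).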
4041 oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS) {
4042 assert(InlineTypePassFieldsAsArgs, "no reason to call this");
4043 ResourceMark rm;
4044
4045 int nb_slots = 0;
4046 InstanceKlass* holder = callee->method_holder();
4047 allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
4048 if (allocate_receiver) {
4049 nb_slots++;
4050 }
4051 int arg_num = callee->is_static() ? 0 : 1;
4052 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4053 BasicType bt = ss.type();
4054 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4055 nb_slots++;
4056 }
4057 if (bt != T_VOID) {
4058 arg_num++;
4059 }
4060 }
4061 objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
4062 objArrayHandle array(THREAD, array_oop);
4063 arg_num = callee->is_static() ? 0 : 1;
4064 int i = 0;
4065 if (allocate_receiver) {
4066 InlineKlass* vk = InlineKlass::cast(holder);
4067 oop res = vk->allocate_instance(CHECK_NULL);
4068 array->obj_at_put(i++, res);
4069 }
4070 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4071 BasicType bt = ss.type();
4072 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4073 InlineKlass* vk = ss.as_inline_klass(holder);
4074 assert(vk != nullptr, "Unexpected klass");
4075 oop res = vk->allocate_instance(CHECK_NULL);
4076 array->obj_at_put(i++, res);
4077 }
4078 if (bt != T_VOID) {
4079 arg_num++;
4080 }
4081 }
4082 return array();
4083 }
4084
4085 JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
4086 methodHandle callee(current, callee_method);
4087 oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, CHECK);
4088 current->set_vm_result_oop(array);
4089 current->set_vm_result_metadata(callee()); // TODO: required to keep callee live?
4090 JRT_END
4091
4092 // We're returning from an interpreted method: load each field into a
4093 // register following the calling convention
4094 JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
4095 {
4096 assert(res->klass()->is_inline_klass(), "only inline types here");
4097 ResourceMark rm;
4098 RegisterMap reg_map(current,
4099 RegisterMap::UpdateMap::include,
4100 RegisterMap::ProcessFrames::include,
4101 RegisterMap::WalkContinuation::skip);
4102 frame stubFrame = current->last_frame();
4103   frame callerFrame = stubFrame.sender(&reg_map);
4104 assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
4105
4106 InlineKlass* vk = InlineKlass::cast(res->klass());
4107
4108 const Array<SigEntry>* sig_vk = vk->extended_sig();
4109 const Array<VMRegPair>* regs = vk->return_regs();
4110
4111 if (regs == nullptr) {
4112     // The fields of the inline klass don't fit in registers; bail out.
4113 return;
4114 }
4115
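       // regs->at(0) holds the oop of the inline type itself (checked by the
       // assert below); the scalarized field values start at index 1.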
4116 int j = 1;
4117 for (int i = 0; i < sig_vk->length(); i++) {
4118 BasicType bt = sig_vk->at(i)._bt;
4119 if (bt == T_METADATA) {
4120 continue;
4121 }
4122 if (bt == T_VOID) {
4123 if (sig_vk->at(i-1)._bt == T_LONG ||
4124 sig_vk->at(i-1)._bt == T_DOUBLE) {
4125 j++;
4126 }
4127 continue;
4128 }
4129 int off = sig_vk->at(i)._offset;
4130 assert(off > 0, "offset in object should be positive");
4131 VMRegPair pair = regs->at(j);
4132 address loc = reg_map.location(pair.first(), nullptr);
4133 switch(bt) {
4134 case T_BOOLEAN:
4135 *(jboolean*)loc = res->bool_field(off);
4136 break;
4137 case T_CHAR:
4138 *(jchar*)loc = res->char_field(off);
4139 break;
4140 case T_BYTE:
4141 *(jbyte*)loc = res->byte_field(off);
4142 break;
4143 case T_SHORT:
4144 *(jshort*)loc = res->short_field(off);
4145 break;
4146 case T_INT: {
4147 *(jint*)loc = res->int_field(off);
4148 break;
4149 }
4150 case T_LONG:
4151 #ifdef _LP64
4152 *(intptr_t*)loc = res->long_field(off);
4153 #else
4154 Unimplemented();
4155 #endif
4156 break;
4157 case T_OBJECT:
4158 case T_ARRAY: {
4159 *(oop*)loc = res->obj_field(off);
4160 break;
4161 }
4162 case T_FLOAT:
4163 *(jfloat*)loc = res->float_field(off);
4164 break;
4165 case T_DOUBLE:
4166 *(jdouble*)loc = res->double_field(off);
4167 break;
4168 default:
4169 ShouldNotReachHere();
4170 }
4171 j++;
4172 }
4173 assert(j == regs->length(), "missed a field?");
4174
4175 #ifdef ASSERT
4176 VMRegPair pair = regs->at(0);
4177 address loc = reg_map.location(pair.first(), nullptr);
4178 assert(*(oopDesc**)loc == res, "overwritten object");
4179 #endif
4180
4181 current->set_vm_result_oop(res);
4182 }
4183 JRT_END
4184
4185 // We've returned to an interpreted method: the interpreter needs a
4186 // reference to an inline type instance. Allocate it and initialize it
4187 // from the field values in registers.
4188 JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
4189 {
4190 ResourceMark rm;
4191 RegisterMap reg_map(current,
4192 RegisterMap::UpdateMap::include,
4193 RegisterMap::ProcessFrames::include,
4194 RegisterMap::WalkContinuation::skip);
4195 frame stubFrame = current->last_frame();
4196   frame callerFrame = stubFrame.sender(&reg_map);
4197
4198 #ifdef ASSERT
4199 InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
4200 #endif
4201
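       // 'res' is a tagged value: if bit 0 is set it is an InlineKlass* and the
       // field values were returned in registers; otherwise it is already an oop
       // (or a pointer into the buffer area) and can be stored directly.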
4202 if (!is_set_nth_bit(res, 0)) {
4203 // We're not returning with inline type fields in registers (the
4204 // calling convention didn't allow it for this inline klass)
4205 assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
4206 current->set_vm_result_oop((oopDesc*)res);
4207 assert(verif_vk == nullptr, "broken calling convention");
4208 return;
4209 }
4210
4211 clear_nth_bit(res, 0);
4212 InlineKlass* vk = (InlineKlass*)res;
4213 assert(verif_vk == vk, "broken calling convention");
4214 assert(Metaspace::contains((void*)res), "should be klass");
4215
4216 // Allocate handles for every oop field so they are safe in case of
4217 // a safepoint when allocating
4218 GrowableArray<Handle> handles;
4219 vk->save_oop_fields(reg_map, handles);
4220
4221 // It's unsafe to safepoint until we are here
4222 JRT_BLOCK;
4223 {
4224 JavaThread* THREAD = current;
4225 oop vt = vk->realloc_result(reg_map, handles, CHECK);
4226 current->set_vm_result_oop(vt);
4227 }
4228 JRT_BLOCK_END;
4229 }
4230 JRT_END
|