28 #include "classfile/javaClasses.inline.hpp"
29 #include "classfile/stringTable.hpp"
30 #include "classfile/vmClasses.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/aotCodeCache.hpp"
33 #include "code/codeCache.hpp"
34 #include "code/compiledIC.hpp"
35 #include "code/nmethod.inline.hpp"
36 #include "code/scopeDesc.hpp"
37 #include "code/vtableStubs.hpp"
38 #include "compiler/abstractCompiler.hpp"
39 #include "compiler/compileBroker.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "gc/shared/barrierSet.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "interpreter/interpreterRuntime.hpp"
45 #include "jfr/jfrEvents.hpp"
46 #include "jvm.h"
47 #include "logging/log.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "memory/universe.hpp"
50 #include "metaprogramming/primitiveConversions.hpp"
51 #include "oops/klass.hpp"
52 #include "oops/method.inline.hpp"
53 #include "oops/objArrayKlass.hpp"
54 #include "oops/oop.inline.hpp"
55 #include "prims/forte.hpp"
56 #include "prims/jvmtiExport.hpp"
57 #include "prims/jvmtiThreadState.hpp"
58 #include "prims/methodHandles.hpp"
59 #include "prims/nativeLookup.hpp"
60 #include "runtime/arguments.hpp"
61 #include "runtime/atomicAccess.hpp"
62 #include "runtime/basicLock.inline.hpp"
63 #include "runtime/frame.inline.hpp"
64 #include "runtime/handles.inline.hpp"
65 #include "runtime/init.hpp"
66 #include "runtime/interfaceSupport.inline.hpp"
67 #include "runtime/java.hpp"
68 #include "runtime/javaCalls.hpp"
69 #include "runtime/jniHandles.inline.hpp"
70 #include "runtime/perfData.hpp"
71 #include "runtime/sharedRuntime.hpp"
72 #include "runtime/stackWatermarkSet.hpp"
73 #include "runtime/stubRoutines.hpp"
74 #include "runtime/synchronizer.inline.hpp"
75 #include "runtime/timerTrace.hpp"
76 #include "runtime/vframe.inline.hpp"
77 #include "runtime/vframeArray.hpp"
78 #include "runtime/vm_version.hpp"
79 #include "utilities/copy.hpp"
80 #include "utilities/dtrace.hpp"
81 #include "utilities/events.hpp"
82 #include "utilities/globalDefinitions.hpp"
83 #include "utilities/hashTable.hpp"
84 #include "utilities/macros.hpp"
85 #include "utilities/xmlstream.hpp"
86 #ifdef COMPILER1
87 #include "c1/c1_Runtime1.hpp"
88 #endif
89 #if INCLUDE_JFR
90 #include "jfr/jfr.inline.hpp"
91 #endif
1212 // for a call currently in progress, i.e., arguments have been pushed on the stack
1213 // but the callee has not been invoked yet. The caller frame must be compiled.
1214 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1215 CallInfo& callinfo, TRAPS) {
1216 Handle receiver;
1217 Handle nullHandle; // create a handy null handle for exception returns
1218 JavaThread* current = THREAD;
1219
1220 assert(!vfst.at_end(), "Java frame must exist");
1221
1222 // Find caller and bci from vframe
1223 methodHandle caller(current, vfst.method());
1224 int bci = vfst.bci();
1225
1226 if (caller->is_continuation_enter_intrinsic()) {
1227 bc = Bytecodes::_invokestatic;
1228 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1229 return receiver;
1230 }
1231
1232 Bytecode_invoke bytecode(caller, bci);
1233 int bytecode_index = bytecode.index();
1234 bc = bytecode.invoke_code();
1235
1236 methodHandle attached_method(current, extract_attached_method(vfst));
1237 if (attached_method.not_null()) {
1238 Method* callee = bytecode.static_target(CHECK_NH);
1239 vmIntrinsics::ID id = callee->intrinsic_id();
1240     // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual call,
1241     // it attaches the statically resolved method to the call site.
1242 if (MethodHandles::is_signature_polymorphic(id) &&
1243 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1244 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1245
1246 // Adjust invocation mode according to the attached method.
1247 switch (bc) {
1248 case Bytecodes::_invokevirtual:
1249 if (attached_method->method_holder()->is_interface()) {
1250 bc = Bytecodes::_invokeinterface;
1251 }
1252 break;
1253 case Bytecodes::_invokeinterface:
1254 if (!attached_method->method_holder()->is_interface()) {
1255 bc = Bytecodes::_invokevirtual;
1256 }
1257 break;
1258 case Bytecodes::_invokehandle:
1259 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1260 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1261 : Bytecodes::_invokevirtual;
1262 }
1263 break;
1264 default:
1265 break;
1266 }
1267 }
1268 }
1269
1270 assert(bc != Bytecodes::_illegal, "not initialized");
1271
1272 bool has_receiver = bc != Bytecodes::_invokestatic &&
1273 bc != Bytecodes::_invokedynamic &&
1274 bc != Bytecodes::_invokehandle;
1275
1276 // Find receiver for non-static call
1277 if (has_receiver) {
1278     // This register map must be updated since we need to find the receiver for
1279 // compiled frames. The receiver might be in a register.
1280 RegisterMap reg_map2(current,
1281 RegisterMap::UpdateMap::include,
1282 RegisterMap::ProcessFrames::include,
1283 RegisterMap::WalkContinuation::skip);
1284 frame stubFrame = current->last_frame();
1285 // Caller-frame is a compiled frame
1286     frame callerFrame = stubFrame.sender(&reg_map2);
1287
1288 if (attached_method.is_null()) {
1289 Method* callee = bytecode.static_target(CHECK_NH);
1290 if (callee == nullptr) {
1291 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1292 }
1293 }
1294
1295 // Retrieve from a compiled argument list
1296     receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1297 assert(oopDesc::is_oop_or_null(receiver()), "");
1298
1299 if (receiver.is_null()) {
1300 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1301 }
1302 }
1303
1304 // Resolve method
1305 if (attached_method.not_null()) {
1306 // Parameterized by attached method.
1307 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1308 } else {
1309 // Parameterized by bytecode.
1310 constantPoolHandle constants(current, caller->constants());
1311 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1312 }
1313
1314 #ifdef ASSERT
1315 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1316 if (has_receiver) {
1317 assert(receiver.not_null(), "should have thrown exception");
1318 Klass* receiver_klass = receiver->klass();
1319 Klass* rk = nullptr;
1320 if (attached_method.not_null()) {
1321       // In case there's a resolved method attached, use its holder during the check.
1322 rk = attached_method->method_holder();
1323 } else {
1324 // Klass is already loaded.
1325 constantPoolHandle constants(current, caller->constants());
1326 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1327 }
1328 Klass* static_receiver_klass = rk;
1329 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1330 "actual receiver must be subclass of static receiver klass");
1331 if (receiver_klass->is_instance_klass()) {
1332 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1333 tty->print_cr("ERROR: Klass not yet initialized!!");
1334 receiver_klass->print();
1335 }
1336 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1337 }
1338 }
1339 #endif
1340
1341 return receiver;
1342 }
1343
1344 methodHandle SharedRuntime::find_callee_method(TRAPS) {
1345 JavaThread* current = THREAD;
1346 ResourceMark rm(current);
1347   // We first need to check whether any Java activations (compiled or interpreted)
1348   // exist on the stack since the last JavaCall. If not, we need
1349 // to get the target method from the JavaCall wrapper.
1350 vframeStream vfst(current, true); // Do not skip any javaCalls
1351 methodHandle callee_method;
1352 if (vfst.at_end()) {
1353 // No Java frames were found on stack since we did the JavaCall.
1354 // Hence the stack can only contain an entry_frame. We need to
1355 // find the target method from the stub frame.
1356 RegisterMap reg_map(current,
1357 RegisterMap::UpdateMap::skip,
1358 RegisterMap::ProcessFrames::include,
1359 RegisterMap::WalkContinuation::skip);
1360 frame fr = current->last_frame();
1361 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1362     fr = fr.sender(&reg_map);
1363 assert(fr.is_entry_frame(), "must be");
1364 // fr is now pointing to the entry frame.
1365 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1366 } else {
1367 Bytecodes::Code bc;
1368 CallInfo callinfo;
1369 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1370 callee_method = methodHandle(current, callinfo.selected_method());
1371 }
1372 assert(callee_method()->is_method(), "must be");
1373 return callee_method;
1374 }
1375
1376 // Resolves a call.
1377 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
1378 JavaThread* current = THREAD;
1379 ResourceMark rm(current);
1380 RegisterMap cbl_map(current,
1381 RegisterMap::UpdateMap::skip,
1382 RegisterMap::ProcessFrames::include,
1383 RegisterMap::WalkContinuation::skip);
1384 frame caller_frame = current->last_frame().sender(&cbl_map);
1385
1386 CodeBlob* caller_cb = caller_frame.cb();
1387 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1388 nmethod* caller_nm = caller_cb->as_nmethod();
1389
1390 // determine call info & receiver
1391 // note: a) receiver is null for static calls
1392 // b) an exception is thrown if receiver is null for non-static calls
1393 CallInfo call_info;
1394 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1395 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1396
1397 NoSafepointVerifier nsv;
1398
1399 methodHandle callee_method(current, call_info.selected_method());
1400
1401 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1402 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1403 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1404 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1405 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1406
1407 assert(!caller_nm->is_unloading(), "It should not be unloading");
1408
1409 #ifndef PRODUCT
1410 // tracing/debugging/statistics
1411 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1412 (is_virtual) ? (&_resolve_virtual_ctr) :
1413 (&_resolve_static_ctr);
1414 AtomicAccess::inc(addr);
1415
1416 if (TraceCallFixup) {
1417 ResourceMark rm(current);
1418 tty->print("resolving %s%s (%s) call to",
1419 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1420 Bytecodes::name(invoke_code));
1421 callee_method->print_short_name(tty);
1422 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1423 p2i(caller_frame.pc()), p2i(callee_method->code()));
1424 }
1425 #endif
1426
1427 if (invoke_code == Bytecodes::_invokestatic) {
1428 assert(callee_method->method_holder()->is_initialized() ||
1429 callee_method->method_holder()->is_reentrant_initialization(current),
1430 "invalid class initialization state for invoke_static");
1431 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1432 // In order to keep class initialization check, do not patch call
1433 // site for static call when the class is not fully initialized.
1434 // Proper check is enforced by call site re-resolution on every invocation.
1435 //
1436 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1437 // explicit class initialization check is put in nmethod entry (VEP).
1438 assert(callee_method->method_holder()->is_linked(), "must be");
1439 return callee_method;
1440 }
1441 }
1442
1443
1444 // JSR 292 key invariant:
1445 // If the resolved method is a MethodHandle invoke target, the call
1446 // site must be a MethodHandle call site, because the lambda form might tail-call
1447 // leaving the stack in a state unknown to either caller or callee
1448
1449 // Compute entry points. The computation of the entry points is independent of
1450 // patching the call.
1451
1452 // Make sure the callee nmethod does not get deoptimized and removed before
1453 // we are done patching the code.
1454
1455
1456 CompiledICLocker ml(caller_nm);
1457 if (is_virtual && !is_optimized) {
1458 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1459 inline_cache->update(&call_info, receiver->klass());
1460 } else {
1461 // Callsite is a direct call - set it to the destination method
1462 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1463 callsite->set(callee_method);
1464 }
1465
1466 return callee_method;
1467 }
1468
1469 // Inline caches exist only in compiled code
1470 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1471 #ifdef ASSERT
1472 RegisterMap reg_map(current,
1473 RegisterMap::UpdateMap::skip,
1474 RegisterMap::ProcessFrames::include,
1475 RegisterMap::WalkContinuation::skip);
1476 frame stub_frame = current->last_frame();
1477 assert(stub_frame.is_runtime_frame(), "sanity check");
1478   frame caller_frame = stub_frame.sender(&reg_map);
1479 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1480 #endif /* ASSERT */
1481
1482 methodHandle callee_method;
1483 JRT_BLOCK
1484 callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1485 // Return Method* through TLS
1486 current->set_vm_result_metadata(callee_method());
1487 JRT_BLOCK_END
1488 // return compiled code entry point after potential safepoints
1489 return get_resolved_entry(current, callee_method);
1490 JRT_END
1491
1492
1493 // Handle call site that has been made non-entrant
1494 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1495 // 6243940 We might end up in here if the callee is deoptimized
1496 // as we race to call it. We don't want to take a safepoint if
1497 // the caller was interpreted because the caller frame will look
1498 // interpreted to the stack walkers and arguments are now
1499 // "compiled" so it is much better to make this transition
1500 // invisible to the stack walking code. The i2c path will
1501 // place the callee method in the callee_target. It is stashed
1502   // there because if we try to find the callee by normal means a
1503   // safepoint is possible and we would have trouble gc'ing the compiled args.
1504 RegisterMap reg_map(current,
1505 RegisterMap::UpdateMap::skip,
1506 RegisterMap::ProcessFrames::include,
1507 RegisterMap::WalkContinuation::skip);
1508 frame stub_frame = current->last_frame();
1509 assert(stub_frame.is_runtime_frame(), "sanity check");
1510   frame caller_frame = stub_frame.sender(&reg_map);
1511
1512 if (caller_frame.is_interpreted_frame() ||
1513 caller_frame.is_entry_frame() ||
1514 caller_frame.is_upcall_stub_frame()) {
1515 Method* callee = current->callee_target();
1516 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1517 current->set_vm_result_metadata(callee);
1518 current->set_callee_target(nullptr);
1519 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1520 // Bypass class initialization checks in c2i when caller is in native.
1521 // JNI calls to static methods don't have class initialization checks.
1522 // Fast class initialization checks are present in c2i adapters and call into
1523 // SharedRuntime::handle_wrong_method() on the slow path.
1524 //
1525 // JVM upcalls may land here as well, but there's a proper check present in
1526 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1527 // so bypassing it in c2i adapter is benign.
1528 return callee->get_c2i_no_clinit_check_entry();
1529 } else {
1530 return callee->get_c2i_entry();
1531 }
1532 }
1533
1534 // Must be compiled to compiled path which is safe to stackwalk
1535 methodHandle callee_method;
1536 JRT_BLOCK
1537 // Force resolving of caller (if we called from compiled frame)
1538 callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1539 current->set_vm_result_metadata(callee_method());
1540 JRT_BLOCK_END
1541 // return compiled code entry point after potential safepoints
1542 return get_resolved_entry(current, callee_method);
1543 JRT_END
1544
1545 // Handle abstract method call
1546 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1547 // Verbose error message for AbstractMethodError.
1548 // Get the called method from the invoke bytecode.
1549 vframeStream vfst(current, true);
1550 assert(!vfst.at_end(), "Java frame must exist");
1551 methodHandle caller(current, vfst.method());
1552 Bytecode_invoke invoke(caller, vfst.bci());
1553 DEBUG_ONLY( invoke.verify(); )
1554
1555 // Find the compiled caller frame.
1556 RegisterMap reg_map(current,
1557 RegisterMap::UpdateMap::include,
1558 RegisterMap::ProcessFrames::include,
1559 RegisterMap::WalkContinuation::skip);
1560 frame stubFrame = current->last_frame();
1561 assert(stubFrame.is_runtime_frame(), "must be");
1562   frame callerFrame = stubFrame.sender(&reg_map);
1563 assert(callerFrame.is_compiled_frame(), "must be");
1564
1565 // Install exception and return forward entry.
1566 address res = SharedRuntime::throw_AbstractMethodError_entry();
1567 JRT_BLOCK
1568 methodHandle callee(current, invoke.static_target(current));
1569 if (!callee.is_null()) {
1570     oop recv = callerFrame.retrieve_receiver(&reg_map);
1571     Klass* recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1572 res = StubRoutines::forward_exception_entry();
1573 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1574 }
1575 JRT_BLOCK_END
1576 return res;
1577 JRT_END
1578
1579 // return verified_code_entry if interp_only_mode is not set for the current thread;
1580 // otherwise return c2i entry.
1581 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method) {
1582 if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1583 // In interp_only_mode we need to go to the interpreted entry
1584 // The c2i won't patch in this mode -- see fixup_callers_callsite
1585 return callee_method->get_c2i_entry();
1586 }
1587 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1588 return callee_method->verified_code_entry();
1589 }
1590
1591 // resolve a static call and patch code
1592 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1593 methodHandle callee_method;
1595 JRT_BLOCK
1596 callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1597 current->set_vm_result_metadata(callee_method());
1598 JRT_BLOCK_END
1599 // return compiled code entry point after potential safepoints
1600 return get_resolved_entry(current, callee_method);
1601 JRT_END
1602
1603 // resolve virtual call and update inline cache to monomorphic
1604 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1605 methodHandle callee_method;
1606 JRT_BLOCK
1607 callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
1608 current->set_vm_result_metadata(callee_method());
1609 JRT_BLOCK_END
1610 // return compiled code entry point after potential safepoints
1611 return get_resolved_entry(current, callee_method);
1612 JRT_END
1613
1614
1615 // Resolve a virtual call that can be statically bound (e.g., always
1616 // monomorphic, so it has no inline cache). Patch code to resolved target.
1617 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1618 methodHandle callee_method;
1619 JRT_BLOCK
1620 callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
1621 current->set_vm_result_metadata(callee_method());
1622 JRT_BLOCK_END
1623 // return compiled code entry point after potential safepoints
1624 return get_resolved_entry(current, callee_method);
1625 JRT_END
1626
1627 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1628 JavaThread* current = THREAD;
1629 ResourceMark rm(current);
1630 CallInfo call_info;
1631 Bytecodes::Code bc;
1632
1633 // receiver is null for static calls. An exception is thrown for null
1634 // receivers for non-static calls
1635 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1636
1637 methodHandle callee_method(current, call_info.selected_method());
1638
1639 #ifndef PRODUCT
1640 AtomicAccess::inc(&_ic_miss_ctr);
1641
1642 // Statistics & Tracing
1643 if (TraceCallFixup) {
1644 ResourceMark rm(current);
1645 tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1646 callee_method->print_short_name(tty);
1647 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1648 }
1649
1650 if (ICMissHistogram) {
1651 MutexLocker m(VMStatistic_lock);
1652 RegisterMap reg_map(current,
1653 RegisterMap::UpdateMap::skip,
1654 RegisterMap::ProcessFrames::include,
1655 RegisterMap::WalkContinuation::skip);
1656     frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1657 // produce statistics under the lock
1658 trace_ic_miss(f.pc());
1659 }
1660 #endif
1661
1662 // install an event collector so that when a vtable stub is created the
1663 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1664 // event can't be posted when the stub is created as locks are held
1665 // - instead the event will be deferred until the event collector goes
1666 // out of scope.
1667 JvmtiDynamicCodeEventCollector event_collector;
1668
1669 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1670 RegisterMap reg_map(current,
1671 RegisterMap::UpdateMap::skip,
1672 RegisterMap::ProcessFrames::include,
1673 RegisterMap::WalkContinuation::skip);
1674   frame caller_frame = current->last_frame().sender(&reg_map);
1675 CodeBlob* cb = caller_frame.cb();
1676 nmethod* caller_nm = cb->as_nmethod();
1677
1678 CompiledICLocker ml(caller_nm);
1679 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1680 inline_cache->update(&call_info, receiver()->klass());
1681
1682 return callee_method;
1683 }
1684
1685 //
1686 // Resets a call-site in compiled code so it will get resolved again.
1687 // This routine handles virtual call sites, optimized virtual call
1688 // sites, and static call sites. Typically used to change a call site's
1689 // destination from compiled to interpreted.
1690 //
1691 methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
1692 JavaThread* current = THREAD;
1693 ResourceMark rm(current);
1694 RegisterMap reg_map(current,
1695 RegisterMap::UpdateMap::skip,
1696 RegisterMap::ProcessFrames::include,
1697 RegisterMap::WalkContinuation::skip);
1698 frame stub_frame = current->last_frame();
1699 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1700   frame caller = stub_frame.sender(&reg_map);
1701
1702 // Do nothing if the frame isn't a live compiled frame.
1703 // nmethod could be deoptimized by the time we get here
1704 // so no update to the caller is needed.
1705
1706 if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1707 (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) {
1708
1709 address pc = caller.pc();
1710
1711 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1712 assert(caller_nm != nullptr, "did not find caller nmethod");
1713
1714 // Default call_addr is the location of the "basic" call.
1715     // Determine the address of the call we are reresolving. With
1716 // Inline Caches we will always find a recognizable call.
1717 // With Inline Caches disabled we may or may not find a
1718 // recognizable call. We will always find a call for static
1719 // calls and for optimized virtual calls. For vanilla virtual
1720 // calls it depends on the state of the UseInlineCaches switch.
1721 //
1722 // With Inline Caches disabled we can get here for a virtual call
1723 // for two reasons:
1724 // 1 - calling an abstract method. The vtable for abstract methods
1725 // will run us thru handle_wrong_method and we will eventually
1726     //       end up in the interpreter to throw the AbstractMethodError.
1727 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1728 // call and between the time we fetch the entry address and
1729 // we jump to it the target gets deoptimized. Similar to 1
1730     //       we will wind up in the interpreter (thru a c2i with c2).
1731 //
1732 CompiledICLocker ml(caller_nm);
1733 address call_addr = caller_nm->call_instruction_address(pc);
1734
1735 if (call_addr != nullptr) {
1736 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1737 // bytes back in the instruction stream so we must also check for reloc info.
1738 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1739 bool ret = iter.next(); // Get item
1740 if (ret) {
1741 switch (iter.type()) {
1742 case relocInfo::static_call_type:
1743 case relocInfo::opt_virtual_call_type: {
1744 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1745 cdc->set_to_clean();
1746 break;
1747 }
1748
1749 case relocInfo::virtual_call_type: {
1750 // compiled, dispatched call (which used to call an interpreted method)
1751 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1752 inline_cache->set_to_clean();
1753 break;
1754 }
1755 default:
1756 break;
1757 }
1758 }
1759 }
1760 }
1761
1762 methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1763
1764
1765 #ifndef PRODUCT
1766 AtomicAccess::inc(&_wrong_method_ctr);
1767
1768 if (TraceCallFixup) {
1769 ResourceMark rm(current);
1770 tty->print("handle_wrong_method reresolving call to");
1771 callee_method->print_short_name(tty);
1772 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1773 }
1774 #endif
1775
1776 return callee_method;
1777 }
1778
1779 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1780 // The faulting unsafe accesses should be changed to throw the error
1781 // synchronously instead. Meanwhile the faulting instruction will be
1782 // skipped over (effectively turning it into a no-op) and an
1783 // asynchronous exception will be raised which the thread will
1784 // handle at a later point. If the instruction is a load it will
1785 // return garbage.
1786
1787 // Request an async exception.
1788 thread->set_pending_unsafe_access_error();
1789
1790 // Return address of next instruction to execute.
1956 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
1957
1958 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
1959 if (message == nullptr) {
1960 // Shouldn't happen, but don't cause even more problems if it does
1961 message = const_cast<char*>(caster_klass->external_name());
1962 } else {
1963 jio_snprintf(message,
1964 msglen,
1965 "class %s cannot be cast to class %s (%s%s%s)",
1966 caster_name,
1967 target_name,
1968 caster_klass_description,
1969 klass_separator,
1970 target_klass_description
1971 );
1972 }
1973 return message;
1974 }
1975
1976 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
1977 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
1978 JRT_END
1979
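// Fast path for monitorenter from compiled code: try ObjectSynchronizer::quick_enter()
// first (no thread-state transition), and fall back to the full enter path under
// JRT_BLOCK_NO_ASYNC when that fails or a safepoint is being requested.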
1980 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
1981 if (!SafepointSynchronize::is_synchronizing()) {
1982 // Only try quick_enter() if we're not trying to reach a safepoint
1983 // so that the calling thread reaches the safepoint more quickly.
1984 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
1985 return;
1986 }
1987 }
1988 // NO_ASYNC required because an async exception on the state transition destructor
1989 // would leave you with the lock held and it would never be released.
1990 // The normal monitorenter NullPointerException is thrown without acquiring a lock
1991 // and the model is that an exception implies the method failed.
1992 JRT_BLOCK_NO_ASYNC
1993 Handle h_obj(THREAD, obj);
1994 ObjectSynchronizer::enter(h_obj, lock, current);
1995 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2189 tty->print_cr("Note 1: counter updates are not MT-safe.");
2190 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2191 tty->print_cr(" %% in nested categories are relative to their category");
2192 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2193 tty->cr();
2194
2195 MethodArityHistogram h;
2196 }
2197 #endif
2198
2199 #ifndef PRODUCT
2200 static int _lookups; // number of calls to lookup
2201 static int _equals; // number of buckets checked with matching hash
2202 static int _archived_hits; // number of successful lookups in archived table
2203 static int _runtime_hits; // number of successful lookups in runtime table
2204 #endif
2205
2206 // A simple wrapper class around the calling convention information
2207 // that allows sharing of adapters for the same calling convention.
2208 class AdapterFingerPrint : public MetaspaceObj {
2209 private:
2210 enum {
2211 _basic_type_bits = 4,
2212 _basic_type_mask = right_n_bits(_basic_type_bits),
2213 _basic_types_per_int = BitsPerInt / _basic_type_bits,
2214 };
2215 // TO DO: Consider integrating this with a more global scheme for compressing signatures.
2216   // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
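  // Illustrative example (assuming LP64): the components (T_OBJECT, T_INT, T_LONG,
  // T_VOID) encode to the nibbles (T_LONG, T_INT, T_LONG, T_VOID) = (0xb, 0xa, 0xb, 0xe)
  // via adapter_encoding() below, and pack left-to-right into one int as 0xbabe.
  // With _basic_types_per_int == 8, nine components need length(9) == 2 ints.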
2217
2218 int _length;
2219
2220 static int data_offset() { return sizeof(AdapterFingerPrint); }
2221 int* data_pointer() {
2222 return (int*)((address)this + data_offset());
2223 }
2224
2225   // Private constructor. Use allocate() to get an instance.
2226 AdapterFingerPrint(int total_args_passed, BasicType* sig_bt, int len) {
2227 int* data = data_pointer();
2228 // Pack the BasicTypes with 8 per int
2229 assert(len == length(total_args_passed), "sanity");
2230 _length = len;
2231 int sig_index = 0;
2232 for (int index = 0; index < _length; index++) {
2233 int value = 0;
2234 for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
2235 int bt = adapter_encoding(sig_bt[sig_index++]);
2236 assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2237 value = (value << _basic_type_bits) | bt;
2238 }
2239 data[index] = value;
2240 }
2241 }
2242
2243 // Call deallocate instead
2244 ~AdapterFingerPrint() {
2245 ShouldNotCallThis();
2246 }
2247
2248 static int length(int total_args) {
2249 return (total_args + (_basic_types_per_int-1)) / _basic_types_per_int;
2250 }
2251
2252 static int compute_size_in_words(int len) {
2253 return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(int)));
2254 }
2255
2256 // Remap BasicTypes that are handled equivalently by the adapters.
2257 // These are correct for the current system but someday it might be
2258 // necessary to make this mapping platform dependent.
2259 static int adapter_encoding(BasicType in) {
2260 switch (in) {
2261 case T_BOOLEAN:
2262 case T_BYTE:
2263 case T_SHORT:
2264 case T_CHAR:
2265       // These are all promoted to T_INT in the calling convention
2266 return T_INT;
2267
2268 case T_OBJECT:
2269 case T_ARRAY:
2270 // In other words, we assume that any register good enough for
2271 // an int or long is good enough for a managed pointer.
2272 #ifdef _LP64
2273 return T_LONG;
2274 #else
2275 return T_INT;
2276 #endif
2277
2278 case T_INT:
2279 case T_LONG:
2280 case T_FLOAT:
2281 case T_DOUBLE:
2282 case T_VOID:
2283 return in;
2284
2285 default:
2286 ShouldNotReachHere();
2287 return T_CONFLICT;
2288 }
2289 }
2290
2291 void* operator new(size_t size, size_t fp_size) throw() {
2292 assert(fp_size >= size, "sanity check");
2293 void* p = AllocateHeap(fp_size, mtCode);
2294 memset(p, 0, fp_size);
2295 return p;
2296 }
2297
2298 template<typename Function>
2299 void iterate_args(Function function) {
2300 for (int i = 0; i < length(); i++) {
2301 unsigned val = (unsigned)value(i);
2302 // args are packed so that first/lower arguments are in the highest
2303 // bits of each int value, so iterate from highest to the lowest
2304 for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2305 unsigned v = (val >> j) & _basic_type_mask;
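        // A zero nibble is padding in a partially filled int (no valid
        // adapter_encoding() result is 0), so skip it.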
2306 if (v == 0) {
2307 continue;
2308 }
2309 function(v);
2310 }
2311 }
2312 }
2313
2314 public:
2315 static AdapterFingerPrint* allocate(int total_args_passed, BasicType* sig_bt) {
2316 int len = length(total_args_passed);
2317 int size_in_bytes = BytesPerWord * compute_size_in_words(len);
2318 AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(total_args_passed, sig_bt, len);
2319 assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
2320 return afp;
2321 }
2322
2323 static void deallocate(AdapterFingerPrint* fp) {
2324 FreeHeap(fp);
2325 }
2326
2327 int value(int index) {
2328 int* data = data_pointer();
2329 return data[index];
2330 }
2331
2332 int length() {
2333 return _length;
2334 }
2335
2336 unsigned int compute_hash() {
2337 int hash = 0;
2338 for (int i = 0; i < length(); i++) {
2339 int v = value(i);
2340       // Add an arithmetic operation to the hash, like +3, to improve hashing
2341 hash = ((hash << 8) ^ v ^ (hash >> 5)) + 3;
2342 }
2343 return (unsigned int)hash;
2344 }
2345
2346 const char* as_string() {
2347 stringStream st;
2348 st.print("0x");
2349 for (int i = 0; i < length(); i++) {
2350 st.print("%x", value(i));
2351 }
2352 return st.as_string();
2353 }
2354
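  // Decodes the fingerprint into Java descriptor letters. Illustrative example: an
  // instance method taking (int, long) has components (T_OBJECT, T_INT, T_LONG, T_VOID)
  // and prints as "LIJ"; a long-encoded value followed by T_VOID is a real long ("J"),
  // otherwise it was a managed pointer ("L").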
2355 const char* as_basic_args_string() {
2356 stringStream st;
2357 bool long_prev = false;
2358 iterate_args([&] (int arg) {
2359 if (long_prev) {
2360 long_prev = false;
2361 if (arg == T_VOID) {
2362 st.print("J");
2363 } else {
2364 st.print("L");
2365 }
2366 }
2367 switch (arg) {
2368 case T_INT: st.print("I"); break;
2369 case T_LONG: long_prev = true; break;
2370 case T_FLOAT: st.print("F"); break;
2371 case T_DOUBLE: st.print("D"); break;
2372 case T_VOID: break;
2373 default: ShouldNotReachHere();
2374 }
2375 });
2376 if (long_prev) {
2377 st.print("L");
2378 }
2379 return st.as_string();
2380 }
2381
2382 BasicType* as_basic_type(int& nargs) {
2383 nargs = 0;
2384 GrowableArray<BasicType> btarray;
2385 bool long_prev = false;
2386
2387 iterate_args([&] (int arg) {
2388 if (long_prev) {
2389 long_prev = false;
2390 if (arg == T_VOID) {
2391 btarray.append(T_LONG);
2392 } else {
2393 btarray.append(T_OBJECT); // it could be T_ARRAY; it shouldn't matter
2394 }
2395 }
2396 switch (arg) {
2397 case T_INT: // fallthrough
2398 case T_FLOAT: // fallthrough
2399 case T_DOUBLE:
2400 case T_VOID:
2401 btarray.append((BasicType)arg);
2402 break;
2403 case T_LONG:
2404 long_prev = true;
2405 break;
2406 default: ShouldNotReachHere();
2407 }
2408 });
2409
2410 if (long_prev) {
2411 btarray.append(T_OBJECT);
2412 }
2413
2414 nargs = btarray.length();
2415 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nargs);
2416 int index = 0;
2417 GrowableArrayIterator<BasicType> iter = btarray.begin();
2418 while (iter != btarray.end()) {
2419 sig_bt[index++] = *iter;
2420 ++iter;
2421 }
2422 assert(index == btarray.length(), "sanity check");
2423 #ifdef ASSERT
2424 {
2425 AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(nargs, sig_bt);
2426 assert(this->equals(compare_fp), "sanity check");
2427 AdapterFingerPrint::deallocate(compare_fp);
2428 }
2429 #endif
2430 return sig_bt;
2431 }
2432
2433 bool equals(AdapterFingerPrint* other) {
2434 if (other->_length != _length) {
2435 return false;
2436 } else {
2437 for (int i = 0; i < _length; i++) {
2438 if (value(i) != other->value(i)) {
2439 return false;
2440 }
2441 }
2442 }
2443 return true;
2444 }
2445
2446 // methods required by virtue of being a MetaspaceObj
2447 void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
2448 int size() const { return compute_size_in_words(_length); }
2449 MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2450
2451 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2452 NOT_PRODUCT(_equals++);
2453 return fp1->equals(fp2);
2454 }
2455
2456 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2457 return fp->compute_hash();
2458 }
2461 #if INCLUDE_CDS
2462 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2463 return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2464 }
2465
2466 class ArchivedAdapterTable : public OffsetCompactHashtable<
2467 AdapterFingerPrint*,
2468 AdapterHandlerEntry*,
2469 adapter_fp_equals_compact_hashtable_entry> {};
2470 #endif // INCLUDE_CDS
2471
2472 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2473 using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2474 AnyObj::C_HEAP, mtCode,
2475 AdapterFingerPrint::compute_hash,
2476 AdapterFingerPrint::equals>;
2477 static AdapterHandlerTable* _adapter_handler_table;
2478 static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2479
2480 // Find an entry with the same fingerprint if it exists
2481 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(int total_args_passed, BasicType* sig_bt) {
2482 NOT_PRODUCT(_lookups++);
2483 assert_lock_strong(AdapterHandlerLibrary_lock);
2484 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2485 AdapterHandlerEntry* entry = nullptr;
2486 #if INCLUDE_CDS
2487 // if we are building the archive then the archived adapter table is
2488 // not valid and we need to use the ones added to the runtime table
2489 if (AOTCodeCache::is_using_adapter()) {
2490     // Search archived table first. It is a read-only table, so it can be searched without a lock.
2491 entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2492 #ifndef PRODUCT
2493 if (entry != nullptr) {
2494 _archived_hits++;
2495 }
2496 #endif
2497 }
2498 #endif // INCLUDE_CDS
2499 if (entry == nullptr) {
2500 assert_lock_strong(AdapterHandlerLibrary_lock);
2501 AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2502 if (entry_p != nullptr) {
2503 entry = *entry_p;
2504 assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2521 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2522 ts.print(tty, "AdapterHandlerTable");
2523 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2524 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2525 int total_hits = _archived_hits + _runtime_hits;
2526 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2527 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2528 }
2529 #endif
2530
2531 // ---------------------------------------------------------------------------
2532 // Implementation of AdapterHandlerLibrary
2533 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2534 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2535 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2536 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2537 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2538 #if INCLUDE_CDS
2539 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2540 #endif // INCLUDE_CDS
2541 static const int AdapterHandlerLibrary_size = 16*K;
2542 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2543 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2544
2545 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2546 assert(_buffer != nullptr, "should be initialized");
2547 return _buffer;
2548 }
2549
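// Notify profilers about a freshly created adapter blob. Forte and JVMTI
// DYNAMIC_CODE_GENERATED consumers otherwise have no way to attribute this
// dynamically generated code.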
2550 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2551 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2552 AdapterBlob* adapter_blob = entry->adapter_blob();
2553 char blob_id[256];
2554 jio_snprintf(blob_id,
2555 sizeof(blob_id),
2556 "%s(%s)",
2557 adapter_blob->name(),
2558 entry->fingerprint()->as_string());
2559 if (Forte::is_enabled()) {
2560 Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2561 }
2569 void AdapterHandlerLibrary::initialize() {
2570 {
2571 ResourceMark rm;
2572 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2573 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2574 }
2575
2576 #if INCLUDE_CDS
2577 // Link adapters in AOT Cache to their code in AOT Code Cache
2578 if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
2579 link_aot_adapters();
2580 lookup_simple_adapters();
2581 return;
2582 }
2583 #endif // INCLUDE_CDS
2584
2585 ResourceMark rm;
2586 {
2587 MutexLocker mu(AdapterHandlerLibrary_lock);
2588
2589 _no_arg_handler = create_adapter(0, nullptr);
2590
2591 BasicType obj_args[] = { T_OBJECT };
2592 _obj_arg_handler = create_adapter(1, obj_args);
2593
2594 BasicType int_args[] = { T_INT };
2595 _int_arg_handler = create_adapter(1, int_args);
2596
2597 BasicType obj_int_args[] = { T_OBJECT, T_INT };
2598 _obj_int_arg_handler = create_adapter(2, obj_int_args);
2599
2600 BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
2601 _obj_obj_arg_handler = create_adapter(2, obj_obj_args);
2602
2603 // we should always get an entry back but we don't have any
2604 // associated blob on Zero
2605 assert(_no_arg_handler != nullptr &&
2606 _obj_arg_handler != nullptr &&
2607 _int_arg_handler != nullptr &&
2608 _obj_int_arg_handler != nullptr &&
2609 _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
2610 }
2611
2612 // Outside of the lock
2613 #ifndef ZERO
2614 // no blobs to register when we are on Zero
2615 post_adapter_creation(_no_arg_handler);
2616 post_adapter_creation(_obj_arg_handler);
2617 post_adapter_creation(_int_arg_handler);
2618 post_adapter_creation(_obj_int_arg_handler);
2619 post_adapter_creation(_obj_obj_arg_handler);
2620 #endif // ZERO
2621 }
2622
2623 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
2624 uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
2625 assert(id > 0, "we can never overflow because AOT cache cannot contain more than 2^32 methods");
2626 return AdapterHandlerEntry::allocate(id, fingerprint);
2627 }
2628
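// Fast-path mapping from a method's signature to one of the pre-generated trivial
// adapters. Illustrative examples: a static method "(I)V" passes a single int and
// gets _int_arg_handler, while the same descriptor on an instance method passes
// (receiver, int) and gets _obj_int_arg_handler.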
2629 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2630 int total_args_passed = method->size_of_parameters(); // All args on stack
2631 if (total_args_passed == 0) {
2632 return _no_arg_handler;
2633 } else if (total_args_passed == 1) {
2634 if (!method->is_static()) {
2635 return _obj_arg_handler;
2636 }
2637 switch (method->signature()->char_at(1)) {
2638 case JVM_SIGNATURE_CLASS:
2639 case JVM_SIGNATURE_ARRAY:
2640 return _obj_arg_handler;
2641 case JVM_SIGNATURE_INT:
2642 case JVM_SIGNATURE_BOOLEAN:
2643 case JVM_SIGNATURE_CHAR:
2644 case JVM_SIGNATURE_BYTE:
2645 case JVM_SIGNATURE_SHORT:
2646 return _int_arg_handler;
2647 }
2648 } else if (total_args_passed == 2 &&
2649 !method->is_static()) {
2650 switch (method->signature()->char_at(1)) {
2651 case JVM_SIGNATURE_CLASS:
2652 case JVM_SIGNATURE_ARRAY:
2653 return _obj_obj_arg_handler;
2654 case JVM_SIGNATURE_INT:
2655 case JVM_SIGNATURE_BOOLEAN:
2656 case JVM_SIGNATURE_CHAR:
2657 case JVM_SIGNATURE_BYTE:
2658 case JVM_SIGNATURE_SHORT:
2659 return _obj_int_arg_handler;
2660 }
2661 }
2662 return nullptr;
2663 }
2664
2665 class AdapterSignatureIterator : public SignatureIterator {
2666 private:
2667 BasicType stack_sig_bt[16];
2668 BasicType* sig_bt;
2669 int index;
2670
2671 public:
2672 AdapterSignatureIterator(Symbol* signature,
2673 fingerprint_t fingerprint,
2674 bool is_static,
2675 int total_args_passed) :
2676 SignatureIterator(signature, fingerprint),
2677 index(0)
2678 {
2679 sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2680 if (!is_static) { // Pass in receiver first
2681 sig_bt[index++] = T_OBJECT;
2682 }
2683 do_parameters_on(this);
2684 }
2685
2686 BasicType* basic_types() {
2687 return sig_bt;
2688 }
2689
2690 #ifdef ASSERT
2691 int slots() {
2692 return index;
2693 }
2694 #endif
2695
2696 private:
2697
2698 friend class SignatureIterator; // so do_parameters_on can call do_type
2699 void do_type(BasicType type) {
2700 sig_bt[index++] = type;
2701 if (type == T_LONG || type == T_DOUBLE) {
2702 sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots
2703 }
2704 }
2705 };
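// Usage sketch: for a non-static method with signature "(IJ)V" the iterator yields
// sig_bt = { T_OBJECT, T_INT, T_LONG, T_VOID } (receiver first, plus a T_VOID
// filler for the second Java slot of the long), so slots() == 4.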
2706
2707
2708 const char* AdapterHandlerEntry::_entry_names[] = {
2709 "i2c", "c2i", "c2i_unverified", "c2i_no_clinit_check"
2710 };
2711
2712 #ifdef ASSERT
2713 void AdapterHandlerLibrary::verify_adapter_sharing(int total_args_passed, BasicType* sig_bt, AdapterHandlerEntry* cached_entry) {
2714 // we can only check for the same code if there is any
2715 #ifndef ZERO
2716 AdapterHandlerEntry* comparison_entry = create_adapter(total_args_passed, sig_bt, true);
2717 assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
2718 assert(comparison_entry->compare_code(cached_entry), "code must match");
2719 // Release the one just created
2720 AdapterHandlerEntry::deallocate(comparison_entry);
2721 #endif // ZERO
2722 }
2723 #endif /* ASSERT */
2724
2725 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2726 assert(!method->is_abstract(), "abstract methods do not have adapters");
2727 // Use customized signature handler. Need to lock around updates to
2728 // the _adapter_handler_table (it is not safe for concurrent readers
2729 // and a single writer: this could be fixed if it becomes a
2730 // problem).
2731
2732 // Fast-path for trivial adapters
2733 AdapterHandlerEntry* entry = get_simple_adapter(method);
2734 if (entry != nullptr) {
2735 return entry;
2736 }
2737
2738 ResourceMark rm;
2739 bool new_entry = false;
2740
2741 // Fill in the signature array, for the calling-convention call.
2742 int total_args_passed = method->size_of_parameters(); // All args on stack
2743
2744 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2745 method->is_static(), total_args_passed);
2746 assert(si.slots() == total_args_passed, "");
2747 BasicType* sig_bt = si.basic_types();
2748 {
2749 MutexLocker mu(AdapterHandlerLibrary_lock);
2750
2751 // Lookup method signature's fingerprint
2752 entry = lookup(total_args_passed, sig_bt);
2753
2754 if (entry != nullptr) {
2755 #ifndef ZERO
2756 assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
2757 #endif
2758 #ifdef ASSERT
2759 if (!entry->in_aot_cache() && VerifyAdapterSharing) {
2760 verify_adapter_sharing(total_args_passed, sig_bt, entry);
2761 }
2762 #endif
2763 } else {
2764 entry = create_adapter(total_args_passed, sig_bt);
2765 if (entry != nullptr) {
2766 new_entry = true;
2767 }
2768 }
2769 }
2770
2771 // Outside of the lock
2772 if (new_entry) {
2773 post_adapter_creation(entry);
2774 }
2775 return entry;
2776 }
2777
2778 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
2779 ResourceMark rm;
2780 const char* name = AdapterHandlerLibrary::name(handler);
2781 const uint32_t id = AdapterHandlerLibrary::id(handler);
2782
2783 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
2784 if (blob != nullptr) {
2799 }
2800 insts_size = adapter_blob->code_size();
2801 st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
2802 handler->fingerprint()->as_basic_args_string(),
2803 handler->fingerprint()->as_string(), insts_size);
2804 st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
2805 if (Verbose || PrintStubCode) {
2806 address first_pc = adapter_blob->content_begin();
2807 if (first_pc != nullptr) {
2808 Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
2809 st->cr();
2810 }
2811 }
2812 }
2813 #endif // PRODUCT
2814
2815 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
2816 int entry_offset[AdapterBlob::ENTRY_COUNT]) {
2817 entry_offset[AdapterBlob::I2C] = 0;
2818 entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];
2819 entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];
2820 if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
2821 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
2822 } else {
2823 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
2824 }
2825 }
2826
2827 bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
2828 int total_args_passed,
2829 BasicType* sig_bt,
2830 bool is_transient) {
2831 if (log_is_enabled(Info, perf, class, link)) {
2832 ClassLoader::perf_method_adapters_count()->inc();
2833 }
2834
2835 #ifndef ZERO
2836 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2837 CodeBuffer buffer(buf);
2838 short buffer_locs[20];
2839 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2840 sizeof(buffer_locs)/sizeof(relocInfo));
2841 MacroAssembler masm(&buffer);
2842 VMRegPair stack_regs[16];
2843 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2844
2845 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2846 int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2847 address entry_address[AdapterBlob::ENTRY_COUNT];
2848 SharedRuntime::generate_i2c2i_adapters(&masm,
2849 total_args_passed,
2850 comp_args_on_stack,
2851 sig_bt,
2852 regs,
2853 entry_address);
2854   // On Zero there is no code to save and no need to create a blob
2855   // and/or relocate the handler.
2856 int entry_offset[AdapterBlob::ENTRY_COUNT];
2857 address_to_offset(entry_address, entry_offset);
2858 #ifdef ASSERT
2859 if (VerifyAdapterSharing) {
2860 handler->save_code(buf->code_begin(), buffer.insts_size());
2861 if (is_transient) {
2862 return true;
2863 }
2864 }
2865 #endif
2866 AdapterBlob* adapter_blob = AdapterBlob::create(&buffer, entry_offset);
2867 if (adapter_blob == nullptr) {
2868 // CodeCache is full, disable compilation
2869 // Ought to log this but compile log is only per compile thread
2870     // and we're some nondescript Java thread.
2871 return false;
2872 }
2873 handler->set_adapter_blob(adapter_blob);
2874 if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
2875 // try to save generated code
2876 const char* name = AdapterHandlerLibrary::name(handler);
2877 const uint32_t id = AdapterHandlerLibrary::id(handler);
2878 bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
2879 assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
2880 }
2881 #endif // ZERO
2882
2883 #ifndef PRODUCT
2884 // debugging support
2885 if (PrintAdapterHandlers || PrintStubCode) {
2886 print_adapter_handler_info(tty, handler);
2887 }
2888 #endif
2889
2890 return true;
2891 }
2892
2893 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(int total_args_passed,
2894 BasicType* sig_bt,
2895 bool is_transient) {
2896 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2897 AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
2898 if (!generate_adapter_code(handler, total_args_passed, sig_bt, is_transient)) {
2899 AdapterHandlerEntry::deallocate(handler);
2900 return nullptr;
2901 }
2902 if (!is_transient) {
2903 assert_lock_strong(AdapterHandlerLibrary_lock);
2904 _adapter_handler_table->put(fp, handler);
2905 }
2906 return handler;
2907 }
2908
2909 #if INCLUDE_CDS
2910 void AdapterHandlerEntry::remove_unshareable_info() {
2911 #ifdef ASSERT
2912 _saved_code = nullptr;
2913 _saved_code_length = 0;
2914 #endif // ASSERT
2915 _adapter_blob = nullptr;
2916 _linked = false;
2917 }
2918
2981 // This method is used during a production run to link archived adapters (stored in the AOT Cache)
2982 // to their code in the AOT Code Cache.
2983 void AdapterHandlerEntry::link() {
2984 ResourceMark rm;
2985 assert(_fingerprint != nullptr, "_fingerprint must not be null");
2986 bool generate_code = false;
2987 // Generate code only if AOTCodeCache is not available, or
2988 // caching adapters is disabled, or we fail to link
2989 // the AdapterHandlerEntry to its code in the AOTCodeCache
2990 if (AOTCodeCache::is_using_adapter()) {
2991 AdapterHandlerLibrary::link_aot_adapter_handler(this);
2992 // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
2993 if (_adapter_blob == nullptr) {
2994 log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
2995 generate_code = true;
2996 }
2997 } else {
2998 generate_code = true;
2999 }
3000 if (generate_code) {
3001 int nargs;
3002 BasicType* bt = _fingerprint->as_basic_type(nargs);
3003 if (!AdapterHandlerLibrary::generate_adapter_code(this, nargs, bt, /* is_transient */ false)) {
3004 // Don't throw exceptions during VM initialization because java.lang.* classes
3005 // might not have been initialized, causing problems when constructing the
3006 // Java exception object.
3007 vm_exit_during_initialization("Out of space in CodeCache for adapters");
3008 }
3009 }
3010 if (_adapter_blob != nullptr) {
3011 post_adapter_creation(this);
3012 }
3013 assert(_linked, "AdapterHandlerEntry must now be linked");
3014 }
3015
3016 void AdapterHandlerLibrary::link_aot_adapters() {
3017 uint max_id = 0;
3018 assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
3019 /* It is possible that some adapters generated in assembly phase are not stored in the cache.
3020 * That implies adapter ids of the adapters in the cache may not be contiguous.
3021 * If the size of the _aot_adapter_handler_table is used to initialize _id_counter, then it may
3022 * result in collision of adapter ids between AOT stored handlers and runtime generated handlers.
3023 * To avoid such situation, initialize the _id_counter with the largest adapter id among the AOT stored handlers.
3024 */
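  // Illustrative example: if the cache stored adapters with ids {1, 3, 7}, _id_counter
  // starts at 7 and the first runtime-created entry receives id 8.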
3025 _aot_adapter_handler_table.iterate([&](AdapterHandlerEntry* entry) {
3026 assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
3027 entry->link();
3028 max_id = MAX2(max_id, entry->id());
3029 });
3030 // Set adapter id to the maximum id found in the AOTCache
3031 assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
3032 _id_counter = max_id;
3033 }
3034
3035 // This method is called during a production run to look up simple adapters
3036 // in the archived adapter handler table
3037 void AdapterHandlerLibrary::lookup_simple_adapters() {
3038 assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");
3039
3040 MutexLocker mu(AdapterHandlerLibrary_lock);
3041 _no_arg_handler = lookup(0, nullptr);
3042
3043 BasicType obj_args[] = { T_OBJECT };
3044 _obj_arg_handler = lookup(1, obj_args);
3045
3046 BasicType int_args[] = { T_INT };
3047 _int_arg_handler = lookup(1, int_args);
3048
3049 BasicType obj_int_args[] = { T_OBJECT, T_INT };
3050 _obj_int_arg_handler = lookup(2, obj_int_args);
3051
3052 BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
3053 _obj_obj_arg_handler = lookup(2, obj_obj_args);
3054
3055 assert(_no_arg_handler != nullptr &&
3056 _obj_arg_handler != nullptr &&
3057 _int_arg_handler != nullptr &&
3058 _obj_int_arg_handler != nullptr &&
3059 _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
3060 assert(_no_arg_handler->is_linked() &&
3061 _obj_arg_handler->is_linked() &&
3062 _int_arg_handler->is_linked() &&
3063 _obj_int_arg_handler->is_linked() &&
3064 _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
3065 }
3066 #endif // INCLUDE_CDS
3067
3068 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3069 LogStreamHandle(Trace, aot) lsh;
3070 if (lsh.is_enabled()) {
3071 lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3072 lsh.cr();
3073 }
3074 it->push(&_fingerprint);
3075 }
3076
3077 AdapterHandlerEntry::~AdapterHandlerEntry() {
3078 if (_fingerprint != nullptr) {
3079 AdapterFingerPrint::deallocate(_fingerprint);
3080 _fingerprint = nullptr;
3081 }
3082 #ifdef ASSERT
3083 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3084 #endif
3085 FreeHeap(this);
3086 }
3087
3088
3089 #ifdef ASSERT
3090 // Capture the code before relocation so that it can be compared
3091 // against other versions. If the code is captured after relocation
3092 // then relative instructions won't be equivalent.
3093 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3094 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3095 _saved_code_length = length;
3096 memcpy(_saved_code, buffer, length);
3097 }
3098
3099
3100 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3101 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3149
3150 struct { double data[20]; } locs_buf;
3151 struct { double data[20]; } stubs_locs_buf;
3152 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3153 #if defined(AARCH64) || defined(PPC64)
3154 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3155 // in the constant pool to ensure ordering between the barrier and oops
3156 // accesses. For native_wrappers we need a constant.
3157 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3158 // static java call that is resolved in the runtime.
3159 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3160 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3161 }
3162 #endif
3163 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3164 MacroAssembler _masm(&buffer);
3165
3166 // Fill in the signature array, for the calling-convention call.
3167 const int total_args_passed = method->size_of_parameters();
3168
3169 VMRegPair stack_regs[16];
3170 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3171
3172 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
3173 method->is_static(), total_args_passed);
3174 BasicType* sig_bt = si.basic_types();
3175 assert(si.slots() == total_args_passed, "");
3176 BasicType ret_type = si.return_type();
3177
3178 // Now get the compiled-Java arguments layout.
3179 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3180
3181 // Generate the compiled-to-native wrapper code
3182 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3183
3184 if (nm != nullptr) {
3185 {
3186 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3187 if (nm->make_in_use()) {
3188 method->set_code(method, nm);
3189 }
3190 }
3191
3192 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3193 if (directive->PrintAssemblyOption) {
3194 nm->print_code();
3195 }
3196 DirectivesStack::release(directive);
3424 if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3425 found = true;
3426 st->print("Adapter for signature: ");
3427 a->print_adapter_on(st);
3428 return true;
3429 } else {
3430 return false; // keep looking
3431 }
3432 };
3433 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3434 _adapter_handler_table->iterate(findblob_runtime_table);
3435 }
3436 assert(found, "Should have found handler");
3437 }
3438
3439 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3440 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3441 if (adapter_blob() != nullptr) {
3442 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3443 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3444 st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3445 if (get_c2i_no_clinit_check_entry() != nullptr) {
3446 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3447 }
3448 }
3449 st->cr();
3450 }
3451
3452 #ifndef PRODUCT
3453
3454 void AdapterHandlerLibrary::print_statistics() {
3455 print_table_statistics();
3456 }
3457
3458 #endif /* PRODUCT */
3459
3460 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3461 assert(current == JavaThread::current(), "pre-condition");
3462 StackOverflow* overflow_state = current->stack_overflow_state();
3463 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3464 overflow_state->set_reserved_stack_activation(current->stack_base());
3511 event.set_method(method);
3512 event.commit();
3513 }
3514 }
3515 }
3516 return activation;
3517 }
3518
3519 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3520 // After any safepoint, just before going back to compiled code,
3521 // we inform the GC that we will be doing initializing writes to
3522 // this object in the future without emitting card-marks, so
3523 // GC may take any compensating steps.
3524
3525 oop new_obj = current->vm_result_oop();
3526 if (new_obj == nullptr) return;
3527
3528 BarrierSet *bs = BarrierSet::barrier_set();
3529 bs->on_slowpath_allocation_exit(current, new_obj);
3530 }
28 #include "classfile/javaClasses.inline.hpp"
29 #include "classfile/stringTable.hpp"
30 #include "classfile/vmClasses.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/aotCodeCache.hpp"
33 #include "code/codeCache.hpp"
34 #include "code/compiledIC.hpp"
35 #include "code/nmethod.inline.hpp"
36 #include "code/scopeDesc.hpp"
37 #include "code/vtableStubs.hpp"
38 #include "compiler/abstractCompiler.hpp"
39 #include "compiler/compileBroker.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "gc/shared/barrierSet.hpp"
42 #include "gc/shared/collectedHeap.hpp"
43 #include "interpreter/interpreter.hpp"
44 #include "interpreter/interpreterRuntime.hpp"
45 #include "jfr/jfrEvents.hpp"
46 #include "jvm.h"
47 #include "logging/log.hpp"
48 #include "memory/oopFactory.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "memory/universe.hpp"
51 #include "metaprogramming/primitiveConversions.hpp"
52 #include "oops/access.hpp"
53 #include "oops/fieldStreams.inline.hpp"
54 #include "oops/inlineKlass.inline.hpp"
55 #include "oops/klass.hpp"
56 #include "oops/method.inline.hpp"
57 #include "oops/objArrayKlass.hpp"
58 #include "oops/objArrayOop.inline.hpp"
59 #include "oops/oop.inline.hpp"
60 #include "prims/forte.hpp"
61 #include "prims/jvmtiExport.hpp"
62 #include "prims/jvmtiThreadState.hpp"
63 #include "prims/methodHandles.hpp"
64 #include "prims/nativeLookup.hpp"
65 #include "runtime/arguments.hpp"
66 #include "runtime/atomicAccess.hpp"
67 #include "runtime/basicLock.inline.hpp"
68 #include "runtime/frame.inline.hpp"
69 #include "runtime/handles.inline.hpp"
70 #include "runtime/init.hpp"
71 #include "runtime/interfaceSupport.inline.hpp"
72 #include "runtime/java.hpp"
73 #include "runtime/javaCalls.hpp"
74 #include "runtime/jniHandles.inline.hpp"
75 #include "runtime/perfData.hpp"
76 #include "runtime/sharedRuntime.hpp"
77 #include "runtime/signature.hpp"
78 #include "runtime/stackWatermarkSet.hpp"
79 #include "runtime/stubRoutines.hpp"
80 #include "runtime/synchronizer.inline.hpp"
81 #include "runtime/timerTrace.hpp"
82 #include "runtime/vframe.inline.hpp"
83 #include "runtime/vframeArray.hpp"
84 #include "runtime/vm_version.hpp"
85 #include "utilities/copy.hpp"
86 #include "utilities/dtrace.hpp"
87 #include "utilities/events.hpp"
88 #include "utilities/globalDefinitions.hpp"
89 #include "utilities/hashTable.hpp"
90 #include "utilities/macros.hpp"
91 #include "utilities/xmlstream.hpp"
92 #ifdef COMPILER1
93 #include "c1/c1_Runtime1.hpp"
94 #endif
95 #if INCLUDE_JFR
96 #include "jfr/jfr.inline.hpp"
97 #endif
1218 // for a call currently in progress, i.e., arguments have been pushed on the stack
1219 // but the callee has not been invoked yet. The caller frame must be compiled.
1220 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1221 CallInfo& callinfo, TRAPS) {
1222 Handle receiver;
1223 Handle nullHandle; // create a handy null handle for exception returns
1224 JavaThread* current = THREAD;
1225
1226 assert(!vfst.at_end(), "Java frame must exist");
1227
1228 // Find caller and bci from vframe
1229 methodHandle caller(current, vfst.method());
1230 int bci = vfst.bci();
1231
1232 if (caller->is_continuation_enter_intrinsic()) {
1233 bc = Bytecodes::_invokestatic;
1234 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1235 return receiver;
1236 }
1237
1238 // The substitutability test implementation piggybacks on static call resolution
1239 Bytecodes::Code code = caller->java_code_at(bci);
1240 if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
1241 bc = Bytecodes::_invokestatic;
1242 methodHandle attached_method(THREAD, extract_attached_method(vfst));
1243 assert(attached_method.not_null(), "must have attached method");
1244 vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
1245 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
1246 #ifdef ASSERT
1247 Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(vmSymbols::isSubstitutable_name(), vmSymbols::object_object_boolean_signature());
1248 assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
1249 #endif
1250 return receiver;
1251 }
1252
1253 Bytecode_invoke bytecode(caller, bci);
1254 int bytecode_index = bytecode.index();
1255 bc = bytecode.invoke_code();
1256
1257 methodHandle attached_method(current, extract_attached_method(vfst));
1258 if (attached_method.not_null()) {
1259 Method* callee = bytecode.static_target(CHECK_NH);
1260 vmIntrinsics::ID id = callee->intrinsic_id();
1261 // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual call,
1262 // it attaches the statically resolved method to the call site.
1263 if (MethodHandles::is_signature_polymorphic(id) &&
1264 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1265 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1266
1267 // Adjust invocation mode according to the attached method.
1268 switch (bc) {
1269 case Bytecodes::_invokevirtual:
1270 if (attached_method->method_holder()->is_interface()) {
1271 bc = Bytecodes::_invokeinterface;
1272 }
1273 break;
1274 case Bytecodes::_invokeinterface:
1275 if (!attached_method->method_holder()->is_interface()) {
1276 bc = Bytecodes::_invokevirtual;
1277 }
1278 break;
1279 case Bytecodes::_invokehandle:
1280 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1281 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1282 : Bytecodes::_invokevirtual;
1283 }
1284 break;
1285 default:
1286 break;
1287 }
1288 } else {
1289 assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1290 if (!attached_method->method_holder()->is_inline_klass()) {
1291 // Ignore the attached method in this case so as not to confuse the code below
1292 attached_method = methodHandle(current, nullptr);
1293 }
1294 }
1295 }
1296
1297 assert(bc != Bytecodes::_illegal, "not initialized");
1298
1299 bool has_receiver = bc != Bytecodes::_invokestatic &&
1300 bc != Bytecodes::_invokedynamic &&
1301 bc != Bytecodes::_invokehandle;
1302 bool check_null_and_abstract = true;
1303
1304 // Find receiver for non-static call
1305 if (has_receiver) {
1306 // This register map must be updated since we need to find the receiver for
1307 // compiled frames. The receiver might be in a register.
1308 RegisterMap reg_map2(current,
1309 RegisterMap::UpdateMap::include,
1310 RegisterMap::ProcessFrames::include,
1311 RegisterMap::WalkContinuation::skip);
1312 frame stubFrame = current->last_frame();
1313 // Caller-frame is a compiled frame
1314 frame callerFrame = stubFrame.sender(&reg_map2);
1315
1316 Method* callee = attached_method();
1317 if (callee == nullptr) {
1318 callee = bytecode.static_target(CHECK_NH);
1319 if (callee == nullptr) {
1320 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1321 }
1322 }
1323 bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->as_nmethod()->is_compiled_by_c1();
1324 if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
1325 // If the receiver is an inline type that is passed as fields, no oop is available.
1326 // Resolve the call without receiver null checking.
1327 assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
1328 assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
1329 if (bc == Bytecodes::_invokeinterface) {
1330 bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
1331 }
1332 check_null_and_abstract = false;
1333 } else {
1334 // Retrieve from a compiled argument list
1335 receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1336 assert(oopDesc::is_oop_or_null(receiver()), "");
1337 if (receiver.is_null()) {
1338 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1339 }
1340 }
1341 }
1342
1343 // Resolve method
1344 if (attached_method.not_null()) {
1345 // Parameterized by attached method.
1346 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
1347 } else {
1348 // Parameterized by bytecode.
1349 constantPoolHandle constants(current, caller->constants());
1350 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1351 }
1352
1353 #ifdef ASSERT
1354 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1355 if (has_receiver && check_null_and_abstract) {
1356 assert(receiver.not_null(), "should have thrown exception");
1357 Klass* receiver_klass = receiver->klass();
1358 Klass* rk = nullptr;
1359 if (attached_method.not_null()) {
1360 // In case there's resolved method attached, use its holder during the check.
1361 rk = attached_method->method_holder();
1362 } else {
1363 // Klass is already loaded.
1364 constantPoolHandle constants(current, caller->constants());
1365 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1366 }
1367 Klass* static_receiver_klass = rk;
1368 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1369 "actual receiver must be subclass of static receiver klass");
1370 if (receiver_klass->is_instance_klass()) {
1371 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1372 tty->print_cr("ERROR: Klass not yet initialized!!");
1373 receiver_klass->print();
1374 }
1375 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1376 }
1377 }
1378 #endif
1379
1380 return receiver;
1381 }
1382
1383 methodHandle SharedRuntime::find_callee_method(bool& caller_does_not_scalarize, TRAPS) {
1384 JavaThread* current = THREAD;
1385 ResourceMark rm(current);
1386 // We first need to check whether any Java activations (compiled, interpreted)
1387 // exist on the stack since the last JavaCall. If not, we need
1388 // to get the target method from the JavaCall wrapper.
1389 vframeStream vfst(current, true); // Do not skip any javaCalls
1390 methodHandle callee_method;
1391 if (vfst.at_end()) {
1392 // No Java frames were found on stack since we did the JavaCall.
1393 // Hence the stack can only contain an entry_frame. We need to
1394 // find the target method from the stub frame.
1395 RegisterMap reg_map(current,
1396 RegisterMap::UpdateMap::skip,
1397 RegisterMap::ProcessFrames::include,
1398 RegisterMap::WalkContinuation::skip);
1399 frame fr = current->last_frame();
1400 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1401 fr = fr.sender(&reg_map);
1402 assert(fr.is_entry_frame(), "must be");
1403 // fr is now pointing to the entry frame.
1404 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1405 } else {
1406 Bytecodes::Code bc;
1407 CallInfo callinfo;
1408 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1409 // Calls via mismatching methods are always non-scalarized
1410 if (callinfo.resolved_method()->mismatch()) {
1411 caller_does_not_scalarize = true;
1412 }
1413 callee_method = methodHandle(current, callinfo.selected_method());
1414 }
1415 assert(callee_method()->is_method(), "must be");
1416 return callee_method;
1417 }
1418
1419 // Resolves a call.
1420 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1421 JavaThread* current = THREAD;
1422 ResourceMark rm(current);
1423 RegisterMap cbl_map(current,
1424 RegisterMap::UpdateMap::skip,
1425 RegisterMap::ProcessFrames::include,
1426 RegisterMap::WalkContinuation::skip);
1427 frame caller_frame = current->last_frame().sender(&cbl_map);
1428
1429 CodeBlob* caller_cb = caller_frame.cb();
1430 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1431 nmethod* caller_nm = caller_cb->as_nmethod();
1432
1433 // determine call info & receiver
1434 // note: a) receiver is null for static calls
1435 // b) an exception is thrown if receiver is null for non-static calls
1436 CallInfo call_info;
1437 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1438 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1439
1440 NoSafepointVerifier nsv;
1441
1442 methodHandle callee_method(current, call_info.selected_method());
1443 // Calls via mismatching methods are always non-scalarized
1444 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1445 caller_does_not_scalarize = true;
1446 }
1447
1448 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1449 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1450 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1451 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1452 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1453
1454 assert(!caller_nm->is_unloading(), "It should not be unloading");
1455
1456 #ifndef PRODUCT
1457 // tracing/debugging/statistics
1458 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1459 (is_virtual) ? (&_resolve_virtual_ctr) :
1460 (&_resolve_static_ctr);
1461 AtomicAccess::inc(addr);
1462
1463 if (TraceCallFixup) {
1464 ResourceMark rm(current);
1465 tty->print("resolving %s%s (%s) %s call to",
1466 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1467 Bytecodes::name(invoke_code), (caller_does_not_scalarize) ? "non-scalar" : "");
1468 callee_method->print_short_name(tty);
1469 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1470 p2i(caller_frame.pc()), p2i(callee_method->code()));
1471 }
1472 #endif
1473
1474 if (invoke_code == Bytecodes::_invokestatic) {
1475 assert(callee_method->method_holder()->is_initialized() ||
1476 callee_method->method_holder()->is_reentrant_initialization(current),
1477 "invalid class initialization state for invoke_static");
1478 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1479 // In order to keep class initialization check, do not patch call
1480 // site for static call when the class is not fully initialized.
1481 // Proper check is enforced by call site re-resolution on every invocation.
1482 //
1483 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1484 // explicit class initialization check is put in nmethod entry (VEP).
1485 assert(callee_method->method_holder()->is_linked(), "must be");
1486 return callee_method;
1487 }
1488 }
1489
1490
1491 // JSR 292 key invariant:
1492 // If the resolved method is a MethodHandle invoke target, the call
1493 // site must be a MethodHandle call site, because the lambda form might tail-call
1494 // leaving the stack in a state unknown to either caller or callee
1495
1496 // Compute entry points. The computation of the entry points is independent of
1497 // patching the call.
1498
1499 // Make sure the callee nmethod does not get deoptimized and removed before
1500 // we are done patching the code.
1501
1502
1503 CompiledICLocker ml(caller_nm);
1504 if (is_virtual && !is_optimized) {
1505 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1506 inline_cache->update(&call_info, receiver->klass(), caller_does_not_scalarize);
1507 } else {
1508 // Callsite is a direct call - set it to the destination method
1509 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1510 callsite->set(callee_method, caller_does_not_scalarize);
1511 }
1512
1513 return callee_method;
1514 }
1515
1516 // Inline caches exist only in compiled code
1517 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1518 #ifdef ASSERT
1519 RegisterMap reg_map(current,
1520 RegisterMap::UpdateMap::skip,
1521 RegisterMap::ProcessFrames::include,
1522 RegisterMap::WalkContinuation::skip);
1523 frame stub_frame = current->last_frame();
1524 assert(stub_frame.is_runtime_frame(), "sanity check");
1525 frame caller_frame = stub_frame.sender(&reg_map);
1526 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1527 #endif /* ASSERT */
1528
1529 methodHandle callee_method;
1530 const bool is_optimized = false;
1531 bool caller_does_not_scalarize = false;
1532 JRT_BLOCK
1533 callee_method = SharedRuntime::handle_ic_miss_helper(caller_does_not_scalarize, CHECK_NULL);
1534 // Return Method* through TLS
1535 current->set_vm_result_metadata(callee_method());
1536 JRT_BLOCK_END
1537 // return compiled code entry point after potential safepoints
1538 return get_resolved_entry(current, callee_method, false, is_optimized, caller_does_not_scalarize);
1539 JRT_END
1540
1541
1542 // Handle call site that has been made non-entrant
1543 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1544 // 6243940 We might end up in here if the callee is deoptimized
1545 // as we race to call it. We don't want to take a safepoint if
1546 // the caller was interpreted, because the caller frame would look
1547 // interpreted to the stack walkers while the arguments are already
1548 // "compiled"; it is much better to make this transition
1549 // invisible to the stack walking code. The i2c path will
1550 // place the callee method in callee_target. It is stashed
1551 // there because if we tried to find the callee by normal means a
1552 // safepoint would be possible and we would have trouble GC'ing the compiled args.
1553 RegisterMap reg_map(current,
1554 RegisterMap::UpdateMap::skip,
1555 RegisterMap::ProcessFrames::include,
1556 RegisterMap::WalkContinuation::skip);
1557 frame stub_frame = current->last_frame();
1558 assert(stub_frame.is_runtime_frame(), "sanity check");
1559 frame caller_frame = stub_frame.sender(&reg_map);
1560
1561 if (caller_frame.is_interpreted_frame() ||
1562 caller_frame.is_entry_frame() ||
1563 caller_frame.is_upcall_stub_frame()) {
1564 Method* callee = current->callee_target();
1565 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1566 current->set_vm_result_metadata(callee);
1567 current->set_callee_target(nullptr);
1568 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1569 // Bypass class initialization checks in c2i when caller is in native.
1570 // JNI calls to static methods don't have class initialization checks.
1571 // Fast class initialization checks are present in c2i adapters and call into
1572 // SharedRuntime::handle_wrong_method() on the slow path.
1573 //
1574 // JVM upcalls may land here as well, but there's a proper check present in
1575 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1576 // so bypassing it in c2i adapter is benign.
1577 return callee->get_c2i_no_clinit_check_entry();
1578 } else {
1579 if (caller_frame.is_interpreted_frame()) {
1580 return callee->get_c2i_inline_entry();
1581 } else {
1582 return callee->get_c2i_entry();
1583 }
1584 }
1585 }
1586
1587 // Must be the compiled-to-compiled path, which is safe to stack walk
1588 methodHandle callee_method;
1589 bool is_static_call = false;
1590 bool is_optimized = false;
1591 bool caller_does_not_scalarize = false;
1592 JRT_BLOCK
1593 // Force resolving of caller (if we called from compiled frame)
1594 callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_does_not_scalarize, CHECK_NULL);
1595 current->set_vm_result_metadata(callee_method());
1596 JRT_BLOCK_END
1597 // return compiled code entry point after potential safepoints
1598 return get_resolved_entry(current, callee_method, is_static_call, is_optimized, caller_does_not_scalarize);
1599 JRT_END
1600
1601 // Handle abstract method call
1602 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1603 // Verbose error message for AbstractMethodError.
1604 // Get the called method from the invoke bytecode.
1605 vframeStream vfst(current, true);
1606 assert(!vfst.at_end(), "Java frame must exist");
1607 methodHandle caller(current, vfst.method());
1608 Bytecode_invoke invoke(caller, vfst.bci());
1609 DEBUG_ONLY( invoke.verify(); )
1610
1611 // Find the compiled caller frame.
1612 RegisterMap reg_map(current,
1613 RegisterMap::UpdateMap::include,
1614 RegisterMap::ProcessFrames::include,
1615 RegisterMap::WalkContinuation::skip);
1616 frame stubFrame = current->last_frame();
1617 assert(stubFrame.is_runtime_frame(), "must be");
1618 frame callerFrame = stubFrame.sender(&reg_map);
1619 assert(callerFrame.is_compiled_frame(), "must be");
1620
1621 // Install exception and return forward entry.
1622 address res = SharedRuntime::throw_AbstractMethodError_entry();
1623 JRT_BLOCK
1624 methodHandle callee(current, invoke.static_target(current));
1625 if (!callee.is_null()) {
1626 oop recv = callerFrame.retrieve_receiver(&reg_map);
1627 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1628 res = StubRoutines::forward_exception_entry();
1629 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1630 }
1631 JRT_BLOCK_END
1632 return res;
1633 JRT_END
1634
1635 // return verified_code_entry if interp_only_mode is not set for the current thread;
1636 // otherwise return c2i entry.
1637 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method,
1638 bool is_static_call, bool is_optimized, bool caller_does_not_scalarize) {
1639 if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1640 // In interp_only_mode we need to go to the interpreted entry
1641 // The c2i won't patch in this mode -- see fixup_callers_callsite
1642 return callee_method->get_c2i_entry();
1643 }
1644
1645 if (caller_does_not_scalarize) {
1646 assert(callee_method->verified_inline_code_entry() != nullptr, "Jump to zero!");
1647 return callee_method->verified_inline_code_entry();
1648 } else if (is_static_call || is_optimized) {
1649 assert(callee_method->verified_code_entry() != nullptr, "Jump to zero!");
1650 return callee_method->verified_code_entry();
1651 } else {
1652 assert(callee_method->verified_inline_ro_code_entry() != nullptr, "Jump to zero!");
1653 return callee_method->verified_inline_ro_code_entry();
1654 }
1655 }
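// For illustration, the dispatch above reduces to:
//   caller cannot pass scalarized args -> verified inline entry (VIEP)
//   static or optimized virtual call   -> verified entry (VEP)
//   all other calls                    -> verified inline entry (RO) (VIEP(RO)),
//                                         where the receiver is passed as an
//                                         object rather than as fields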
1656
1657 // resolve a static call and patch code
1658 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1659 methodHandle callee_method;
1660 bool caller_does_not_scalarize = false;
1661 bool enter_special = false;
1662 JRT_BLOCK
1663 callee_method = SharedRuntime::resolve_helper(false, false, caller_does_not_scalarize, CHECK_NULL);
1664 current->set_vm_result_metadata(callee_method());
1665 JRT_BLOCK_END
1666 // return compiled code entry point after potential safepoints
1667 return get_resolved_entry(current, callee_method, true, false, caller_does_not_scalarize);
1668 JRT_END
1669
1670 // resolve virtual call and update inline cache to monomorphic
1671 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1672 methodHandle callee_method;
1673 bool caller_does_not_scalarize = false;
1674 JRT_BLOCK
1675 callee_method = SharedRuntime::resolve_helper(true, false, caller_does_not_scalarize, CHECK_NULL);
1676 current->set_vm_result_metadata(callee_method());
1677 JRT_BLOCK_END
1678 // return compiled code entry point after potential safepoints
1679 return get_resolved_entry(current, callee_method, false, false, caller_does_not_scalarize);
1680 JRT_END
1681
1682
1683 // Resolve a virtual call that can be statically bound (e.g., always
1684 // monomorphic, so it has no inline cache). Patch code to resolved target.
1685 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1686 methodHandle callee_method;
1687 bool caller_does_not_scalarize = false;
1688 JRT_BLOCK
1689 callee_method = SharedRuntime::resolve_helper(true, true, caller_does_not_scalarize, CHECK_NULL);
1690 current->set_vm_result_metadata(callee_method());
1691 JRT_BLOCK_END
1692 // return compiled code entry point after potential safepoints
1693 return get_resolved_entry(current, callee_method, false, true, caller_does_not_scalarize);
1694 JRT_END
1695
1696
1697
1698 methodHandle SharedRuntime::handle_ic_miss_helper(bool& caller_does_not_scalarize, TRAPS) {
1699 JavaThread* current = THREAD;
1700 ResourceMark rm(current);
1701 CallInfo call_info;
1702 Bytecodes::Code bc;
1703
1704 // The receiver is null for static calls; an exception is thrown for null
1705 // receivers of non-static calls.
1706 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1707
1708 methodHandle callee_method(current, call_info.selected_method());
1709
1710 #ifndef PRODUCT
1711 AtomicAccess::inc(&_ic_miss_ctr);
1712
1713 // Statistics & Tracing
1714 if (TraceCallFixup) {
1715 ResourceMark rm(current);
1716 tty->print("IC miss (%s) %s call to", Bytecodes::name(bc), (caller_does_not_scalarize) ? "non-scalar" : "");
1717 callee_method->print_short_name(tty);
1718 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1719 }
1720
1721 if (ICMissHistogram) {
1722 MutexLocker m(VMStatistic_lock);
1723 RegisterMap reg_map(current,
1724 RegisterMap::UpdateMap::skip,
1725 RegisterMap::ProcessFrames::include,
1726 RegisterMap::WalkContinuation::skip);
1727 frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1728 // produce statistics under the lock
1729 trace_ic_miss(f.pc());
1730 }
1731 #endif
1732
1733 // Install an event collector so that when a vtable stub is created the
1734 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1735 // event can't be posted when the stub is created as locks are held;
1736 // instead the event will be deferred until the event collector goes
1737 // out of scope.
1738 JvmtiDynamicCodeEventCollector event_collector;
1739
1740 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1741 RegisterMap reg_map(current,
1742 RegisterMap::UpdateMap::skip,
1743 RegisterMap::ProcessFrames::include,
1744 RegisterMap::WalkContinuation::skip);
1745 frame caller_frame = current->last_frame().sender(&reg_map);
1746 CodeBlob* cb = caller_frame.cb();
1747 nmethod* caller_nm = cb->as_nmethod();
1748 // Calls via mismatching methods are always non-scalarized
1749 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1750 caller_does_not_scalarize = true;
1751 }
1752
1753 CompiledICLocker ml(caller_nm);
1754 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1755 inline_cache->update(&call_info, receiver()->klass(), caller_does_not_scalarize);
1756
1757 return callee_method;
1758 }
1759
1760 //
1761 // Resets a call-site in compiled code so it will get resolved again.
1762 // This routine handles virtual call sites, optimized virtual call
1763 // sites, and static call sites. It is typically used to change a call site's
1764 // destination from compiled to interpreted.
1765 //
1766 methodHandle SharedRuntime::reresolve_call_site(bool& is_static_call, bool& is_optimized, bool& caller_does_not_scalarize, TRAPS) {
1767 JavaThread* current = THREAD;
1768 ResourceMark rm(current);
1769 RegisterMap reg_map(current,
1770 RegisterMap::UpdateMap::skip,
1771 RegisterMap::ProcessFrames::include,
1772 RegisterMap::WalkContinuation::skip);
1773 frame stub_frame = current->last_frame();
1774 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1775 frame caller = stub_frame.sender(&reg_map);
1776 if (caller.is_compiled_frame()) {
1777 caller_does_not_scalarize = caller.cb()->as_nmethod()->is_compiled_by_c1();
1778 }
1779
1780 // Do nothing if the frame isn't a live compiled frame.
1781 // nmethod could be deoptimized by the time we get here
1782 // so no update to the caller is needed.
1783
1784 if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1785 (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) {
1786
1787 address pc = caller.pc();
1788
1789 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1790 assert(caller_nm != nullptr, "did not find caller nmethod");
1791
1792 // Default call_addr is the location of the "basic" call.
1793 // Determine the address of the call we are re-resolving. With
1794 // Inline Caches we will always find a recognizable call.
1795 // With Inline Caches disabled we may or may not find a
1796 // recognizable call. We will always find a call for static
1797 // calls and for optimized virtual calls. For vanilla virtual
1798 // calls it depends on the state of the UseInlineCaches switch.
1799 //
1800 // With Inline Caches disabled we can get here for a virtual call
1801 // for two reasons:
1802 //   1 - calling an abstract method. The vtable for abstract methods
1803 //       will run us through handle_wrong_method and we will eventually
1804 //       end up in the interpreter to throw the AbstractMethodError.
1805 //   2 - a racing deoptimization. We could be doing a vanilla vtable
1806 //       call and, between the time we fetch the entry address and
1807 //       jump to it, the target gets deoptimized. As in case 1,
1808 //       we will wind up in the interpreter (through a c2i with c2).
1809 //
1810 CompiledICLocker ml(caller_nm);
1811 address call_addr = caller_nm->call_instruction_address(pc);
1812
1813 if (call_addr != nullptr) {
1814 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1815 // bytes back in the instruction stream so we must also check for reloc info.
1816 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1817 bool ret = iter.next(); // Get item
1818 if (ret) {
1819 is_static_call = false;
1820 is_optimized = false;
1821 switch (iter.type()) {
1822 case relocInfo::static_call_type:
1823 is_static_call = true;
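// Intentional fall-through: the opt_virtual_call_type case below performs
// the shared clean-up for both flavors of direct call.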
1824 case relocInfo::opt_virtual_call_type: {
1825 is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
1826 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1827 cdc->set_to_clean();
1828 break;
1829 }
1830 case relocInfo::virtual_call_type: {
1831 // compiled, dispatched call (which used to call an interpreted method)
1832 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1833 inline_cache->set_to_clean();
1834 break;
1835 }
1836 default:
1837 break;
1838 }
1839 }
1840 }
1841 }
1842
1843 methodHandle callee_method = find_callee_method(caller_does_not_scalarize, CHECK_(methodHandle()));
1844
1845 #ifndef PRODUCT
1846 AtomicAccess::inc(&_wrong_method_ctr);
1847
1848 if (TraceCallFixup) {
1849 ResourceMark rm(current);
1850 tty->print("handle_wrong_method reresolving %s call to", (caller_does_not_scalarize) ? "non-scalar" : "");
1851 callee_method->print_short_name(tty);
1852 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1853 }
1854 #endif
1855
1856 return callee_method;
1857 }
1858
1859 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1860 // The faulting unsafe accesses should be changed to throw the error
1861 // synchronously instead. Meanwhile the faulting instruction will be
1862 // skipped over (effectively turning it into a no-op) and an
1863 // asynchronous exception will be raised which the thread will
1864 // handle at a later point. If the instruction is a load it will
1865 // return garbage.
1866
1867 // Request an async exception.
1868 thread->set_pending_unsafe_access_error();
1869
1870 // Return address of next instruction to execute.
2036 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
2037
2038 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
2039 if (message == nullptr) {
2040 // Shouldn't happen, but don't cause even more problems if it does
2041 message = const_cast<char*>(caster_klass->external_name());
2042 } else {
2043 jio_snprintf(message,
2044 msglen,
2045 "class %s cannot be cast to class %s (%s%s%s)",
2046 caster_name,
2047 target_name,
2048 caster_klass_description,
2049 klass_separator,
2050 target_klass_description
2051 );
2052 }
2053 return message;
2054 }
2055
2056 char* SharedRuntime::generate_identity_exception_message(JavaThread* current, Klass* klass) {
2057 assert(klass->is_inline_klass(), "Must be a concrete value class");
2058 const char* desc = "Cannot synchronize on an instance of value class ";
2059 const char* className = klass->external_name();
2060 size_t msglen = strlen(desc) + strlen(className) + 1;
2061 char* message = NEW_RESOURCE_ARRAY(char, msglen);
2062 if (nullptr == message) {
2063 // Out of memory: can't create detailed error message
2064 message = const_cast<char*>(klass->external_name());
2065 } else {
2066 jio_snprintf(message, msglen, "%s%s", desc, className);
2067 }
2068 return message;
2069 }
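// For illustration: given a value class whose external name is "Point"
// (hypothetical), the message composed above reads
//   "Cannot synchronize on an instance of value class Point".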
2070
2071 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
2072 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
2073 JRT_END
2074
2075 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2076 if (!SafepointSynchronize::is_synchronizing()) {
2077 // Only try quick_enter() if we're not trying to reach a safepoint
2078 // so that the calling thread reaches the safepoint more quickly.
2079 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
2080 return;
2081 }
2082 }
2083 // NO_ASYNC required because an async exception on the state transition destructor
2084 // would leave you with the lock held and it would never be released.
2085 // The normal monitorenter NullPointerException is thrown without acquiring a lock
2086 // and the model is that an exception implies the method failed.
2087 JRT_BLOCK_NO_ASYNC
2088 Handle h_obj(THREAD, obj);
2089 ObjectSynchronizer::enter(h_obj, lock, current);
2090 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2284 tty->print_cr("Note 1: counter updates are not MT-safe.");
2285 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2286 tty->print_cr(" %% in nested categories are relative to their category");
2287 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2288 tty->cr();
2289
2290 MethodArityHistogram h;
2291 }
2292 #endif
2293
2294 #ifndef PRODUCT
2295 static int _lookups; // number of calls to lookup
2296 static int _equals; // number of buckets checked with matching hash
2297 static int _archived_hits; // number of successful lookups in archived table
2298 static int _runtime_hits; // number of successful lookups in runtime table
2299 #endif
2300
2301 // A simple wrapper class around the calling convention information
2302 // that allows sharing of adapters for the same calling convention.
2303 class AdapterFingerPrint : public MetaspaceObj {
2304 public:
2305 class Element {
2306 private:
2307 // The highest byte is the type of the argument. The remaining bytes contain the offset of the
2308 // field if it is flattened in the calling convention, -1 otherwise.
2309 juint _payload;
2310
2311 static constexpr int offset_bit_width = 24;
2312 static constexpr juint offset_bit_mask = (1 << offset_bit_width) - 1;
2313 public:
2314 Element(BasicType bt, int offset) : _payload((static_cast<juint>(bt) << offset_bit_width) | (juint(offset) & offset_bit_mask)) {
2315 assert(offset >= -1 && offset < jint(offset_bit_mask), "invalid offset %d", offset);
2316 }
2317
2318 BasicType bt() const {
2319 return static_cast<BasicType>(_payload >> offset_bit_width);
2320 }
2321
2322 int offset() const {
2323 juint res = _payload & offset_bit_mask;
2324 return res == offset_bit_mask ? -1 : res;
2325 }
2326
2327 juint hash() const {
2328 return _payload;
2329 }
2330
2331 bool operator!=(const Element& other) const {
2332 return _payload != other._payload;
2333 }
2334 };
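// For illustration, using HotSpot's BasicType numbering (T_INT == 10):
//   Element(T_INT, -1) packs _payload = (10u << 24) | 0x00FFFFFF = 0x0AFFFFFF,
//   and offset() maps the all-ones low 24 bits back to -1;
//   Element(T_INT, 8), a field flattened at offset 8, packs 0x0A000008.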
2335
2336 private:
2337 const bool _has_ro_adapter;
2338 const int _length;
2339
2340 static int data_offset() { return sizeof(AdapterFingerPrint); }
2341 Element* data_pointer() {
2342 return reinterpret_cast<Element*>(reinterpret_cast<address>(this) + data_offset());
2343 }
2344
2345 const Element& element_at(int index) {
2346 assert(index < length(), "index %d out of bounds for length %d", index, length());
2347 Element* data = data_pointer();
2348 return data[index];
2349 }
2350
2351 // Private constructor. Use allocate() to get an instance.
2352 AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter)
2353 : _has_ro_adapter(has_ro_adapter), _length(total_args_passed_in_sig(sig)) {
2354 Element* data = data_pointer();
2355 BasicType prev_bt = T_ILLEGAL;
2356 int vt_count = 0;
2357 for (int index = 0; index < _length; index++) {
2358 const SigEntry& sig_entry = sig->at(index);
2359 BasicType bt = sig_entry._bt;
2360 if (bt == T_METADATA) {
2361 // Found start of inline type in signature
2362 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2363 vt_count++;
2364 } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2365 // Found end of inline type in signature
2366 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2367 vt_count--;
2368 assert(vt_count >= 0, "invalid vt_count");
2369 } else if (vt_count == 0) {
2370 // Widen fields that are not part of a scalarized inline type argument
2371 assert(sig_entry._offset == -1, "invalid offset for argument that is not a flattened field %d", sig_entry._offset);
2372 bt = adapter_encoding(bt);
2373 }
2374
2375 ::new(&data[index]) Element(bt, sig_entry._offset);
2376 prev_bt = bt;
2377 }
2378 assert(vt_count == 0, "invalid vt_count");
2379 }
2380
2381 // Call deallocate instead
2382 ~AdapterFingerPrint() {
2383 ShouldNotCallThis();
2384 }
2385
2386 static int total_args_passed_in_sig(const GrowableArray<SigEntry>* sig) {
2387 return (sig != nullptr) ? sig->length() : 0;
2388 }
2389
2390 static int compute_size_in_words(int len) {
2391 return (int)heap_word_size(sizeof(AdapterFingerPrint) + (len * sizeof(Element)));
2392 }
2393
2394 // Remap BasicTypes that are handled equivalently by the adapters.
2395 // These are correct for the current system but someday it might be
2396 // necessary to make this mapping platform dependent.
2397 static BasicType adapter_encoding(BasicType in) {
2398 switch (in) {
2399 case T_BOOLEAN:
2400 case T_BYTE:
2401 case T_SHORT:
2402 case T_CHAR:
2403 // They are all promoted to T_INT in the calling convention
2404 return T_INT;
2405
2406 case T_OBJECT:
2407 case T_ARRAY:
2408 // In other words, we assume that any register good enough for
2409 // an int or long is good enough for a managed pointer.
2410 #ifdef _LP64
2411 return T_LONG;
2412 #else
2413 return T_INT;
2414 #endif
2415
2416 case T_INT:
2417 case T_LONG:
2418 case T_FLOAT:
2419 case T_DOUBLE:
2420 case T_VOID:
2421 return in;
2422
2423 default:
2424 ShouldNotReachHere();
2425 return T_CONFLICT;
2426 }
2427 }
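// For illustration: since T_BOOLEAN, T_BYTE, T_SHORT and T_CHAR all remap to
// T_INT, an adapter generated for the argument list (int, int) also serves
// (boolean, short) outside scalarized inline-type arguments -- the
// fingerprints are identical, so the handler table yields one shared entry.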
2428
2429 void* operator new(size_t size, size_t fp_size) throw() {
2430 assert(fp_size >= size, "sanity check");
2431 void* p = AllocateHeap(fp_size, mtCode);
2432 memset(p, 0, fp_size);
2433 return p;
2434 }
2435
2436 public:
2437 template<typename Function>
2438 void iterate_args(Function function) {
2439 for (int i = 0; i < length(); i++) {
2440 function(element_at(i));
2441 }
2442 }
2443
2444 static AdapterFingerPrint* allocate(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2445 int len = total_args_passed_in_sig(sig);
2446 int size_in_bytes = BytesPerWord * compute_size_in_words(len);
2447 AdapterFingerPrint* afp = new (size_in_bytes) AdapterFingerPrint(sig, has_ro_adapter);
2448 assert((afp->size() * BytesPerWord) == size_in_bytes, "should match");
2449 return afp;
2450 }
2451
2452 static void deallocate(AdapterFingerPrint* fp) {
2453 FreeHeap(fp);
2454 }
2455
2456 bool has_ro_adapter() const {
2457 return _has_ro_adapter;
2458 }
2459
2460 int length() const {
2461 return _length;
2462 }
2463
2464 unsigned int compute_hash() {
2465 int hash = 0;
2466 for (int i = 0; i < length(); i++) {
2467 const Element& v = element_at(i);
2468 // Add an arithmetic operation to the hash (here +3) to improve mixing
2469 hash = ((hash << 8) ^ v.hash() ^ (hash >> 5)) + 3;
2470 }
2471 return (unsigned int)hash;
2472 }
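// For illustration: the shift-and-xor mixing above is order-sensitive, so
// argument lists that differ only in order (e.g. (int, long) vs. (long, int))
// hash differently even though they contain the same elements.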
2473
2474 const char* as_string() {
2475 stringStream st;
2476 st.print("{");
2477 if (_has_ro_adapter) {
2478 st.print("has_ro_adapter");
2479 } else {
2480 st.print("no_ro_adapter");
2481 }
2482 for (int i = 0; i < length(); i++) {
2483 st.print(", ");
2484 const Element& elem = element_at(i);
2485 st.print("{%s, %d}", type2name(elem.bt()), elem.offset());
2486 }
2487 st.print("}");
2488 return st.as_string();
2489 }
2490
2491 const char* as_basic_args_string() {
2492 stringStream st;
2493 bool long_prev = false;
2494 iterate_args([&] (const Element& arg) {
2495 if (long_prev) {
2496 long_prev = false;
2497 if (arg.bt() == T_VOID) {
2498 st.print("J");
2499 } else {
2500 st.print("L");
2501 }
2502 }
2503 if (arg.bt() == T_LONG) {
2504 long_prev = true;
2505 } else if (arg.bt() != T_VOID) {
2506 st.print("%c", type2char(arg.bt()));
2507 }
2508 });
2509 if (long_prev) {
2510 st.print("L");
2511 }
2512 return st.as_string();
2513 }
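// For illustration: adapter_encoding() widens oops to T_LONG on 64-bit
// platforms, but a "real" long is still followed by its T_VOID half slot.
// The look-ahead above exploits this: T_LONG followed by T_VOID prints "J",
// while a T_LONG with no trailing T_VOID must be a widened oop and prints "L".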
2514
2515 bool equals(AdapterFingerPrint* other) {
2516 if (other->_has_ro_adapter != _has_ro_adapter) {
2517 return false;
2518 } else if (other->_length != _length) {
2519 return false;
2520 } else {
2521 for (int i = 0; i < _length; i++) {
2522 if (element_at(i) != other->element_at(i)) {
2523 return false;
2524 }
2525 }
2526 }
2527 return true;
2528 }
2529
2530 // methods required by virtue of being a MetaspaceObj
2531 void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
2532 int size() const { return compute_size_in_words(_length); }
2533 MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2534
2535 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2536 NOT_PRODUCT(_equals++);
2537 return fp1->equals(fp2);
2538 }
2539
2540 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2541 return fp->compute_hash();
2542 }
2543 };
2544
2545 #if INCLUDE_CDS
2546 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2547 return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2548 }
2549
2550 class ArchivedAdapterTable : public OffsetCompactHashtable<
2551 AdapterFingerPrint*,
2552 AdapterHandlerEntry*,
2553 adapter_fp_equals_compact_hashtable_entry> {};
2554 #endif // INCLUDE_CDS
2555
2556 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2557 using AdapterHandlerTable = HashTable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2558 AnyObj::C_HEAP, mtCode,
2559 AdapterFingerPrint::compute_hash,
2560 AdapterFingerPrint::equals>;
2561 static AdapterHandlerTable* _adapter_handler_table;
2562 static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2563
2564 // Find an entry with the same fingerprint, if one exists
2565 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter) {
2566 NOT_PRODUCT(_lookups++);
2567 assert_lock_strong(AdapterHandlerLibrary_lock);
2568 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(sig, has_ro_adapter);
2569 AdapterHandlerEntry* entry = nullptr;
2570 #if INCLUDE_CDS
2571 // If we are still building the archive, the archived adapter table is not
2572 // valid yet, so we must use the entries added to the runtime table instead.
2573 if (AOTCodeCache::is_using_adapter()) {
2574 // Search the archived table first. It is a read-only table, so it can be searched without a lock.
2575 entry = _aot_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2576 #ifndef PRODUCT
2577 if (entry != nullptr) {
2578 _archived_hits++;
2579 }
2580 #endif
2581 }
2582 #endif // INCLUDE_CDS
2583 if (entry == nullptr) {
2584 assert_lock_strong(AdapterHandlerLibrary_lock);
2585 AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2586 if (entry_p != nullptr) {
2587 entry = *entry_p;
2588 assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2605 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2606 ts.print(tty, "AdapterHandlerTable");
2607 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2608 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2609 int total_hits = _archived_hits + _runtime_hits;
2610 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d)",
2611 _lookups, _equals, total_hits, _archived_hits, _runtime_hits);
2612 }
2613 #endif
2614
2615 // ---------------------------------------------------------------------------
2616 // Implementation of AdapterHandlerLibrary
2617 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2618 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2619 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2620 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2621 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2622 #if INCLUDE_CDS
2623 ArchivedAdapterTable AdapterHandlerLibrary::_aot_adapter_handler_table;
2624 #endif // INCLUDE_CDS
2625 static const int AdapterHandlerLibrary_size = 48*K;
2626 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2627 volatile uint AdapterHandlerLibrary::_id_counter = 0;
2628
2629 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2630 assert(_buffer != nullptr, "should be initialized");
2631 return _buffer;
2632 }
2633
2634 static void post_adapter_creation(const AdapterHandlerEntry* entry) {
2635 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2636 AdapterBlob* adapter_blob = entry->adapter_blob();
2637 char blob_id[256];
2638 jio_snprintf(blob_id,
2639 sizeof(blob_id),
2640 "%s(%s)",
2641 adapter_blob->name(),
2642 entry->fingerprint()->as_string());
2643 if (Forte::is_enabled()) {
2644 Forte::register_stub(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2645 }
2646
2647 if (JvmtiExport::should_post_dynamic_code_generated()) {
2648 JvmtiExport::post_dynamic_code_generated(blob_id, adapter_blob->content_begin(), adapter_blob->content_end());
2649 }
2650 }
2651 }
2652
2653 void AdapterHandlerLibrary::initialize() {
2654 {
2655 ResourceMark rm;
2656 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2657 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2658 }
2659
2660 #if INCLUDE_CDS
2661 // Link adapters in AOT Cache to their code in AOT Code Cache
2662 if (AOTCodeCache::is_using_adapter() && !_aot_adapter_handler_table.empty()) {
2663 link_aot_adapters();
2664 lookup_simple_adapters();
2665 return;
2666 }
2667 #endif // INCLUDE_CDS
2668
2669 ResourceMark rm;
2670 {
2671 MutexLocker mu(AdapterHandlerLibrary_lock);
2672
2673 CompiledEntrySignature no_args;
2674 no_args.compute_calling_conventions();
2675 _no_arg_handler = create_adapter(no_args, true);
2676
2677 CompiledEntrySignature obj_args;
2678 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
2679 obj_args.compute_calling_conventions();
2680 _obj_arg_handler = create_adapter(obj_args, true);
2681
2682 CompiledEntrySignature int_args;
2683 SigEntry::add_entry(int_args.sig(), T_INT);
2684 int_args.compute_calling_conventions();
2685 _int_arg_handler = create_adapter(int_args, true);
2686
2687 CompiledEntrySignature obj_int_args;
2688 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
2689 SigEntry::add_entry(obj_int_args.sig(), T_INT);
2690 obj_int_args.compute_calling_conventions();
2691 _obj_int_arg_handler = create_adapter(obj_int_args, true);
2692
2693 CompiledEntrySignature obj_obj_args;
2694 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2695 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2696 obj_obj_args.compute_calling_conventions();
2697 _obj_obj_arg_handler = create_adapter(obj_obj_args, true);
2698
2699 // We should always get an entry back, but on Zero there is no
2700 // associated blob.
2701 assert(_no_arg_handler != nullptr &&
2702 _obj_arg_handler != nullptr &&
2703 _int_arg_handler != nullptr &&
2704 _obj_int_arg_handler != nullptr &&
2705 _obj_obj_arg_handler != nullptr, "Initial adapter handlers must be properly created");
2706 }
2707
2708 // Outside of the lock
2709 #ifndef ZERO
2710 // no blobs to register when we are on Zero
2711 post_adapter_creation(_no_arg_handler);
2712 post_adapter_creation(_obj_arg_handler);
2713 post_adapter_creation(_int_arg_handler);
2714 post_adapter_creation(_obj_int_arg_handler);
2715 post_adapter_creation(_obj_obj_arg_handler);
2716 #endif // ZERO
2717 }
2718
2719 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint) {
2720 uint id = (uint)AtomicAccess::add((int*)&_id_counter, 1);
2721 assert(id > 0, "we can never overflow because AOT cache cannot contain more than 2^32 methods");
2722 return AdapterHandlerEntry::allocate(id, fingerprint);
2723 }
2724
2725 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2726 int total_args_passed = method->size_of_parameters(); // All args on stack
2727 if (total_args_passed == 0) {
2728 return _no_arg_handler;
2729 } else if (total_args_passed == 1) {
2730 if (!method->is_static()) {
2731 if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2732 return nullptr;
2733 }
2734 return _obj_arg_handler;
2735 }
2736 switch (method->signature()->char_at(1)) {
2737 case JVM_SIGNATURE_CLASS: {
2738 if (InlineTypePassFieldsAsArgs) {
2739 SignatureStream ss(method->signature());
2740 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2741 if (vk != nullptr) {
2742 return nullptr;
2743 }
2744 }
2745 return _obj_arg_handler;
2746 }
2747 case JVM_SIGNATURE_ARRAY:
2748 return _obj_arg_handler;
2749 case JVM_SIGNATURE_INT:
2750 case JVM_SIGNATURE_BOOLEAN:
2751 case JVM_SIGNATURE_CHAR:
2752 case JVM_SIGNATURE_BYTE:
2753 case JVM_SIGNATURE_SHORT:
2754 return _int_arg_handler;
2755 }
2756 } else if (total_args_passed == 2 &&
2757 !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2758 switch (method->signature()->char_at(1)) {
2759 case JVM_SIGNATURE_CLASS: {
2760 if (InlineTypePassFieldsAsArgs) {
2761 SignatureStream ss(method->signature());
2762 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2763 if (vk != nullptr) {
2764 return nullptr;
2765 }
2766 }
2767 return _obj_obj_arg_handler;
2768 }
2769 case JVM_SIGNATURE_ARRAY:
2770 return _obj_obj_arg_handler;
2771 case JVM_SIGNATURE_INT:
2772 case JVM_SIGNATURE_BOOLEAN:
2773 case JVM_SIGNATURE_CHAR:
2774 case JVM_SIGNATURE_BYTE:
2775 case JVM_SIGNATURE_SHORT:
2776 return _obj_int_arg_handler;
2777 }
2778 }
2779 return nullptr;
2780 }
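// For illustration (hypothetical methods): a virtual method "int size()" has a
// single incoming argument (the receiver) and resolves to _obj_arg_handler;
// "boolean equals(Object o)" has a receiver plus one object argument and
// resolves to _obj_obj_arg_handler. Signatures involving inline-type receivers
// or arguments return nullptr above, forcing a custom adapter instead.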
2781
2782 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2783 _method(method), _num_inline_args(0), _has_inline_recv(false),
2784 _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2785 _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2786 _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2787 _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2788 _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2789 _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2790 }
2791
2792 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2793 // or the same entry for VEP and VIEP(RO).
2794 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2795 if (!has_scalarized_args()) {
2796 // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2797 return CodeOffsets::Verified_Entry;
2798 }
2799 if (_method->is_static()) {
2800 // Static methods don't need VIEP(RO)
2801 return CodeOffsets::Verified_Entry;
2802 }
2803
2804 if (has_inline_recv()) {
2805 if (num_inline_args() == 1) {
2806 // Share same entry for VIEP and VIEP(RO).
2807 // This is quite common: we have an instance method in an InlineKlass that has
2808 // no inline type args other than <this>.
2809 return CodeOffsets::Verified_Inline_Entry;
2810 } else {
2811 assert(num_inline_args() > 1, "must be");
2812 // No sharing:
2813 // VIEP(RO) -- <this> is passed as object
2814 // VEP -- <this> is passed as fields
2815 return CodeOffsets::Verified_Inline_Entry_RO;
2816 }
2817 }
2818
2819   // At this point the receiver is not a scalarized inline type (static methods returned above)
2820 if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2821 // No sharing:
2822 // Some arguments are passed on the stack, and we have inserted reserved entries
2823 // into the VEP, but we never insert reserved entries into the VIEP(RO).
2824 return CodeOffsets::Verified_Inline_Entry_RO;
2825 } else {
2826 // Share same entry for VEP and VIEP(RO).
2827 return CodeOffsets::Verified_Entry;
2828 }
2829 }
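
// In summary (a sketch of the decision above):
//   no scalarized args                          -> VEP == VIEP == VIEP(RO)
//   static method                               -> no VIEP(RO) needed, share VEP
//   scalarized <this>, no other inline args     -> VIEP(RO) shares VIEP
//   scalarized <this> plus other inline args    -> VIEP(RO) is separate
//   no scalarized <this>, equal stack arg count -> VIEP(RO) shares VEP
//   no scalarized <this>, extra stack args      -> VIEP(RO) is separate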
2830
2831 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
2832 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2833 if (_supers != nullptr) {
2834 return _supers;
2835 }
2836 _supers = new GrowableArray<Method*>();
2837 // Skip private, static, and <init> methods
2838 if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2839 return _supers;
2840 }
2841 Symbol* name = _method->name();
2842 Symbol* signature = _method->signature();
2843 const Klass* holder = _method->method_holder()->super();
2844 Symbol* holder_name = holder->name();
2845 ThreadInVMfromUnknown tiv;
2846 JavaThread* current = JavaThread::current();
2847 HandleMark hm(current);
2848 Handle loader(current, _method->method_holder()->class_loader());
2849
2850 // Walk up the class hierarchy and search for super methods
2851 while (holder != nullptr) {
2852 Method* super_method = holder->lookup_method(name, signature);
2853 if (super_method == nullptr) {
2854 break;
2855 }
2856 if (!super_method->is_static() && !super_method->is_private() &&
2857 (!super_method->is_package_private() ||
2858 super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
2859 _supers->push(super_method);
2860 }
2861 holder = super_method->method_holder()->super();
2862 }
2863 // Search interfaces for super methods
2864 Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
2865 for (int i = 0; i < interfaces->length(); ++i) {
2866 Method* m = interfaces->at(i)->lookup_method(name, signature);
2867 if (m != nullptr && !m->is_static() && m->is_public()) {
2868 _supers->push(m);
2869 }
2870 }
2871 return _supers;
2872 }
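
// Example (hypothetical types, for illustration only): given
//   class A           { void m(MyValue v) {} }
//   class B extends A { void m(MyValue v) {} }
//   interface I       { default void m(MyValue v) {} }
//   class C extends B implements I { void m(MyValue v) {} }
// get_supers() for C::m collects B::m and A::m by walking the superclass
// chain, plus I::m from the transitive interfaces.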
2873
2874 // Iterate over arguments and compute scalarized and non-scalarized signatures
2875 void CompiledEntrySignature::compute_calling_conventions(bool init) {
2876 bool has_scalarized = false;
2877 if (_method != nullptr) {
2878 InstanceKlass* holder = _method->method_holder();
2879 int arg_num = 0;
2880 if (!_method->is_static()) {
2881 // We shouldn't scalarize 'this' in a value class constructor
2882 if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() && !_method->is_object_constructor() &&
2883 (init || _method->is_scalarized_arg(arg_num))) {
2884 _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
2885 has_scalarized = true;
2886 _has_inline_recv = true;
2887 _num_inline_args++;
2888 } else {
2889 SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
2890 }
2891 SigEntry::add_entry(_sig, T_OBJECT, holder->name());
2892 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
2893 arg_num++;
2894 }
2895 for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
2896 BasicType bt = ss.type();
2897 if (bt == T_OBJECT) {
2898 InlineKlass* vk = ss.as_inline_klass(holder);
2899 if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
2900 // Check for a calling convention mismatch with super method(s)
2901 bool scalar_super = false;
2902 bool non_scalar_super = false;
2903 GrowableArray<Method*>* supers = get_supers();
2904 for (int i = 0; i < supers->length(); ++i) {
2905 Method* super_method = supers->at(i);
2906 if (super_method->is_scalarized_arg(arg_num)) {
2907 scalar_super = true;
2908 } else {
2909 non_scalar_super = true;
2910 }
2911 }
2912 #ifdef ASSERT
2913           // Randomly enable the code paths below for stress testing
2914 bool stress = init && StressCallingConvention;
2915 if (stress && (os::random() & 1) == 1) {
2916 non_scalar_super = true;
2917 if ((os::random() & 1) == 1) {
2918 scalar_super = true;
2919 }
2920 }
2921 #endif
2922 if (non_scalar_super) {
2923 // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
2924 if (scalar_super) {
2925 // Found non-scalar *and* scalar super methods. We can't handle both.
2926             // Mark the scalarized super methods as mismatched and recompile their call sites to use the non-scalarized calling convention.
2927 for (int i = 0; i < supers->length(); ++i) {
2928 Method* super_method = supers->at(i);
2929 if (super_method->is_scalarized_arg(arg_num) DEBUG_ONLY(|| (stress && (os::random() & 1) == 1))) {
2930 super_method->set_mismatch();
2931 MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
2932 JavaThread* thread = JavaThread::current();
2933 HandleMark hm(thread);
2934 methodHandle mh(thread, super_method);
2935 DeoptimizationScope deopt_scope;
2936 CodeCache::mark_for_deoptimization(&deopt_scope, mh());
2937 deopt_scope.deoptimize_marked();
2938 }
2939 }
2940 }
2941 // Fall back to non-scalarized calling convention
2942 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2943 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2944 } else {
2945 _num_inline_args++;
2946 has_scalarized = true;
2947 int last = _sig_cc->length();
2948 int last_ro = _sig_cc_ro->length();
2949 _sig_cc->appendAll(vk->extended_sig());
2950 _sig_cc_ro->appendAll(vk->extended_sig());
2951 if (bt == T_OBJECT) {
2952             // Nullable inline type argument: insert an InlineTypeNode::NullMarker field right after the T_METADATA delimiter
2953 _sig_cc->insert_before(last+1, SigEntry(T_BOOLEAN, -1, nullptr, true));
2954 _sig_cc_ro->insert_before(last_ro+1, SigEntry(T_BOOLEAN, -1, nullptr, true));
2955 }
2956 }
2957 } else {
2958 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2959 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2960 }
2961 bt = T_OBJECT;
2962 } else {
2963 SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
2964 SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
2965 }
2966 SigEntry::add_entry(_sig, bt, ss.as_symbol());
2967 if (bt != T_VOID) {
2968 arg_num++;
2969 }
2970 }
2971 }
2972
2973 // Compute the non-scalarized calling convention
2974 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
2975 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
2976
2977 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
2978 if (has_scalarized && !_method->is_native()) {
2979 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
2980 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
2981
2982 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
2983 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
2984
2985 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
2986 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
2987
2988 // Upper bound on stack arguments to avoid hitting the argument limit and
2989 // bailing out of compilation ("unsupported incoming calling sequence").
2990 // TODO we need a reasonable limit (flag?) here
2991 if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 60) {
2992 return; // Success
2993 }
2994 }
2995
2996 // No scalarized args
2997 _sig_cc = _sig;
2998 _regs_cc = _regs;
2999 _args_on_stack_cc = _args_on_stack;
3000
3001 _sig_cc_ro = _sig;
3002 _regs_cc_ro = _regs;
3003 _args_on_stack_cc_ro = _args_on_stack;
3004 }
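
// Worked example (a sketch; the exact entries come from InlineKlass::extended_sig()):
// for a static method `void m(MyValue v)` where `value class MyValue { int x; short y; }`
// and the argument is scalarized, we would expect roughly:
//   _sig    = [ T_OBJECT ]          // v passed as a reference
//   _sig_cc = [ T_METADATA,         // start-of-inline-type delimiter
//               T_BOOLEAN,          // null marker (v is nullable)
//               T_INT, T_SHORT,     // the scalarized fields x and y
//               T_VOID ]            // end-of-inline-type delimiter
// For a static method _sig_cc_ro is identical to _sig_cc; only a scalarized
// receiver makes them differ.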
3005
3006 void CompiledEntrySignature::initialize_from_fingerprint(AdapterFingerPrint* fingerprint) {
3007 _has_inline_recv = fingerprint->has_ro_adapter();
3008
3009 int value_object_count = 0;
3010 BasicType prev_bt = T_ILLEGAL;
3011 bool has_scalarized_arguments = false;
3012 bool long_prev = false;
3013 int long_prev_offset = -1;
3014
3015 fingerprint->iterate_args([&] (const AdapterFingerPrint::Element& arg) {
3016 BasicType bt = arg.bt();
3017 int offset = arg.offset();
3018
3019 if (long_prev) {
3020 long_prev = false;
3021 BasicType bt_to_add;
3022 if (bt == T_VOID) {
3023 bt_to_add = T_LONG;
3024 } else {
3025 bt_to_add = T_OBJECT;
3026 }
3027 if (value_object_count == 0) {
3028 SigEntry::add_entry(_sig, bt_to_add);
3029 }
3030 SigEntry::add_entry(_sig_cc, bt_to_add, nullptr, long_prev_offset);
3031 SigEntry::add_entry(_sig_cc_ro, bt_to_add, nullptr, long_prev_offset);
3032 }
3033
3034 switch (bt) {
3035 case T_VOID:
3036 if (prev_bt != T_LONG && prev_bt != T_DOUBLE) {
3037 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
3038 value_object_count--;
3039 SigEntry::add_entry(_sig_cc, T_VOID, nullptr, offset);
3040 SigEntry::add_entry(_sig_cc_ro, T_VOID, nullptr, offset);
3041 assert(value_object_count >= 0, "invalid value object count");
3042 } else {
3043         // Nothing to add for _sig: we already added an additional T_VOID in add_entry() when adding T_LONG or T_DOUBLE.
3044 }
3045 break;
3046 case T_INT:
3047 case T_FLOAT:
3048 case T_DOUBLE:
3049 if (value_object_count == 0) {
3050 SigEntry::add_entry(_sig, bt);
3051 }
3052 SigEntry::add_entry(_sig_cc, bt, nullptr, offset);
3053 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset);
3054 break;
3055 case T_LONG:
3056 long_prev = true;
3057 long_prev_offset = offset;
3058 break;
3059 case T_BOOLEAN:
3060 case T_CHAR:
3061 case T_BYTE:
3062 case T_SHORT:
3063 case T_OBJECT:
3064 case T_ARRAY:
3065 assert(value_object_count > 0, "must be value object field");
3066 SigEntry::add_entry(_sig_cc, bt, nullptr, offset);
3067 SigEntry::add_entry(_sig_cc_ro, bt, nullptr, offset);
3068 break;
3069 case T_METADATA:
3070 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
3071 if (value_object_count == 0) {
3072 SigEntry::add_entry(_sig, T_OBJECT);
3073 }
3074 SigEntry::add_entry(_sig_cc, T_METADATA, nullptr, offset);
3075 SigEntry::add_entry(_sig_cc_ro, T_METADATA, nullptr, offset);
3076 value_object_count++;
3077 has_scalarized_arguments = true;
3078 break;
3079 default: {
3080 fatal("Unexpected BasicType: %s", basictype_to_str(bt));
3081 }
3082 }
3083 prev_bt = bt;
3084 });
3085
3086 if (long_prev) {
3087     // If the previous bt was T_LONG and we reached the end of the signature, we know it must encode a T_OBJECT.
3088 SigEntry::add_entry(_sig, T_OBJECT);
3089 SigEntry::add_entry(_sig_cc, T_OBJECT);
3090 SigEntry::add_entry(_sig_cc_ro, T_OBJECT);
3091 }
3092 assert(value_object_count == 0, "invalid value object count");
3093
3094 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3095 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3096
3097 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3098 if (has_scalarized_arguments) {
3099 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3100 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3101
3102 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3103 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3104
3105 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3106 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3107 } else {
3108 // No scalarized args
3109 _sig_cc = _sig;
3110 _regs_cc = _regs;
3111 _args_on_stack_cc = _args_on_stack;
3112
3113 _sig_cc_ro = _sig;
3114 _regs_cc_ro = _regs;
3115 _args_on_stack_cc_ro = _args_on_stack;
3116 }
3117
3118 #ifdef ASSERT
3119 {
3120 AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(_sig_cc, _has_inline_recv);
3121 assert(fingerprint->equals(compare_fp), "%s - %s", fingerprint->as_string(), compare_fp->as_string());
3122 AdapterFingerPrint::deallocate(compare_fp);
3123 }
3124 #endif
3125 }
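
// Decoding notes (inferred from the loop above; a sketch, not a specification):
//  - T_METADATA ... T_VOID brackets the fields of one scalarized value object;
//    only a single T_OBJECT is added to the non-scalarized _sig for the group.
//  - T_LONG is ambiguous until the next element is seen: T_LONG followed by
//    T_VOID is a real long (two Java slots), anything else (or the end of the
//    signature) means the fingerprint encoded an object.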
3126
3127 const char* AdapterHandlerEntry::_entry_names[] = {
3128 "i2c", "c2i", "c2i_unverified", "c2i_no_clinit_check"
3129 };
3130
3131 #ifdef ASSERT
3132 void AdapterHandlerLibrary::verify_adapter_sharing(CompiledEntrySignature& ces, AdapterHandlerEntry* cached_entry) {
3133   // We can only check for matching code if any was saved
3134 #ifndef ZERO
3135 AdapterHandlerEntry* comparison_entry = create_adapter(ces, false, true);
3136 assert(comparison_entry->adapter_blob() == nullptr, "no blob should be created when creating an adapter for comparison");
3137 assert(comparison_entry->compare_code(cached_entry), "code must match");
3138 // Release the one just created
3139 AdapterHandlerEntry::deallocate(comparison_entry);
3140 #endif // ZERO
3141 }
3142 #endif // ASSERT
3143
3144 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
3145 assert(!method->is_abstract(), "abstract methods do not have adapters");
3146 // Use customized signature handler. Need to lock around updates to
3147 // the _adapter_handler_table (it is not safe for concurrent readers
3148 // and a single writer: this could be fixed if it becomes a
3149 // problem).
3150
3151 // Fast-path for trivial adapters
3152 AdapterHandlerEntry* entry = get_simple_adapter(method);
3153 if (entry != nullptr) {
3154 return entry;
3155 }
3156
3157 ResourceMark rm;
3158 bool new_entry = false;
3159
3160 CompiledEntrySignature ces(method());
3161 ces.compute_calling_conventions();
3162 if (ces.has_scalarized_args()) {
3163 if (!method->has_scalarized_args()) {
3164 method->set_has_scalarized_args();
3165 }
3166 if (ces.c1_needs_stack_repair()) {
3167 method->set_c1_needs_stack_repair();
3168 }
3169 if (ces.c2_needs_stack_repair() && !method->c2_needs_stack_repair()) {
3170 method->set_c2_needs_stack_repair();
3171 }
3172 }
3173
3174 {
3175 MutexLocker mu(AdapterHandlerLibrary_lock);
3176
3177 // Lookup method signature's fingerprint
3178 entry = lookup(ces.sig_cc(), ces.has_inline_recv());
3179
3180 if (entry != nullptr) {
3181 #ifndef ZERO
3182 assert(entry->is_linked(), "AdapterHandlerEntry must have been linked");
3183 #endif
3184 #ifdef ASSERT
3185 if (!entry->in_aot_cache() && VerifyAdapterSharing) {
3186 verify_adapter_sharing(ces, entry);
3187 }
3188 #endif
3189 } else {
3190 entry = create_adapter(ces, /* allocate_code_blob */ true);
3191 if (entry != nullptr) {
3192 new_entry = true;
3193 }
3194 }
3195 }
3196
3197 // Outside of the lock
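  // (presumably because posting JVMTI/JFR events for the new adapter may itself
  // take locks, so it must not run while holding AdapterHandlerLibrary_lock)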
3198 if (new_entry) {
3199 post_adapter_creation(entry);
3200 }
3201 return entry;
3202 }
3203
3204 void AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler) {
3205 ResourceMark rm;
3206 const char* name = AdapterHandlerLibrary::name(handler);
3207 const uint32_t id = AdapterHandlerLibrary::id(handler);
3208
3209 CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::Adapter, id, name);
3210 if (blob != nullptr) {
3225 }
3226 insts_size = adapter_blob->code_size();
3227 st->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
3228 handler->fingerprint()->as_basic_args_string(),
3229 handler->fingerprint()->as_string(), insts_size);
3230 st->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
3231 if (Verbose || PrintStubCode) {
3232 address first_pc = adapter_blob->content_begin();
3233 if (first_pc != nullptr) {
3234 Disassembler::decode(first_pc, first_pc + insts_size, st, &adapter_blob->asm_remarks());
3235 st->cr();
3236 }
3237 }
3238 }
3239 #endif // PRODUCT
3240
3241 void AdapterHandlerLibrary::address_to_offset(address entry_address[AdapterBlob::ENTRY_COUNT],
3242 int entry_offset[AdapterBlob::ENTRY_COUNT]) {
3243 entry_offset[AdapterBlob::I2C] = 0;
3244 entry_offset[AdapterBlob::C2I] = entry_address[AdapterBlob::C2I] - entry_address[AdapterBlob::I2C];
3245 entry_offset[AdapterBlob::C2I_Inline] = entry_address[AdapterBlob::C2I_Inline] - entry_address[AdapterBlob::I2C];
3246 entry_offset[AdapterBlob::C2I_Inline_RO] = entry_address[AdapterBlob::C2I_Inline_RO] - entry_address[AdapterBlob::I2C];
3247 entry_offset[AdapterBlob::C2I_Unverified] = entry_address[AdapterBlob::C2I_Unverified] - entry_address[AdapterBlob::I2C];
3248 entry_offset[AdapterBlob::C2I_Unverified_Inline] = entry_address[AdapterBlob::C2I_Unverified_Inline] - entry_address[AdapterBlob::I2C];
3249 if (entry_address[AdapterBlob::C2I_No_Clinit_Check] == nullptr) {
3250 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = -1;
3251 } else {
3252 entry_offset[AdapterBlob::C2I_No_Clinit_Check] = entry_address[AdapterBlob::C2I_No_Clinit_Check] - entry_address[AdapterBlob::I2C];
3253 }
3254 }
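
// Usage note: all offsets are relative to the I2C entry (offset 0); a missing
// C2I_No_Clinit_Check entry is encoded as -1, presumably so it can be
// recognized and skipped when the offsets are converted back to addresses.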
3255
3256 bool AdapterHandlerLibrary::generate_adapter_code(AdapterHandlerEntry* handler,
3257 CompiledEntrySignature& ces,
3258 bool allocate_code_blob,
3259 bool is_transient) {
3260 if (log_is_enabled(Info, perf, class, link)) {
3261 ClassLoader::perf_method_adapters_count()->inc();
3262 }
3263
3264 #ifndef ZERO
3265 AdapterBlob* adapter_blob = nullptr;
3266 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3267 CodeBuffer buffer(buf);
3268 short buffer_locs[20];
3269 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3270 sizeof(buffer_locs)/sizeof(relocInfo));
3271 MacroAssembler masm(&buffer);
3272 address entry_address[AdapterBlob::ENTRY_COUNT];
3273
3274   // Get a description of the compiled Java calling convention and the largest used (VMReg) stack slot usage
3275 SharedRuntime::generate_i2c2i_adapters(&masm,
3276 ces.args_on_stack(),
3277 ces.sig(),
3278 ces.regs(),
3279 ces.sig_cc(),
3280 ces.regs_cc(),
3281 ces.sig_cc_ro(),
3282 ces.regs_cc_ro(),
3283 entry_address,
3284 adapter_blob,
3285 allocate_code_blob);
3286
3287 if (ces.has_scalarized_args()) {
3288 // Save a C heap allocated version of the scalarized signature and store it in the adapter
3289 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3290 heap_sig->appendAll(ces.sig_cc());
3291 handler->set_sig_cc(heap_sig);
3292 }
3293   // On Zero there is no code to save, and no need to create a blob
3294   // or relocate the handler.
3295 int entry_offset[AdapterBlob::ENTRY_COUNT];
3296 address_to_offset(entry_address, entry_offset);
3297 #ifdef ASSERT
3298 if (VerifyAdapterSharing) {
3299 handler->save_code(buf->code_begin(), buffer.insts_size());
3300 if (is_transient) {
3301 return true;
3302 }
3303 }
3304 #endif
3305 if (adapter_blob == nullptr) {
3306 // CodeCache is full, disable compilation
3307     // Ought to log this, but the compile log is only per compile thread
3308     // and we're some nondescript Java thread.
3309 return false;
3310 }
3311 handler->set_adapter_blob(adapter_blob);
3312 if (!is_transient && AOTCodeCache::is_dumping_adapter()) {
3313 // try to save generated code
3314 const char* name = AdapterHandlerLibrary::name(handler);
3315 const uint32_t id = AdapterHandlerLibrary::id(handler);
3316 bool success = AOTCodeCache::store_code_blob(*adapter_blob, AOTCodeEntry::Adapter, id, name);
3317 assert(success || !AOTCodeCache::is_dumping_adapter(), "caching of adapter must be disabled");
3318 }
3319 #endif // ZERO
3320
3321 #ifndef PRODUCT
3322 // debugging support
3323 if (PrintAdapterHandlers || PrintStubCode) {
3324 print_adapter_handler_info(tty, handler);
3325 }
3326 #endif
3327
3328 return true;
3329 }
3330
3331 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(CompiledEntrySignature& ces,
3332 bool allocate_code_blob,
3333 bool is_transient) {
3334 AdapterFingerPrint* fp = AdapterFingerPrint::allocate(ces.sig_cc(), ces.has_inline_recv());
3335 #ifdef ASSERT
3336 // Verify that we can successfully restore the compiled entry signature object.
3337 CompiledEntrySignature ces_verify;
3338 ces_verify.initialize_from_fingerprint(fp);
3339 #endif
3340 AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fp);
3341 if (!generate_adapter_code(handler, ces, allocate_code_blob, is_transient)) {
3342 AdapterHandlerEntry::deallocate(handler);
3343 return nullptr;
3344 }
3345 if (!is_transient) {
3346 assert_lock_strong(AdapterHandlerLibrary_lock);
3347 _adapter_handler_table->put(fp, handler);
3348 }
3349 return handler;
3350 }
3351
3352 #if INCLUDE_CDS
3353 void AdapterHandlerEntry::remove_unshareable_info() {
3354 #ifdef ASSERT
3355 _saved_code = nullptr;
3356 _saved_code_length = 0;
3357 #endif // ASSERT
3358 _adapter_blob = nullptr;
3359 _linked = false;
3360 }
3361
3424 // This method is used during a production run to link archived adapters (stored in the AOT cache)
3425 // to their code in the AOT code cache.
3426 void AdapterHandlerEntry::link() {
3427 ResourceMark rm;
3428 assert(_fingerprint != nullptr, "_fingerprint must not be null");
3429 bool generate_code = false;
3430   // Generate code only if the AOTCodeCache is not available, adapter
3431   // caching is disabled, or we fail to link
3432   // the AdapterHandlerEntry to its code in the AOTCodeCache
3433 if (AOTCodeCache::is_using_adapter()) {
3434 AdapterHandlerLibrary::link_aot_adapter_handler(this);
3435 // If link_aot_adapter_handler() succeeds, _adapter_blob will be non-null
3436 if (_adapter_blob == nullptr) {
3437 log_warning(aot)("Failed to link AdapterHandlerEntry (fp=%s) to its code in the AOT code cache", _fingerprint->as_basic_args_string());
3438 generate_code = true;
3439 }
3440 } else {
3441 generate_code = true;
3442 }
3443 if (generate_code) {
3444 CompiledEntrySignature ces;
3445 ces.initialize_from_fingerprint(_fingerprint);
3446 if (!AdapterHandlerLibrary::generate_adapter_code(this, ces, true, false)) {
3447 // Don't throw exceptions during VM initialization because java.lang.* classes
3448 // might not have been initialized, causing problems when constructing the
3449 // Java exception object.
3450 vm_exit_during_initialization("Out of space in CodeCache for adapters");
3451 }
3452 }
3453 if (_adapter_blob != nullptr) {
3454 post_adapter_creation(this);
3455 }
3456 assert(_linked, "AdapterHandlerEntry must now be linked");
3457 }
3458
3459 void AdapterHandlerLibrary::link_aot_adapters() {
3460 uint max_id = 0;
3461 assert(AOTCodeCache::is_using_adapter(), "AOT adapters code should be available");
3462   /* It is possible that some adapters generated in the assembly phase are not stored in the cache.
3463    * That implies the adapter ids of the adapters in the cache may not be contiguous.
3464    * If the size of the _aot_adapter_handler_table were used to initialize _id_counter, it could
3465    * result in id collisions between AOT-stored handlers and runtime-generated handlers.
3466    * To avoid such a situation, initialize _id_counter with the largest adapter id among the AOT-stored handlers.
3467    */
3468 _aot_adapter_handler_table.iterate([&](AdapterHandlerEntry* entry) {
3469 assert(!entry->is_linked(), "AdapterHandlerEntry is already linked!");
3470 entry->link();
3471 max_id = MAX2(max_id, entry->id());
3472 });
3473   // Set the adapter id counter to the maximum id found in the AOT cache
3474 assert(_id_counter == 0, "Did not expect new AdapterHandlerEntry to be created at this stage");
3475 _id_counter = max_id;
3476 }
3477
3478 // This method is called during a production run to look up simple adapters
3479 // in the archived adapter handler table
3480 void AdapterHandlerLibrary::lookup_simple_adapters() {
3481 assert(!_aot_adapter_handler_table.empty(), "archived adapter handler table is empty");
3482
3483 MutexLocker mu(AdapterHandlerLibrary_lock);
3484 ResourceMark rm;
3485 CompiledEntrySignature no_args;
3486 no_args.compute_calling_conventions();
3487 _no_arg_handler = lookup(no_args.sig_cc(), no_args.has_inline_recv());
3488
3489 CompiledEntrySignature obj_args;
3490 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
3491 obj_args.compute_calling_conventions();
3492 _obj_arg_handler = lookup(obj_args.sig_cc(), obj_args.has_inline_recv());
3493
3494 CompiledEntrySignature int_args;
3495 SigEntry::add_entry(int_args.sig(), T_INT);
3496 int_args.compute_calling_conventions();
3497 _int_arg_handler = lookup(int_args.sig_cc(), int_args.has_inline_recv());
3498
3499 CompiledEntrySignature obj_int_args;
3500 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
3501 SigEntry::add_entry(obj_int_args.sig(), T_INT);
3502 obj_int_args.compute_calling_conventions();
3503 _obj_int_arg_handler = lookup(obj_int_args.sig_cc(), obj_int_args.has_inline_recv());
3504
3505 CompiledEntrySignature obj_obj_args;
3506 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3507 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
3508 obj_obj_args.compute_calling_conventions();
3509 _obj_obj_arg_handler = lookup(obj_obj_args.sig_cc(), obj_obj_args.has_inline_recv());
3510
3511 assert(_no_arg_handler != nullptr &&
3512 _obj_arg_handler != nullptr &&
3513 _int_arg_handler != nullptr &&
3514 _obj_int_arg_handler != nullptr &&
3515 _obj_obj_arg_handler != nullptr, "Initial adapters not found in archived adapter handler table");
3516 assert(_no_arg_handler->is_linked() &&
3517 _obj_arg_handler->is_linked() &&
3518 _int_arg_handler->is_linked() &&
3519 _obj_int_arg_handler->is_linked() &&
3520 _obj_obj_arg_handler->is_linked(), "Initial adapters not in linked state");
3521 }
3522 #endif // INCLUDE_CDS
3523
3524 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3525 LogStreamHandle(Trace, aot) lsh;
3526 if (lsh.is_enabled()) {
3527 lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3528 lsh.cr();
3529 }
3530 it->push(&_fingerprint);
3531 }
3532
3533 AdapterHandlerEntry::~AdapterHandlerEntry() {
3534 if (_fingerprint != nullptr) {
3535 AdapterFingerPrint::deallocate(_fingerprint);
3536 _fingerprint = nullptr;
3537 }
3538 if (_sig_cc != nullptr) {
3539 delete _sig_cc;
3540 }
3541 #ifdef ASSERT
3542 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3543 #endif
3544 FreeHeap(this);
3545 }
3546
3547
3548 #ifdef ASSERT
3549 // Capture the code before relocation so that it can be compared
3550 // against other versions. If the code is captured after relocation
3551 // then relative instructions won't be equivalent.
3552 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3553 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3554 _saved_code_length = length;
3555 memcpy(_saved_code, buffer, length);
3556 }
3557
3558
3559 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3560 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3608
3609 struct { double data[20]; } locs_buf;
3610 struct { double data[20]; } stubs_locs_buf;
3611 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3612 #if defined(AARCH64) || defined(PPC64)
3613 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3614 // in the constant pool to ensure ordering between the barrier and oops
3615 // accesses. For native_wrappers we need a constant.
3616 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3617 // static java call that is resolved in the runtime.
3618 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3619 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3620 }
3621 #endif
3622 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3623 MacroAssembler _masm(&buffer);
3624
3625 // Fill in the signature array, for the calling-convention call.
3626 const int total_args_passed = method->size_of_parameters();
3627
3628 BasicType stack_sig_bt[16];
3629 VMRegPair stack_regs[16];
3630 BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3631 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3632
3633 int i = 0;
3634 if (!method->is_static()) { // Pass in receiver first
3635 sig_bt[i++] = T_OBJECT;
3636 }
3637 SignatureStream ss(method->signature());
3638 for (; !ss.at_return_type(); ss.next()) {
3639 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
3640 if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3641 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
3642 }
3643 }
3644 assert(i == total_args_passed, "");
3645 BasicType ret_type = ss.type();
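
  // For example (illustration only): a non-static native method
  // `long f(Object o, double d)` has total_args_passed == 4 and yields
  //   sig_bt = { T_OBJECT /* receiver */, T_OBJECT, T_DOUBLE, T_VOID }
  // with ret_type == T_LONG.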
3646
3647 // Now get the compiled-Java arguments layout.
3648 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3649
3650 // Generate the compiled-to-native wrapper code
3651 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3652
3653 if (nm != nullptr) {
3654 {
3655 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3656 if (nm->make_in_use()) {
3657 method->set_code(method, nm);
3658 }
3659 }
3660
3661 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3662 if (directive->PrintAssemblyOption) {
3663 nm->print_code();
3664 }
3665 DirectivesStack::release(directive);
3893 if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3894 found = true;
3895 st->print("Adapter for signature: ");
3896 a->print_adapter_on(st);
3897 return true;
3898 } else {
3899 return false; // keep looking
3900 }
3901 };
3902 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3903 _adapter_handler_table->iterate(findblob_runtime_table);
3904 }
3905 assert(found, "Should have found handler");
3906 }
3907
3908 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3909 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3910 if (adapter_blob() != nullptr) {
3911 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3912 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3913 st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3914 st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3915 st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3916 st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3917 if (get_c2i_no_clinit_check_entry() != nullptr) {
3918 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3919 }
3920 }
3921 st->cr();
3922 }
3923
3924 #ifndef PRODUCT
3925
3926 void AdapterHandlerLibrary::print_statistics() {
3927 print_table_statistics();
3928 }
3929
3930 #endif /* PRODUCT */
3931
3932 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3933 assert(current == JavaThread::current(), "pre-condition");
3934 StackOverflow* overflow_state = current->stack_overflow_state();
3935 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3936 overflow_state->set_reserved_stack_activation(current->stack_base());
3983 event.set_method(method);
3984 event.commit();
3985 }
3986 }
3987 }
3988 return activation;
3989 }
3990
3991 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3992 // After any safepoint, just before going back to compiled code,
3993 // we inform the GC that we will be doing initializing writes to
3994 // this object in the future without emitting card-marks, so
3995 // GC may take any compensating steps.
3996
3997 oop new_obj = current->vm_result_oop();
3998 if (new_obj == nullptr) return;
3999
4000 BarrierSet *bs = BarrierSet::barrier_set();
4001 bs->on_slowpath_allocation_exit(current, new_obj);
4002 }
4003
4004 // We are at a compiled-code-to-interpreter call. We need backing
4005 // buffers for all inline type arguments. Allocate an object array to
4006 // hold them (convenient because once we're done with it we don't have
4007 // to worry about freeing it).
4008 oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS) {
4009 assert(InlineTypePassFieldsAsArgs, "no reason to call this");
4010 ResourceMark rm;
4011
4012 int nb_slots = 0;
4013 InstanceKlass* holder = callee->method_holder();
4014 allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
4015 if (allocate_receiver) {
4016 nb_slots++;
4017 }
4018 int arg_num = callee->is_static() ? 0 : 1;
4019 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4020 BasicType bt = ss.type();
4021 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4022 nb_slots++;
4023 }
4024 if (bt != T_VOID) {
4025 arg_num++;
4026 }
4027 }
4028 objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
4029 objArrayHandle array(THREAD, array_oop);
4030 arg_num = callee->is_static() ? 0 : 1;
4031 int i = 0;
4032 if (allocate_receiver) {
4033 InlineKlass* vk = InlineKlass::cast(holder);
4034 oop res = vk->allocate_instance(CHECK_NULL);
4035 array->obj_at_put(i++, res);
4036 }
4037 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
4038 BasicType bt = ss.type();
4039 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
4040 InlineKlass* vk = ss.as_inline_klass(holder);
4041 assert(vk != nullptr, "Unexpected klass");
4042 oop res = vk->allocate_instance(CHECK_NULL);
4043 array->obj_at_put(i++, res);
4044 }
4045 if (bt != T_VOID) {
4046 arg_num++;
4047 }
4048 }
4049 return array();
4050 }
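
// For example (hypothetical): for a callee `void m(MyValue a, int x, MyValue b)`
// where both MyValue arguments are scalarized, nb_slots == 2 (plus one more if a
// scalarized receiver is allocated), and the returned array holds one freshly
// allocated buffer instance per slot, in argument order.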
4051
4052 JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
4053 methodHandle callee(current, callee_method);
4054 oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, CHECK);
4055 current->set_vm_result_oop(array);
4056 current->set_vm_result_metadata(callee()); // TODO: required to keep callee live?
4057 JRT_END
4058
4059 // We're returning from an interpreted method: load each field into a
4060 // register following the calling convention
4061 JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
4062 {
4063 assert(res->klass()->is_inline_klass(), "only inline types here");
4064 ResourceMark rm;
4065 RegisterMap reg_map(current,
4066 RegisterMap::UpdateMap::include,
4067 RegisterMap::ProcessFrames::include,
4068 RegisterMap::WalkContinuation::skip);
4069 frame stubFrame = current->last_frame();
4070   frame callerFrame = stubFrame.sender(&reg_map);
4071 assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
4072
4073 InlineKlass* vk = InlineKlass::cast(res->klass());
4074
4075 const Array<SigEntry>* sig_vk = vk->extended_sig();
4076 const Array<VMRegPair>* regs = vk->return_regs();
4077
4078 if (regs == nullptr) {
4079 // The fields of the inline klass don't fit in registers, bail out
4080 return;
4081 }
4082
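  // regs->at(0) holds the buffered oop itself (checked by the assert at the
  // end), so the field values start at index 1.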
4083 int j = 1;
4084 for (int i = 0; i < sig_vk->length(); i++) {
4085 BasicType bt = sig_vk->at(i)._bt;
4086 if (bt == T_METADATA) {
4087 continue;
4088 }
4089 if (bt == T_VOID) {
4090 if (sig_vk->at(i-1)._bt == T_LONG ||
4091 sig_vk->at(i-1)._bt == T_DOUBLE) {
4092 j++;
4093 }
4094 continue;
4095 }
4096 int off = sig_vk->at(i)._offset;
4097 assert(off > 0, "offset in object should be positive");
4098 VMRegPair pair = regs->at(j);
4099 address loc = reg_map.location(pair.first(), nullptr);
4100 switch(bt) {
4101 case T_BOOLEAN:
4102 *(jboolean*)loc = res->bool_field(off);
4103 break;
4104 case T_CHAR:
4105 *(jchar*)loc = res->char_field(off);
4106 break;
4107 case T_BYTE:
4108 *(jbyte*)loc = res->byte_field(off);
4109 break;
4110 case T_SHORT:
4111 *(jshort*)loc = res->short_field(off);
4112 break;
4113 case T_INT: {
4114 *(jint*)loc = res->int_field(off);
4115 break;
4116 }
4117 case T_LONG:
4118 #ifdef _LP64
4119 *(intptr_t*)loc = res->long_field(off);
4120 #else
4121 Unimplemented();
4122 #endif
4123 break;
4124 case T_OBJECT:
4125 case T_ARRAY: {
4126 *(oop*)loc = res->obj_field(off);
4127 break;
4128 }
4129 case T_FLOAT:
4130 *(jfloat*)loc = res->float_field(off);
4131 break;
4132 case T_DOUBLE:
4133 *(jdouble*)loc = res->double_field(off);
4134 break;
4135 default:
4136 ShouldNotReachHere();
4137 }
4138 j++;
4139 }
4140 assert(j == regs->length(), "missed a field?");
4141
4142 #ifdef ASSERT
4143 VMRegPair pair = regs->at(0);
4144 address loc = reg_map.location(pair.first(), nullptr);
4145 assert(*(oopDesc**)loc == res, "overwritten object");
4146 #endif
4147
4148 current->set_vm_result_oop(res);
4149 }
4150 JRT_END
4151
4152 // We've returned to an interpreted method; the interpreter needs a
4153 // reference to an inline type instance. Allocate it and initialize it
4154 // from the field values in registers.
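// The incoming `res` is overloaded: if bit 0 is clear it is already a buffered
// oop; if bit 0 is set it is a tagged InlineKlass* and the field values are
// still in registers (see the bit checks below).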
4155 JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
4156 {
4157 ResourceMark rm;
4158 RegisterMap reg_map(current,
4159 RegisterMap::UpdateMap::include,
4160 RegisterMap::ProcessFrames::include,
4161 RegisterMap::WalkContinuation::skip);
4162 frame stubFrame = current->last_frame();
4163   frame callerFrame = stubFrame.sender(&reg_map);
4164
4165 #ifdef ASSERT
4166 InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
4167 #endif
4168
4169 if (!is_set_nth_bit(res, 0)) {
4170 // We're not returning with inline type fields in registers (the
4171 // calling convention didn't allow it for this inline klass)
4172 assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
4173 current->set_vm_result_oop((oopDesc*)res);
4174 assert(verif_vk == nullptr, "broken calling convention");
4175 return;
4176 }
4177
4178 clear_nth_bit(res, 0);
4179 InlineKlass* vk = (InlineKlass*)res;
4180 assert(verif_vk == vk, "broken calling convention");
4181 assert(Metaspace::contains((void*)res), "should be klass");
4182
4183 // Allocate handles for every oop field so they are safe in case of
4184 // a safepoint when allocating
4185 GrowableArray<Handle> handles;
4186 vk->save_oop_fields(reg_map, handles);
4187
4188 // It's unsafe to safepoint until we are here
4189 JRT_BLOCK;
4190 {
4191 JavaThread* THREAD = current;
4192 oop vt = vk->realloc_result(reg_map, handles, CHECK);
4193 current->set_vm_result_oop(vt);
4194 }
4195 JRT_BLOCK_END;
4196 }
4197 JRT_END