27 #include "classfile/javaClasses.inline.hpp"
28 #include "classfile/stringTable.hpp"
29 #include "classfile/vmClasses.hpp"
30 #include "classfile/vmSymbols.hpp"
31 #include "code/codeCache.hpp"
32 #include "code/compiledIC.hpp"
33 #include "code/nmethod.inline.hpp"
34 #include "code/scopeDesc.hpp"
35 #include "code/vtableStubs.hpp"
36 #include "compiler/abstractCompiler.hpp"
37 #include "compiler/compileBroker.hpp"
38 #include "compiler/disassembler.hpp"
39 #include "gc/shared/barrierSet.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "gc/shared/gcLocker.inline.hpp"
42 #include "interpreter/interpreter.hpp"
43 #include "interpreter/interpreterRuntime.hpp"
44 #include "jvm.h"
45 #include "jfr/jfrEvents.hpp"
46 #include "logging/log.hpp"
47 #include "memory/resourceArea.hpp"
48 #include "memory/universe.hpp"
49 #include "metaprogramming/primitiveConversions.hpp"
50 #include "oops/klass.hpp"
51 #include "oops/method.inline.hpp"
52 #include "oops/objArrayKlass.hpp"
53 #include "oops/oop.inline.hpp"
54 #include "prims/forte.hpp"
55 #include "prims/jvmtiExport.hpp"
56 #include "prims/jvmtiThreadState.hpp"
57 #include "prims/methodHandles.hpp"
58 #include "prims/nativeLookup.hpp"
59 #include "runtime/arguments.hpp"
60 #include "runtime/atomic.hpp"
61 #include "runtime/basicLock.inline.hpp"
62 #include "runtime/frame.inline.hpp"
63 #include "runtime/handles.inline.hpp"
64 #include "runtime/init.hpp"
65 #include "runtime/interfaceSupport.inline.hpp"
66 #include "runtime/java.hpp"
67 #include "runtime/javaCalls.hpp"
68 #include "runtime/jniHandles.inline.hpp"
69 #include "runtime/perfData.hpp"
70 #include "runtime/sharedRuntime.hpp"
71 #include "runtime/stackWatermarkSet.hpp"
72 #include "runtime/stubRoutines.hpp"
73 #include "runtime/synchronizer.inline.hpp"
1170 // for a call currently in progress, i.e., arguments have been pushed on the stack
1171 // but callee has not been invoked yet. Caller frame must be compiled.
1172 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1173 CallInfo& callinfo, TRAPS) {
1174 Handle receiver;
1175 Handle nullHandle; // create a handy null handle for exception returns
1176 JavaThread* current = THREAD;
1177
1178 assert(!vfst.at_end(), "Java frame must exist");
1179
1180 // Find caller and bci from vframe
1181 methodHandle caller(current, vfst.method());
1182 int bci = vfst.bci();
1183
1184 if (caller->is_continuation_enter_intrinsic()) {
1185 bc = Bytecodes::_invokestatic;
1186 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1187 return receiver;
1188 }
1189
1190 Bytecode_invoke bytecode(caller, bci);
1191 int bytecode_index = bytecode.index();
1192 bc = bytecode.invoke_code();
1193
1194 methodHandle attached_method(current, extract_attached_method(vfst));
1195 if (attached_method.not_null()) {
1196 Method* callee = bytecode.static_target(CHECK_NH);
1197 vmIntrinsics::ID id = callee->intrinsic_id();
1198 // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1199 // it attaches statically resolved method to the call site.
1200 if (MethodHandles::is_signature_polymorphic(id) &&
1201 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1202 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1203
1204 // Adjust invocation mode according to the attached method.
1205 switch (bc) {
1206 case Bytecodes::_invokevirtual:
1207 if (attached_method->method_holder()->is_interface()) {
1208 bc = Bytecodes::_invokeinterface;
1209 }
1210 break;
1211 case Bytecodes::_invokeinterface:
1212 if (!attached_method->method_holder()->is_interface()) {
1213 bc = Bytecodes::_invokevirtual;
1214 }
1215 break;
1216 case Bytecodes::_invokehandle:
1217 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1218 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1219 : Bytecodes::_invokevirtual;
1220 }
1221 break;
1222 default:
1223 break;
1224 }
1225 }
1226 }
1227
1228 assert(bc != Bytecodes::_illegal, "not initialized");
1229
1230 bool has_receiver = bc != Bytecodes::_invokestatic &&
1231 bc != Bytecodes::_invokedynamic &&
1232 bc != Bytecodes::_invokehandle;
1233
1234 // Find receiver for non-static call
1235 if (has_receiver) {
1236 // This register map must be updated since we need to find the receiver for
1237 // compiled frames. The receiver might be in a register.
1238 RegisterMap reg_map2(current,
1239 RegisterMap::UpdateMap::include,
1240 RegisterMap::ProcessFrames::include,
1241 RegisterMap::WalkContinuation::skip);
1242 frame stubFrame = current->last_frame();
1243 // Caller-frame is a compiled frame
1244 frame callerFrame = stubFrame.sender(&reg_map2);
1245
1246 if (attached_method.is_null()) {
1247 Method* callee = bytecode.static_target(CHECK_NH);
1248 if (callee == nullptr) {
1249 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1250 }
1251 }
1252
1253 // Retrieve from a compiled argument list
1254 receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1255 assert(oopDesc::is_oop_or_null(receiver()), "");
1256
1257 if (receiver.is_null()) {
1258 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1259 }
1260 }
1261
1262 // Resolve method
1263 if (attached_method.not_null()) {
1264 // Parameterized by attached method.
1265 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1266 } else {
1267 // Parameterized by bytecode.
1268 constantPoolHandle constants(current, caller->constants());
1269 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1270 }
1271
1272 #ifdef ASSERT
1273 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1274 if (has_receiver) {
1275 assert(receiver.not_null(), "should have thrown exception");
1276 Klass* receiver_klass = receiver->klass();
1277 Klass* rk = nullptr;
1278 if (attached_method.not_null()) {
1279 // In case there's resolved method attached, use its holder during the check.
1280 rk = attached_method->method_holder();
1281 } else {
1282 // Klass is already loaded.
1283 constantPoolHandle constants(current, caller->constants());
1284 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1285 }
1286 Klass* static_receiver_klass = rk;
1287 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1288 "actual receiver must be subclass of static receiver klass");
1289 if (receiver_klass->is_instance_klass()) {
1290 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1291 tty->print_cr("ERROR: Klass not yet initialized!!");
1292 receiver_klass->print();
1293 }
1294 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1295 }
1296 }
1297 #endif
1298
1299 return receiver;
1300 }
1301
1302 methodHandle SharedRuntime::find_callee_method(TRAPS) {
1303 JavaThread* current = THREAD;
1304 ResourceMark rm(current);
1305 // We first need to check if any Java activations (compiled, interpreted)
1306 // exist on the stack since the last JavaCall. If not, we need
1307 // to get the target method from the JavaCall wrapper.
1308 vframeStream vfst(current, true); // Do not skip any javaCalls
1309 methodHandle callee_method;
1310 if (vfst.at_end()) {
1311 // No Java frames were found on stack since we did the JavaCall.
1312 // Hence the stack can only contain an entry_frame. We need to
1313 // find the target method from the stub frame.
1314 RegisterMap reg_map(current,
1315 RegisterMap::UpdateMap::skip,
1316 RegisterMap::ProcessFrames::include,
1317 RegisterMap::WalkContinuation::skip);
1318 frame fr = current->last_frame();
1319 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1320 fr = fr.sender(&reg_map);
1321 assert(fr.is_entry_frame(), "must be");
1322 // fr is now pointing to the entry frame.
1323 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1324 } else {
1325 Bytecodes::Code bc;
1326 CallInfo callinfo;
1327 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1328 callee_method = methodHandle(current, callinfo.selected_method());
1329 }
1330 assert(callee_method()->is_method(), "must be");
1331 return callee_method;
1332 }
1333
1334 // Resolves a call.
1335 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
1336 JavaThread* current = THREAD;
1337 ResourceMark rm(current);
1338 RegisterMap cbl_map(current,
1339 RegisterMap::UpdateMap::skip,
1340 RegisterMap::ProcessFrames::include,
1341 RegisterMap::WalkContinuation::skip);
1342 frame caller_frame = current->last_frame().sender(&cbl_map);
1343
1344 CodeBlob* caller_cb = caller_frame.cb();
1345 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1346 nmethod* caller_nm = caller_cb->as_nmethod();
1347
1348 // determine call info & receiver
1349 // note: a) receiver is null for static calls
1350 // b) an exception is thrown if receiver is null for non-static calls
1351 CallInfo call_info;
1352 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1353 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1354
1355 NoSafepointVerifier nsv;
1356
1357 methodHandle callee_method(current, call_info.selected_method());
1358
1359 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1360 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1361 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1362 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1363 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1364
1365 assert(!caller_nm->is_unloading(), "It should not be unloading");
1366
1367 #ifndef PRODUCT
1368 // tracing/debugging/statistics
1369 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1370 (is_virtual) ? (&_resolve_virtual_ctr) :
1371 (&_resolve_static_ctr);
1372 Atomic::inc(addr);
1373
1374 if (TraceCallFixup) {
1375 ResourceMark rm(current);
1376 tty->print("resolving %s%s (%s) call to",
1377 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1378 Bytecodes::name(invoke_code));
1379 callee_method->print_short_name(tty);
1380 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1381 p2i(caller_frame.pc()), p2i(callee_method->code()));
1382 }
1383 #endif
1384
1385 if (invoke_code == Bytecodes::_invokestatic) {
1386 assert(callee_method->method_holder()->is_initialized() ||
1387 callee_method->method_holder()->is_reentrant_initialization(current),
1388 "invalid class initialization state for invoke_static");
1389 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1390 // In order to keep class initialization check, do not patch call
1391 // site for static call when the class is not fully initialized.
1392 // Proper check is enforced by call site re-resolution on every invocation.
1393 //
1394 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1395 // explicit class initialization check is put in nmethod entry (VEP).
1396 assert(callee_method->method_holder()->is_linked(), "must be");
1397 return callee_method;
1398 }
1399 }
1400
1401
1402 // JSR 292 key invariant:
1403 // If the resolved method is a MethodHandle invoke target, the call
1404 // site must be a MethodHandle call site, because the lambda form might tail-call
1405 // leaving the stack in a state unknown to either caller or callee
1406
1407 // Compute entry points. The computation of the entry points is independent of
1408 // patching the call.
1409
1410 // Make sure the callee nmethod does not get deoptimized and removed before
1411 // we are done patching the code.
1412
1413
1414 CompiledICLocker ml(caller_nm);
1415 if (is_virtual && !is_optimized) {
1416 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1417 inline_cache->update(&call_info, receiver->klass());
1418 } else {
1419 // Callsite is a direct call - set it to the destination method
1420 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1421 callsite->set(callee_method);
1422 }
1423
1424 return callee_method;
1425 }
1426
1427 // Inline caches exist only in compiled code
1428 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1429 #ifdef ASSERT
1430 RegisterMap reg_map(current,
1431 RegisterMap::UpdateMap::skip,
1432 RegisterMap::ProcessFrames::include,
1433 RegisterMap::WalkContinuation::skip);
1434 frame stub_frame = current->last_frame();
1435 assert(stub_frame.is_runtime_frame(), "sanity check");
1436 frame caller_frame = stub_frame.sender(&reg_map);
1437 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1438 #endif /* ASSERT */
1439
1440 methodHandle callee_method;
1441 JRT_BLOCK
1442 callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1443 // Return Method* through TLS
1444 current->set_vm_result_2(callee_method());
1445 JRT_BLOCK_END
1446 // return compiled code entry point after potential safepoints
1447 return get_resolved_entry(current, callee_method);
1448 JRT_END
1449
1450
1451 // Handle call site that has been made non-entrant
1452 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1453 // 6243940 We might end up in here if the callee is deoptimized
1454 // as we race to call it. We don't want to take a safepoint if
1455 // the caller was interpreted because the caller frame will look
1456 // interpreted to the stack walkers and arguments are now
1457 // "compiled" so it is much better to make this transition
1458 // invisible to the stack walking code. The i2c path will
1459 // place the callee method in the callee_target. It is stashed
1460 // there because if we tried to find the callee by normal means a
1461 // safepoint would be possible and we would have trouble GC'ing the compiled args.
1462 RegisterMap reg_map(current,
1463 RegisterMap::UpdateMap::skip,
1464 RegisterMap::ProcessFrames::include,
1465 RegisterMap::WalkContinuation::skip);
1466 frame stub_frame = current->last_frame();
1467 assert(stub_frame.is_runtime_frame(), "sanity check");
1468 frame caller_frame = stub_frame.sender(&reg_map);
1469
1470 if (caller_frame.is_interpreted_frame() ||
1471 caller_frame.is_entry_frame() ||
1472 caller_frame.is_upcall_stub_frame()) {
1473 Method* callee = current->callee_target();
1474 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1475 current->set_vm_result_2(callee);
1476 current->set_callee_target(nullptr);
1477 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1478 // Bypass class initialization checks in c2i when caller is in native.
1479 // JNI calls to static methods don't have class initialization checks.
1480 // Fast class initialization checks are present in c2i adapters and call into
1481 // SharedRuntime::handle_wrong_method() on the slow path.
1482 //
1483 // JVM upcalls may land here as well, but there's a proper check present in
1484 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1485 // so bypassing it in c2i adapter is benign.
1486 return callee->get_c2i_no_clinit_check_entry();
1487 } else {
1488 return callee->get_c2i_entry();
1489 }
1490 }
1491
1492 // Must be compiled to compiled path which is safe to stackwalk
1493 methodHandle callee_method;
1494 JRT_BLOCK
1495 // Force resolving of caller (if we called from compiled frame)
1496 callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1497 current->set_vm_result_2(callee_method());
1498 JRT_BLOCK_END
1499 // return compiled code entry point after potential safepoints
1500 return get_resolved_entry(current, callee_method);
1501 JRT_END
1502
1503 // Handle abstract method call
1504 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1505 // Verbose error message for AbstractMethodError.
1506 // Get the called method from the invoke bytecode.
1507 vframeStream vfst(current, true);
1508 assert(!vfst.at_end(), "Java frame must exist");
1509 methodHandle caller(current, vfst.method());
1510 Bytecode_invoke invoke(caller, vfst.bci());
1511 DEBUG_ONLY( invoke.verify(); )
1512
1513 // Find the compiled caller frame.
1514 RegisterMap reg_map(current,
1515 RegisterMap::UpdateMap::include,
1516 RegisterMap::ProcessFrames::include,
1517 RegisterMap::WalkContinuation::skip);
1518 frame stubFrame = current->last_frame();
1519 assert(stubFrame.is_runtime_frame(), "must be");
1520 frame callerFrame = stubFrame.sender(&reg_map);
1521 assert(callerFrame.is_compiled_frame(), "must be");
1522
1523 // Install exception and return forward entry.
1524 address res = SharedRuntime::throw_AbstractMethodError_entry();
1525 JRT_BLOCK
1526 methodHandle callee(current, invoke.static_target(current));
1527 if (!callee.is_null()) {
1528 oop recv = callerFrame.retrieve_receiver(&reg_map);
1529 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1530 res = StubRoutines::forward_exception_entry();
1531 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1532 }
1533 JRT_BLOCK_END
1534 return res;
1535 JRT_END
1536
1537 // return verified_code_entry if interp_only_mode is not set for the current thread;
1538 // otherwise return c2i entry.
1539 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method) {
1540 if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1541 // In interp_only_mode we need to go to the interpreted entry
1542 // The c2i won't patch in this mode -- see fixup_callers_callsite
1543 return callee_method->get_c2i_entry();
1544 }
1545 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1546 return callee_method->verified_code_entry();
1547 }
1548
1549 // resolve a static call and patch code
1550 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current))
1551 methodHandle callee_method;
1552 bool enter_special = false;
1553 JRT_BLOCK
1554 callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1555 current->set_vm_result_2(callee_method());
1556 JRT_BLOCK_END
1557 // return compiled code entry point after potential safepoints
1558 return get_resolved_entry(current, callee_method);
1559 JRT_END
1560
1561 // resolve virtual call and update inline cache to monomorphic
1562 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1563 methodHandle callee_method;
1564 JRT_BLOCK
1565 callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
1566 current->set_vm_result_2(callee_method());
1567 JRT_BLOCK_END
1568 // return compiled code entry point after potential safepoints
1569 return get_resolved_entry(current, callee_method);
1570 JRT_END
1571
1572
1573 // Resolve a virtual call that can be statically bound (e.g., always
1574 // monomorphic, so it has no inline cache). Patch code to resolved target.
1575 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1576 methodHandle callee_method;
1577 JRT_BLOCK
1578 callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
1579 current->set_vm_result_2(callee_method());
1580 JRT_BLOCK_END
1581 // return compiled code entry point after potential safepoints
1582 return get_resolved_entry(current, callee_method);
1583 JRT_END
1584
1585 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1586 JavaThread* current = THREAD;
1587 ResourceMark rm(current);
1588 CallInfo call_info;
1589 Bytecodes::Code bc;
1590
1591 // receiver is null for static calls. An exception is thrown for null
1592 // receivers for non-static calls
1593 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1594
1595 methodHandle callee_method(current, call_info.selected_method());
1596
1597 #ifndef PRODUCT
1598 Atomic::inc(&_ic_miss_ctr);
1599
1600 // Statistics & Tracing
1601 if (TraceCallFixup) {
1602 ResourceMark rm(current);
1603 tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1604 callee_method->print_short_name(tty);
1605 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1606 }
1607
1608 if (ICMissHistogram) {
1609 MutexLocker m(VMStatistic_lock);
1610 RegisterMap reg_map(current,
1611 RegisterMap::UpdateMap::skip,
1612 RegisterMap::ProcessFrames::include,
1613 RegisterMap::WalkContinuation::skip);
1614 frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1615 // produce statistics under the lock
1616 trace_ic_miss(f.pc());
1617 }
1618 #endif
1619
1620 // install an event collector so that when a vtable stub is created the
1621 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1622 // event can't be posted when the stub is created as locks are held
1623 // - instead the event will be deferred until the event collector goes
1624 // out of scope.
1625 JvmtiDynamicCodeEventCollector event_collector;
1626
1627 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1628 RegisterMap reg_map(current,
1629 RegisterMap::UpdateMap::skip,
1630 RegisterMap::ProcessFrames::include,
1631 RegisterMap::WalkContinuation::skip);
1632 frame caller_frame = current->last_frame().sender(&reg_map);
1633 CodeBlob* cb = caller_frame.cb();
1634 nmethod* caller_nm = cb->as_nmethod();
1635
1636 CompiledICLocker ml(caller_nm);
1637 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1638 inline_cache->update(&call_info, receiver()->klass());
1639
1640 return callee_method;
1641 }
1642
1643 //
1644 // Resets a call-site in compiled code so it will get resolved again.
1645 // This routine handles virtual call sites, optimized virtual call
1646 // sites, and static call sites. Typically used to change a call site's
1647 // destination from compiled to interpreted.
1648 //
1649 methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
1650 JavaThread* current = THREAD;
1651 ResourceMark rm(current);
1652 RegisterMap reg_map(current,
1653 RegisterMap::UpdateMap::skip,
1654 RegisterMap::ProcessFrames::include,
1655 RegisterMap::WalkContinuation::skip);
1656 frame stub_frame = current->last_frame();
1657 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1658 frame caller = stub_frame.sender(&reg_map);
1659
1660 // Do nothing if the frame isn't a live compiled frame.
1661 // nmethod could be deoptimized by the time we get here
1662 // so no update to the caller is needed.
1663
1664 if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1665 (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) {
1666
1667 address pc = caller.pc();
1668
1669 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1670 assert(caller_nm != nullptr, "did not find caller nmethod");
1671
1672 // Default call_addr is the location of the "basic" call.
1673 // Determine the address of the call we are re-resolving. With
1674 // Inline Caches we will always find a recognizable call.
1675 // With Inline Caches disabled we may or may not find a
1676 // recognizable call. We will always find a call for static
1677 // calls and for optimized virtual calls. For vanilla virtual
1678 // calls it depends on the state of the UseInlineCaches switch.
1679 //
1680 // With Inline Caches disabled we can get here for a virtual call
1681 // for two reasons:
1682 // 1 - calling an abstract method. The vtable for abstract methods
1683 // will run us thru handle_wrong_method and we will eventually
1684 // end up in the interpreter to throw the AbstractMethodError.
1685 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1686 // call and between the time we fetch the entry address and
1687 // we jump to it the target gets deoptimized. Similar to 1
1688 // we will wind up in the interpreter (thru a c2i with c2).
1689 //
1690 CompiledICLocker ml(caller_nm);
1691 address call_addr = caller_nm->call_instruction_address(pc);
1692
1693 if (call_addr != nullptr) {
1694 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1695 // bytes back in the instruction stream so we must also check for reloc info.
1696 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1697 bool ret = iter.next(); // Get item
1698 if (ret) {
1699 switch (iter.type()) {
1700 case relocInfo::static_call_type:
1701 case relocInfo::opt_virtual_call_type: {
1702 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1703 cdc->set_to_clean();
1704 break;
1705 }
1706
1707 case relocInfo::virtual_call_type: {
1708 // compiled, dispatched call (which used to call an interpreted method)
1709 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1710 inline_cache->set_to_clean();
1711 break;
1712 }
1713 default:
1714 break;
1715 }
1716 }
1717 }
1718 }
1719
1720 methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1721
1722
1723 #ifndef PRODUCT
1724 Atomic::inc(&_wrong_method_ctr);
1725
1726 if (TraceCallFixup) {
1727 ResourceMark rm(current);
1728 tty->print("handle_wrong_method reresolving call to");
1729 callee_method->print_short_name(tty);
1730 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1731 }
1732 #endif
1733
1734 return callee_method;
1735 }
1736
1737 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1738 // The faulting unsafe accesses should be changed to throw the error
1739 // synchronously instead. Meanwhile the faulting instruction will be
1740 // skipped over (effectively turning it into a no-op) and an
1741 // asynchronous exception will be raised which the thread will
1742 // handle at a later point. If the instruction is a load it will
1743 // return garbage.
1744
1745 // Request an async exception.
1746 thread->set_pending_unsafe_access_error();
1747
1748 // Return address of next instruction to execute.
1914 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
1915
1916 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
1917 if (message == nullptr) {
1918 // Shouldn't happen, but don't cause even more problems if it does
1919 message = const_cast<char*>(caster_klass->external_name());
1920 } else {
1921 jio_snprintf(message,
1922 msglen,
1923 "class %s cannot be cast to class %s (%s%s%s)",
1924 caster_name,
1925 target_name,
1926 caster_klass_description,
1927 klass_separator,
1928 target_klass_description
1929 );
1930 }
1931 return message;
1932 }
1933
1934 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
1935 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
1936 JRT_END
1937
1938 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
1939 if (!SafepointSynchronize::is_synchronizing()) {
1940 // Only try quick_enter() if we're not trying to reach a safepoint
1941 // so that the calling thread reaches the safepoint more quickly.
1942 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
1943 return;
1944 }
1945 }
1946 // NO_ASYNC required because an async exception on the state transition destructor
1947 // would leave you with the lock held and it would never be released.
1948 // The normal monitorenter NullPointerException is thrown without acquiring a lock
1949 // and the model is that an exception implies the method failed.
1950 JRT_BLOCK_NO_ASYNC
1951 Handle h_obj(THREAD, obj);
1952 ObjectSynchronizer::enter(h_obj, lock, current);
1953 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2164 tty->print_cr(" %% in nested categories are relative to their category");
2165 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2166 tty->cr();
2167
2168 MethodArityHistogram h;
2169 }
2170 #endif
2171
2172 #ifndef PRODUCT
2173 static int _lookups; // number of calls to lookup
2174 static int _equals; // number of buckets checked with matching hash
2175 static int _hits; // number of successful lookups
2176 static int _compact; // number of equals calls with compact signature
2177 #endif
2178
2179 // A simple wrapper class around the calling convention information
2180 // that allows sharing of adapters for the same calling convention.
2181 class AdapterFingerPrint : public CHeapObj<mtCode> {
2182 private:
2183 enum {
2184 _basic_type_bits = 4,
2185 _basic_type_mask = right_n_bits(_basic_type_bits),
2186 _basic_types_per_int = BitsPerInt / _basic_type_bits,
2187 _compact_int_count = 3
2188 };
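// With 32-bit ints this gives _basic_type_mask == 0xF and
// _basic_types_per_int == 8, i.e. eight 4-bit type codes per int.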
2189 // TO DO: Consider integrating this with a more global scheme for compressing signatures.
2190 // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2191
2192 union {
2193 int _compact[_compact_int_count];
2194 int* _fingerprint;
2195 } _value;
2196 int _length; // A negative length indicates the fingerprint is in the compact form;
2197 // otherwise _value._fingerprint is the array.
2198
2199 // Remap BasicTypes that are handled equivalently by the adapters.
2200 // These are correct for the current system but someday it might be
2201 // necessary to make this mapping platform dependent.
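// For example, (Z)V, (B)V, (C)V, (S)V and (I)V methods all share one
// adapter, since each of their single arguments encodes as T_INT below.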
2202 static int adapter_encoding(BasicType in) {
2203 switch (in) {
2204 case T_BOOLEAN:
2205 case T_BYTE:
2206 case T_SHORT:
2207 case T_CHAR:
2208 // These are all promoted to T_INT in the calling convention
2209 return T_INT;
2210
2211 case T_OBJECT:
2212 case T_ARRAY:
2213 // In other words, we assume that any register good enough for
2214 // an int or long is good enough for a managed pointer.
2215 #ifdef _LP64
2216 return T_LONG;
2217 #else
2218 return T_INT;
2219 #endif
2220
2221 case T_INT:
2222 case T_LONG:
2223 case T_FLOAT:
2224 case T_DOUBLE:
2225 case T_VOID:
2226 return in;
2227
2228 default:
2229 ShouldNotReachHere();
2230 return T_CONFLICT;
2231 }
2232 }
2233
2234 public:
2235 AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
2236 // The fingerprint is based on the BasicType signature encoded
2237 // into an array of ints with eight entries per int.
2238 int* ptr;
2239 int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2240 if (len <= _compact_int_count) {
2241 assert(_compact_int_count == 3, "else change next line");
2242 _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2243 // Storing the signature encoded as signed chars hits about 98%
2244 // of the time.
2245 _length = -len;
2246 ptr = _value._compact;
2247 } else {
2248 _length = len;
2249 _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2250 ptr = _value._fingerprint;
2251 }
2252
2253 // Now pack the BasicTypes with 8 per int
2254 int sig_index = 0;
2255 for (int index = 0; index < len; index++) {
2256 int value = 0;
2257 for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
2258 int bt = adapter_encoding(sig_bt[sig_index++]);
2259 assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2260 value = (value << _basic_type_bits) | bt;
2261 }
2262 ptr[index] = value;
2263 }
2264 }
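// Worked example (illustrative): for a virtual (IJ)V method, sig_bt is
// { T_OBJECT (receiver), T_INT, T_LONG, T_VOID }. With HotSpot's BasicType
// values T_INT == 10, T_LONG == 11, T_VOID == 14, and T_OBJECT encoding as
// T_LONG on LP64, the four nibbles pack into one int:
//   (((((11 << 4) | 10) << 4) | 11) << 4) | 14 == 0xBABE
// Here len == 1 <= _compact_int_count, so the compact form is used:
// _length == -1 and _value._compact[0] == 0x0000BABE.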
2265
2266 ~AdapterFingerPrint() {
2267 if (_length > 0) {
2268 FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2269 }
2270 }
2271
2272 int value(int index) {
2273 if (_length < 0) {
2274 return _value._compact[index];
2275 }
2276 return _value._fingerprint[index];
2277 }
2278 int length() {
2279 if (_length < 0) return -_length;
2280 return _length;
2281 }
2282
2283 bool is_compact() {
2308 const char* as_basic_args_string() {
2309 stringStream st;
2310 bool long_prev = false;
2311 for (int i = 0; i < length(); i++) {
2312 unsigned val = (unsigned)value(i);
2313 // args are packed so that first/lower arguments are in the highest
2314 // bits of each int value, so iterate from highest to the lowest
2315 for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2316 unsigned v = (val >> j) & _basic_type_mask;
2317 if (v == 0) {
2318 assert(i == length() - 1, "Only expect zeroes in the last word");
2319 continue;
2320 }
2321 if (long_prev) {
2322 long_prev = false;
2323 if (v == T_VOID) {
2324 st.print("J");
2325 } else {
2326 st.print("L");
2327 }
2328 }
2329 switch (v) {
2330 case T_INT: st.print("I"); break;
2331 case T_LONG: long_prev = true; break;
2332 case T_FLOAT: st.print("F"); break;
2333 case T_DOUBLE: st.print("D"); break;
2334 case T_VOID: break;
2335 default: ShouldNotReachHere();
2336 }
2337 }
2338 }
2339 if (long_prev) {
2340 st.print("L");
2341 }
2342 return st.as_string();
2343 }
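// E.g. the (IJ)V example above prints as "LIJ". Because objects encode as
// T_LONG on LP64, long_prev is what tells a real long apart from an object:
// a T_LONG nibble followed by its T_VOID filler prints "J", while one
// followed by anything else (or by the end of the args) prints "L".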
2344 #endif // !product
2345
2346 bool equals(AdapterFingerPrint* other) {
2347 if (other->_length != _length) {
2348 return false;
2349 }
2350 if (_length < 0) {
2351 assert(_compact_int_count == 3, "else change next line");
2352 return _value._compact[0] == other->_value._compact[0] &&
2353 _value._compact[1] == other->_value._compact[1] &&
2354 _value._compact[2] == other->_value._compact[2];
2355 } else {
2363 }
2364
2365 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2366 NOT_PRODUCT(_equals++);
2367 return fp1->equals(fp2);
2368 }
2369
2370 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2371 return fp->compute_hash();
2372 }
2373 };
2374
2375 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2376 using AdapterHandlerTable = ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2377 AnyObj::C_HEAP, mtCode,
2378 AdapterFingerPrint::compute_hash,
2379 AdapterFingerPrint::equals>;
2380 static AdapterHandlerTable* _adapter_handler_table;
2381
2382 // Find an entry with the same fingerprint if it exists
2383 static AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
2384 NOT_PRODUCT(_lookups++);
2385 assert_lock_strong(AdapterHandlerLibrary_lock);
2386 AdapterFingerPrint fp(total_args_passed, sig_bt);
2387 AdapterHandlerEntry** entry = _adapter_handler_table->get(&fp);
2388 if (entry != nullptr) {
2389 #ifndef PRODUCT
2390 if (fp.is_compact()) _compact++;
2391 _hits++;
2392 #endif
2393 return *entry;
2394 }
2395 return nullptr;
2396 }
2397
2398 #ifndef PRODUCT
2399 static void print_table_statistics() {
2400 auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2401 return sizeof(*key) + sizeof(*a);
2402 };
2403 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2404 ts.print(tty, "AdapterHandlerTable");
2405 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2406 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2407 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2408 _lookups, _equals, _hits, _compact);
2409 }
2410 #endif
2411
2412 // ---------------------------------------------------------------------------
2413 // Implementation of AdapterHandlerLibrary
2414 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
2415 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2416 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2417 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2418 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2419 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2420 const int AdapterHandlerLibrary_size = 16*K;
2421 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2422
2423 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2424 return _buffer;
2425 }
2426
2427 static void post_adapter_creation(const AdapterBlob* new_adapter,
2428 const AdapterHandlerEntry* entry) {
2429 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2430 char blob_id[256];
2431 jio_snprintf(blob_id,
2432 sizeof(blob_id),
2433 "%s(%s)",
2434 new_adapter->name(),
2435 entry->fingerprint()->as_string());
2436 if (Forte::is_enabled()) {
2437 Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2438 }
2439
2440 if (JvmtiExport::should_post_dynamic_code_generated()) {
2443 }
2444 }
2445
2446 void AdapterHandlerLibrary::initialize() {
2447 ResourceMark rm;
2448 AdapterBlob* no_arg_blob = nullptr;
2449 AdapterBlob* int_arg_blob = nullptr;
2450 AdapterBlob* obj_arg_blob = nullptr;
2451 AdapterBlob* obj_int_arg_blob = nullptr;
2452 AdapterBlob* obj_obj_arg_blob = nullptr;
2453 {
2454 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2455 MutexLocker mu(AdapterHandlerLibrary_lock);
2456
2457 // Create a special handler for abstract methods. Abstract methods
2458 // are never compiled so an i2c entry is somewhat meaningless, but
2459 // throw AbstractMethodError just in case.
2460 // Pass wrong_method_abstract for the c2i transitions to return
2461 // AbstractMethodError for invalid invocations.
2462 address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2463 _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, nullptr),
2464 SharedRuntime::throw_AbstractMethodError_entry(),
2465 wrong_method_abstract, wrong_method_abstract);
2466
2467 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2468 _no_arg_handler = create_adapter(no_arg_blob, 0, nullptr, true);
2469
2470 BasicType obj_args[] = { T_OBJECT };
2471 _obj_arg_handler = create_adapter(obj_arg_blob, 1, obj_args, true);
2472
2473 BasicType int_args[] = { T_INT };
2474 _int_arg_handler = create_adapter(int_arg_blob, 1, int_args, true);
2475
2476 BasicType obj_int_args[] = { T_OBJECT, T_INT };
2477 _obj_int_arg_handler = create_adapter(obj_int_arg_blob, 2, obj_int_args, true);
2478
2479 BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
2480 _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, 2, obj_obj_args, true);
2481
2482 assert(no_arg_blob != nullptr &&
2483 obj_arg_blob != nullptr &&
2484 int_arg_blob != nullptr &&
2485 obj_int_arg_blob != nullptr &&
2486 obj_obj_arg_blob != nullptr, "Initial adapters must be properly created");
2487 }
2488
2489 // Outside of the lock
2490 post_adapter_creation(no_arg_blob, _no_arg_handler);
2491 post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2492 post_adapter_creation(int_arg_blob, _int_arg_handler);
2493 post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2494 post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2495 }
2496
2497 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2498 address i2c_entry,
2499 address c2i_entry,
2500 address c2i_unverified_entry,
2501 address c2i_no_clinit_check_entry) {
2502 // Insert an entry into the table
2503 return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry,
2504 c2i_no_clinit_check_entry);
2505 }
2506
2507 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2508 if (method->is_abstract()) {
2509 return _abstract_method_handler;
2510 }
2511 int total_args_passed = method->size_of_parameters(); // All args on stack
2512 if (total_args_passed == 0) {
2513 return _no_arg_handler;
2514 } else if (total_args_passed == 1) {
2515 if (!method->is_static()) {
2516 return _obj_arg_handler;
2517 }
2518 switch (method->signature()->char_at(1)) {
2519 case JVM_SIGNATURE_CLASS:
2520 case JVM_SIGNATURE_ARRAY:
2521 return _obj_arg_handler;
2522 case JVM_SIGNATURE_INT:
2523 case JVM_SIGNATURE_BOOLEAN:
2524 case JVM_SIGNATURE_CHAR:
2525 case JVM_SIGNATURE_BYTE:
2526 case JVM_SIGNATURE_SHORT:
2527 return _int_arg_handler;
2528 }
2529 } else if (total_args_passed == 2 &&
2530 !method->is_static()) {
2531 switch (method->signature()->char_at(1)) {
2532 case JVM_SIGNATURE_CLASS:
2533 case JVM_SIGNATURE_ARRAY:
2534 return _obj_obj_arg_handler;
2535 case JVM_SIGNATURE_INT:
2536 case JVM_SIGNATURE_BOOLEAN:
2537 case JVM_SIGNATURE_CHAR:
2538 case JVM_SIGNATURE_BYTE:
2539 case JVM_SIGNATURE_SHORT:
2540 return _obj_int_arg_handler;
2541 }
2542 }
2543 return nullptr;
2544 }
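// Illustrative examples: a virtual ()V method has one parameter slot (the
// receiver) and gets _obj_arg_handler; a static (I)V method gets
// _int_arg_handler; a virtual (J)V method has three slots (the long takes
// two), so it falls through to nullptr and a full adapter lookup.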
2545
2546 class AdapterSignatureIterator : public SignatureIterator {
2547 private:
2548 BasicType stack_sig_bt[16];
2549 BasicType* sig_bt;
2550 int index;
2551
2552 public:
2553 AdapterSignatureIterator(Symbol* signature,
2554 fingerprint_t fingerprint,
2555 bool is_static,
2556 int total_args_passed) :
2557 SignatureIterator(signature, fingerprint),
2558 index(0)
2559 {
2560 sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2561 if (!is_static) { // Pass in receiver first
2562 sig_bt[index++] = T_OBJECT;
2563 }
2564 do_parameters_on(this);
2565 }
2566
2567 BasicType* basic_types() {
2568 return sig_bt;
2569 }
2570
2571 #ifdef ASSERT
2572 int slots() {
2573 return index;
2574 }
2575 #endif
2576
2577 private:
2578
2579 friend class SignatureIterator; // so do_parameters_on can call do_type
2580 void do_type(BasicType type) {
2581 sig_bt[index++] = type;
2582 if (type == T_LONG || type == T_DOUBLE) {
2583 sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots
2584 }
2585 }
2586 };
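// E.g. iterating a virtual (IJ)V method yields sig_bt = { T_OBJECT, T_INT,
// T_LONG, T_VOID } with slots() == 4, matching size_of_parameters().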
2587
2588 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2589 // Use customized signature handler. Need to lock around updates to
2590 // the _adapter_handler_table (it is not safe for concurrent readers
2591 // and a single writer: this could be fixed if it becomes a
2592 // problem).
2593
2594 // Fast-path for trivial adapters
2595 AdapterHandlerEntry* entry = get_simple_adapter(method);
2596 if (entry != nullptr) {
2597 return entry;
2598 }
2599
2600 ResourceMark rm;
2601 AdapterBlob* new_adapter = nullptr;
2602
2603 // Fill in the signature array, for the calling-convention call.
2604 int total_args_passed = method->size_of_parameters(); // All args on stack
2605
2606 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2607 method->is_static(), total_args_passed);
2608 assert(si.slots() == total_args_passed, "");
2609 BasicType* sig_bt = si.basic_types();
2610 {
2611 MutexLocker mu(AdapterHandlerLibrary_lock);
2612
2613 // Lookup method signature's fingerprint
2614 entry = lookup(total_args_passed, sig_bt);
2615
2616 if (entry != nullptr) {
2617 #ifdef ASSERT
2618 if (VerifyAdapterSharing) {
2619 AdapterBlob* comparison_blob = nullptr;
2620 AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, total_args_passed, sig_bt, false);
2621 assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison");
2622 assert(comparison_entry->compare_code(entry), "code must match");
2623 // Release the one just created and return the original
2624 delete comparison_entry;
2625 }
2626 #endif
2627 return entry;
2628 }
2629
2630 entry = create_adapter(new_adapter, total_args_passed, sig_bt, /* allocate_code_blob */ true);
2631 }
2632
2633 // Outside of the lock
2634 if (new_adapter != nullptr) {
2635 post_adapter_creation(new_adapter, entry);
2636 }
2637 return entry;
2638 }
2639
2640 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
2641 int total_args_passed,
2642 BasicType* sig_bt,
2643 bool allocate_code_blob) {
2644 if (log_is_enabled(Info, perf, class, link)) {
2645 ClassLoader::perf_method_adapters_count()->inc();
2646 }
2647
2648 // StubRoutines::_final_stubs_code is initialized after this function can be called. As a result,
2649 // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated prior
2650 // to all StubRoutines::_final_stubs_code being set. Checks refer to runtime range checks generated
2651 // in an I2C stub that ensure that an I2C stub is called from an interpreter frame or stubs.
2652 bool contains_all_checks = StubRoutines::final_stubs_code() != nullptr;
2653
2654 VMRegPair stack_regs[16];
2655 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2656
2657 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2658 int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2659 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2660 CodeBuffer buffer(buf);
2661 short buffer_locs[20];
2662 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2663 sizeof(buffer_locs)/sizeof(relocInfo));
2664
2665 // Make a C heap allocated version of the fingerprint to store in the adapter
2666 AdapterFingerPrint* fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
2667 MacroAssembler _masm(&buffer);
2668 AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
2669 total_args_passed,
2670 comp_args_on_stack,
2671 sig_bt,
2672 regs,
2673 fingerprint);
2674
2675 #ifdef ASSERT
2676 if (VerifyAdapterSharing) {
2677 entry->save_code(buf->code_begin(), buffer.insts_size());
2678 if (!allocate_code_blob) {
2679 return entry;
2680 }
2681 }
2682 #endif
2683
2684 new_adapter = AdapterBlob::create(&buffer);
2685 NOT_PRODUCT(int insts_size = buffer.insts_size());
2686 if (new_adapter == nullptr) {
2687 // CodeCache is full, disable compilation
2688 // Ought to log this but compile log is only per compile thread
2689 // and we're some nondescript Java thread.
2690 return nullptr;
2691 }
2692 entry->relocate(new_adapter->content_begin());
2693 #ifndef PRODUCT
2694 // debugging support
2695 if (PrintAdapterHandlers || PrintStubCode) {
2696 ttyLocker ttyl;
2697 entry->print_adapter_on(tty);
2698 tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
2699 _adapter_handler_table->number_of_entries(), fingerprint->as_basic_args_string(),
2700 fingerprint->as_string(), insts_size);
2701 tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
2702 if (Verbose || PrintStubCode) {
2703 address first_pc = entry->base_address();
2704 if (first_pc != nullptr) {
2705 Disassembler::decode(first_pc, first_pc + insts_size, tty
2706 NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
2707 tty->cr();
2708 }
2709 }
2710 }
2711 #endif
2712
2713 // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
2714 // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
2715 if (contains_all_checks || !VerifyAdapterCalls) {
2716 assert_lock_strong(AdapterHandlerLibrary_lock);
2717 _adapter_handler_table->put(fingerprint, entry);
2718 }
2719 return entry;
2720 }
2721
2722 address AdapterHandlerEntry::base_address() {
2723 address base = _i2c_entry;
2724 if (base == nullptr) base = _c2i_entry;
2725 assert(base <= _c2i_entry || _c2i_entry == nullptr, "");
2726 assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, "");
2727 assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, "");
2728 return base;
2729 }
2730
2731 void AdapterHandlerEntry::relocate(address new_base) {
2732 address old_base = base_address();
2733 assert(old_base != nullptr, "");
2734 ptrdiff_t delta = new_base - old_base;
2735 if (_i2c_entry != nullptr)
2736 _i2c_entry += delta;
2737 if (_c2i_entry != nullptr)
2738 _c2i_entry += delta;
2739 if (_c2i_unverified_entry != nullptr)
2740 _c2i_unverified_entry += delta;
2741 if (_c2i_no_clinit_check_entry != nullptr)
2742 _c2i_no_clinit_check_entry += delta;
2743 assert(base_address() == new_base, "");
2744 }
2745
2746
2747 AdapterHandlerEntry::~AdapterHandlerEntry() {
2748 delete _fingerprint;
2749 #ifdef ASSERT
2750 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
2751 #endif
2752 }
2753
2754
2755 #ifdef ASSERT
2756 // Capture the code before relocation so that it can be compared
2757 // against other versions. If the code is captured after relocation
2758 // then relative instructions won't be equivalent.
2759 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
2760 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
2761 _saved_code_length = length;
2762 memcpy(_saved_code, buffer, length);
2763 }
2764
2765
2766 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
2767 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
2768
2815
2816 struct { double data[20]; } locs_buf;
2817 struct { double data[20]; } stubs_locs_buf;
2818 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
2819 #if defined(AARCH64) || defined(PPC64)
2820 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
2821 // in the constant pool to ensure ordering between the barrier and oops
2822 // accesses. For native_wrappers we need a constant.
2823 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
2824 // static java call that is resolved in the runtime.
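// Note: PPC64_ONLY(x) expands to x on PPC64 and to nothing elsewhere, so the
// condition below reads "method->is_continuation_enter_intrinsic() && true"
// on PPC64 and simply "true" on AArch64.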
2825 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
2826 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
2827 }
2828 #endif
2829 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
2830 MacroAssembler _masm(&buffer);
2831
2832 // Fill in the signature array, for the calling-convention call.
2833 const int total_args_passed = method->size_of_parameters();
2834
2835 VMRegPair stack_regs[16];
2836 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2837
2838 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2839 method->is_static(), total_args_passed);
2840 BasicType* sig_bt = si.basic_types();
2841 assert(si.slots() == total_args_passed, "");
2842 BasicType ret_type = si.return_type();
2843
2844 // Now get the compiled-Java arguments layout.
2845 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2846
2847 // Generate the compiled-to-native wrapper code
2848 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
2849
2850 if (nm != nullptr) {
2851 {
2852 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
2853 if (nm->make_in_use()) {
2854 method->set_code(method, nm);
2855 }
2856 }
2857
2858 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
2859 if (directive->PrintAssemblyOption) {
2860 nm->print_code();
2861 }
2862 DirectivesStack::release(directive);
3069 st->print("Adapter for signature: ");
3070 a->print_adapter_on(st);
3071 return true;
3072 } else {
3073 return false; // keep looking
3074 }
3075 };
3076 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3077 _adapter_handler_table->iterate(findblob);
3078 assert(found, "Should have found handler");
3079 }
3080
3081 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3082 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3083 if (get_i2c_entry() != nullptr) {
3084 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3085 }
3086 if (get_c2i_entry() != nullptr) {
3087 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3088 }
3089 if (get_c2i_unverified_entry() != nullptr) {
3090 st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3091 }
3092 if (get_c2i_no_clinit_check_entry() != nullptr) {
3093 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3094 }
3095 st->cr();
3096 }
3097
3098 #ifndef PRODUCT
3099
3100 void AdapterHandlerLibrary::print_statistics() {
3101 print_table_statistics();
3102 }
3103
3104 #endif /* PRODUCT */
3105
3106 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3107 assert(current == JavaThread::current(), "pre-condition");
3108 StackOverflow* overflow_state = current->stack_overflow_state();
3109 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3110 overflow_state->set_reserved_stack_activation(current->stack_base());
3159 event.set_method(method);
3160 event.commit();
3161 }
3162 }
3163 }
3164 return activation;
3165 }
3166
3167 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3168 // After any safepoint, just before going back to compiled code,
3169 // we inform the GC that we will be doing initializing writes to
3170 // this object in the future without emitting card-marks, so
3171 // GC may take any compensating steps.
3172
3173 oop new_obj = current->vm_result();
3174 if (new_obj == nullptr) return;
3175
3176 BarrierSet *bs = BarrierSet::barrier_set();
3177 bs->on_slowpath_allocation_exit(current, new_obj);
3178 }
|
27 #include "classfile/javaClasses.inline.hpp"
28 #include "classfile/stringTable.hpp"
29 #include "classfile/vmClasses.hpp"
30 #include "classfile/vmSymbols.hpp"
31 #include "code/codeCache.hpp"
32 #include "code/compiledIC.hpp"
33 #include "code/nmethod.inline.hpp"
34 #include "code/scopeDesc.hpp"
35 #include "code/vtableStubs.hpp"
36 #include "compiler/abstractCompiler.hpp"
37 #include "compiler/compileBroker.hpp"
38 #include "compiler/disassembler.hpp"
39 #include "gc/shared/barrierSet.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "gc/shared/gcLocker.inline.hpp"
42 #include "interpreter/interpreter.hpp"
43 #include "interpreter/interpreterRuntime.hpp"
44 #include "jvm.h"
45 #include "jfr/jfrEvents.hpp"
46 #include "logging/log.hpp"
47 #include "memory/oopFactory.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "memory/universe.hpp"
50 #include "oops/access.hpp"
51 #include "oops/fieldStreams.inline.hpp"
52 #include "metaprogramming/primitiveConversions.hpp"
53 #include "oops/klass.hpp"
54 #include "oops/method.inline.hpp"
55 #include "oops/objArrayKlass.hpp"
56 #include "oops/objArrayOop.inline.hpp"
57 #include "oops/oop.inline.hpp"
58 #include "oops/inlineKlass.inline.hpp"
59 #include "prims/forte.hpp"
60 #include "prims/jvmtiExport.hpp"
61 #include "prims/jvmtiThreadState.hpp"
62 #include "prims/methodHandles.hpp"
63 #include "prims/nativeLookup.hpp"
64 #include "runtime/arguments.hpp"
65 #include "runtime/atomic.hpp"
66 #include "runtime/basicLock.inline.hpp"
67 #include "runtime/frame.inline.hpp"
68 #include "runtime/handles.inline.hpp"
69 #include "runtime/init.hpp"
70 #include "runtime/interfaceSupport.inline.hpp"
71 #include "runtime/java.hpp"
72 #include "runtime/javaCalls.hpp"
73 #include "runtime/jniHandles.inline.hpp"
74 #include "runtime/perfData.hpp"
75 #include "runtime/sharedRuntime.hpp"
76 #include "runtime/stackWatermarkSet.hpp"
77 #include "runtime/stubRoutines.hpp"
78 #include "runtime/synchronizer.inline.hpp"
1175 // for a call current in progress, i.e., arguments has been pushed on stack
1176 // but callee has not been invoked yet. Caller frame must be compiled.
1177 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1178 CallInfo& callinfo, TRAPS) {
1179 Handle receiver;
1180 Handle nullHandle; // create a handy null handle for exception returns
1181 JavaThread* current = THREAD;
1182
1183 assert(!vfst.at_end(), "Java frame must exist");
1184
1185 // Find caller and bci from vframe
1186 methodHandle caller(current, vfst.method());
1187 int bci = vfst.bci();
1188
1189 if (caller->is_continuation_enter_intrinsic()) {
1190 bc = Bytecodes::_invokestatic;
1191 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1192 return receiver;
1193 }
1194
1195 // Substitutability test implementation piggy backs on static call resolution
1196 Bytecodes::Code code = caller->java_code_at(bci);
1197 if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
1198 bc = Bytecodes::_invokestatic;
1199 methodHandle attached_method(THREAD, extract_attached_method(vfst));
1200 assert(attached_method.not_null(), "must have attached method");
1201 vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
1202 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
1203 #ifdef ASSERT
1204 Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(vmSymbols::isSubstitutable_name(), vmSymbols::object_object_boolean_signature());
1205 assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
1206 #endif
1207 return receiver;
1208 }
1209
1210 Bytecode_invoke bytecode(caller, bci);
1211 int bytecode_index = bytecode.index();
1212 bc = bytecode.invoke_code();
1213
1214 methodHandle attached_method(current, extract_attached_method(vfst));
1215 if (attached_method.not_null()) {
1216 Method* callee = bytecode.static_target(CHECK_NH);
1217 vmIntrinsics::ID id = callee->intrinsic_id();
1218 // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1219 // it attaches statically resolved method to the call site.
1220 if (MethodHandles::is_signature_polymorphic(id) &&
1221 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1222 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1223
1224 // Adjust invocation mode according to the attached method.
1225 switch (bc) {
1226 case Bytecodes::_invokevirtual:
1227 if (attached_method->method_holder()->is_interface()) {
1228 bc = Bytecodes::_invokeinterface;
1229 }
1230 break;
1231 case Bytecodes::_invokeinterface:
1232 if (!attached_method->method_holder()->is_interface()) {
1233 bc = Bytecodes::_invokevirtual;
1234 }
1235 break;
1236 case Bytecodes::_invokehandle:
1237 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1238 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1239 : Bytecodes::_invokevirtual;
1240 }
1241 break;
1242 default:
1243 break;
1244 }
1245 } else {
1246 assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1247 if (!attached_method->method_holder()->is_inline_klass()) {
1248 // Ignore the attached method in this case to avoid confusing the code below
1249 attached_method = methodHandle(current, nullptr);
1250 }
1251 }
1252 }
1253
1254 assert(bc != Bytecodes::_illegal, "not initialized");
1255
1256 bool has_receiver = bc != Bytecodes::_invokestatic &&
1257 bc != Bytecodes::_invokedynamic &&
1258 bc != Bytecodes::_invokehandle;
1259 bool check_null_and_abstract = true;
1260
1261 // Find receiver for non-static call
1262 if (has_receiver) {
1263 // This register map must be updated because we need to find the receiver for
1264 // compiled frames. The receiver might be in a register.
1265 RegisterMap reg_map2(current,
1266 RegisterMap::UpdateMap::include,
1267 RegisterMap::ProcessFrames::include,
1268 RegisterMap::WalkContinuation::skip);
1269 frame stubFrame = current->last_frame();
1270 // Caller-frame is a compiled frame
1271 frame callerFrame = stubFrame.sender(&reg_map2);
1272
1273 Method* callee = attached_method();
1274 if (callee == nullptr) {
1275 callee = bytecode.static_target(CHECK_NH);
1276 if (callee == nullptr) {
1277 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1278 }
1279 }
1280 bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->as_nmethod()->is_compiled_by_c1();
1281 if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
1282 // If the receiver is an inline type that is passed as fields, no oop is available.
1283 // Resolve the call without receiver null checking.
1284 assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
1285 assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
1286 if (bc == Bytecodes::_invokeinterface) {
1287 bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
1288 }
1289 check_null_and_abstract = false;
1290 } else {
1291 // Retrieve from a compiled argument list
1292 receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1293 assert(oopDesc::is_oop_or_null(receiver()), "");
1294 if (receiver.is_null()) {
1295 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1296 }
1297 }
1298 }
1299
1300 // Resolve method
1301 if (attached_method.not_null()) {
1302 // Parameterized by attached method.
1303 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
1304 } else {
1305 // Parameterized by bytecode.
1306 constantPoolHandle constants(current, caller->constants());
1307 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1308 }
1309
1310 #ifdef ASSERT
1311 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1312 if (has_receiver && check_null_and_abstract) {
1313 assert(receiver.not_null(), "should have thrown exception");
1314 Klass* receiver_klass = receiver->klass();
1315 Klass* rk = nullptr;
1316 if (attached_method.not_null()) {
1317 // In case there's resolved method attached, use its holder during the check.
1318 rk = attached_method->method_holder();
1319 } else {
1320 // Klass is already loaded.
1321 constantPoolHandle constants(current, caller->constants());
1322 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1323 }
1324 Klass* static_receiver_klass = rk;
1325 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1326 "actual receiver must be subclass of static receiver klass");
1327 if (receiver_klass->is_instance_klass()) {
1328 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1329 tty->print_cr("ERROR: Klass not yet initialized!!");
1330 receiver_klass->print();
1331 }
1332 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1333 }
1334 }
1335 #endif
1336
1337 return receiver;
1338 }
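// The invocation-mode adjustment performed above is easiest to see in isolation.
// The following is a hedged, standalone sketch (hypothetical names, not part of
// SharedRuntime): once a statically resolved method is attached to the call site,
// the kind of its holder decides whether a virtual call is really an interface
// call and vice versa; static-like modes need no adjustment.
namespace example_invoke_mode {
  enum class Invoke { Virtual, Interface, Static };

  inline Invoke adjust(Invoke bc, bool holder_is_interface) {
    if (bc == Invoke::Virtual && holder_is_interface)    return Invoke::Interface;
    if (bc == Invoke::Interface && !holder_is_interface) return Invoke::Virtual;
    return bc; // _invokestatic and friends are left untouched
  }
}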
1339
1340 methodHandle SharedRuntime::find_callee_method(bool is_optimized, bool& caller_is_c1, TRAPS) {
1341 JavaThread* current = THREAD;
1342 ResourceMark rm(current);
1343 // We first need to check if any Java activations (compiled, interpreted)
1344 // exist on the stack since the last JavaCall. If not, we need
1345 // to get the target method from the JavaCall wrapper.
1346 vframeStream vfst(current, true); // Do not skip any javaCalls
1347 methodHandle callee_method;
1348 if (vfst.at_end()) {
1349 // No Java frames were found on stack since we did the JavaCall.
1350 // Hence the stack can only contain an entry_frame. We need to
1351 // find the target method from the stub frame.
1352 RegisterMap reg_map(current,
1353 RegisterMap::UpdateMap::skip,
1354 RegisterMap::ProcessFrames::include,
1355 RegisterMap::WalkContinuation::skip);
1356 frame fr = current->last_frame();
1357 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1358 fr = fr.sender(&reg_map);
1359 assert(fr.is_entry_frame(), "must be");
1360 // fr is now pointing to the entry frame.
1361 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1362 } else {
1363 Bytecodes::Code bc;
1364 CallInfo callinfo;
1365 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1366 // Calls via mismatching methods are always non-scalarized
1367 if (callinfo.resolved_method()->mismatch() && !is_optimized) {
1368 caller_is_c1 = true;
1369 }
1370 callee_method = methodHandle(current, callinfo.selected_method());
1371 }
1372 assert(callee_method()->is_method(), "must be");
1373 return callee_method;
1374 }
1375
1376 // Resolves a call.
1377 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_is_c1, TRAPS) {
1378 JavaThread* current = THREAD;
1379 ResourceMark rm(current);
1380 RegisterMap cbl_map(current,
1381 RegisterMap::UpdateMap::skip,
1382 RegisterMap::ProcessFrames::include,
1383 RegisterMap::WalkContinuation::skip);
1384 frame caller_frame = current->last_frame().sender(&cbl_map);
1385
1386 CodeBlob* caller_cb = caller_frame.cb();
1387 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1388 nmethod* caller_nm = caller_cb->as_nmethod();
1389
1390 // determine call info & receiver
1391 // note: a) receiver is null for static calls
1392 // b) an exception is thrown if receiver is null for non-static calls
1393 CallInfo call_info;
1394 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1395 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1396
1397 NoSafepointVerifier nsv;
1398
1399 methodHandle callee_method(current, call_info.selected_method());
1400 // Calls via mismatching methods are always non-scalarized
1401 if (caller_nm->is_compiled_by_c1() || (call_info.resolved_method()->mismatch() && !is_optimized)) {
1402 caller_is_c1 = true;
1403 }
1404
1405 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1406 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1407 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1408 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1409 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1410
1411 assert(!caller_nm->is_unloading(), "It should not be unloading");
1412
1413 #ifndef PRODUCT
1414 // tracing/debugging/statistics
1415 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1416 (is_virtual) ? (&_resolve_virtual_ctr) :
1417 (&_resolve_static_ctr);
1418 Atomic::inc(addr);
1419
1420 if (TraceCallFixup) {
1421 ResourceMark rm(current);
1422 tty->print("resolving %s%s (%s) call%s to",
1423 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1424 Bytecodes::name(invoke_code), (caller_is_c1) ? " from C1" : "");
1425 callee_method->print_short_name(tty);
1426 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1427 p2i(caller_frame.pc()), p2i(callee_method->code()));
1428 }
1429 #endif
1430
1431 if (invoke_code == Bytecodes::_invokestatic) {
1432 assert(callee_method->method_holder()->is_initialized() ||
1433 callee_method->method_holder()->is_reentrant_initialization(current),
1434 "invalid class initialization state for invoke_static");
1435 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1436 // In order to keep the class initialization check, do not patch the call
1437 // site for a static call when the class is not fully initialized.
1438 // The proper check is enforced by call site re-resolution on every invocation.
1439 //
1440 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1441 // an explicit class initialization check is put at the nmethod entry (VEP).
1442 assert(callee_method->method_holder()->is_linked(), "must be");
1443 return callee_method;
1444 }
1445 }
1446
1447
1448 // JSR 292 key invariant:
1449 // If the resolved method is a MethodHandle invoke target, the call
1450 // site must be a MethodHandle call site, because the lambda form might tail-call,
1451 // leaving the stack in a state unknown to either caller or callee.
1452
1453 // Compute entry points. The computation of the entry points is independent of
1454 // patching the call.
1455
1456 // Make sure the callee nmethod does not get deoptimized and removed before
1457 // we are done patching the code.
1458
1459
1460 CompiledICLocker ml(caller_nm);
1461 if (is_virtual && !is_optimized) {
1462 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1463 inline_cache->update(&call_info, receiver->klass(), caller_is_c1);
1464 } else {
1465 // Callsite is a direct call - set it to the destination method
1466 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1467 callsite->set(callee_method, caller_is_c1);
1468 }
1469
1470 return callee_method;
1471 }
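// A hedged sketch of the patching decision at the end of resolve_helper
// (hypothetical names, illustrative only): only a plain, non-optimized virtual
// call goes through an inline cache; every other call shape is a direct call
// whose destination can be set outright under the CompiledICLocker.
namespace example_patching {
  enum class Patch { InlineCache, DirectCall };

  inline Patch choose(bool is_virtual, bool is_optimized) {
    return (is_virtual && !is_optimized) ? Patch::InlineCache
                                         : Patch::DirectCall;
  }
}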
1472
1473 // Inline caches exist only in compiled code
1474 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1475 #ifdef ASSERT
1476 RegisterMap reg_map(current,
1477 RegisterMap::UpdateMap::skip,
1478 RegisterMap::ProcessFrames::include,
1479 RegisterMap::WalkContinuation::skip);
1480 frame stub_frame = current->last_frame();
1481 assert(stub_frame.is_runtime_frame(), "sanity check");
1482 frame caller_frame = stub_frame.sender(&reg_map);
1483 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1484 #endif /* ASSERT */
1485
1486 methodHandle callee_method;
1487 bool is_optimized = false;
1488 bool caller_is_c1 = false;
1489 JRT_BLOCK
1490 callee_method = SharedRuntime::handle_ic_miss_helper(is_optimized, caller_is_c1, CHECK_NULL);
1491 // Return Method* through TLS
1492 current->set_vm_result_2(callee_method());
1493 JRT_BLOCK_END
1494 // return compiled code entry point after potential safepoints
1495 return get_resolved_entry(current, callee_method, false, is_optimized, caller_is_c1);
1496 JRT_END
1497
1498
1499 // Handle call site that has been made non-entrant
1500 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1501 // 6243940 We might end up in here if the callee is deoptimized
1502 // as we race to call it. We don't want to take a safepoint if
1503 // the caller was interpreted because the caller frame will look
1504 // interpreted to the stack walkers and arguments are now
1505 // "compiled" so it is much better to make this transition
1506 // invisible to the stack walking code. The i2c path will
1507 // place the callee method in the callee_target. It is stashed
1508 // there because if we try to find the callee by normal means, a
1509 // safepoint is possible and we would have trouble GC'ing the compiled args.
1510 RegisterMap reg_map(current,
1511 RegisterMap::UpdateMap::skip,
1512 RegisterMap::ProcessFrames::include,
1513 RegisterMap::WalkContinuation::skip);
1514 frame stub_frame = current->last_frame();
1515 assert(stub_frame.is_runtime_frame(), "sanity check");
1516 frame caller_frame = stub_frame.sender(&reg_map);
1517
1518 if (caller_frame.is_interpreted_frame() ||
1519 caller_frame.is_entry_frame() ||
1520 caller_frame.is_upcall_stub_frame()) {
1521 Method* callee = current->callee_target();
1522 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1523 current->set_vm_result_2(callee);
1524 current->set_callee_target(nullptr);
1525 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1526 // Bypass class initialization checks in c2i when caller is in native.
1527 // JNI calls to static methods don't have class initialization checks.
1528 // Fast class initialization checks are present in c2i adapters and call into
1529 // SharedRuntime::handle_wrong_method() on the slow path.
1530 //
1531 // JVM upcalls may land here as well, but there's a proper check present in
1532 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1533 // so bypassing it in c2i adapter is benign.
1534 return callee->get_c2i_no_clinit_check_entry();
1535 } else {
1536 if (caller_frame.is_interpreted_frame()) {
1537 return callee->get_c2i_inline_entry();
1538 } else {
1539 return callee->get_c2i_entry();
1540 }
1541 }
1542 }
1543
1544 // Must be the compiled-to-compiled path, which is safe to stackwalk
1545 methodHandle callee_method;
1546 bool is_static_call = false;
1547 bool is_optimized = false;
1548 bool caller_is_c1 = false;
1549 JRT_BLOCK
1550 // Force re-resolution of the caller's call site (if we were called from a compiled frame)
1551 callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_is_c1, CHECK_NULL);
1552 current->set_vm_result_2(callee_method());
1553 JRT_BLOCK_END
1554 // return compiled code entry point after potential safepoints
1555 return get_resolved_entry(current, callee_method, is_static_call, is_optimized, caller_is_c1);
1556 JRT_END
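// A hedged, standalone sketch of the c2i entry choice made above (hypothetical
// names, illustrative only): an entry frame with fast class-init checks may
// bypass the clinit barrier, an interpreted caller takes the inline entry, and
// everything else takes the plain c2i entry.
namespace example_c2i_choice {
  enum class Caller { Interpreted, Entry, UpcallStub };
  enum class C2IEntry { NoClinitCheck, Inline, Plain };

  inline C2IEntry choose(Caller caller, bool fast_clinit_checks) {
    if (caller == Caller::Entry && fast_clinit_checks) return C2IEntry::NoClinitCheck;
    if (caller == Caller::Interpreted)                 return C2IEntry::Inline;
    return C2IEntry::Plain;
  }
}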
1557
1558 // Handle abstract method call
1559 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1560 // Verbose error message for AbstractMethodError.
1561 // Get the called method from the invoke bytecode.
1562 vframeStream vfst(current, true);
1563 assert(!vfst.at_end(), "Java frame must exist");
1564 methodHandle caller(current, vfst.method());
1565 Bytecode_invoke invoke(caller, vfst.bci());
1566 DEBUG_ONLY( invoke.verify(); )
1567
1568 // Find the compiled caller frame.
1569 RegisterMap reg_map(current,
1570 RegisterMap::UpdateMap::include,
1571 RegisterMap::ProcessFrames::include,
1572 RegisterMap::WalkContinuation::skip);
1573 frame stubFrame = current->last_frame();
1574 assert(stubFrame.is_runtime_frame(), "must be");
1575 frame callerFrame = stubFrame.sender(&reg_map);
1576 assert(callerFrame.is_compiled_frame(), "must be");
1577
1578 // Install exception and return forward entry.
1579 address res = SharedRuntime::throw_AbstractMethodError_entry();
1580 JRT_BLOCK
1581 methodHandle callee(current, invoke.static_target(current));
1582 if (!callee.is_null()) {
1583 oop recv = callerFrame.retrieve_receiver(&reg_map);
1584 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1585 res = StubRoutines::forward_exception_entry();
1586 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1587 }
1588 JRT_BLOCK_END
1589 return res;
1590 JRT_END
1591
1592 // return verified_code_entry if interp_only_mode is not set for the current thread;
1593 // otherwise return c2i entry.
1594 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method,
1595 bool is_static_call, bool is_optimized, bool caller_is_c1) {
1596 if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1597 // In interp_only_mode we need to go to the interpreted entry
1598 // The c2i won't patch in this mode -- see fixup_callers_callsite
1599 return callee_method->get_c2i_entry();
1600 }
1601
1602 if (caller_is_c1) {
1603 assert(callee_method->verified_inline_code_entry() != nullptr, "Jump to zero!");
1604 return callee_method->verified_inline_code_entry();
1605 } else if (is_static_call || is_optimized) {
1606 assert(callee_method->verified_code_entry() != nullptr, "Jump to zero!");
1607 return callee_method->verified_code_entry();
1608 } else {
1609 assert(callee_method->verified_inline_ro_code_entry() != nullptr, "Jump to zero!");
1610 return callee_method->verified_inline_ro_code_entry();
1611 }
1612 }
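// The three-way entry selection above, restated as a hedged sketch with
// hypothetical names (illustrative only). Roughly: VEP expects the scalarized
// calling convention, VIEP expects all arguments (including the receiver) as
// oops, and VIEP(RO) expects only the receiver as an oop.
namespace example_entry_selection {
  struct Entries {
    void* verified;           // VEP
    void* verified_inline;    // VIEP
    void* verified_inline_ro; // VIEP(RO)
  };

  inline void* select(const Entries& e, bool caller_is_c1,
                      bool is_static_call, bool is_optimized) {
    if (caller_is_c1)                   return e.verified_inline;
    if (is_static_call || is_optimized) return e.verified;
    return e.verified_inline_ro;
  }
}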
1613
1614 // resolve a static call and patch code
1615 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current))
1616 methodHandle callee_method;
1617 bool caller_is_c1 = false;
1618 bool enter_special = false;
1619 JRT_BLOCK
1620 callee_method = SharedRuntime::resolve_helper(false, false, caller_is_c1, CHECK_NULL);
1621 current->set_vm_result_2(callee_method());
1622 JRT_BLOCK_END
1623 // return compiled code entry point after potential safepoints
1624 return get_resolved_entry(current, callee_method, true, false, caller_is_c1);
1625 JRT_END
1626
1627 // resolve virtual call and update inline cache to monomorphic
1628 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1629 methodHandle callee_method;
1630 bool caller_is_c1 = false;
1631 JRT_BLOCK
1632 callee_method = SharedRuntime::resolve_helper(true, false, caller_is_c1, CHECK_NULL);
1633 current->set_vm_result_2(callee_method());
1634 JRT_BLOCK_END
1635 // return compiled code entry point after potential safepoints
1636 return get_resolved_entry(current, callee_method, false, false, caller_is_c1);
1637 JRT_END
1638
1639
1640 // Resolve a virtual call that can be statically bound (e.g., always
1641 // monomorphic, so it has no inline cache). Patch code to resolved target.
1642 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1643 methodHandle callee_method;
1644 bool caller_is_c1 = false;
1645 JRT_BLOCK
1646 callee_method = SharedRuntime::resolve_helper(true, true, caller_is_c1, CHECK_NULL);
1647 current->set_vm_result_2(callee_method());
1648 JRT_BLOCK_END
1649 // return compiled code entry point after potential safepoints
1650 return get_resolved_entry(current, callee_method, false, true, caller_is_c1);
1651 JRT_END
1652
1653
1654
1655 methodHandle SharedRuntime::handle_ic_miss_helper(bool& is_optimized, bool& caller_is_c1, TRAPS) {
1656 JavaThread* current = THREAD;
1657 ResourceMark rm(current);
1658 CallInfo call_info;
1659 Bytecodes::Code bc;
1660
1661 // receiver is null for static calls. An exception is thrown for null
1662 // receivers for non-static calls
1663 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1664
1665 methodHandle callee_method(current, call_info.selected_method());
1666
1667 #ifndef PRODUCT
1668 Atomic::inc(&_ic_miss_ctr);
1669
1670 // Statistics & Tracing
1671 if (TraceCallFixup) {
1672 ResourceMark rm(current);
1673 tty->print("IC miss (%s) call%s to", Bytecodes::name(bc), (caller_is_c1) ? " from C1" : "");
1674 callee_method->print_short_name(tty);
1675 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1676 }
1677
1678 if (ICMissHistogram) {
1679 MutexLocker m(VMStatistic_lock);
1680 RegisterMap reg_map(current,
1681 RegisterMap::UpdateMap::skip,
1682 RegisterMap::ProcessFrames::include,
1683 RegisterMap::WalkContinuation::skip);
1684 frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1685 // produce statistics under the lock
1686 trace_ic_miss(f.pc());
1687 }
1688 #endif
1689
1690 // install an event collector so that when a vtable stub is created the
1691 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1692 // event can't be posted when the stub is created as locks are held
1693 // - instead the event will be deferred until the event collector goes
1694 // out of scope.
1695 JvmtiDynamicCodeEventCollector event_collector;
1696
1697 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1698 RegisterMap reg_map(current,
1699 RegisterMap::UpdateMap::skip,
1700 RegisterMap::ProcessFrames::include,
1701 RegisterMap::WalkContinuation::skip);
1702 frame caller_frame = current->last_frame().sender(&reg_map);
1703 CodeBlob* cb = caller_frame.cb();
1704 nmethod* caller_nm = cb->as_nmethod();
1705 // Calls via mismatching methods are always non-scalarized
1706 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1707 caller_is_c1 = true;
1708 }
1709
1710 CompiledICLocker ml(caller_nm);
1711 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1712 inline_cache->update(&call_info, receiver()->klass(), caller_is_c1);
1713
1714 return callee_method;
1715 }
1716
1717 //
1718 // Resets a call-site in compiled code so it will get resolved again.
1719 // This routine handles virtual call sites, optimized virtual call
1720 // sites, and static call sites. It is typically used to change a call
1721 // site's destination from compiled to interpreted.
1722 //
1723 methodHandle SharedRuntime::reresolve_call_site(bool& is_static_call, bool& is_optimized, bool& caller_is_c1, TRAPS) {
1724 JavaThread* current = THREAD;
1725 ResourceMark rm(current);
1726 RegisterMap reg_map(current,
1727 RegisterMap::UpdateMap::skip,
1728 RegisterMap::ProcessFrames::include,
1729 RegisterMap::WalkContinuation::skip);
1730 frame stub_frame = current->last_frame();
1731 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1732 frame caller = stub_frame.sender(&reg_map);
1733 if (caller.is_compiled_frame()) {
1734 caller_is_c1 = caller.cb()->as_nmethod()->is_compiled_by_c1();
1735 }
1736
1737 // Do nothing if the frame isn't a live compiled frame.
1738 // nmethod could be deoptimized by the time we get here
1739 // so no update to the caller is needed.
1740
1741 if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1742 (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) {
1743
1744 address pc = caller.pc();
1745
1746 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1747 assert(caller_nm != nullptr, "did not find caller nmethod");
1748
1749 // Default call_addr is the location of the "basic" call.
1750 // Determine the address of the call we are re-resolving. With
1751 // Inline Caches we will always find a recognizable call.
1752 // With Inline Caches disabled we may or may not find a
1753 // recognizable call. We will always find a call for static
1754 // calls and for optimized virtual calls. For vanilla virtual
1755 // calls it depends on the state of the UseInlineCaches switch.
1756 //
1757 // With Inline Caches disabled we can get here for a virtual call
1758 // for two reasons:
1759 // 1 - calling an abstract method. The vtable for abstract methods
1760 // will run us thru handle_wrong_method and we will eventually
1761 // end up in the interpreter to throw the AbstractMethodError.
1762 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1763 // call and between the time we fetch the entry address and
1764 // we jump to it the target gets deoptimized. Similar to 1
1765 // we will wind up in the interpreter (through a c2i with c2).
1766 //
1767 CompiledICLocker ml(caller_nm);
1768 address call_addr = caller_nm->call_instruction_address(pc);
1769
1770 if (call_addr != nullptr) {
1771 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1772 // bytes back in the instruction stream so we must also check for reloc info.
1773 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1774 bool ret = iter.next(); // Get item
1775 if (ret) {
1776 is_static_call = false;
1777 is_optimized = false;
1778 switch (iter.type()) {
1779 case relocInfo::static_call_type:
1780 is_static_call = true; // fall through - direct calls share the cleaning path below
1781 case relocInfo::opt_virtual_call_type: {
1782 is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
1783 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1784 cdc->set_to_clean();
1785 break;
1786 }
1787 case relocInfo::virtual_call_type: {
1788 // compiled, dispatched call (which used to call an interpreted method)
1789 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1790 inline_cache->set_to_clean();
1791 break;
1792 }
1793 default:
1794 break;
1795 }
1796 }
1797 }
1798 }
1799
1800 methodHandle callee_method = find_callee_method(is_optimized, caller_is_c1, CHECK_(methodHandle()));
1801
1802 #ifndef PRODUCT
1803 Atomic::inc(&_wrong_method_ctr);
1804
1805 if (TraceCallFixup) {
1806 ResourceMark rm(current);
1807 tty->print("handle_wrong_method reresolving call%s to", (caller_is_c1) ? " from C1" : "");
1808 callee_method->print_short_name(tty);
1809 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1810 }
1811 #endif
1812
1813 return callee_method;
1814 }
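// A hedged sketch of the relocation-driven classification above (hypothetical
// names, illustrative only). Note the deliberate fall-through: static and
// optimized-virtual sites are both direct calls and share one cleaning path,
// while vanilla virtual sites are cleaned through their inline cache.
namespace example_reresolve {
  enum class RelocType { StaticCall, OptVirtualCall, VirtualCall, Other };

  struct CallKind { bool is_static; bool is_optimized; bool is_direct; };

  inline CallKind classify(RelocType t) {
    CallKind k = { false, false, false };
    switch (t) {
      case RelocType::StaticCall:
        k.is_static = true;
        // fall through - cleaned like an optimized virtual (direct) call
      case RelocType::OptVirtualCall:
        k.is_optimized = (t == RelocType::OptVirtualCall);
        k.is_direct = true;
        break;
      case RelocType::VirtualCall: // cleaned via the inline cache instead
      default:
        break;
    }
    return k;
  }
}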
1815
1816 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1817 // The faulting unsafe accesses should be changed to throw the error
1818 // synchronously instead. Meanwhile the faulting instruction will be
1819 // skipped over (effectively turning it into a no-op) and an
1820 // asynchronous exception will be raised which the thread will
1821 // handle at a later point. If the instruction is a load it will
1822 // return garbage.
1823
1824 // Request an async exception.
1825 thread->set_pending_unsafe_access_error();
1826
1827 // Return address of next instruction to execute.
1993 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
1994
1995 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
1996 if (message == nullptr) {
1997 // Shouldn't happen, but don't cause even more problems if it does
1998 message = const_cast<char*>(caster_klass->external_name());
1999 } else {
2000 jio_snprintf(message,
2001 msglen,
2002 "class %s cannot be cast to class %s (%s%s%s)",
2003 caster_name,
2004 target_name,
2005 caster_klass_description,
2006 klass_separator,
2007 target_klass_description
2008 );
2009 }
2010 return message;
2011 }
2012
2013 char* SharedRuntime::generate_identity_exception_message(JavaThread* current, Klass* klass) {
2014 assert(klass->is_inline_klass(), "Must be a concrete value class");
2015 const char* desc = "Cannot synchronize on an instance of value class ";
2016 const char* className = klass->external_name();
2017 size_t msglen = strlen(desc) + strlen(className) + 1;
2018 char* message = NEW_RESOURCE_ARRAY(char, msglen);
2019 if (nullptr == message) {
2020 // Out of memory: can't create detailed error message
2021 message = const_cast<char*>(klass->external_name());
2022 } else {
2023 jio_snprintf(message, msglen, "%s%s", desc, className);
2024 }
2025 return message;
2026 }
2027
2028 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
2029 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
2030 JRT_END
2031
2032 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2033 if (!SafepointSynchronize::is_synchronizing()) {
2034 // Only try quick_enter() if we're not trying to reach a safepoint
2035 // so that the calling thread reaches the safepoint more quickly.
2036 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
2037 return;
2038 }
2039 }
2040 // NO_ASYNC required because an async exception on the state transition destructor
2041 // would leave you with the lock held and it would never be released.
2042 // The normal monitorenter NullPointerException is thrown without acquiring a lock
2043 // and the model is that an exception implies the method failed.
2044 JRT_BLOCK_NO_ASYNC
2045 Handle h_obj(THREAD, obj);
2046 ObjectSynchronizer::enter(h_obj, lock, current);
2047 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2258 tty->print_cr(" %% in nested categories are relative to their category");
2259 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2260 tty->cr();
2261
2262 MethodArityHistogram h;
2263 }
2264 #endif
2265
2266 #ifndef PRODUCT
2267 static int _lookups; // number of calls to lookup
2268 static int _equals; // number of buckets checked with matching hash
2269 static int _hits; // number of successful lookups
2270 static int _compact; // number of equals calls with compact signature
2271 #endif
2272
2273 // A simple wrapper class around the calling convention information
2274 // that allows sharing of adapters for the same calling convention.
2275 class AdapterFingerPrint : public CHeapObj<mtCode> {
2276 private:
2277 enum {
2278 _basic_type_bits = 5,
2279 _basic_type_mask = right_n_bits(_basic_type_bits),
2280 _basic_types_per_int = BitsPerInt / _basic_type_bits,
2281 _compact_int_count = 3
2282 };
2283 // TO DO: Consider integrating this with a more global scheme for compressing signatures.
2284 // For now, 5 bits per component (plus T_VOID gaps after double/long) is not excessive.
2285
2286 union {
2287 int _compact[_compact_int_count];
2288 int* _fingerprint;
2289 } _value;
2290 int _length; // A negative length indicates the fingerprint is in the compact form;
2291 // otherwise _value._fingerprint is the array.
2292
2293 // Remap BasicTypes that are handled equivalently by the adapters.
2294 // These are correct for the current system but someday it might be
2295 // necessary to make this mapping platform dependent.
2296 static BasicType adapter_encoding(BasicType in) {
2297 switch (in) {
2298 case T_BOOLEAN:
2299 case T_BYTE:
2300 case T_SHORT:
2301 case T_CHAR:
2302 // They are all promoted to T_INT in the calling convention
2303 return T_INT;
2304
2305 case T_OBJECT:
2306 case T_ARRAY:
2307 // In other words, we assume that any register good enough for
2308 // an int or long is good enough for a managed pointer.
2309 #ifdef _LP64
2310 return T_LONG;
2311 #else
2312 return T_INT;
2313 #endif
2314
2315 case T_INT:
2316 case T_LONG:
2317 case T_FLOAT:
2318 case T_DOUBLE:
2319 case T_VOID:
2320 return in;
2321
2322 default:
2323 ShouldNotReachHere();
2324 return T_CONFLICT;
2325 }
2326 }
2327
2328 public:
2329 AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2330 // The fingerprint is based on the BasicType signature encoded
2331 // into an array of ints, _basic_types_per_int (six) entries per int.
2332 int total_args_passed = (sig != nullptr) ? sig->length() : 0;
2333 int* ptr;
2334 int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2335 if (len <= _compact_int_count) {
2336 assert(_compact_int_count == 3, "else change next line");
2337 _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2338 // Storing the signature encoded as signed chars hits about 98%
2339 // of the time.
2340 _length = -len;
2341 ptr = _value._compact;
2342 } else {
2343 _length = len;
2344 _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2345 ptr = _value._fingerprint;
2346 }
2347
2348 // Now pack the BasicTypes, _basic_types_per_int (6) per int
2349 int sig_index = 0;
2350 BasicType prev_bt = T_ILLEGAL;
2351 int vt_count = 0;
2352 for (int index = 0; index < len; index++) {
2353 int value = 0;
2354 for (int byte = 0; byte < _basic_types_per_int; byte++) {
2355 BasicType bt = T_ILLEGAL;
2356 if (sig_index < total_args_passed) {
2357 bt = sig->at(sig_index++)._bt;
2358 if (bt == T_METADATA) {
2359 // Found start of inline type in signature
2360 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2361 if (sig_index == 1 && has_ro_adapter) {
2362 // With a ro_adapter, replace receiver inline type delimiter by T_VOID to prevent matching
2363 // with other adapters that have the same inline type as first argument and no receiver.
2364 bt = T_VOID;
2365 }
2366 vt_count++;
2367 } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2368 // Found end of inline type in signature
2369 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2370 vt_count--;
2371 assert(vt_count >= 0, "invalid vt_count");
2372 } else if (vt_count == 0) {
2373 // Widen fields that are not part of a scalarized inline type argument
2374 bt = adapter_encoding(bt);
2375 }
2376 prev_bt = bt;
2377 }
2378 int bt_val = (bt == T_ILLEGAL) ? 0 : bt;
2379 assert((bt_val & _basic_type_mask) == bt_val, "must fit in 5 bits");
2380 value = (value << _basic_type_bits) | bt_val;
2381 }
2382 ptr[index] = value;
2383 }
2384 assert(vt_count == 0, "invalid vt_count");
2385 }
2386
2387 ~AdapterFingerPrint() {
2388 if (_length > 0) {
2389 FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2390 }
2391 }
2392
2393 int value(int index) {
2394 if (_length < 0) {
2395 return _value._compact[index];
2396 }
2397 return _value._fingerprint[index];
2398 }
2399 int length() {
2400 if (_length < 0) return -_length;
2401 return _length;
2402 }
2403
2404 bool is_compact() {
2429 const char* as_basic_args_string() {
2430 stringStream st;
2431 bool long_prev = false;
2432 for (int i = 0; i < length(); i++) {
2433 unsigned val = (unsigned)value(i);
2434 // args are packed so that first/lower arguments are in the highest
2435 // bits of each int value, so iterate from highest to the lowest
2436 for (int j = (_basic_types_per_int - 1) * _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2437 unsigned v = (val >> j) & _basic_type_mask;
2438 if (v == 0) {
2439 assert(i == length() - 1, "Only expect zeroes in the last word");
2440 continue;
2441 }
2442 if (long_prev) {
2443 long_prev = false;
2444 if (v == T_VOID) {
2445 st.print("J");
2446 } else {
2447 st.print("L");
2448 }
2449 } else if (v == T_LONG) {
2450 long_prev = true;
2451 } else if (v != T_VOID) {
2452 st.print("%c", type2char((BasicType)v));
2453 }
2454 }
2455 }
2456 if (long_prev) {
2457 st.print("L");
2458 }
2459 return st.as_string();
2460 }
2461 #endif // !PRODUCT
2462
2463 bool equals(AdapterFingerPrint* other) {
2464 if (other->_length != _length) {
2465 return false;
2466 }
2467 if (_length < 0) {
2468 assert(_compact_int_count == 3, "else change next line");
2469 return _value._compact[0] == other->_value._compact[0] &&
2470 _value._compact[1] == other->_value._compact[1] &&
2471 _value._compact[2] == other->_value._compact[2];
2472 } else {
2473 for (int i = 0; i < _length; i++) {
2474 if (_value._fingerprint[i] != other->_value._fingerprint[i]) {
2475 return false;
2476 }
2477 }
2478 }
2479 return true;
2480 }
2481
2482 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2483 NOT_PRODUCT(_equals++);
2484 return fp1->equals(fp2);
2485 }
2486
2487 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2488 return fp->compute_hash();
2489 }
2490 };
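// The packing scheme used by AdapterFingerPrint, shown as a hedged, standalone
// sketch (hypothetical names, illustrative only): one 5-bit BasicType code per
// slot, 32 / 5 = 6 slots per int, with the first argument in the highest-order
// occupied slot. pack6() mirrors the shift-or loop in the constructor and
// unpack() mirrors the read loop in as_basic_args_string().
namespace example_fingerprint_packing {
  const int kBits  = 5;
  const int kMask  = (1 << kBits) - 1; // 0x1f
  const int kSlots = 32 / kBits;       // 6 codes per int, top 2 bits unused

  inline int pack6(const int codes[kSlots]) {
    int value = 0;
    for (int i = 0; i < kSlots; i++) {
      value = (value << kBits) | (codes[i] & kMask);
    }
    return value; // codes[0] ends up in bits 25..29
  }

  inline int unpack(int value, int slot) { // slot 0 = first argument
    return (value >> ((kSlots - 1 - slot) * kBits)) & kMask;
  }
}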
2491
2492 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2493 using AdapterHandlerTable = ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2494 AnyObj::C_HEAP, mtCode,
2495 AdapterFingerPrint::compute_hash,
2496 AdapterFingerPrint::equals>;
2497 static AdapterHandlerTable* _adapter_handler_table;
2498
2499 // Find an entry with the same fingerprint if it exists
2500 static AdapterHandlerEntry* lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2501 NOT_PRODUCT(_lookups++);
2502 assert_lock_strong(AdapterHandlerLibrary_lock);
2503 AdapterFingerPrint fp(sig, has_ro_adapter);
2504 AdapterHandlerEntry** entry = _adapter_handler_table->get(&fp);
2505 if (entry != nullptr) {
2506 #ifndef PRODUCT
2507 if (fp.is_compact()) _compact++;
2508 _hits++;
2509 #endif
2510 return *entry;
2511 }
2512 return nullptr;
2513 }
2514
2515 #ifndef PRODUCT
2516 static void print_table_statistics() {
2517 auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2518 return sizeof(*key) + sizeof(*a);
2519 };
2520 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2521 ts.print(tty, "AdapterHandlerTable");
2522 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2523 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2524 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2525 _lookups, _equals, _hits, _compact);
2526 }
2527 #endif
2528
2529 // ---------------------------------------------------------------------------
2530 // Implementation of AdapterHandlerLibrary
2531 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
2532 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2533 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2534 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2535 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2536 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2537 const int AdapterHandlerLibrary_size = 48*K;
2538 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2539
2540 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2541 return _buffer;
2542 }
2543
2544 static void post_adapter_creation(const AdapterBlob* new_adapter,
2545 const AdapterHandlerEntry* entry) {
2546 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2547 char blob_id[256];
2548 jio_snprintf(blob_id,
2549 sizeof(blob_id),
2550 "%s(%s)",
2551 new_adapter->name(),
2552 entry->fingerprint()->as_string());
2553 if (Forte::is_enabled()) {
2554 Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2555 }
2556
2557 if (JvmtiExport::should_post_dynamic_code_generated()) {
2558 JvmtiExport::post_dynamic_code_generated(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2559 }
2560 }
2561 }
2562
2563 void AdapterHandlerLibrary::initialize() {
2564 ResourceMark rm;
2565 AdapterBlob* no_arg_blob = nullptr;
2566 AdapterBlob* int_arg_blob = nullptr;
2567 AdapterBlob* obj_arg_blob = nullptr;
2568 AdapterBlob* obj_int_arg_blob = nullptr;
2569 AdapterBlob* obj_obj_arg_blob = nullptr;
2570 {
2571 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2572 MutexLocker mu(AdapterHandlerLibrary_lock);
2573
2574 // Create a special handler for abstract methods. Abstract methods
2575 // are never compiled so an i2c entry is somewhat meaningless, but
2576 // throw AbstractMethodError just in case.
2577 // Pass wrong_method_abstract for the c2i transitions to return
2578 // AbstractMethodError for invalid invocations.
2579 address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2580 _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(nullptr),
2581 SharedRuntime::throw_AbstractMethodError_entry(),
2582 wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
2583 wrong_method_abstract, wrong_method_abstract);
2584 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2585
2586 CompiledEntrySignature no_args;
2587 no_args.compute_calling_conventions();
2588 _no_arg_handler = create_adapter(no_arg_blob, no_args, true);
2589
2590 CompiledEntrySignature obj_args;
2591 SigEntry::add_entry(obj_args.sig(), T_OBJECT, nullptr);
2592 obj_args.compute_calling_conventions();
2593 _obj_arg_handler = create_adapter(obj_arg_blob, obj_args, true);
2594
2595 CompiledEntrySignature int_args;
2596 SigEntry::add_entry(int_args.sig(), T_INT, nullptr);
2597 int_args.compute_calling_conventions();
2598 _int_arg_handler = create_adapter(int_arg_blob, int_args, true);
2599
2600 CompiledEntrySignature obj_int_args;
2601 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT, nullptr);
2602 SigEntry::add_entry(obj_int_args.sig(), T_INT, nullptr);
2603 obj_int_args.compute_calling_conventions();
2604 _obj_int_arg_handler = create_adapter(obj_int_arg_blob, obj_int_args, true);
2605
2606 CompiledEntrySignature obj_obj_args;
2607 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT, nullptr);
2608 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT, nullptr);
2609 obj_obj_args.compute_calling_conventions();
2610 _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, obj_obj_args, true);
2611
2612 assert(no_arg_blob != nullptr &&
2613 obj_arg_blob != nullptr &&
2614 int_arg_blob != nullptr &&
2615 obj_int_arg_blob != nullptr &&
2616 obj_obj_arg_blob != nullptr, "Initial adapters must be properly created");
2617 }
2618
2619
2620 // Outside of the lock
2621 post_adapter_creation(no_arg_blob, _no_arg_handler);
2622 post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2623 post_adapter_creation(int_arg_blob, _int_arg_handler);
2624 post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2625 post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2626 }
2627
2628 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2629 address i2c_entry,
2630 address c2i_entry,
2631 address c2i_inline_entry,
2632 address c2i_inline_ro_entry,
2633 address c2i_unverified_entry,
2634 address c2i_unverified_inline_entry,
2635 address c2i_no_clinit_check_entry) {
2636 return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry,
2637 c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
2638 }
2639
2640 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2641 if (method->is_abstract()) {
2642 return nullptr;
2643 }
2644 int total_args_passed = method->size_of_parameters(); // All args on stack
2645 if (total_args_passed == 0) {
2646 return _no_arg_handler;
2647 } else if (total_args_passed == 1) {
2648 if (!method->is_static()) {
2649 if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2650 return nullptr;
2651 }
2652 return _obj_arg_handler;
2653 }
2654 switch (method->signature()->char_at(1)) {
2655 case JVM_SIGNATURE_CLASS: {
2656 if (InlineTypePassFieldsAsArgs) {
2657 SignatureStream ss(method->signature());
2658 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2659 if (vk != nullptr) {
2660 return nullptr;
2661 }
2662 }
2663 return _obj_arg_handler;
2664 }
2665 case JVM_SIGNATURE_ARRAY:
2666 return _obj_arg_handler;
2667 case JVM_SIGNATURE_INT:
2668 case JVM_SIGNATURE_BOOLEAN:
2669 case JVM_SIGNATURE_CHAR:
2670 case JVM_SIGNATURE_BYTE:
2671 case JVM_SIGNATURE_SHORT:
2672 return _int_arg_handler;
2673 }
2674 } else if (total_args_passed == 2 &&
2675 !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2676 switch (method->signature()->char_at(1)) {
2677 case JVM_SIGNATURE_CLASS: {
2678 if (InlineTypePassFieldsAsArgs) {
2679 SignatureStream ss(method->signature());
2680 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2681 if (vk != nullptr) {
2682 return nullptr;
2683 }
2684 }
2685 return _obj_obj_arg_handler;
2686 }
2687 case JVM_SIGNATURE_ARRAY:
2688 return _obj_obj_arg_handler;
2689 case JVM_SIGNATURE_INT:
2690 case JVM_SIGNATURE_BOOLEAN:
2691 case JVM_SIGNATURE_CHAR:
2692 case JVM_SIGNATURE_BYTE:
2693 case JVM_SIGNATURE_SHORT:
2694 return _obj_int_arg_handler;
2695 }
2696 }
2697 return nullptr;
2698 }
2699
2700 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2701 _method(method), _num_inline_args(0), _has_inline_recv(false),
2702 _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2703 _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2704 _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2705 _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2706 _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2707 _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2708 }
2709
2710 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2711 // or the same entry for VEP and VIEP(RO).
2712 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2713 if (!has_scalarized_args()) {
2714 // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2715 return CodeOffsets::Verified_Entry;
2716 }
2717 if (_method->is_static()) {
2718 // Static methods don't need VIEP(RO)
2719 return CodeOffsets::Verified_Entry;
2720 }
2721
2722 if (has_inline_recv()) {
2723 if (num_inline_args() == 1) {
2724 // Share same entry for VIEP and VIEP(RO).
2725 // This is quite common: we have an instance method in an InlineKlass that has
2726 // no inline type args other than <this>.
2727 return CodeOffsets::Verified_Inline_Entry;
2728 } else {
2729 assert(num_inline_args() > 1, "must be");
2730 // No sharing:
2731 // VIEP(RO) -- <this> is passed as object
2732 // VEP -- <this> is passed as fields
2733 return CodeOffsets::Verified_Inline_Entry_RO;
2734 }
2735 }
2736
2737 // Either a static method, or <this> is not an inline type
2738 if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2739 // No sharing:
2740 // Some arguments are passed on the stack, and we have inserted reserved entries
2741 // into the VEP, but we never insert reserved entries into the VIEP(RO).
2742 return CodeOffsets::Verified_Inline_Entry_RO;
2743 } else {
2744 // Share same entry for VEP and VIEP(RO).
2745 return CodeOffsets::Verified_Entry;
2746 }
2747 }
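// The sharing decision above as a pure function - a hedged sketch with
// hypothetical names (illustrative only), useful for checking the case
// analysis at a glance.
namespace example_entry_sharing {
  enum class Entry { VEP, VIEP, VIEP_RO };

  inline Entry ro_entry(bool scalarized, bool is_static, bool inline_recv,
                        int num_inline_args, int stack_cc, int stack_cc_ro) {
    if (!scalarized || is_static) return Entry::VEP;
    if (inline_recv) {
      return (num_inline_args == 1) ? Entry::VIEP : Entry::VIEP_RO;
    }
    return (stack_cc != stack_cc_ro) ? Entry::VIEP_RO : Entry::VEP;
  }
}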
2748
2749 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
2750 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2751 if (_supers != nullptr) {
2752 return _supers;
2753 }
2754 _supers = new GrowableArray<Method*>();
2755 // Skip private, static, and <init> methods
2756 if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2757 return _supers;
2758 }
2759 Symbol* name = _method->name();
2760 Symbol* signature = _method->signature();
2761 const Klass* holder = _method->method_holder()->super();
2762 Symbol* holder_name = holder->name();
2763 ThreadInVMfromUnknown tiv;
2764 JavaThread* current = JavaThread::current();
2765 HandleMark hm(current);
2766 Handle loader(current, _method->method_holder()->class_loader());
2767
2768 // Walk up the class hierarchy and search for super methods
2769 while (holder != nullptr) {
2770 Method* super_method = holder->lookup_method(name, signature);
2771 if (super_method == nullptr) {
2772 break;
2773 }
2774 if (!super_method->is_static() && !super_method->is_private() &&
2775 (!super_method->is_package_private() ||
2776 super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
2777 _supers->push(super_method);
2778 }
2779 holder = super_method->method_holder()->super();
2780 }
2781 // Search interfaces for super methods
2782 Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
2783 for (int i = 0; i < interfaces->length(); ++i) {
2784 Method* m = interfaces->at(i)->lookup_method(name, signature);
2785 if (m != nullptr && !m->is_static() && m->is_public()) {
2786 _supers->push(m);
2787 }
2788 }
2789 return _supers;
2790 }
2791
2792 // Iterate over arguments and compute scalarized and non-scalarized signatures
2793 void CompiledEntrySignature::compute_calling_conventions(bool init) {
2794 bool has_scalarized = false;
2795 if (_method != nullptr) {
2796 InstanceKlass* holder = _method->method_holder();
2797 int arg_num = 0;
2798 if (!_method->is_static()) {
2799 // We shouldn't scalarize 'this' in a value class constructor
2800 if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() && !_method->is_object_constructor() &&
2801 (init || _method->is_scalarized_arg(arg_num))) {
2802 _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
2803 has_scalarized = true;
2804 _has_inline_recv = true;
2805 _num_inline_args++;
2806 } else {
2807 SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
2808 }
2809 SigEntry::add_entry(_sig, T_OBJECT, holder->name());
2810 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
2811 arg_num++;
2812 }
2813 for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
2814 BasicType bt = ss.type();
2815 if (bt == T_OBJECT) {
2816 InlineKlass* vk = ss.as_inline_klass(holder);
2817 if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
2818 // Check for a calling convention mismatch with super method(s)
2819 bool scalar_super = false;
2820 bool non_scalar_super = false;
2821 GrowableArray<Method*>* supers = get_supers();
2822 for (int i = 0; i < supers->length(); ++i) {
2823 Method* super_method = supers->at(i);
2824 if (super_method->is_scalarized_arg(arg_num)) {
2825 scalar_super = true;
2826 } else {
2827 non_scalar_super = true;
2828 }
2829 }
2830 #ifdef ASSERT
2831 // Randomly enable the code paths below for stress testing
2832 bool stress = init && StressCallingConvention;
2833 if (stress && (os::random() & 1) == 1) {
2834 non_scalar_super = true;
2835 if ((os::random() & 1) == 1) {
2836 scalar_super = true;
2837 }
2838 }
2839 #endif
2840 if (non_scalar_super) {
2841 // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
2842 if (scalar_super) {
2843 // Found non-scalar *and* scalar super methods. We can't handle both.
2844 // Mark the scalar method as mismatched and recompile call sites to use the non-scalarized calling convention.
2845 for (int i = 0; i < supers->length(); ++i) {
2846 Method* super_method = supers->at(i);
2847 if (super_method->is_scalarized_arg(arg_num) debug_only(|| (stress && (os::random() & 1) == 1))) {
2848 super_method->set_mismatch();
2849 MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
2850 JavaThread* thread = JavaThread::current();
2851 HandleMark hm(thread);
2852 methodHandle mh(thread, super_method);
2853 DeoptimizationScope deopt_scope;
2854 CodeCache::mark_for_deoptimization(&deopt_scope, mh());
2855 deopt_scope.deoptimize_marked();
2856 }
2857 }
2858 }
2859 // Fall back to non-scalarized calling convention
2860 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2861 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2862 } else {
2863 _num_inline_args++;
2864 has_scalarized = true;
2865 int last = _sig_cc->length();
2866 int last_ro = _sig_cc_ro->length();
2867 _sig_cc->appendAll(vk->extended_sig());
2868 _sig_cc_ro->appendAll(vk->extended_sig());
2869 if (bt == T_OBJECT) {
2870 // Nullable inline type argument, insert InlineTypeNode::IsInit field right after T_METADATA delimiter
2871 _sig_cc->insert_before(last+1, SigEntry(T_BOOLEAN, -1, nullptr));
2872 _sig_cc_ro->insert_before(last_ro+1, SigEntry(T_BOOLEAN, -1, nullptr));
2873 }
2874 }
2875 } else {
2876 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2877 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2878 }
2879 bt = T_OBJECT;
2880 } else {
2881 SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
2882 SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
2883 }
2884 SigEntry::add_entry(_sig, bt, ss.as_symbol());
2885 if (bt != T_VOID) {
2886 arg_num++;
2887 }
2888 }
2889 }
2890
2891 // Compute the non-scalarized calling convention
2892 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
2893 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
2894
2895 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
2896 if (has_scalarized && !_method->is_native()) {
2897 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
2898 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
2899
2900 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
2901 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
2902
2903 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
2904 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
2905
2906 // Upper bound on stack arguments to avoid hitting the argument limit and
2907 // bailing out of compilation ("unsupported incoming calling sequence").
2908 // TODO we need a reasonable limit (flag?) here
2909 if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 60) {
2910 return; // Success
2911 }
2912 }
2913
2914 // No scalarized args
2915 _sig_cc = _sig;
2916 _regs_cc = _regs;
2917 _args_on_stack_cc = _args_on_stack;
2918
2919 _sig_cc_ro = _sig;
2920 _regs_cc_ro = _regs;
2921 _args_on_stack_cc_ro = _args_on_stack;
2922 }
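// A hedged restatement of the stack-repair predicates computed above
// (hypothetical names, illustrative only): C1 needs stack repair when a
// scalarized convention uses fewer stack arguments than the legacy one, and C2
// when the scalarized convention uses more; the comparisons are exactly those
// in compute_calling_conventions().
namespace example_stack_repair {
  struct Repair { bool c1; bool c2; };

  inline Repair needs_repair(int args, int args_cc, int args_cc_ro) {
    Repair r;
    r.c1 = (args_cc < args) || (args_cc_ro < args);
    r.c2 = (args_cc > args) || (args_cc > args_cc_ro);
    return r;
  }
}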
2923
2924 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2925 // Use customized signature handler. Need to lock around updates to
2926 // the _adapter_handler_table (it is not safe for concurrent readers
2927 // and a single writer: this could be fixed if it becomes a
2928 // problem).
2929
2930 // Fast-path for trivial adapters
2931 AdapterHandlerEntry* entry = get_simple_adapter(method);
2932 if (entry != nullptr) {
2933 return entry;
2934 }
2935
2936 ResourceMark rm;
2937 AdapterBlob* new_adapter = nullptr;
2938
2939 CompiledEntrySignature ces(method());
2940 ces.compute_calling_conventions();
2941 if (ces.has_scalarized_args()) {
2942 if (!method->has_scalarized_args()) {
2943 method->set_has_scalarized_args();
2944 }
2945 if (ces.c1_needs_stack_repair()) {
2946 method->set_c1_needs_stack_repair();
2947 }
2948 if (ces.c2_needs_stack_repair() && !method->c2_needs_stack_repair()) {
2949 method->set_c2_needs_stack_repair();
2950 }
2951 } else if (method->is_abstract()) {
2952 return _abstract_method_handler;
2953 }
2954
2955 {
2956 MutexLocker mu(AdapterHandlerLibrary_lock);
2957
2958 if (ces.has_scalarized_args() && method->is_abstract()) {
2959 // Save a C heap allocated version of the signature for abstract methods with scalarized inline type arguments
2960 address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2961 entry = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(nullptr),
2962 SharedRuntime::throw_AbstractMethodError_entry(),
2963 wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
2964 wrong_method_abstract, wrong_method_abstract);
2965 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro()->length(), mtInternal);
2966 heap_sig->appendAll(ces.sig_cc_ro());
2967 entry->set_sig_cc(heap_sig);
2968 return entry;
2969 }
2970
2971 // Lookup method signature's fingerprint
2972 entry = lookup(ces.sig_cc(), ces.has_inline_recv());
2973
2974 if (entry != nullptr) {
2975 #ifdef ASSERT
2976 if (VerifyAdapterSharing) {
2977 AdapterBlob* comparison_blob = nullptr;
2978 AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, ces, false);
2979 assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison");
2980 assert(comparison_entry->compare_code(entry), "code must match");
2981 // Release the one just created and return the original
2982 delete comparison_entry;
2983 }
2984 #endif
2985 return entry;
2986 }
2987
2988 entry = create_adapter(new_adapter, ces, /* allocate_code_blob */ true);
2989 }
2990
2991 // Outside of the lock
2992 if (new_adapter != nullptr) {
2993 post_adapter_creation(new_adapter, entry);
2994 }
2995 return entry;
2996 }
2997
2998 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
2999 CompiledEntrySignature& ces,
3000 bool allocate_code_blob) {
3001 if (log_is_enabled(Info, perf, class, link)) {
3002 ClassLoader::perf_method_adapters_count()->inc();
3003 }
3004
3005 // StubRoutines::_final_stubs_code is initialized after this function can be called. As a result,
3006 // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated prior
3007 // to all StubRoutines::_final_stubs_code being set. Checks refer to runtime range checks generated
3008 // in an I2C stub that ensure that an I2C stub is called from an interpreter frame or stubs.
3009 bool contains_all_checks = StubRoutines::final_stubs_code() != nullptr;
3010
3011 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3012 CodeBuffer buffer(buf);
3013 short buffer_locs[20];
3014 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3015 sizeof(buffer_locs)/sizeof(relocInfo));
3016
3017 // Make a C heap allocated version of the fingerprint to store in the adapter
3018 AdapterFingerPrint* fingerprint = new AdapterFingerPrint(ces.sig_cc(), ces.has_inline_recv());
3019 MacroAssembler _masm(&buffer);
3020 AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
3021 ces.args_on_stack(),
3022 ces.sig(),
3023 ces.regs(),
3024 ces.sig_cc(),
3025 ces.regs_cc(),
3026 ces.sig_cc_ro(),
3027 ces.regs_cc_ro(),
3028 fingerprint,
3029 new_adapter,
3030 allocate_code_blob);
3031
3032 if (ces.has_scalarized_args()) {
3033 // Save a C heap allocated version of the scalarized signature and store it in the adapter
3034 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3035 heap_sig->appendAll(ces.sig_cc());
3036 entry->set_sig_cc(heap_sig);
3037 }
3038
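  // With -XX:+VerifyAdapterSharing, keep a copy of the (unrelocated) code so
  // that a later cache hit for the same fingerprint can byte-compare a freshly
  // generated twin against it (see the VerifyAdapterSharing block above and
  // compare_code below).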
3039 #ifdef ASSERT
3040 if (VerifyAdapterSharing) {
3041 entry->save_code(buf->code_begin(), buffer.insts_size());
3042 if (!allocate_code_blob) {
3043 return entry;
3044 }
3045 }
3046 #endif
3047
3048 NOT_PRODUCT(int insts_size = buffer.insts_size());
3049 if (new_adapter == nullptr) {
3050 // CodeCache is full, disable compilation
3051     // We ought to log this, but the compile log is only per compile thread,
3052     // and we're some nondescript Java thread.
3053 return nullptr;
3054 }
3055 entry->relocate(new_adapter->content_begin());
3056 #ifndef PRODUCT
3057 // debugging support
3058 if (PrintAdapterHandlers || PrintStubCode) {
3059 ttyLocker ttyl;
3060 entry->print_adapter_on(tty);
3061 tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
3062 _adapter_handler_table->number_of_entries(), fingerprint->as_basic_args_string(),
3063 fingerprint->as_string(), insts_size);
3064 tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
3065 if (Verbose || PrintStubCode) {
3066 address first_pc = entry->base_address();
3067 if (first_pc != nullptr) {
3068 Disassembler::decode(first_pc, first_pc + insts_size, tty
3069 NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
3070 tty->cr();
3071 }
3072 }
3073 }
3074 #endif
3075
3076   // Add the entry only if it contains all required checks (see sharedRuntime_xxx.cpp);
3077   // the checks are inserted only if -XX:+VerifyAdapterCalls is specified.
3078 if (contains_all_checks || !VerifyAdapterCalls) {
3079 assert_lock_strong(AdapterHandlerLibrary_lock);
3080 _adapter_handler_table->put(fingerprint, entry);
3081 }
3082 return entry;
3083 }
3084
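// All of an adapter's entry points live in a single blob; the base address is
// the lowest of them: the i2c entry when present, otherwise the c2i entry.
// The asserts below check that every other entry sits at or above it.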
3085 address AdapterHandlerEntry::base_address() {
3086 address base = _i2c_entry;
3087 if (base == nullptr) base = _c2i_entry;
3088 assert(base <= _c2i_entry || _c2i_entry == nullptr, "");
3089 assert(base <= _c2i_inline_entry || _c2i_inline_entry == nullptr, "");
3090 assert(base <= _c2i_inline_ro_entry || _c2i_inline_ro_entry == nullptr, "");
3091 assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, "");
3092 assert(base <= _c2i_unverified_inline_entry || _c2i_unverified_inline_entry == nullptr, "");
3093 assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, "");
3094 return base;
3095 }
3096
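// The adapter code was generated into a temporary buffer and then copied into
// its final blob; shift every recorded entry point by the distance the code
// moved. For example, a copy from 0x1000 to 0x5000 advances each non-null
// entry by a delta of 0x4000.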
3097 void AdapterHandlerEntry::relocate(address new_base) {
3098 address old_base = base_address();
3099 assert(old_base != nullptr, "");
3100 ptrdiff_t delta = new_base - old_base;
3101 if (_i2c_entry != nullptr)
3102 _i2c_entry += delta;
3103 if (_c2i_entry != nullptr)
3104 _c2i_entry += delta;
3105 if (_c2i_inline_entry != nullptr)
3106 _c2i_inline_entry += delta;
3107 if (_c2i_inline_ro_entry != nullptr)
3108 _c2i_inline_ro_entry += delta;
3109 if (_c2i_unverified_entry != nullptr)
3110 _c2i_unverified_entry += delta;
3111 if (_c2i_unverified_inline_entry != nullptr)
3112 _c2i_unverified_inline_entry += delta;
3113 if (_c2i_no_clinit_check_entry != nullptr)
3114 _c2i_no_clinit_check_entry += delta;
3115 assert(base_address() == new_base, "");
3116 }
3117
3118
3119 AdapterHandlerEntry::~AdapterHandlerEntry() {
3120 delete _fingerprint;
3121 if (_sig_cc != nullptr) {
3122 delete _sig_cc;
3123 }
3124 #ifdef ASSERT
3125 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3126 #endif
3127 }
3128
3129
3130 #ifdef ASSERT
3131 // Capture the code before relocation so that it can be compared
3132 // against other versions. If the code is captured after relocation
3133 // then relative instructions won't be equivalent.
3134 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3135 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3136 _saved_code_length = length;
3137 memcpy(_saved_code, buffer, length);
3138 }
3139
3140
3141 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3142 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3143
3190
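  // Stack-allocated scratch space for relocation records. Arrays of double are
  // used purely to get a buffer that is suitably sized and aligned for the
  // relocInfo casts below.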
3191 struct { double data[20]; } locs_buf;
3192 struct { double data[20]; } stubs_locs_buf;
3193 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3194 #if defined(AARCH64) || defined(PPC64)
3195 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3196 // in the constant pool to ensure ordering between the barrier and oops
3197 // accesses. For native_wrappers we need a constant.
3198 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3199 // static java call that is resolved in the runtime.
3200 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3201 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3202 }
3203 #endif
3204 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3205 MacroAssembler _masm(&buffer);
3206
3207 // Fill in the signature array, for the calling-convention call.
3208 const int total_args_passed = method->size_of_parameters();
3209
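  // Avoid a resource-arena allocation in the common case: signatures with at
  // most 16 slots use the on-stack arrays, longer ones fall back to
  // NEW_RESOURCE_ARRAY.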
3210 BasicType stack_sig_bt[16];
3211 VMRegPair stack_regs[16];
3212 BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3213 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3214
3215 int i = 0;
3216 if (!method->is_static()) { // Pass in receiver first
3217 sig_bt[i++] = T_OBJECT;
3218 }
3219 SignatureStream ss(method->signature());
3220 for (; !ss.at_return_type(); ss.next()) {
3221 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
3222 if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3223 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
3224 }
3225 }
3226 assert(i == total_args_passed, "");
3227 BasicType ret_type = ss.type();
3228
3229 // Now get the compiled-Java arguments layout.
3230 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3231
3232 // Generate the compiled-to-native wrapper code
3233 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3234
3235 if (nm != nullptr) {
3236 {
3237 MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3238 if (nm->make_in_use()) {
3239 method->set_code(method, nm);
3240 }
3241 }
3242
3243 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3244 if (directive->PrintAssemblyOption) {
3245 nm->print_code();
3246 }
3247 DirectivesStack::release(directive);
3454 st->print("Adapter for signature: ");
3455 a->print_adapter_on(st);
3456 return true;
3457 } else {
3458 return false; // keep looking
3459 }
3460 };
3461 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3462 _adapter_handler_table->iterate(findblob);
3463 assert(found, "Should have found handler");
3464 }
3465
3466 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3467 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3468 if (get_i2c_entry() != nullptr) {
3469 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3470 }
3471 if (get_c2i_entry() != nullptr) {
3472 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3473 }
3474   if (get_c2i_inline_entry() != nullptr) {
3475     st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3476   }
3477   if (get_c2i_inline_ro_entry() != nullptr) {
3478     st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3479   }
3480 if (get_c2i_unverified_entry() != nullptr) {
3481 st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3482 }
3483   if (get_c2i_unverified_inline_entry() != nullptr) {
3484 st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3485 }
3486 if (get_c2i_no_clinit_check_entry() != nullptr) {
3487 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3488 }
3489 st->cr();
3490 }
3491
3492 #ifndef PRODUCT
3493
3494 void AdapterHandlerLibrary::print_statistics() {
3495 print_table_statistics();
3496 }
3497
3498 #endif /* PRODUCT */
3499
3500 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3501 assert(current == JavaThread::current(), "pre-condition");
3502 StackOverflow* overflow_state = current->stack_overflow_state();
3503 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3504 overflow_state->set_reserved_stack_activation(current->stack_base());
3553 event.set_method(method);
3554 event.commit();
3555 }
3556 }
3557 }
3558 return activation;
3559 }
3560
3561 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3562 // After any safepoint, just before going back to compiled code,
3563 // we inform the GC that we will be doing initializing writes to
3564 // this object in the future without emitting card-marks, so
3565 // GC may take any compensating steps.
3566
3567 oop new_obj = current->vm_result();
3568 if (new_obj == nullptr) return;
3569
3570 BarrierSet *bs = BarrierSet::barrier_set();
3571 bs->on_slowpath_allocation_exit(current, new_obj);
3572 }
3573
3574 // We are at a compiled-code-to-interpreter call. We need backing
3575 // buffers for all inline type arguments. Allocate an object array to
3576 // hold them (convenient because once we're done with it we don't have
3577 // to worry about freeing it).
3578 oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS) {
3579 assert(InlineTypePassFieldsAsArgs, "no reason to call this");
3580 ResourceMark rm;
3581
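  // First pass: count how many backing buffers are needed, one for the
  // receiver (if it is scalarized) plus one per scalarized object argument.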
3582 int nb_slots = 0;
3583 InstanceKlass* holder = callee->method_holder();
3584 allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
3585 if (allocate_receiver) {
3586 nb_slots++;
3587 }
3588 int arg_num = callee->is_static() ? 0 : 1;
3589 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3590 BasicType bt = ss.type();
3591 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
3592 nb_slots++;
3593 }
3594 if (bt != T_VOID) {
3595 arg_num++;
3596 }
3597 }
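  // Second pass: allocate the array plus one buffer instance per scalarized
  // argument, filled in argument order.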
3598 objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
3599 objArrayHandle array(THREAD, array_oop);
3600 arg_num = callee->is_static() ? 0 : 1;
3601 int i = 0;
3602 if (allocate_receiver) {
3603 InlineKlass* vk = InlineKlass::cast(holder);
3604 oop res = vk->allocate_instance(CHECK_NULL);
3605 array->obj_at_put(i++, res);
3606 }
3607 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3608 BasicType bt = ss.type();
3609 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
3610 InlineKlass* vk = ss.as_inline_klass(holder);
3611 assert(vk != nullptr, "Unexpected klass");
3612 oop res = vk->allocate_instance(CHECK_NULL);
3613 array->obj_at_put(i++, res);
3614 }
3615 if (bt != T_VOID) {
3616 arg_num++;
3617 }
3618 }
3619 return array();
3620 }
3621
3622 JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
3623 methodHandle callee(current, callee_method);
3624 oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, CHECK);
3625 current->set_vm_result(array);
3626 current->set_vm_result_2(callee()); // TODO: required to keep callee live?
3627 JRT_END
3628
3629 // We're returning from an interpreted method: load each field into a
3630 // register following the calling convention
3631 JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
3632 {
3633 assert(res->klass()->is_inline_klass(), "only inline types here");
3634 ResourceMark rm;
3635 RegisterMap reg_map(current,
3636 RegisterMap::UpdateMap::include,
3637 RegisterMap::ProcessFrames::include,
3638 RegisterMap::WalkContinuation::skip);
3639 frame stubFrame = current->last_frame();
3640   frame callerFrame = stubFrame.sender(&reg_map);
3641 assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
3642
3643 InlineKlass* vk = InlineKlass::cast(res->klass());
3644
3645 const Array<SigEntry>* sig_vk = vk->extended_sig();
3646 const Array<VMRegPair>* regs = vk->return_regs();
3647
3648 if (regs == nullptr) {
3649 // The fields of the inline klass don't fit in registers, bail out
3650 return;
3651 }
3652
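  // regs->at(0) holds the oop of the inline type itself (checked in the
  // ASSERT block at the end), so the per-field locations start at index 1;
  // hence j starts at 1.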
3653 int j = 1;
3654 for (int i = 0; i < sig_vk->length(); i++) {
3655 BasicType bt = sig_vk->at(i)._bt;
3656 if (bt == T_METADATA) {
3657 continue;
3658 }
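    // A T_VOID entry marks the second slot of a long/double: there is no
    // field to copy, but the calling convention reserved an extra VMRegPair
    // for it, so step j past that as well.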
3659 if (bt == T_VOID) {
3660 if (sig_vk->at(i-1)._bt == T_LONG ||
3661 sig_vk->at(i-1)._bt == T_DOUBLE) {
3662 j++;
3663 }
3664 continue;
3665 }
3666 int off = sig_vk->at(i)._offset;
3667 assert(off > 0, "offset in object should be positive");
3668 VMRegPair pair = regs->at(j);
3669 address loc = reg_map.location(pair.first(), nullptr);
3670 switch(bt) {
3671 case T_BOOLEAN:
3672 *(jboolean*)loc = res->bool_field(off);
3673 break;
3674 case T_CHAR:
3675 *(jchar*)loc = res->char_field(off);
3676 break;
3677 case T_BYTE:
3678 *(jbyte*)loc = res->byte_field(off);
3679 break;
3680 case T_SHORT:
3681 *(jshort*)loc = res->short_field(off);
3682 break;
3683 case T_INT: {
3684 *(jint*)loc = res->int_field(off);
3685 break;
3686 }
3687 case T_LONG:
3688 #ifdef _LP64
3689 *(intptr_t*)loc = res->long_field(off);
3690 #else
3691 Unimplemented();
3692 #endif
3693 break;
3694 case T_OBJECT:
3695 case T_ARRAY: {
3696 *(oop*)loc = res->obj_field(off);
3697 break;
3698 }
3699 case T_FLOAT:
3700 *(jfloat*)loc = res->float_field(off);
3701 break;
3702 case T_DOUBLE:
3703 *(jdouble*)loc = res->double_field(off);
3704 break;
3705 default:
3706 ShouldNotReachHere();
3707 }
3708 j++;
3709 }
3710 assert(j == regs->length(), "missed a field?");
3711
3712 #ifdef ASSERT
3713 VMRegPair pair = regs->at(0);
3714 address loc = reg_map.location(pair.first(), nullptr);
3715 assert(*(oopDesc**)loc == res, "overwritten object");
3716 #endif
3717
3718 current->set_vm_result(res);
3719 }
3720 JRT_END
3721
3722 // We've returned to an interpreted method; the interpreter needs a
3723 // reference to an inline type instance. Allocate one and initialize it
3724 // from the field values in registers.
3725 JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
3726 {
3727 ResourceMark rm;
3728 RegisterMap reg_map(current,
3729 RegisterMap::UpdateMap::include,
3730 RegisterMap::ProcessFrames::include,
3731 RegisterMap::WalkContinuation::skip);
3732 frame stubFrame = current->last_frame();
3733   frame callerFrame = stubFrame.sender(&reg_map);
3734
3735 #ifdef ASSERT
3736 InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
3737 #endif
3738
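  // The incoming value is multiplexed via its low bit, e.g.:
  //   res == (intptr_t)oop              -- low bit clear: an ordinary (buffered/heap) result
  //   res == (intptr_t)InlineKlass | 1  -- low bit set: fields were returned in registers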
3739 if (!is_set_nth_bit(res, 0)) {
3740 // We're not returning with inline type fields in registers (the
3741 // calling convention didn't allow it for this inline klass)
3742 assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
3743 current->set_vm_result((oopDesc*)res);
3744 assert(verif_vk == nullptr, "broken calling convention");
3745 return;
3746 }
3747
3748 clear_nth_bit(res, 0);
3749 InlineKlass* vk = (InlineKlass*)res;
3750 assert(verif_vk == vk, "broken calling convention");
3751 assert(Metaspace::contains((void*)res), "should be klass");
3752
3753 // Allocate handles for every oop field so they are safe in case of
3754 // a safepoint when allocating
3755 GrowableArray<Handle> handles;
3756 vk->save_oop_fields(reg_map, handles);
3757
3758   // It's now safe to safepoint: the oop fields are protected by handles
3759 JRT_BLOCK;
3760 {
3761 JavaThread* THREAD = current;
3762 oop vt = vk->realloc_result(reg_map, handles, CHECK);
3763 current->set_vm_result(vt);
3764 }
3765 JRT_BLOCK_END;
3766 }
3767 JRT_END