#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/nmethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "oops/klass.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/perfData.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.inline.hpp"
// for a call currently in progress, i.e., arguments have been pushed on the stack
// but the callee has not been invoked yet. The caller frame must be compiled.
Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
                                              CallInfo& callinfo, TRAPS) {
  Handle receiver;
  Handle nullHandle; // create a handy null handle for exception returns
  JavaThread* current = THREAD;

  assert(!vfst.at_end(), "Java frame must exist");

  // Find caller and bci from vframe
  methodHandle caller(current, vfst.method());
  int bci = vfst.bci();

  if (caller->is_continuation_enter_intrinsic()) {
    bc = Bytecodes::_invokestatic;
    LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
    return receiver;
  }

  Bytecode_invoke bytecode(caller, bci);
  int bytecode_index = bytecode.index();
  bc = bytecode.invoke_code();

  methodHandle attached_method(current, extract_attached_method(vfst));
  if (attached_method.not_null()) {
    Method* callee = bytecode.static_target(CHECK_NH);
    vmIntrinsics::ID id = callee->intrinsic_id();
    // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual call,
    // it attaches the statically resolved method to the call site.
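    // For example (illustrative): a linkToVirtual call site whose attached
    // method happens to be declared in an interface is re-reported as
    // invokeinterface by the adjustment below, so the receiver lookup matches
    // the actual dispatch kind.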
    if (MethodHandles::is_signature_polymorphic(id) &&
        MethodHandles::is_signature_polymorphic_intrinsic(id)) {
      bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);

      // Adjust invocation mode according to the attached method.
      switch (bc) {
        case Bytecodes::_invokevirtual:
          if (attached_method->method_holder()->is_interface()) {
            bc = Bytecodes::_invokeinterface;
          }
          break;
        case Bytecodes::_invokeinterface:
          if (!attached_method->method_holder()->is_interface()) {
            bc = Bytecodes::_invokevirtual;
          }
          break;
        case Bytecodes::_invokehandle:
          if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
            bc = attached_method->is_static() ? Bytecodes::_invokestatic
                                              : Bytecodes::_invokevirtual;
          }
          break;
        default:
          break;
      }
    }
  }

  assert(bc != Bytecodes::_illegal, "not initialized");

  bool has_receiver = bc != Bytecodes::_invokestatic &&
                      bc != Bytecodes::_invokedynamic &&
                      bc != Bytecodes::_invokehandle;

  // Find receiver for non-static call
  if (has_receiver) {
    // This register map must be updated since we need to find the receiver for
    // compiled frames. The receiver might be in a register.
    RegisterMap reg_map2(current,
                         RegisterMap::UpdateMap::include,
                         RegisterMap::ProcessFrames::include,
                         RegisterMap::WalkContinuation::skip);
    frame stubFrame = current->last_frame();
    // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);

    if (attached_method.is_null()) {
      Method* callee = bytecode.static_target(CHECK_NH);
      if (callee == nullptr) {
        THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
      }
    }

    // Retrieve from a compiled argument list
    receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
    assert(oopDesc::is_oop_or_null(receiver()), "");

    if (receiver.is_null()) {
      THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
    }
  }
  // Resolve method
  if (attached_method.not_null()) {
    // Parameterized by attached method.
    LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
  } else {
    // Parameterized by bytecode.
    constantPoolHandle constants(current, caller->constants());
    LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
  }

#ifdef ASSERT
  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
  if (has_receiver) {
    assert(receiver.not_null(), "should have thrown exception");
    Klass* receiver_klass = receiver->klass();
    Klass* rk = nullptr;
    if (attached_method.not_null()) {
      // In case a resolved method is attached, use its holder during the check.
      rk = attached_method->method_holder();
    } else {
      // Klass is already loaded.
      constantPoolHandle constants(current, caller->constants());
      rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
    }
    Klass* static_receiver_klass = rk;
    assert(receiver_klass->is_subtype_of(static_receiver_klass),
           "actual receiver must be subclass of static receiver klass");
    if (receiver_klass->is_instance_klass()) {
      if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
        tty->print_cr("ERROR: Klass not yet initialized!!");
        receiver_klass->print();
      }
      assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
    }
  }
#endif

  return receiver;
}

methodHandle SharedRuntime::find_callee_method(TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);
  // We first need to check whether any Java activations (compiled or
  // interpreted) exist on the stack since the last JavaCall. If not, we need
  // to get the target method from the JavaCall wrapper.
  vframeStream vfst(current, true); // Do not skip any javaCalls
  methodHandle callee_method;
  if (vfst.at_end()) {
    // No Java frames were found on stack since we did the JavaCall.
    // Hence the stack can only contain an entry_frame. We need to
    // find the target method from the stub frame.
    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::skip,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame fr = current->last_frame();
    assert(fr.is_runtime_frame(), "must be a runtimeStub");
    fr = fr.sender(&reg_map);
    assert(fr.is_entry_frame(), "must be");
    // fr is now pointing to the entry frame.
    callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
  } else {
    Bytecodes::Code bc;
    CallInfo callinfo;
    find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
    callee_method = methodHandle(current, callinfo.selected_method());
  }
  assert(callee_method()->is_method(), "must be");
  return callee_method;
}

// Resolves a call.
methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);
  RegisterMap cbl_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame caller_frame = current->last_frame().sender(&cbl_map);

  CodeBlob* caller_cb = caller_frame.cb();
  guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
  nmethod* caller_nm = caller_cb->as_nmethod();

  // determine call info & receiver
  // note: a) receiver is null for static calls
  //       b) an exception is thrown if receiver is null for non-static calls
  CallInfo call_info;
  Bytecodes::Code invoke_code = Bytecodes::_illegal;
  Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));

  NoSafepointVerifier nsv;

  methodHandle callee_method(current, call_info.selected_method());

  assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
         (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
         (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
         (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
         ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");

  assert(!caller_nm->is_unloading(), "It should not be unloading");

#ifndef PRODUCT
  // tracing/debugging/statistics
  uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
               (is_virtual) ? (&_resolve_virtual_ctr) :
               (&_resolve_static_ctr);
  Atomic::inc(addr);

  if (TraceCallFixup) {
    ResourceMark rm(current);
    tty->print("resolving %s%s (%s) call to",
               (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
               Bytecodes::name(invoke_code));
    callee_method->print_short_name(tty);
    tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
                  p2i(caller_frame.pc()), p2i(callee_method->code()));
  }
#endif

  if (invoke_code == Bytecodes::_invokestatic) {
    assert(callee_method->method_holder()->is_initialized() ||
           callee_method->method_holder()->is_reentrant_initialization(current),
           "invalid class initialization state for invoke_static");
    if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
      // In order to keep the class initialization check, do not patch the call
      // site for a static call when the class is not fully initialized.
      // The proper check is enforced by call site re-resolution on every invocation.
      //
      // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
      // an explicit class initialization check is put in the nmethod entry (VEP).
      assert(callee_method->method_holder()->is_linked(), "must be");
      return callee_method;
    }
  }


  // JSR 292 key invariant:
  // If the resolved method is a MethodHandle invoke target, the call
  // site must be a MethodHandle call site, because the lambda form might
  // tail-call, leaving the stack in a state unknown to either caller or callee.

  // Compute entry points. The computation of the entry points is independent of
  // patching the call.

  // Make sure the callee nmethod does not get deoptimized and removed before
  // we are done patching the code.


  CompiledICLocker ml(caller_nm);
  if (is_virtual && !is_optimized) {
    CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
    inline_cache->update(&call_info, receiver->klass());
  } else {
    // Callsite is a direct call - set it to the destination method
    CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
    callsite->set(callee_method);
  }
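  // Once resolved here, a virtual inline cache is typically monomorphic; if
  // other receiver types show up later, handle_ic_miss_helper() below
  // transitions it to megamorphic (vtable/itable) dispatch.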

  return callee_method;
}

// Inline caches exist only in compiled code
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
#ifdef ASSERT
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = current->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
#endif /* ASSERT */

  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
    // Return Method* through TLS
    current->set_vm_result_2(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  return get_resolved_entry(current, callee_method);
JRT_END


// Handle call site that has been made non-entrant
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
  // 6243940 We might end up in here if the callee is deoptimized
  // as we race to call it. We don't want to take a safepoint if
  // the caller was interpreted because the caller frame will look
  // interpreted to the stack walkers and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. The i2c path will
  // place the callee method in the callee_target. It is stashed
  // there because if we try to find the callee by normal means a
  // safepoint is possible and we would have trouble GC'ing the compiled args.
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = current->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);

  if (caller_frame.is_interpreted_frame() ||
      caller_frame.is_entry_frame() ||
      caller_frame.is_upcall_stub_frame()) {
    Method* callee = current->callee_target();
    guarantee(callee != nullptr && callee->is_method(), "bad handshake");
    current->set_vm_result_2(callee);
    current->set_callee_target(nullptr);
    if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
      // Bypass class initialization checks in c2i when caller is in native.
      // JNI calls to static methods don't have class initialization checks.
      // Fast class initialization checks are present in c2i adapters and call into
      // SharedRuntime::handle_wrong_method() on the slow path.
      //
      // JVM upcalls may land here as well, but there's a proper check present in
      // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
      // so bypassing it in c2i adapter is benign.
      return callee->get_c2i_no_clinit_check_entry();
    } else {
      return callee->get_c2i_entry();
    }
  }

  // Must be a compiled-to-compiled path, which is safe to stackwalk
  methodHandle callee_method;
  JRT_BLOCK
    // Force resolving of caller (if we called from compiled frame)
    callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
    current->set_vm_result_2(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  return get_resolved_entry(current, callee_method);
JRT_END

// Handle abstract method call
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
  // Verbose error message for AbstractMethodError.
  // Get the called method from the invoke bytecode.
  vframeStream vfst(current, true);
  assert(!vfst.at_end(), "Java frame must exist");
  methodHandle caller(current, vfst.method());
  Bytecode_invoke invoke(caller, vfst.bci());
  DEBUG_ONLY( invoke.verify(); )

  // Find the compiled caller frame.
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stubFrame = current->last_frame();
  assert(stubFrame.is_runtime_frame(), "must be");
  frame callerFrame = stubFrame.sender(&reg_map);
  assert(callerFrame.is_compiled_frame(), "must be");

  // Install exception and return forward entry.
  address res = SharedRuntime::throw_AbstractMethodError_entry();
  JRT_BLOCK
    methodHandle callee(current, invoke.static_target(current));
    if (!callee.is_null()) {
      oop recv = callerFrame.retrieve_receiver(&reg_map);
      Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
      res = StubRoutines::forward_exception_entry();
      LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
    }
  JRT_BLOCK_END
  return res;
JRT_END

// Return verified_code_entry if interp_only_mode is not set for the current
// thread; otherwise return the c2i entry.
address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method) {
  if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
    // In interp_only_mode we need to go to the interpreted entry
    // The c2i won't patch in this mode -- see fixup_callers_callsite
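    // (interp_only_mode is set by JVMTI, e.g. while an agent is
    // single-stepping, so execution must stay in the interpreter where
    // the corresponding events can be posted.)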
    return callee_method->get_c2i_entry();
  }
  assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
  return callee_method->verified_code_entry();
}

// resolve a static call and patch code
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
    current->set_vm_result_2(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  return get_resolved_entry(current, callee_method);
JRT_END

// resolve virtual call and update inline cache to monomorphic
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
    current->set_vm_result_2(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  return get_resolved_entry(current, callee_method);
JRT_END


// Resolve a virtual call that can be statically bound (e.g., always
// monomorphic, so it has no inline cache). Patch code to resolved target.
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
    current->set_vm_result_2(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  return get_resolved_entry(current, callee_method);
JRT_END

methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);
  CallInfo call_info;
  Bytecodes::Code bc;

  // The receiver is null for static calls. An exception is thrown for a null
  // receiver on non-static calls.
  Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));

  methodHandle callee_method(current, call_info.selected_method());

#ifndef PRODUCT
  Atomic::inc(&_ic_miss_ctr);

  // Statistics & Tracing
  if (TraceCallFixup) {
    ResourceMark rm(current);
    tty->print("IC miss (%s) call to", Bytecodes::name(bc));
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
  }

  if (ICMissHistogram) {
    MutexLocker m(VMStatistic_lock);
    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::skip,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
    // produce statistics under the lock
    trace_ic_miss(f.pc());
  }
#endif

  // Install an event collector so that when a vtable stub is created the
  // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
  // event can't be posted when the stub is created as locks are held;
  // instead the event will be deferred until the event collector goes
  // out of scope.
  JvmtiDynamicCodeEventCollector event_collector;
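  // A sketch of the scope-based pattern (hypothetical usage, for illustration):
  //   {
  //     JvmtiDynamicCodeEventCollector ec;  // events generated under locks are queued
  //     ... create vtable stub ...
  //   }                                     // ~ec posts the deferred DYNAMIC_CODE_GENERATED events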

  // Update inline cache to megamorphic. Skip update if we are called from interpreted.
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame caller_frame = current->last_frame().sender(&reg_map);
  CodeBlob* cb = caller_frame.cb();
  nmethod* caller_nm = cb->as_nmethod();

  CompiledICLocker ml(caller_nm);
  CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
  inline_cache->update(&call_info, receiver()->klass());

  return callee_method;
}

//
// Resets a call-site in compiled code so it will get resolved again.
// This routine handles virtual call sites, optimized virtual call
// sites, and static call sites. Typically used to change a call site's
// destination from compiled to interpreted.
//
methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = current->last_frame();
  assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
  frame caller = stub_frame.sender(&reg_map);

  // Do nothing if the frame isn't a live compiled frame.
  // nmethod could be deoptimized by the time we get here
  // so no update to the caller is needed.

  if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
      (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) {

    address pc = caller.pc();

    nmethod* caller_nm = CodeCache::find_nmethod(pc);
    assert(caller_nm != nullptr, "did not find caller nmethod");
    // Default call_addr is the location of the "basic" call.
    // Determine the address of the call we are reresolving. With
    // Inline Caches we will always find a recognizable call.
    // With Inline Caches disabled we may or may not find a
    // recognizable call. We will always find a call for static
    // calls and for optimized virtual calls. For vanilla virtual
    // calls it depends on the state of the UseInlineCaches switch.
    //
    // With Inline Caches disabled we can get here for a virtual call
    // for two reasons:
    //   1 - calling an abstract method. The vtable for abstract methods
    //       will run us through handle_wrong_method and we will eventually
    //       end up in the interpreter to throw the AbstractMethodError.
    //   2 - a racing deoptimization. We could be doing a vanilla vtable
    //       call and between the time we fetch the entry address and
    //       jump to it the target gets deoptimized. Similar to 1
    //       we will wind up in the interpreter (through a c2i with c2).
    //
    CompiledICLocker ml(caller_nm);
    address call_addr = caller_nm->call_instruction_address(pc);

    if (call_addr != nullptr) {
      // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
      // bytes back in the instruction stream so we must also check for reloc info.
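      // (On x86, a near relative call is E8 followed by a 32-bit displacement,
      // 5 bytes in total, so any stray 0xE8 byte at pc - 5 would look like a
      // call; the relocation entry is what confirms a real call site.)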
      RelocIterator iter(caller_nm, call_addr, call_addr+1);
      bool ret = iter.next(); // Get item
      if (ret) {
        switch (iter.type()) {
          case relocInfo::static_call_type:
          case relocInfo::opt_virtual_call_type: {
            CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
            cdc->set_to_clean();
            break;
          }

          case relocInfo::virtual_call_type: {
            // compiled, dispatched call (which used to call an interpreted method)
            CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
            inline_cache->set_to_clean();
            break;
          }
          default:
            break;
        }
      }
    }
  }

  methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));


#ifndef PRODUCT
  Atomic::inc(&_wrong_method_ctr);

  if (TraceCallFixup) {
    ResourceMark rm(current);
    tty->print("handle_wrong_method reresolving call to");
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
  }
#endif

  return callee_method;
}

address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
  // The faulting unsafe accesses should be changed to throw the error
  // synchronously instead. Meanwhile the faulting instruction will be
  // skipped over (effectively turning it into a no-op), and an
  // asynchronous exception will be raised which the thread will
  // handle at a later point. If the instruction is a load it will
  // return garbage.

  // Request an async exception.
  thread->set_pending_unsafe_access_error();

  // Return address of next instruction to execute.
  return next_pc;
}
  msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;

  char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
  if (message == nullptr) {
    // Shouldn't happen, but don't cause even more problems if it does
    message = const_cast<char*>(caster_klass->external_name());
  } else {
    jio_snprintf(message,
                 msglen,
                 "class %s cannot be cast to class %s (%s%s%s)",
                 caster_name,
                 target_name,
                 caster_klass_description,
                 klass_separator,
                 target_klass_description
                 );
  }
  return message;
}

JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
  (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
JRT_END

void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
  if (!SafepointSynchronize::is_synchronizing()) {
    // Only try quick_enter() if we're not trying to reach a safepoint
    // so that the calling thread reaches the safepoint more quickly.
    if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
      return;
    }
  }
  // NO_ASYNC required because an async exception on the state transition destructor
  // would leave you with the lock held and it would never be released.
  // The normal monitorenter NullPointerException is thrown without acquiring a lock
  // and the model is that an exception implies the method failed.
  JRT_BLOCK_NO_ASYNC
    Handle h_obj(THREAD, obj);
    ObjectSynchronizer::enter(h_obj, lock, current);
    assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
  tty->print_cr(" %% in nested categories are relative to their category");
  tty->print_cr(" (and thus add up to more than 100%% with inlining)");
  tty->cr();

  MethodArityHistogram h;
}
#endif

#ifndef PRODUCT
static int _lookups; // number of calls to lookup
static int _equals;  // number of buckets checked with matching hash
static int _hits;    // number of successful lookups
static int _compact; // number of equals calls with compact signature
#endif

// A simple wrapper class around the calling convention information
// that allows sharing of adapters for the same calling convention.
class AdapterFingerPrint : public CHeapObj<mtCode> {
 private:
  enum {
    _basic_type_bits = 4,
    _basic_type_mask = right_n_bits(_basic_type_bits),
    _basic_types_per_int = BitsPerInt / _basic_type_bits,
    _compact_int_count = 3
  };
  // TO DO: Consider integrating this with a more global scheme for compressing signatures.
  // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.

  union {
    int  _compact[_compact_int_count];
    int* _fingerprint;
  } _value;
  int _length; // A negative length indicates the fingerprint is in the compact form;
               // otherwise _value._fingerprint is the array.

  // Remap BasicTypes that are handled equivalently by the adapters.
  // These are correct for the current system but someday it might be
  // necessary to make this mapping platform dependent.
  static int adapter_encoding(BasicType in) {
    switch (in) {
      case T_BOOLEAN:
      case T_BYTE:
      case T_SHORT:
      case T_CHAR:
        // These are all promoted to T_INT in the calling convention
        return T_INT;

      case T_OBJECT:
      case T_ARRAY:
        // In other words, we assume that any register good enough for
        // an int or long is good enough for a managed pointer.
#ifdef _LP64
        return T_LONG;
#else
        return T_INT;
#endif

      case T_INT:
      case T_LONG:
      case T_FLOAT:
      case T_DOUBLE:
      case T_VOID:
        return in;

      default:
        ShouldNotReachHere();
        return T_CONFLICT;
    }
  }
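  // One consequence of this encoding is adapter sharing: e.g. the static
  // signatures (I)V and (S)V yield identical fingerprints, since T_SHORT and
  // T_INT encode, and are passed, the same way.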

 public:
  AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
    // The fingerprint is based on the BasicType signature encoded
    // into an array of ints with eight entries per int.
    int* ptr;
    int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
    if (len <= _compact_int_count) {
      assert(_compact_int_count == 3, "else change next line");
      _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
      // Storing the signature encoded as signed chars hits about 98%
      // of the time.
      _length = -len;
      ptr = _value._compact;
    } else {
      _length = len;
      _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
      ptr = _value._fingerprint;
    }

    // Now pack the BasicTypes with 8 per int
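    // Worked example (LP64, illustrative): an instance method taking a single
    // long has sig_bt = { T_OBJECT (receiver), T_LONG, T_VOID }; T_OBJECT
    // encodes as T_LONG, so the three nibbles T_LONG, T_LONG, T_VOID are
    // packed into one int and stored in compact form with _length == -1.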
    int sig_index = 0;
    for (int index = 0; index < len; index++) {
      int value = 0;
      for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
        int bt = adapter_encoding(sig_bt[sig_index++]);
        assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
        value = (value << _basic_type_bits) | bt;
      }
      ptr[index] = value;
    }
  }

  ~AdapterFingerPrint() {
    if (_length > 0) {
      FREE_C_HEAP_ARRAY(int, _value._fingerprint);
    }
  }

  int value(int index) {
    if (_length < 0) {
      return _value._compact[index];
    }
    return _value._fingerprint[index];
  }
  int length() {
    if (_length < 0) return -_length;
    return _length;
  }

  bool is_compact() {
    return _length <= 0;
  }
  const char* as_basic_args_string() {
    stringStream st;
    bool long_prev = false;
    for (int i = 0; i < length(); i++) {
      unsigned val = (unsigned)value(i);
      // args are packed so that first/lower arguments are in the highest
      // bits of each int value, so iterate from highest to the lowest
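      // Continuing the worked example above, the nibbles T_LONG, T_LONG,
      // T_VOID decode to "LJ": the first T_LONG is followed by a non-VOID
      // nibble, so it must have been a pointer ("L"); the second is followed
      // by T_VOID, so it was a real long ("J").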
      for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
        unsigned v = (val >> j) & _basic_type_mask;
        if (v == 0) {
          assert(i == length() - 1, "Only expect zeroes in the last word");
          continue;
        }
        if (long_prev) {
          long_prev = false;
          if (v == T_VOID) {
            st.print("J");
          } else {
            st.print("L");
          }
        }
        switch (v) {
          case T_INT:    st.print("I");    break;
          case T_LONG:   long_prev = true; break;
          case T_FLOAT:  st.print("F");    break;
          case T_DOUBLE: st.print("D");    break;
          case T_VOID:   break;
          default: ShouldNotReachHere();
        }
      }
    }
    if (long_prev) {
      st.print("L");
    }
    return st.as_string();
  }
#endif // !product

  bool equals(AdapterFingerPrint* other) {
    if (other->_length != _length) {
      return false;
    }
    if (_length < 0) {
      assert(_compact_int_count == 3, "else change next line");
      return _value._compact[0] == other->_value._compact[0] &&
             _value._compact[1] == other->_value._compact[1] &&
             _value._compact[2] == other->_value._compact[2];
    } else {
      for (int i = 0; i < _length; i++) {
        if (_value._fingerprint[i] != other->_value._fingerprint[i]) {
          return false;
        }
      }
      return true;
    }
  }

  static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
    NOT_PRODUCT(_equals++);
    return fp1->equals(fp2);
  }

  static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
    return fp->compute_hash();
  }
};

// A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
using AdapterHandlerTable = ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
                  AnyObj::C_HEAP, mtCode,
                  AdapterFingerPrint::compute_hash,
                  AdapterFingerPrint::equals>;
static AdapterHandlerTable* _adapter_handler_table;

// Find an entry with the same fingerprint if it exists
static AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
  NOT_PRODUCT(_lookups++);
  assert_lock_strong(AdapterHandlerLibrary_lock);
  AdapterFingerPrint fp(total_args_passed, sig_bt);
  AdapterHandlerEntry** entry = _adapter_handler_table->get(&fp);
  if (entry != nullptr) {
#ifndef PRODUCT
    if (fp.is_compact()) _compact++;
    _hits++;
#endif
    return *entry;
  }
  return nullptr;
}

#ifndef PRODUCT
static void print_table_statistics() {
  auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
    return sizeof(*key) + sizeof(*a);
  };
  TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
  ts.print(tty, "AdapterHandlerTable");
  tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
                _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
  tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
                _lookups, _equals, _hits, _compact);
}
#endif

// ---------------------------------------------------------------------------
// Implementation of AdapterHandlerLibrary
AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
const int AdapterHandlerLibrary_size = 16*K;
BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;

BufferBlob* AdapterHandlerLibrary::buffer_blob() {
  return _buffer;
}

static void post_adapter_creation(const AdapterBlob* new_adapter,
                                  const AdapterHandlerEntry* entry) {
  if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
    char blob_id[256];
    jio_snprintf(blob_id,
                 sizeof(blob_id),
                 "%s(%s)",
                 new_adapter->name(),
                 entry->fingerprint()->as_string());
    if (Forte::is_enabled()) {
      Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated(blob_id, new_adapter->content_begin(), new_adapter->content_end());
    }
  }
}

void AdapterHandlerLibrary::initialize() {
  ResourceMark rm;
  AdapterBlob* no_arg_blob = nullptr;
  AdapterBlob* int_arg_blob = nullptr;
  AdapterBlob* obj_arg_blob = nullptr;
  AdapterBlob* obj_int_arg_blob = nullptr;
  AdapterBlob* obj_obj_arg_blob = nullptr;
  {
    _adapter_handler_table = new (mtCode) AdapterHandlerTable();
    MutexLocker mu(AdapterHandlerLibrary_lock);

    // Create a special handler for abstract methods. Abstract methods
    // are never compiled so an i2c entry is somewhat meaningless, but
    // throw AbstractMethodError just in case.
    // Pass wrong_method_abstract for the c2i transitions to return
    // AbstractMethodError for invalid invocations.
    address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
    _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, nullptr),
                                                                SharedRuntime::throw_AbstractMethodError_entry(),
                                                                wrong_method_abstract, wrong_method_abstract);

    _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
    _no_arg_handler = create_adapter(no_arg_blob, 0, nullptr, true);

    BasicType obj_args[] = { T_OBJECT };
    _obj_arg_handler = create_adapter(obj_arg_blob, 1, obj_args, true);

    BasicType int_args[] = { T_INT };
    _int_arg_handler = create_adapter(int_arg_blob, 1, int_args, true);

    BasicType obj_int_args[] = { T_OBJECT, T_INT };
    _obj_int_arg_handler = create_adapter(obj_int_arg_blob, 2, obj_int_args, true);

    BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
    _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, 2, obj_obj_args, true);

    assert(no_arg_blob != nullptr &&
           obj_arg_blob != nullptr &&
           int_arg_blob != nullptr &&
           obj_int_arg_blob != nullptr &&
           obj_obj_arg_blob != nullptr, "Initial adapters must be properly created");
  }

  // Outside of the lock
  post_adapter_creation(no_arg_blob, _no_arg_handler);
  post_adapter_creation(obj_arg_blob, _obj_arg_handler);
  post_adapter_creation(int_arg_blob, _int_arg_handler);
  post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
  post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
}

AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
                                                      address i2c_entry,
                                                      address c2i_entry,
                                                      address c2i_unverified_entry,
                                                      address c2i_no_clinit_check_entry) {
  // Insert an entry into the table
  return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry,
                                 c2i_no_clinit_check_entry);
}

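// For example (illustrative): an instance method "int size()" passes only the
// receiver and maps to _obj_arg_handler; "static int abs(int)" maps to
// _int_arg_handler; "boolean equals(Object)" maps to _obj_obj_arg_handler.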
AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
  if (method->is_abstract()) {
    return _abstract_method_handler;
  }
  int total_args_passed = method->size_of_parameters(); // All args on stack
  if (total_args_passed == 0) {
    return _no_arg_handler;
  } else if (total_args_passed == 1) {
    if (!method->is_static()) {
      return _obj_arg_handler;
    }
    switch (method->signature()->char_at(1)) {
      case JVM_SIGNATURE_CLASS:
      case JVM_SIGNATURE_ARRAY:
        return _obj_arg_handler;
      case JVM_SIGNATURE_INT:
      case JVM_SIGNATURE_BOOLEAN:
      case JVM_SIGNATURE_CHAR:
      case JVM_SIGNATURE_BYTE:
      case JVM_SIGNATURE_SHORT:
        return _int_arg_handler;
    }
  } else if (total_args_passed == 2 &&
             !method->is_static()) {
    switch (method->signature()->char_at(1)) {
      case JVM_SIGNATURE_CLASS:
      case JVM_SIGNATURE_ARRAY:
        return _obj_obj_arg_handler;
      case JVM_SIGNATURE_INT:
      case JVM_SIGNATURE_BOOLEAN:
      case JVM_SIGNATURE_CHAR:
      case JVM_SIGNATURE_BYTE:
      case JVM_SIGNATURE_SHORT:
        return _obj_int_arg_handler;
    }
  }
  return nullptr;
}

class AdapterSignatureIterator : public SignatureIterator {
 private:
  BasicType stack_sig_bt[16];
  BasicType* sig_bt;
  int index;

 public:
  AdapterSignatureIterator(Symbol* signature,
                           fingerprint_t fingerprint,
                           bool is_static,
                           int total_args_passed) :
    SignatureIterator(signature, fingerprint),
    index(0)
  {
    sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
    if (!is_static) { // Pass in receiver first
      sig_bt[index++] = T_OBJECT;
    }
    do_parameters_on(this);
  }

  BasicType* basic_types() {
    return sig_bt;
  }

#ifdef ASSERT
  int slots() {
    return index;
  }
#endif

 private:

  friend class SignatureIterator; // so do_parameters_on can call do_type
  void do_type(BasicType type) {
    sig_bt[index++] = type;
    if (type == T_LONG || type == T_DOUBLE) {
      sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots
    }
  }
};
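// For example, iterating a static method with signature (JD)V yields
// sig_bt = { T_LONG, T_VOID, T_DOUBLE, T_VOID } and slots() == 4, matching
// Method::size_of_parameters().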

AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
  // Use customized signature handler. Need to lock around updates to
  // the _adapter_handler_table (it is not safe for concurrent readers
  // and a single writer: this could be fixed if it becomes a
  // problem).

  // Fast-path for trivial adapters
  AdapterHandlerEntry* entry = get_simple_adapter(method);
  if (entry != nullptr) {
    return entry;
  }

  ResourceMark rm;
  AdapterBlob* new_adapter = nullptr;

  // Fill in the signature array, for the calling-convention call.
  int total_args_passed = method->size_of_parameters(); // All args on stack

  AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
                              method->is_static(), total_args_passed);
  assert(si.slots() == total_args_passed, "");
  BasicType* sig_bt = si.basic_types();
  {
    MutexLocker mu(AdapterHandlerLibrary_lock);

    // Lookup method signature's fingerprint
    entry = lookup(total_args_passed, sig_bt);

    if (entry != nullptr) {
#ifdef ASSERT
      if (VerifyAdapterSharing) {
        AdapterBlob* comparison_blob = nullptr;
        AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, total_args_passed, sig_bt, false);
        assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison");
        assert(comparison_entry->compare_code(entry), "code must match");
        // Release the one just created and return the original
        delete comparison_entry;
      }
#endif
      return entry;
    }

    entry = create_adapter(new_adapter, total_args_passed, sig_bt, /* allocate_code_blob */ true);
  }

  // Outside of the lock
  if (new_adapter != nullptr) {
    post_adapter_creation(new_adapter, entry);
  }
  return entry;
}

AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
                                                           int total_args_passed,
                                                           BasicType* sig_bt,
                                                           bool allocate_code_blob) {
  if (log_is_enabled(Info, perf, class, link)) {
    ClassLoader::perf_method_adapters_count()->inc();
  }

  // StubRoutines::_final_stubs_code is initialized after this function can be called. As a result,
  // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated prior
  // to all StubRoutines::_final_stubs_code being set. Checks refer to runtime range checks generated
  // in an I2C stub that ensure that an I2C stub is called from an interpreter frame or stubs.
  bool contains_all_checks = StubRoutines::final_stubs_code() != nullptr;

  VMRegPair stack_regs[16];
  VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);

  // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
  int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
  BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
  CodeBuffer buffer(buf);
  short buffer_locs[20];
  buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
                                         sizeof(buffer_locs)/sizeof(relocInfo));

  // Make a C heap allocated version of the fingerprint to store in the adapter
  AdapterFingerPrint* fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
  MacroAssembler _masm(&buffer);
  AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
                                                                      total_args_passed,
                                                                      comp_args_on_stack,
                                                                      sig_bt,
                                                                      regs,
                                                                      fingerprint);

#ifdef ASSERT
  if (VerifyAdapterSharing) {
    entry->save_code(buf->code_begin(), buffer.insts_size());
    if (!allocate_code_blob) {
      return entry;
    }
  }
#endif

  new_adapter = AdapterBlob::create(&buffer);
  NOT_PRODUCT(int insts_size = buffer.insts_size());
  if (new_adapter == nullptr) {
    // CodeCache is full, disable compilation
    // Ought to log this but compile log is only per compile thread
    // and we're some nondescript Java thread.
    return nullptr;
  }
  entry->relocate(new_adapter->content_begin());
#ifndef PRODUCT
  // debugging support
  if (PrintAdapterHandlers || PrintStubCode) {
    ttyLocker ttyl;
    entry->print_adapter_on(tty);
    tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
                  _adapter_handler_table->number_of_entries(), fingerprint->as_basic_args_string(),
                  fingerprint->as_string(), insts_size);
    tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
    if (Verbose || PrintStubCode) {
      address first_pc = entry->base_address();
      if (first_pc != nullptr) {
        Disassembler::decode(first_pc, first_pc + insts_size, tty
                             NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
        tty->cr();
      }
    }
  }
#endif

  // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
  // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
  if (contains_all_checks || !VerifyAdapterCalls) {
    assert_lock_strong(AdapterHandlerLibrary_lock);
    _adapter_handler_table->put(fingerprint, entry);
  }
  return entry;
}

address AdapterHandlerEntry::base_address() {
  address base = _i2c_entry;
  if (base == nullptr)  base = _c2i_entry;
  assert(base <= _c2i_entry || _c2i_entry == nullptr, "");
  assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, "");
  assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, "");
  return base;
}

void AdapterHandlerEntry::relocate(address new_base) {
  address old_base = base_address();
  assert(old_base != nullptr, "");
  ptrdiff_t delta = new_base - old_base;
  if (_i2c_entry != nullptr)
    _i2c_entry += delta;
  if (_c2i_entry != nullptr)
    _c2i_entry += delta;
  if (_c2i_unverified_entry != nullptr)
    _c2i_unverified_entry += delta;
  if (_c2i_no_clinit_check_entry != nullptr)
    _c2i_no_clinit_check_entry += delta;
  assert(base_address() == new_base, "");
}


AdapterHandlerEntry::~AdapterHandlerEntry() {
  delete _fingerprint;
#ifdef ASSERT
  FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
#endif
}


#ifdef ASSERT
// Capture the code before relocation so that it can be compared
// against other versions. If the code is captured after relocation
// then relative instructions won't be equivalent.
void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
  _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
  _saved_code_length = length;
  memcpy(_saved_code, buffer, length);
}


bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
  assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");


  struct { double data[20]; } locs_buf;
  struct { double data[20]; } stubs_locs_buf;
  buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
#if defined(AARCH64) || defined(PPC64)
  // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
  // in the constant pool to ensure ordering between the barrier and oops
  // accesses. For native_wrappers we need a constant.
  // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
  // static java call that is resolved in the runtime.
  if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
    buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
  }
#endif
  buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
  MacroAssembler _masm(&buffer);

  // Fill in the signature array, for the calling-convention call.
  const int total_args_passed = method->size_of_parameters();

  VMRegPair stack_regs[16];
  VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);

  AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
                              method->is_static(), total_args_passed);
  BasicType* sig_bt = si.basic_types();
  assert(si.slots() == total_args_passed, "");
  BasicType ret_type = si.return_type();

  // Now get the compiled-Java arguments layout.
  SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);

  // Generate the compiled-to-native wrapper code
  nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);

  if (nm != nullptr) {
    {
      MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
      if (nm->make_in_use()) {
        method->set_code(method, nm);
      }
    }

    DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
    if (directive->PrintAssemblyOption) {
      nm->print_code();
    }
    DirectivesStack::release(directive);
      st->print("Adapter for signature: ");
      a->print_adapter_on(st);
      return true;
    } else {
      return false; // keep looking
    }
  };
  assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
  _adapter_handler_table->iterate(findblob);
  assert(found, "Should have found handler");
}

void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
  st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
  if (get_i2c_entry() != nullptr) {
    st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
  }
  if (get_c2i_entry() != nullptr) {
    st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
  }
  if (get_c2i_unverified_entry() != nullptr) {
    st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
  }
  if (get_c2i_no_clinit_check_entry() != nullptr) {
    st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
  }
  st->cr();
}

#ifndef PRODUCT

void AdapterHandlerLibrary::print_statistics() {
  print_table_statistics();
}

#endif /* PRODUCT */

JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
  assert(current == JavaThread::current(), "pre-condition");
  StackOverflow* overflow_state = current->stack_overflow_state();
  overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
  overflow_state->set_reserved_stack_activation(current->stack_base());
      event.set_method(method);
      event.commit();
    }
  }
  return activation;
}

void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
  // After any safepoint, just before going back to compiled code,
  // we inform the GC that we will be doing initializing writes to
  // this object in the future without emitting card-marks, so
  // the GC may take any compensating steps.

  oop new_obj = current->vm_result();
  if (new_obj == nullptr) return;

  BarrierSet *bs = BarrierSet::barrier_set();
  bs->on_slowpath_allocation_exit(current, new_obj);
}
|
25 #include "classfile/classLoader.hpp"
26 #include "classfile/javaClasses.inline.hpp"
27 #include "classfile/stringTable.hpp"
28 #include "classfile/vmClasses.hpp"
29 #include "classfile/vmSymbols.hpp"
30 #include "code/codeCache.hpp"
31 #include "code/compiledIC.hpp"
32 #include "code/nmethod.inline.hpp"
33 #include "code/scopeDesc.hpp"
34 #include "code/vtableStubs.hpp"
35 #include "compiler/abstractCompiler.hpp"
36 #include "compiler/compileBroker.hpp"
37 #include "compiler/disassembler.hpp"
38 #include "gc/shared/barrierSet.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "interpreter/interpreter.hpp"
41 #include "interpreter/interpreterRuntime.hpp"
42 #include "jvm.h"
43 #include "jfr/jfrEvents.hpp"
44 #include "logging/log.hpp"
45 #include "memory/oopFactory.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "memory/universe.hpp"
48 #include "oops/access.hpp"
49 #include "oops/fieldStreams.inline.hpp"
50 #include "metaprogramming/primitiveConversions.hpp"
51 #include "oops/klass.hpp"
52 #include "oops/method.inline.hpp"
53 #include "oops/objArrayKlass.hpp"
54 #include "oops/objArrayOop.inline.hpp"
55 #include "oops/oop.inline.hpp"
56 #include "oops/inlineKlass.inline.hpp"
57 #include "prims/forte.hpp"
58 #include "prims/jvmtiExport.hpp"
59 #include "prims/jvmtiThreadState.hpp"
60 #include "prims/methodHandles.hpp"
61 #include "prims/nativeLookup.hpp"
62 #include "runtime/arguments.hpp"
63 #include "runtime/atomic.hpp"
64 #include "runtime/basicLock.inline.hpp"
65 #include "runtime/frame.inline.hpp"
66 #include "runtime/handles.inline.hpp"
67 #include "runtime/init.hpp"
68 #include "runtime/interfaceSupport.inline.hpp"
69 #include "runtime/java.hpp"
70 #include "runtime/javaCalls.hpp"
71 #include "runtime/jniHandles.inline.hpp"
72 #include "runtime/perfData.hpp"
73 #include "runtime/sharedRuntime.hpp"
74 #include "runtime/stackWatermarkSet.hpp"
75 #include "runtime/stubRoutines.hpp"
76 #include "runtime/synchronizer.inline.hpp"
1173 // for a call current in progress, i.e., arguments has been pushed on stack
1174 // but callee has not been invoked yet. Caller frame must be compiled.
1175 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1176 CallInfo& callinfo, TRAPS) {
1177 Handle receiver;
1178 Handle nullHandle; // create a handy null handle for exception returns
1179 JavaThread* current = THREAD;
1180
1181 assert(!vfst.at_end(), "Java frame must exist");
1182
1183 // Find caller and bci from vframe
1184 methodHandle caller(current, vfst.method());
1185 int bci = vfst.bci();
1186
1187 if (caller->is_continuation_enter_intrinsic()) {
1188 bc = Bytecodes::_invokestatic;
1189 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1190 return receiver;
1191 }
1192
1193 // Substitutability test implementation piggy backs on static call resolution
1194 Bytecodes::Code code = caller->java_code_at(bci);
1195 if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
1196 bc = Bytecodes::_invokestatic;
1197 methodHandle attached_method(THREAD, extract_attached_method(vfst));
1198 assert(attached_method.not_null(), "must have attached method");
1199 vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
1200 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
1201 #ifdef ASSERT
1202 Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(vmSymbols::isSubstitutable_name(), vmSymbols::object_object_boolean_signature());
1203 assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
1204 #endif
1205 return receiver;
1206 }
1207
1208 Bytecode_invoke bytecode(caller, bci);
1209 int bytecode_index = bytecode.index();
1210 bc = bytecode.invoke_code();
1211
1212 methodHandle attached_method(current, extract_attached_method(vfst));
1213 if (attached_method.not_null()) {
1214 Method* callee = bytecode.static_target(CHECK_NH);
1215 vmIntrinsics::ID id = callee->intrinsic_id();
1216 // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
1217 // it attaches statically resolved method to the call site.
1218 if (MethodHandles::is_signature_polymorphic(id) &&
1219 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1220 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1221
1222 // Adjust invocation mode according to the attached method.
1223 switch (bc) {
1224 case Bytecodes::_invokevirtual:
1225 if (attached_method->method_holder()->is_interface()) {
1226 bc = Bytecodes::_invokeinterface;
1227 }
1228 break;
1229 case Bytecodes::_invokeinterface:
1230 if (!attached_method->method_holder()->is_interface()) {
1231 bc = Bytecodes::_invokevirtual;
1232 }
1233 break;
1234 case Bytecodes::_invokehandle:
1235 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1236 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1237 : Bytecodes::_invokevirtual;
1238 }
1239 break;
1240 default:
1241 break;
1242 }
1243 } else {
1244 assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1245 if (!attached_method->method_holder()->is_inline_klass()) {
1246 // Ignore the attached method in this case so as not to confuse the code below
1247 attached_method = methodHandle(current, nullptr);
1248 }
1249 }
1250 }
1251
1252 assert(bc != Bytecodes::_illegal, "not initialized");
1253
1254 bool has_receiver = bc != Bytecodes::_invokestatic &&
1255 bc != Bytecodes::_invokedynamic &&
1256 bc != Bytecodes::_invokehandle;
1257 bool check_null_and_abstract = true;
1258
1259 // Find receiver for non-static call
1260 if (has_receiver) {
1261 // This register map must be updated since we need to find the receiver for
1262 // compiled frames. The receiver might be in a register.
1263 RegisterMap reg_map2(current,
1264 RegisterMap::UpdateMap::include,
1265 RegisterMap::ProcessFrames::include,
1266 RegisterMap::WalkContinuation::skip);
1267 frame stubFrame = current->last_frame();
1268 // Caller-frame is a compiled frame
1269 frame callerFrame = stubFrame.sender(&reg_map2);
1270
1271 Method* callee = attached_method();
1272 if (callee == nullptr) {
1273 callee = bytecode.static_target(CHECK_NH);
1274 if (callee == nullptr) {
1275 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1276 }
1277 }
1278 bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->as_nmethod()->is_compiled_by_c1();
1279 if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
1280 // If the receiver is an inline type that is passed as fields, no oop is available
1281 // Resolve the call without receiver null checking.
1282 assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
1283 assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
1284 if (bc == Bytecodes::_invokeinterface) {
1285 bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
1286 }
1287 check_null_and_abstract = false;
1288 } else {
1289 // Retrieve from a compiled argument list
1290 receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1291 assert(oopDesc::is_oop_or_null(receiver()), "");
1292 if (receiver.is_null()) {
1293 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1294 }
1295 }
1296 }
1297
1298 // Resolve method
1299 if (attached_method.not_null()) {
1300 // Parameterized by attached method.
1301 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
1302 } else {
1303 // Parameterized by bytecode.
1304 constantPoolHandle constants(current, caller->constants());
1305 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1306 }
1307
1308 #ifdef ASSERT
1309 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1310 if (has_receiver && check_null_and_abstract) {
1311 assert(receiver.not_null(), "should have thrown exception");
1312 Klass* receiver_klass = receiver->klass();
1313 Klass* rk = nullptr;
1314 if (attached_method.not_null()) {
1315 // In case there's resolved method attached, use its holder during the check.
1316 rk = attached_method->method_holder();
1317 } else {
1318 // Klass is already loaded.
1319 constantPoolHandle constants(current, caller->constants());
1320 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1321 }
1322 Klass* static_receiver_klass = rk;
1323 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1324 "actual receiver must be subclass of static receiver klass");
1325 if (receiver_klass->is_instance_klass()) {
1326 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1327 tty->print_cr("ERROR: Klass not yet initialized!!");
1328 receiver_klass->print();
1329 }
1330 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1331 }
1332 }
1333 #endif
1334
1335 return receiver;
1336 }
1337
1338 methodHandle SharedRuntime::find_callee_method(bool is_optimized, bool& caller_is_c1, TRAPS) {
1339 JavaThread* current = THREAD;
1340 ResourceMark rm(current);
1341 // We first need to check whether any Java activations (compiled or interpreted)
1342 // exist on the stack since the last JavaCall. If not, we need
1343 // to get the target method from the JavaCall wrapper.
1344 vframeStream vfst(current, true); // Do not skip any javaCalls
1345 methodHandle callee_method;
1346 if (vfst.at_end()) {
1347 // No Java frames were found on stack since we did the JavaCall.
1348 // Hence the stack can only contain an entry_frame. We need to
1349 // find the target method from the stub frame.
1350 RegisterMap reg_map(current,
1351 RegisterMap::UpdateMap::skip,
1352 RegisterMap::ProcessFrames::include,
1353 RegisterMap::WalkContinuation::skip);
1354 frame fr = current->last_frame();
1355 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1356 fr = fr.sender(&reg_map);
1357 assert(fr.is_entry_frame(), "must be");
1358 // fr is now pointing to the entry frame.
1359 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1360 } else {
1361 Bytecodes::Code bc;
1362 CallInfo callinfo;
1363 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1364 // Calls via mismatching methods are always non-scalarized
1365 if (callinfo.resolved_method()->mismatch() && !is_optimized) {
1366 caller_is_c1 = true;
1367 }
1368 callee_method = methodHandle(current, callinfo.selected_method());
1369 }
1370 assert(callee_method()->is_method(), "must be");
1371 return callee_method;
1372 }
1373
1374 // Resolves a call.
1375 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_is_c1, TRAPS) {
1376 JavaThread* current = THREAD;
1377 ResourceMark rm(current);
1378 RegisterMap cbl_map(current,
1379 RegisterMap::UpdateMap::skip,
1380 RegisterMap::ProcessFrames::include,
1381 RegisterMap::WalkContinuation::skip);
1382 frame caller_frame = current->last_frame().sender(&cbl_map);
1383
1384 CodeBlob* caller_cb = caller_frame.cb();
1385 guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1386 nmethod* caller_nm = caller_cb->as_nmethod();
1387
1388 // determine call info & receiver
1389 // note: a) receiver is null for static calls
1390 // b) an exception is thrown if receiver is null for non-static calls
1391 CallInfo call_info;
1392 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1393 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1394
1395 NoSafepointVerifier nsv;
1396
1397 methodHandle callee_method(current, call_info.selected_method());
1398 // Calls via mismatching methods are always non-scalarized
1399 if (caller_nm->is_compiled_by_c1() || (call_info.resolved_method()->mismatch() && !is_optimized)) {
1400 caller_is_c1 = true;
1401 }
1402
1403 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1404 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1405 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1406 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1407 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1408
1409 assert(!caller_nm->is_unloading(), "It should not be unloading");
1410
1411 #ifndef PRODUCT
1412 // tracing/debugging/statistics
1413 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1414 (is_virtual) ? (&_resolve_virtual_ctr) :
1415 (&_resolve_static_ctr);
1416 Atomic::inc(addr);
1417
1418 if (TraceCallFixup) {
1419 ResourceMark rm(current);
1420 tty->print("resolving %s%s (%s) call%s to",
1421 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1422 Bytecodes::name(invoke_code), (caller_is_c1) ? " from C1" : "");
1423 callee_method->print_short_name(tty);
1424 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1425 p2i(caller_frame.pc()), p2i(callee_method->code()));
1426 }
1427 #endif
1428
1429 if (invoke_code == Bytecodes::_invokestatic) {
1430 assert(callee_method->method_holder()->is_initialized() ||
1431 callee_method->method_holder()->is_reentrant_initialization(current),
1432 "invalid class initialization state for invoke_static");
1433 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1434 // In order to keep class initialization check, do not patch call
1435 // site for static call when the class is not fully initialized.
1436 // Proper check is enforced by call site re-resolution on every invocation.
1437 //
1438 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1439 // explicit class initialization check is put in nmethod entry (VEP).
1440 assert(callee_method->method_holder()->is_linked(), "must be");
1441 return callee_method;
1442 }
1443 }
1444
1445
1446 // JSR 292 key invariant:
1447 // If the resolved method is a MethodHandle invoke target, the call
1448 // site must be a MethodHandle call site, because the lambda form might tail-call
1449 // leaving the stack in a state unknown to either caller or callee
1450
1451 // Compute entry points. The computation of the entry points is independent of
1452 // patching the call.
1453
1454 // Make sure the callee nmethod does not get deoptimized and removed before
1455 // we are done patching the code.
1456
1457
1458 CompiledICLocker ml(caller_nm);
1459 if (is_virtual && !is_optimized) {
1460 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1461 inline_cache->update(&call_info, receiver->klass(), caller_is_c1);
1462 } else {
1463 // Callsite is a direct call - set it to the destination method
1464 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1465 callsite->set(callee_method, caller_is_c1);
1466 }
1467
1468 return callee_method;
1469 }
1470
1471 // Inline caches exist only in compiled code
1472 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1473 #ifdef ASSERT
1474 RegisterMap reg_map(current,
1475 RegisterMap::UpdateMap::skip,
1476 RegisterMap::ProcessFrames::include,
1477 RegisterMap::WalkContinuation::skip);
1478 frame stub_frame = current->last_frame();
1479 assert(stub_frame.is_runtime_frame(), "sanity check");
1480 frame caller_frame = stub_frame.sender(&reg_map);
1481 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1482 #endif /* ASSERT */
1483
1484 methodHandle callee_method;
1485 bool is_optimized = false;
1486 bool caller_is_c1 = false;
1487 JRT_BLOCK
1488 callee_method = SharedRuntime::handle_ic_miss_helper(is_optimized, caller_is_c1, CHECK_NULL);
1489 // Return Method* through TLS
1490 current->set_vm_result_2(callee_method());
1491 JRT_BLOCK_END
1492 // return compiled code entry point after potential safepoints
1493 return get_resolved_entry(current, callee_method, false, is_optimized, caller_is_c1);
1494 JRT_END
1495
1496
1497 // Handle call site that has been made non-entrant
1498 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1499 // 6243940 We might end up in here if the callee is deoptimized
1500 // as we race to call it. We don't want to take a safepoint if
1501 // the caller was interpreted because the caller frame will look
1502 // interpreted to the stack walkers and arguments are now
1503 // "compiled" so it is much better to make this transition
1504 // invisible to the stack walking code. The i2c path will
1505 // place the callee method in the callee_target. It is stashed
1506 // there because if we try to find the callee by normal means a
1507 // safepoint is possible and we would have trouble GC'ing the compiled args.
1508 RegisterMap reg_map(current,
1509 RegisterMap::UpdateMap::skip,
1510 RegisterMap::ProcessFrames::include,
1511 RegisterMap::WalkContinuation::skip);
1512 frame stub_frame = current->last_frame();
1513 assert(stub_frame.is_runtime_frame(), "sanity check");
1514 frame caller_frame = stub_frame.sender(&reg_map);
1515
1516 if (caller_frame.is_interpreted_frame() ||
1517 caller_frame.is_entry_frame() ||
1518 caller_frame.is_upcall_stub_frame()) {
1519 Method* callee = current->callee_target();
1520 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1521 current->set_vm_result_2(callee);
1522 current->set_callee_target(nullptr);
1523 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1524 // Bypass class initialization checks in c2i when caller is in native.
1525 // JNI calls to static methods don't have class initialization checks.
1526 // Fast class initialization checks are present in c2i adapters and call into
1527 // SharedRuntime::handle_wrong_method() on the slow path.
1528 //
1529 // JVM upcalls may land here as well, but there's a proper check present in
1530 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1531 // so bypassing it in c2i adapter is benign.
1532 return callee->get_c2i_no_clinit_check_entry();
1533 } else {
1534 if (caller_frame.is_interpreted_frame()) {
1535 return callee->get_c2i_inline_entry();
1536 } else {
1537 return callee->get_c2i_entry();
1538 }
1539 }
1540 }
1541
1542 // Must be compiled to compiled path which is safe to stackwalk
1543 methodHandle callee_method;
1544 bool is_static_call = false;
1545 bool is_optimized = false;
1546 bool caller_is_c1 = false;
1547 JRT_BLOCK
1548 // Force resolving of caller (if we called from compiled frame)
1549 callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_is_c1, CHECK_NULL);
1550 current->set_vm_result_2(callee_method());
1551 JRT_BLOCK_END
1552 // return compiled code entry point after potential safepoints
1553 return get_resolved_entry(current, callee_method, is_static_call, is_optimized, caller_is_c1);
1554 JRT_END
1555
1556 // Handle abstract method call
1557 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1558 // Verbose error message for AbstractMethodError.
1559 // Get the called method from the invoke bytecode.
1560 vframeStream vfst(current, true);
1561 assert(!vfst.at_end(), "Java frame must exist");
1562 methodHandle caller(current, vfst.method());
1563 Bytecode_invoke invoke(caller, vfst.bci());
1564 DEBUG_ONLY( invoke.verify(); )
1565
1566 // Find the compiled caller frame.
1567 RegisterMap reg_map(current,
1568 RegisterMap::UpdateMap::include,
1569 RegisterMap::ProcessFrames::include,
1570 RegisterMap::WalkContinuation::skip);
1571 frame stubFrame = current->last_frame();
1572 assert(stubFrame.is_runtime_frame(), "must be");
1573 frame callerFrame = stubFrame.sender(&reg_map);
1574 assert(callerFrame.is_compiled_frame(), "must be");
1575
1576 // Install exception and return forward entry.
1577 address res = SharedRuntime::throw_AbstractMethodError_entry();
1578 JRT_BLOCK
1579 methodHandle callee(current, invoke.static_target(current));
1580 if (!callee.is_null()) {
1581 oop recv = callerFrame.retrieve_receiver(&reg_map);
1582 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1583 res = StubRoutines::forward_exception_entry();
1584 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1585 }
1586 JRT_BLOCK_END
1587 return res;
1588 JRT_END
1589
1590 // return verified_code_entry if interp_only_mode is not set for the current thread;
1591 // otherwise return c2i entry.
1592 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method,
1593 bool is_static_call, bool is_optimized, bool caller_is_c1) {
1594 if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1595 // In interp_only_mode we need to go to the interpreted entry
1596 // The c2i won't patch in this mode -- see fixup_callers_callsite
1597 return callee_method->get_c2i_entry();
1598 }
1599
1600 if (caller_is_c1) {
1601 assert(callee_method->verified_inline_code_entry() != nullptr, "Jump to zero!");
1602 return callee_method->verified_inline_code_entry();
1603 } else if (is_static_call || is_optimized) {
1604 assert(callee_method->verified_code_entry() != nullptr, "Jump to zero!");
1605 return callee_method->verified_code_entry();
1606 } else {
1607 assert(callee_method->verified_inline_ro_code_entry() != nullptr, "Jump to zero!");
1608 return callee_method->verified_inline_ro_code_entry();
1609 }
1610 }
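// Informal summary (illustrative only) of the selection above, assuming
// interp_only_mode is off:
//
//   caller_is_c1                 -> verified_inline_code_entry()     VIEP: receiver and
//                                                                    args passed as oops
//   is_static_call, is_optimized -> verified_code_entry()            VEP: fully scalarized
//                                                                    calling convention
//   otherwise                    -> verified_inline_ro_code_entry()  VIEP(RO): receiver as
//                                                                    oop, rest scalarized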
1611
1612 // resolve a static call and patch code
1613 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1614 methodHandle callee_method;
1615 bool caller_is_c1 = false;
1616 bool enter_special = false;
1617 JRT_BLOCK
1618 callee_method = SharedRuntime::resolve_helper(false, false, caller_is_c1, CHECK_NULL);
1619 current->set_vm_result_2(callee_method());
1620 JRT_BLOCK_END
1621 // return compiled code entry point after potential safepoints
1622 return get_resolved_entry(current, callee_method, true, false, caller_is_c1);
1623 JRT_END
1624
1625 // resolve virtual call and update inline cache to monomorphic
1626 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1627 methodHandle callee_method;
1628 bool caller_is_c1 = false;
1629 JRT_BLOCK
1630 callee_method = SharedRuntime::resolve_helper(true, false, caller_is_c1, CHECK_NULL);
1631 current->set_vm_result_2(callee_method());
1632 JRT_BLOCK_END
1633 // return compiled code entry point after potential safepoints
1634 return get_resolved_entry(current, callee_method, false, false, caller_is_c1);
1635 JRT_END
1636
1637
1638 // Resolve a virtual call that can be statically bound (e.g., always
1639 // monomorphic, so it has no inline cache). Patch code to resolved target.
1640 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1641 methodHandle callee_method;
1642 bool caller_is_c1 = false;
1643 JRT_BLOCK
1644 callee_method = SharedRuntime::resolve_helper(true, true, caller_is_c1, CHECK_NULL);
1645 current->set_vm_result_2(callee_method());
1646 JRT_BLOCK_END
1647 // return compiled code entry point after potential safepoints
1648 return get_resolved_entry(current, callee_method, false, true, caller_is_c1);
1649 JRT_END
1650
1651
1652
1653 methodHandle SharedRuntime::handle_ic_miss_helper(bool& is_optimized, bool& caller_is_c1, TRAPS) {
1654 JavaThread* current = THREAD;
1655 ResourceMark rm(current);
1656 CallInfo call_info;
1657 Bytecodes::Code bc;
1658
1659 // The receiver is null for static calls. An exception is thrown for a null
1660 // receiver in non-static calls.
1661 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1662
1663 methodHandle callee_method(current, call_info.selected_method());
1664
1665 #ifndef PRODUCT
1666 Atomic::inc(&_ic_miss_ctr);
1667
1668 // Statistics & Tracing
1669 if (TraceCallFixup) {
1670 ResourceMark rm(current);
1671 tty->print("IC miss (%s) call%s to", Bytecodes::name(bc), (caller_is_c1) ? " from C1" : "");
1672 callee_method->print_short_name(tty);
1673 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1674 }
1675
1676 if (ICMissHistogram) {
1677 MutexLocker m(VMStatistic_lock);
1678 RegisterMap reg_map(current,
1679 RegisterMap::UpdateMap::skip,
1680 RegisterMap::ProcessFrames::include,
1681 RegisterMap::WalkContinuation::skip);
1682 frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1683 // produce statistics under the lock
1684 trace_ic_miss(f.pc());
1685 }
1686 #endif
1687
1688 // install an event collector so that when a vtable stub is created the
1689 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1690 // event can't be posted when the stub is created as locks are held
1691 // - instead the event will be deferred until the event collector goes
1692 // out of scope.
1693 JvmtiDynamicCodeEventCollector event_collector;
1694
1695 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1696 RegisterMap reg_map(current,
1697 RegisterMap::UpdateMap::skip,
1698 RegisterMap::ProcessFrames::include,
1699 RegisterMap::WalkContinuation::skip);
1700 frame caller_frame = current->last_frame().sender(&reg_map);
1701 CodeBlob* cb = caller_frame.cb();
1702 nmethod* caller_nm = cb->as_nmethod();
1703 // Calls via mismatching methods are always non-scalarized
1704 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1705 caller_is_c1 = true;
1706 }
1707
1708 CompiledICLocker ml(caller_nm);
1709 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1710 inline_cache->update(&call_info, receiver()->klass(), caller_is_c1);
1711
1712 return callee_method;
1713 }
1714
1715 //
1716 // Resets a call-site in compiled code so it will get resolved again.
1717 // This routine handles virtual call sites, optimized virtual call
1718 // sites, and static call sites. Typically used to change a call site's
1719 // destination from compiled to interpreted.
1720 //
1721 methodHandle SharedRuntime::reresolve_call_site(bool& is_static_call, bool& is_optimized, bool& caller_is_c1, TRAPS) {
1722 JavaThread* current = THREAD;
1723 ResourceMark rm(current);
1724 RegisterMap reg_map(current,
1725 RegisterMap::UpdateMap::skip,
1726 RegisterMap::ProcessFrames::include,
1727 RegisterMap::WalkContinuation::skip);
1728 frame stub_frame = current->last_frame();
1729 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1730 frame caller = stub_frame.sender(&reg_map);
1731 if (caller.is_compiled_frame()) {
1732 caller_is_c1 = caller.cb()->as_nmethod()->is_compiled_by_c1();
1733 }
1734
1735 // Do nothing if the frame isn't a live compiled frame.
1736 // nmethod could be deoptimized by the time we get here
1737 // so no update to the caller is needed.
1738
1739 if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1740 (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) {
1741
1742 address pc = caller.pc();
1743
1744 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1745 assert(caller_nm != nullptr, "did not find caller nmethod");
1746
1747 // Default call_addr is the location of the "basic" call.
1748 // Determine the address of the call we are re-resolving. With
1749 // Inline Caches we will always find a recognizable call.
1750 // With Inline Caches disabled we may or may not find a
1751 // recognizable call. We will always find a call for static
1752 // calls and for optimized virtual calls. For vanilla virtual
1753 // calls it depends on the state of the UseInlineCaches switch.
1754 //
1755 // With Inline Caches disabled we can get here for a virtual call
1756 // for two reasons:
1757 // 1 - calling an abstract method. The vtable for abstract methods
1758 // will run us thru handle_wrong_method and we will eventually
1759 // end up in the interpreter to throw the AbstractMethodError.
1760 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1761 // call and between the time we fetch the entry address and
1762 // we jump to it the target gets deoptimized. Similar to 1
1763 // we will wind up in the interpreter (thru a c2i with c2).
1764 //
1765 CompiledICLocker ml(caller_nm);
1766 address call_addr = caller_nm->call_instruction_address(pc);
1767
1768 if (call_addr != nullptr) {
1769 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1770 // bytes back in the instruction stream so we must also check for reloc info.
1771 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1772 bool ret = iter.next(); // Get item
1773 if (ret) {
1774 is_static_call = false;
1775 is_optimized = false;
1776 switch (iter.type()) {
1777 case relocInfo::static_call_type:
1778 is_static_call = true; // fall through
1779 case relocInfo::opt_virtual_call_type: {
1780 is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
1781 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1782 cdc->set_to_clean();
1783 break;
1784 }
1785 case relocInfo::virtual_call_type: {
1786 // compiled, dispatched call (which used to call an interpreted method)
1787 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1788 inline_cache->set_to_clean();
1789 break;
1790 }
1791 default:
1792 break;
1793 }
1794 }
1795 }
1796 }
1797
1798 methodHandle callee_method = find_callee_method(is_optimized, caller_is_c1, CHECK_(methodHandle()));
1799
1800 #ifndef PRODUCT
1801 Atomic::inc(&_wrong_method_ctr);
1802
1803 if (TraceCallFixup) {
1804 ResourceMark rm(current);
1805 tty->print("handle_wrong_method reresolving call%s to", (caller_is_c1) ? " from C1" : "");
1806 callee_method->print_short_name(tty);
1807 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1808 }
1809 #endif
1810
1811 return callee_method;
1812 }
1813
1814 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1815 // The faulting unsafe accesses should be changed to throw the error
1816 // synchronously instead. Meanwhile the faulting instruction will be
1817 // skipped over (effectively turning it into a no-op) and an
1818 // asynchronous exception will be raised which the thread will
1819 // handle at a later point. If the instruction is a load it will
1820 // return garbage.
1821
1822 // Request an async exception.
1823 thread->set_pending_unsafe_access_error();
1824
1825 // Return address of next instruction to execute.
1991 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
1992
1993 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
1994 if (message == nullptr) {
1995 // Shouldn't happen, but don't cause even more problems if it does
1996 message = const_cast<char*>(caster_klass->external_name());
1997 } else {
1998 jio_snprintf(message,
1999 msglen,
2000 "class %s cannot be cast to class %s (%s%s%s)",
2001 caster_name,
2002 target_name,
2003 caster_klass_description,
2004 klass_separator,
2005 target_klass_description
2006 );
2007 }
2008 return message;
2009 }
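// Example (illustrative): for a failed cast between two unrelated bootstrap classes
// the generated message reads along the lines of
//
//   class java.lang.Integer cannot be cast to class java.lang.String
//   (java.lang.Integer and java.lang.String are in module java.base of loader 'bootstrap')
//
// where the parenthesized tail is built from the *_klass_description strings above.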
2010
2011 char* SharedRuntime::generate_identity_exception_message(JavaThread* current, Klass* klass) {
2012 assert(klass->is_inline_klass(), "Must be a concrete value class");
2013 const char* desc = "Cannot synchronize on an instance of value class ";
2014 const char* className = klass->external_name();
2015 size_t msglen = strlen(desc) + strlen(className) + 1;
2016 char* message = NEW_RESOURCE_ARRAY(char, msglen);
2017 if (nullptr == message) {
2018 // Out of memory: can't create detailed error message
2019 message = const_cast<char*>(klass->external_name());
2020 } else {
2021 jio_snprintf(message, msglen, "%s%s", desc, className);
2022 }
2023 return message;
2024 }
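// Example (illustrative): for a value class p.Point the generated message is
//
//   Cannot synchronize on an instance of value class p.Point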
2025
2026 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
2027 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
2028 JRT_END
2029
2030 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2031 if (!SafepointSynchronize::is_synchronizing()) {
2032 // Only try quick_enter() if we're not trying to reach a safepoint
2033 // so that the calling thread reaches the safepoint more quickly.
2034 if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
2035 return;
2036 }
2037 }
2038 // NO_ASYNC required because an async exception on the state transition destructor
2039 // would leave you with the lock held and it would never be released.
2040 // The normal monitorenter NullPointerException is thrown without acquiring a lock
2041 // and the model is that an exception implies the method failed.
2042 JRT_BLOCK_NO_ASYNC
2043 Handle h_obj(THREAD, obj);
2044 ObjectSynchronizer::enter(h_obj, lock, current);
2045 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2256 tty->print_cr(" %% in nested categories are relative to their category");
2257 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2258 tty->cr();
2259
2260 MethodArityHistogram h;
2261 }
2262 #endif
2263
2264 #ifndef PRODUCT
2265 static int _lookups; // number of calls to lookup
2266 static int _equals; // number of buckets checked with matching hash
2267 static int _hits; // number of successful lookups
2268 static int _compact; // number of equals calls with compact signature
2269 #endif
2270
2271 // A simple wrapper class around the calling convention information
2272 // that allows sharing of adapters for the same calling convention.
2273 class AdapterFingerPrint : public CHeapObj<mtCode> {
2274 private:
2275 enum {
2276 _basic_type_bits = 5,
2277 _basic_type_mask = right_n_bits(_basic_type_bits),
2278 _basic_types_per_int = BitsPerInt / _basic_type_bits,
2279 _compact_int_count = 3
2280 };
2281 // TO DO: Consider integrating this with a more global scheme for compressing signatures.
2282 // For now, 5 bits per component (plus T_VOID gaps after double/long) is not excessive.
2283
2284 union {
2285 int _compact[_compact_int_count];
2286 int* _fingerprint;
2287 } _value;
2288 int _length; // A negative length indicates the fingerprint is in the compact form;
2289 // otherwise _value._fingerprint points to the C-heap-allocated array.
2290
2291 // Remap BasicTypes that are handled equivalently by the adapters.
2292 // These are correct for the current system but someday it might be
2293 // necessary to make this mapping platform dependent.
2294 static BasicType adapter_encoding(BasicType in) {
2295 switch (in) {
2296 case T_BOOLEAN:
2297 case T_BYTE:
2298 case T_SHORT:
2299 case T_CHAR:
2300 // They are all promoted to T_INT in the calling convention
2301 return T_INT;
2302
2303 case T_OBJECT:
2304 case T_ARRAY:
2305 // In other words, we assume that any register good enough for
2306 // an int or long is good enough for a managed pointer.
2307 #ifdef _LP64
2308 return T_LONG;
2309 #else
2310 return T_INT;
2311 #endif
2312
2313 case T_INT:
2314 case T_LONG:
2315 case T_FLOAT:
2316 case T_DOUBLE:
2317 case T_VOID:
2318 return in;
2319
2320 default:
2321 ShouldNotReachHere();
2322 return T_CONFLICT;
2323 }
2324 }
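// Illustration (not used at runtime): because adapter_encoding() collapses the
// sub-int types, signatures differing only in those types share one adapter.
// On LP64:
//
//   adapter_encoding(T_BOOLEAN) == adapter_encoding(T_CHAR)  == T_INT
//   adapter_encoding(T_OBJECT)  == adapter_encoding(T_ARRAY) == T_LONG
//
// so, for example, static methods (ZC)V and (II)V produce identical fingerprints
// and are handled by the same i2c/c2i adapter.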
2325
2326 public:
2327 AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2328 // The fingerprint is based on the BasicType signature encoded
2329 // into an array of ints, _basic_types_per_int (six) entries per int.
2330 int total_args_passed = (sig != nullptr) ? sig->length() : 0;
2331 int* ptr;
2332 int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2333 if (len <= _compact_int_count) {
2334 assert(_compact_int_count == 3, "else change next line");
2335 _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2336 // Storing the signature encoded as signed chars hits about 98%
2337 // of the time.
2338 _length = -len;
2339 ptr = _value._compact;
2340 } else {
2341 _length = len;
2342 _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2343 ptr = _value._fingerprint;
2344 }
2345
2346 // Now pack the BasicTypes, six per int
2347 int sig_index = 0;
2348 BasicType prev_bt = T_ILLEGAL;
2349 int vt_count = 0;
2350 for (int index = 0; index < len; index++) {
2351 int value = 0;
2352 for (int byte = 0; byte < _basic_types_per_int; byte++) {
2353 BasicType bt = T_ILLEGAL;
2354 if (sig_index < total_args_passed) {
2355 bt = sig->at(sig_index++)._bt;
2356 if (bt == T_METADATA) {
2357 // Found start of inline type in signature
2358 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2359 if (sig_index == 1 && has_ro_adapter) {
2360 // With a ro_adapter, replace receiver inline type delimiter by T_VOID to prevent matching
2361 // with other adapters that have the same inline type as first argument and no receiver.
2362 bt = T_VOID;
2363 }
2364 vt_count++;
2365 } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2366 // Found end of inline type in signature
2367 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2368 vt_count--;
2369 assert(vt_count >= 0, "invalid vt_count");
2370 } else if (vt_count == 0) {
2371 // Widen fields that are not part of a scalarized inline type argument
2372 bt = adapter_encoding(bt);
2373 }
2374 prev_bt = bt;
2375 }
2376 int bt_val = (bt == T_ILLEGAL) ? 0 : bt;
2377 assert((bt_val & _basic_type_mask) == bt_val, "must fit in 5 bits");
2378 value = (value << _basic_type_bits) | bt_val;
2379 }
2380 ptr[index] = value;
2381 }
2382 assert(vt_count == 0, "invalid vt_count");
2383 }
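// Worked example (illustrative; BasicType values per globalDefinitions.hpp:
// T_INT = 10, T_LONG = 11, T_VOID = 14): a static method (IJ)V yields the
// signature stream [T_INT, T_LONG, T_VOID]. Three entries fit in one word,
// so the compact form is used (_length == -1) and the word is built as:
//
//   int value = 0;
//   value = (value << 5) | 10;   // T_INT
//   value = (value << 5) | 11;   // T_LONG
//   value = (value << 5) | 14;   // T_VOID gap after the long
//   value <<= 3 * 5;             // three unused trailing slots encode as 0
//
// leaving the first argument in bits 29..25, which is where
// as_basic_args_string() below starts decoding.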
2384
2385 ~AdapterFingerPrint() {
2386 if (_length > 0) {
2387 FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2388 }
2389 }
2390
2391 int value(int index) {
2392 if (_length < 0) {
2393 return _value._compact[index];
2394 }
2395 return _value._fingerprint[index];
2396 }
2397 int length() {
2398 if (_length < 0) return -_length;
2399 return _length;
2400 }
2401
2402 bool is_compact() {
2427 const char* as_basic_args_string() {
2428 stringStream st;
2429 bool long_prev = false;
2430 for (int i = 0; i < length(); i++) {
2431 unsigned val = (unsigned)value(i);
2432 // args are packed so that first/lower arguments are in the highest
2433 // bits of each int value, so iterate from highest to the lowest
2434 for (int j = (_basic_types_per_int - 1) * _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2435 unsigned v = (val >> j) & _basic_type_mask;
2436 if (v == 0) {
2437 assert(i == length() - 1, "Only expect zeroes in the last word");
2438 continue;
2439 }
2440 if (long_prev) {
2441 long_prev = false;
2442 // A T_LONG followed by T_VOID is a genuine long ("J"); a T_LONG without a
2443 // trailing T_VOID was an encoded oop ("L"). The current entry must still be
2444 // processed below, so this must not be an else-if chain.
2445 st.print(v == T_VOID ? "J" : "L");
2446 }
2447 if (v == T_LONG) {
2448 long_prev = true;
2449 } else if (v != T_VOID) {
2450 st.print("%c", type2char((BasicType)v));
2451 }
2452 }
2453 }
2454 if (long_prev) {
2455 st.print("L");
2456 }
2457 return st.as_string();
2458 }
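// Example (illustrative): a fingerprint built for (Ljava/lang/Object;IJ)V packs
// the stream [T_LONG, T_INT, T_LONG, T_VOID] on LP64 (oops encode as T_LONG) and
// prints as "LIJ": a T_LONG not followed by T_VOID decodes as an object ("L"),
// while a T_LONG/T_VOID pair decodes as a genuine long ("J").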
2459 #endif // !PRODUCT
2460
2461 bool equals(AdapterFingerPrint* other) {
2462 if (other->_length != _length) {
2463 return false;
2464 }
2465 if (_length < 0) {
2466 assert(_compact_int_count == 3, "else change next line");
2467 return _value._compact[0] == other->_value._compact[0] &&
2468 _value._compact[1] == other->_value._compact[1] &&
2469 _value._compact[2] == other->_value._compact[2];
2470 } else {
2478 }
2479
2480 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2481 NOT_PRODUCT(_equals++);
2482 return fp1->equals(fp2);
2483 }
2484
2485 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2486 return fp->compute_hash();
2487 }
2488 };
2489
2490 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2491 using AdapterHandlerTable = ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2492 AnyObj::C_HEAP, mtCode,
2493 AdapterFingerPrint::compute_hash,
2494 AdapterFingerPrint::equals>;
2495 static AdapterHandlerTable* _adapter_handler_table;
2496
2497 // Find an entry with the same fingerprint if it exists
2498 static AdapterHandlerEntry* lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2499 NOT_PRODUCT(_lookups++);
2500 assert_lock_strong(AdapterHandlerLibrary_lock);
2501 AdapterFingerPrint fp(sig, has_ro_adapter);
2502 AdapterHandlerEntry** entry = _adapter_handler_table->get(&fp);
2503 if (entry != nullptr) {
2504 #ifndef PRODUCT
2505 if (fp.is_compact()) _compact++;
2506 _hits++;
2507 #endif
2508 return *entry;
2509 }
2510 return nullptr;
2511 }
2512
2513 #ifndef PRODUCT
2514 static void print_table_statistics() {
2515 auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2516 return sizeof(*key) + sizeof(*a);
2517 };
2518 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2519 ts.print(tty, "AdapterHandlerTable");
2520 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2521 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2522 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2523 _lookups, _equals, _hits, _compact);
2524 }
2525 #endif
2526
2527 // ---------------------------------------------------------------------------
2528 // Implementation of AdapterHandlerLibrary
2529 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
2530 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2531 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2532 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2533 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2534 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2535 const int AdapterHandlerLibrary_size = 48*K;
2536 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2537
2538 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2539 return _buffer;
2540 }
2541
2542 static void post_adapter_creation(const AdapterBlob* new_adapter,
2543 const AdapterHandlerEntry* entry) {
2544 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2545 char blob_id[256];
2546 jio_snprintf(blob_id,
2547 sizeof(blob_id),
2548 "%s(%s)",
2549 new_adapter->name(),
2550 entry->fingerprint()->as_string());
2551 if (Forte::is_enabled()) {
2552 Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2553 }
2554
2555 if (JvmtiExport::should_post_dynamic_code_generated()) {
2556 JvmtiExport::post_dynamic_code_generated(blob_id, new_adapter->content_begin(),
2557 new_adapter->content_end());
2558 }
2559 }
2560
2561 void AdapterHandlerLibrary::initialize() {
2562 ResourceMark rm;
2563 AdapterBlob* no_arg_blob = nullptr;
2564 AdapterBlob* int_arg_blob = nullptr;
2565 AdapterBlob* obj_arg_blob = nullptr;
2566 AdapterBlob* obj_int_arg_blob = nullptr;
2567 AdapterBlob* obj_obj_arg_blob = nullptr;
2568 {
2569 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2570 MutexLocker mu(AdapterHandlerLibrary_lock);
2571
2572 // Create a special handler for abstract methods. Abstract methods
2573 // are never compiled so an i2c entry is somewhat meaningless, but
2574 // throw AbstractMethodError just in case.
2575 // Pass wrong_method_abstract for the c2i transitions to return
2576 // AbstractMethodError for invalid invocations.
2577 address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2578 _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(nullptr),
2579 SharedRuntime::throw_AbstractMethodError_entry(),
2580 wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
2581 wrong_method_abstract, wrong_method_abstract);
2582 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2583
2584 CompiledEntrySignature no_args;
2585 no_args.compute_calling_conventions();
2586 _no_arg_handler = create_adapter(no_arg_blob, no_args, true);
2587
2588 CompiledEntrySignature obj_args;
2589 SigEntry::add_entry(obj_args.sig(), T_OBJECT);
2590 obj_args.compute_calling_conventions();
2591 _obj_arg_handler = create_adapter(obj_arg_blob, obj_args, true);
2592
2593 CompiledEntrySignature int_args;
2594 SigEntry::add_entry(int_args.sig(), T_INT);
2595 int_args.compute_calling_conventions();
2596 _int_arg_handler = create_adapter(int_arg_blob, int_args, true);
2597
2598 CompiledEntrySignature obj_int_args;
2599 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT);
2600 SigEntry::add_entry(obj_int_args.sig(), T_INT);
2601 obj_int_args.compute_calling_conventions();
2602 _obj_int_arg_handler = create_adapter(obj_int_arg_blob, obj_int_args, true);
2603
2604 CompiledEntrySignature obj_obj_args;
2605 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2606 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT);
2607 obj_obj_args.compute_calling_conventions();
2608 _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, obj_obj_args, true);
2609
2610 assert(no_arg_blob != nullptr &&
2611 obj_arg_blob != nullptr &&
2612 int_arg_blob != nullptr &&
2613 obj_int_arg_blob != nullptr &&
2614 obj_obj_arg_blob != nullptr, "Initial adapters must be properly created");
2615 }
2617
2618 // Outside of the lock
2619 post_adapter_creation(no_arg_blob, _no_arg_handler);
2620 post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2621 post_adapter_creation(int_arg_blob, _int_arg_handler);
2622 post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2623 post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2624 }
2625
2626 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2627 address i2c_entry,
2628 address c2i_entry,
2629 address c2i_inline_entry,
2630 address c2i_inline_ro_entry,
2631 address c2i_unverified_entry,
2632 address c2i_unverified_inline_entry,
2633 address c2i_no_clinit_check_entry) {
2634 return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry,
2635 c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
2636 }
2637
2638 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2639 if (method->is_abstract()) {
2640 return nullptr;
2641 }
2642 int total_args_passed = method->size_of_parameters(); // All args on stack
2643 if (total_args_passed == 0) {
2644 return _no_arg_handler;
2645 } else if (total_args_passed == 1) {
2646 if (!method->is_static()) {
2647 if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2648 return nullptr;
2649 }
2650 return _obj_arg_handler;
2651 }
2652 switch (method->signature()->char_at(1)) {
2653 case JVM_SIGNATURE_CLASS: {
2654 if (InlineTypePassFieldsAsArgs) {
2655 SignatureStream ss(method->signature());
2656 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2657 if (vk != nullptr) {
2658 return nullptr;
2659 }
2660 }
2661 return _obj_arg_handler;
2662 }
2663 case JVM_SIGNATURE_ARRAY:
2664 return _obj_arg_handler;
2665 case JVM_SIGNATURE_INT:
2666 case JVM_SIGNATURE_BOOLEAN:
2667 case JVM_SIGNATURE_CHAR:
2668 case JVM_SIGNATURE_BYTE:
2669 case JVM_SIGNATURE_SHORT:
2670 return _int_arg_handler;
2671 }
2672 } else if (total_args_passed == 2 &&
2673 !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2674 switch (method->signature()->char_at(1)) {
2675 case JVM_SIGNATURE_CLASS: {
2676 if (InlineTypePassFieldsAsArgs) {
2677 SignatureStream ss(method->signature());
2678 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2679 if (vk != nullptr) {
2680 return nullptr;
2681 }
2682 }
2683 return _obj_obj_arg_handler;
2684 }
2685 case JVM_SIGNATURE_ARRAY:
2686 return _obj_obj_arg_handler;
2687 case JVM_SIGNATURE_INT:
2688 case JVM_SIGNATURE_BOOLEAN:
2689 case JVM_SIGNATURE_CHAR:
2690 case JVM_SIGNATURE_BYTE:
2691 case JVM_SIGNATURE_SHORT:
2692 return _obj_int_arg_handler;
2693 }
2694 }
2695 return nullptr;
2696 }
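// Illustration (informal; assumes InlineTypePassFieldsAsArgs finds no inline-type
// receiver or argument): which precomputed handler a method matches:
//
//   static void m()        -> _no_arg_handler
//   void m()               -> _obj_arg_handler       (receiver only)
//   static void m(int)     -> _int_arg_handler       (Z/C/B/S promote to int)
//   static void m(String)  -> _obj_arg_handler
//   void m(short)          -> _obj_int_arg_handler
//   void m(Object)         -> _obj_obj_arg_handler
//   anything else          -> nullptr, i.e. fall back to the fingerprint table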
2697
2698 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2699 _method(method), _num_inline_args(0), _has_inline_recv(false),
2700 _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2701 _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2702 _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2703 _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2704 _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2705 _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2706 }
2707
2708 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2709 // or the same entry for VEP and VIEP(RO).
2710 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2711 if (!has_scalarized_args()) {
2712 // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2713 return CodeOffsets::Verified_Entry;
2714 }
2715 if (_method->is_static()) {
2716 // Static methods don't need VIEP(RO)
2717 return CodeOffsets::Verified_Entry;
2718 }
2719
2720 if (has_inline_recv()) {
2721 if (num_inline_args() == 1) {
2722 // Share same entry for VIEP and VIEP(RO).
2723 // This is quite common: we have an instance method in an InlineKlass that has
2724 // no inline type args other than <this>.
2725 return CodeOffsets::Verified_Inline_Entry;
2726 } else {
2727 assert(num_inline_args() > 1, "must be");
2728 // No sharing:
2729 // VIEP(RO) -- <this> is passed as object
2730 // VEP -- <this> is passed as fields
2731 return CodeOffsets::Verified_Inline_Entry_RO;
2732 }
2733 }
2734
2735 // Either a static method, or <this> is not an inline type
2736 if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2737 // No sharing:
2738 // Some arguments are passed on the stack, and we have inserted reserved entries
2739 // into the VEP, but we never insert reserved entries into the VIEP(RO).
2740 return CodeOffsets::Verified_Inline_Entry_RO;
2741 } else {
2742 // Share same entry for VEP and VIEP(RO).
2743 return CodeOffsets::Verified_Entry;
2744 }
2745 }
2746
2747 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
2748 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2749 if (_supers != nullptr) {
2750 return _supers;
2751 }
2752 _supers = new GrowableArray<Method*>();
2753 // Skip private, static, and <init> methods
2754 if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2755 return _supers;
2756 }
2757 Symbol* name = _method->name();
2758 Symbol* signature = _method->signature();
2759 const Klass* holder = _method->method_holder()->super();
2760 Symbol* holder_name = holder->name();
2761 ThreadInVMfromUnknown tiv;
2762 JavaThread* current = JavaThread::current();
2763 HandleMark hm(current);
2764 Handle loader(current, _method->method_holder()->class_loader());
2765
2766 // Walk up the class hierarchy and search for super methods
2767 while (holder != nullptr) {
2768 Method* super_method = holder->lookup_method(name, signature);
2769 if (super_method == nullptr) {
2770 break;
2771 }
2772 if (!super_method->is_static() && !super_method->is_private() &&
2773 (!super_method->is_package_private() ||
2774 super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
2775 _supers->push(super_method);
2776 }
2777 holder = super_method->method_holder()->super();
2778 }
2779 // Search interfaces for super methods
2780 Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
2781 for (int i = 0; i < interfaces->length(); ++i) {
2782 Method* m = interfaces->at(i)->lookup_method(name, signature);
2783 if (m != nullptr && !m->is_static() && m->is_public()) {
2784 _supers->push(m);
2785 }
2786 }
2787 return _supers;
2788 }
2789
2790 // Iterate over arguments and compute scalarized and non-scalarized signatures
2791 void CompiledEntrySignature::compute_calling_conventions(bool init) {
2792 bool has_scalarized = false;
2793 if (_method != nullptr) {
2794 InstanceKlass* holder = _method->method_holder();
2795 int arg_num = 0;
2796 if (!_method->is_static()) {
2797 // We shouldn't scalarize 'this' in a value class constructor
2798 if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() && !_method->is_object_constructor() &&
2799 (init || _method->is_scalarized_arg(arg_num))) {
2800 _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
2801 has_scalarized = true;
2802 _has_inline_recv = true;
2803 _num_inline_args++;
2804 } else {
2805 SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
2806 }
2807 SigEntry::add_entry(_sig, T_OBJECT, holder->name());
2808 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
2809 arg_num++;
2810 }
2811 for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
2812 BasicType bt = ss.type();
2813 if (bt == T_OBJECT) {
2814 InlineKlass* vk = ss.as_inline_klass(holder);
2815 if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
2816 // Check for a calling convention mismatch with super method(s)
2817 bool scalar_super = false;
2818 bool non_scalar_super = false;
2819 GrowableArray<Method*>* supers = get_supers();
2820 for (int i = 0; i < supers->length(); ++i) {
2821 Method* super_method = supers->at(i);
2822 if (super_method->is_scalarized_arg(arg_num)) {
2823 scalar_super = true;
2824 } else {
2825 non_scalar_super = true;
2826 }
2827 }
2828 #ifdef ASSERT
2829 // Randomly enable the code paths below for stress testing
2830 bool stress = init && StressCallingConvention;
2831 if (stress && (os::random() & 1) == 1) {
2832 non_scalar_super = true;
2833 if ((os::random() & 1) == 1) {
2834 scalar_super = true;
2835 }
2836 }
2837 #endif
2838 if (non_scalar_super) {
2839 // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
2840 if (scalar_super) {
2841 // Found non-scalar *and* scalar super methods. We can't handle both.
2842 // Mark the scalar method as mismatch and re-compile call sites to use non-scalarized calling convention.
2843 for (int i = 0; i < supers->length(); ++i) {
2844 Method* super_method = supers->at(i);
2845 if (super_method->is_scalarized_arg(arg_num) debug_only(|| (stress && (os::random() & 1) == 1))) {
2846 super_method->set_mismatch();
2847 MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
2848 JavaThread* thread = JavaThread::current();
2849 HandleMark hm(thread);
2850 methodHandle mh(thread, super_method);
2851 DeoptimizationScope deopt_scope;
2852 CodeCache::mark_for_deoptimization(&deopt_scope, mh());
2853 deopt_scope.deoptimize_marked();
2854 }
2855 }
2856 }
2857 // Fall back to non-scalarized calling convention
2858 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2859 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2860 } else {
2861 _num_inline_args++;
2862 has_scalarized = true;
2863 int last = _sig_cc->length();
2864 int last_ro = _sig_cc_ro->length();
2865 _sig_cc->appendAll(vk->extended_sig());
2866 _sig_cc_ro->appendAll(vk->extended_sig());
2867 if (bt == T_OBJECT) {
2868 // Nullable inline type argument, insert InlineTypeNode::IsInit field right after T_METADATA delimiter
2869 // Set the sort_offset so that the field is detected as null marker by nmethod::print_nmethod_labels.
2870 _sig_cc->insert_before(last+1, SigEntry(T_BOOLEAN, -1, 0));
2871 _sig_cc_ro->insert_before(last_ro+1, SigEntry(T_BOOLEAN, -1, 0));
2872 }
2873 }
2874 } else {
2875 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2876 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2877 }
2878 bt = T_OBJECT;
2879 } else {
2880 SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
2881 SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
2882 }
2883 SigEntry::add_entry(_sig, bt, ss.as_symbol());
2884 if (bt != T_VOID) {
2885 arg_num++;
2886 }
2887 }
2888 }
2889
2890 // Compute the non-scalarized calling convention
2891 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
2892 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
2893
2894 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
2895 if (has_scalarized && !_method->is_native()) {
2896 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
2897 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
2898
2899 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
2900 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
2901
2902 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
2903 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
2904
2905 // Upper bound on stack arguments to avoid hitting the argument limit and
2906 // bailing out of compilation ("unsupported incoming calling sequence").
2907 // TODO we need a reasonable limit (flag?) here
2908 if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 60) {
2909 return; // Success
2910 }
2911 }
2912
2913 // No scalarized args
2914 _sig_cc = _sig;
2915 _regs_cc = _regs;
2916 _args_on_stack_cc = _args_on_stack;
2917
2918 _sig_cc_ro = _sig;
2919 _regs_cc_ro = _regs;
2920 _args_on_stack_cc_ro = _args_on_stack;
2921 }
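// Worked example (hedged sketch; SigEntry contents simplified, 'Point' is a
// hypothetical value class with two int fields):
//
//   static int test(Point p, long l)   // p scalarized and nullable
//
// produces roughly
//
//   _sig       : [ T_OBJECT(Point), T_LONG, T_VOID ]
//   _sig_cc    : [ T_METADATA(Point), T_BOOLEAN /* IsInit null marker */,
//                  T_INT /* p.x */, T_INT /* p.y */, T_VOID /* end of Point */,
//                  T_LONG, T_VOID ]
//   _sig_cc_ro : identical to _sig_cc here, since only a receiver is treated
//                differently in the receiver-as-oop (RO) signature.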
2922
2923 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2924 // Use customized signature handler. Need to lock around updates to
2925 // the _adapter_handler_table (it is not safe for concurrent readers
2926 // and a single writer: this could be fixed if it becomes a
2927 // problem).
2928
2929 // Fast-path for trivial adapters
2930 AdapterHandlerEntry* entry = get_simple_adapter(method);
2931 if (entry != nullptr) {
2932 return entry;
2933 }
2934
2935 ResourceMark rm;
2936 AdapterBlob* new_adapter = nullptr;
2937
2938 CompiledEntrySignature ces(method());
2939 ces.compute_calling_conventions();
2940 if (ces.has_scalarized_args()) {
2941 if (!method->has_scalarized_args()) {
2942 method->set_has_scalarized_args();
2943 }
2944 if (ces.c1_needs_stack_repair()) {
2945 method->set_c1_needs_stack_repair();
2946 }
2947 if (ces.c2_needs_stack_repair() && !method->c2_needs_stack_repair()) {
2948 method->set_c2_needs_stack_repair();
2949 }
2950 } else if (method->is_abstract()) {
2951 return _abstract_method_handler;
2952 }
2953
2954 {
2955 MutexLocker mu(AdapterHandlerLibrary_lock);
2956
2957 if (ces.has_scalarized_args() && method->is_abstract()) {
2958 // Save a C heap allocated version of the signature for abstract methods with scalarized inline type arguments
2959 address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2960 entry = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(nullptr),
2961 SharedRuntime::throw_AbstractMethodError_entry(),
2962 wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
2963 wrong_method_abstract, wrong_method_abstract);
2964 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro()->length(), mtInternal);
2965 heap_sig->appendAll(ces.sig_cc_ro());
2966 entry->set_sig_cc(heap_sig);
2967 return entry;
2968 }
2969
2970 // Lookup method signature's fingerprint
2971 entry = lookup(ces.sig_cc(), ces.has_inline_recv());
2972
2973 if (entry != nullptr) {
2974 #ifdef ASSERT
2975 if (VerifyAdapterSharing) {
2976 AdapterBlob* comparison_blob = nullptr;
2977 AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, ces, false);
2978 assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison");
2979 assert(comparison_entry->compare_code(entry), "code must match");
2980 // Release the one just created and return the original
2981 delete comparison_entry;
2982 }
2983 #endif
2984 return entry;
2985 }
2986
2987 entry = create_adapter(new_adapter, ces, /* allocate_code_blob */ true);
2988 }
2989
2990 // Outside of the lock
2991 if (new_adapter != nullptr) {
2992 post_adapter_creation(new_adapter, entry);
2993 }
2994 return entry;
2995 }
2996
2997 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
2998 CompiledEntrySignature& ces,
2999 bool allocate_code_blob) {
3000 if (log_is_enabled(Info, perf, class, link)) {
3001 ClassLoader::perf_method_adapters_count()->inc();
3002 }
3003
3004 // StubRoutines::_final_stubs_code is initialized after this function can be called. As a result,
3005 // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated prior
3006 // to all StubRoutines::_final_stubs_code being set. Checks refer to runtime range checks generated
3007 // in an I2C stub that ensure that an I2C stub is called from an interpreter frame or stubs.
3008 bool contains_all_checks = StubRoutines::final_stubs_code() != nullptr;
3009
3010 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3011 CodeBuffer buffer(buf);
3012 short buffer_locs[20];
3013 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3014 sizeof(buffer_locs)/sizeof(relocInfo));
3015
3016 // Make a C heap allocated version of the fingerprint to store in the adapter
3017 AdapterFingerPrint* fingerprint = new AdapterFingerPrint(ces.sig_cc(), ces.has_inline_recv());
  MacroAssembler _masm(&buffer);
  AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
                                                                      ces.args_on_stack(),
                                                                      ces.sig(),
                                                                      ces.regs(),
                                                                      ces.sig_cc(),
                                                                      ces.regs_cc(),
                                                                      ces.sig_cc_ro(),
                                                                      ces.regs_cc_ro(),
                                                                      fingerprint,
                                                                      new_adapter,
                                                                      allocate_code_blob);

  if (ces.has_scalarized_args()) {
    // Save a C heap allocated version of the scalarized signature and store it in the adapter
    GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
    heap_sig->appendAll(ces.sig_cc());
    entry->set_sig_cc(heap_sig);
  }

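  // Under VerifyAdapterSharing the generated code is captured (before
  // relocation) so that a later lookup hit can be checked against a freshly
  // generated copy; in the comparison path (allocate_code_blob == false) we
  // return right after saving, since there is no blob to install.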
#ifdef ASSERT
  if (VerifyAdapterSharing) {
    entry->save_code(buf->code_begin(), buffer.insts_size());
    if (!allocate_code_blob) {
      return entry;
    }
  }
#endif

  NOT_PRODUCT(int insts_size = buffer.insts_size());
  if (new_adapter == nullptr) {
    // CodeCache is full, disable compilation
    // Ought to log this but the compile log is only per compile thread
    // and we're some nondescript Java thread.
    return nullptr;
  }
  entry->relocate(new_adapter->content_begin());
#ifndef PRODUCT
  // debugging support
  if (PrintAdapterHandlers || PrintStubCode) {
    ttyLocker ttyl;
    entry->print_adapter_on(tty);
    tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
                  _adapter_handler_table->number_of_entries(), fingerprint->as_basic_args_string(),
                  fingerprint->as_string(), insts_size);
    tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
    if (Verbose || PrintStubCode) {
      address first_pc = entry->base_address();
      if (first_pc != nullptr) {
        Disassembler::decode(first_pc, first_pc + insts_size, tty
                             NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
        tty->cr();
      }
    }
  }
#endif

  // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
  // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
  if (contains_all_checks || !VerifyAdapterCalls) {
    assert_lock_strong(AdapterHandlerLibrary_lock);
    _adapter_handler_table->put(fingerprint, entry);
  }
  return entry;
}

address AdapterHandlerEntry::base_address() {
  address base = _i2c_entry;
  if (base == nullptr) base = _c2i_entry;
  assert(base <= _c2i_entry || _c2i_entry == nullptr, "");
  assert(base <= _c2i_inline_entry || _c2i_inline_entry == nullptr, "");
  assert(base <= _c2i_inline_ro_entry || _c2i_inline_ro_entry == nullptr, "");
  assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, "");
  assert(base <= _c2i_unverified_inline_entry || _c2i_unverified_inline_entry == nullptr, "");
  assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, "");
  return base;
}

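// The adapter was generated into a temporary BufferBlob and then copied into
// its final AdapterBlob, so every recorded entry point must be shifted by the
// same distance. For example, code generated at 0x1000 and installed at
// 0x5000 gives delta = 0x4000, and each non-null entry advances by exactly
// that amount (checked by the final assert).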
void AdapterHandlerEntry::relocate(address new_base) {
  address old_base = base_address();
  assert(old_base != nullptr, "");
  ptrdiff_t delta = new_base - old_base;
  if (_i2c_entry != nullptr)
    _i2c_entry += delta;
  if (_c2i_entry != nullptr)
    _c2i_entry += delta;
  if (_c2i_inline_entry != nullptr)
    _c2i_inline_entry += delta;
  if (_c2i_inline_ro_entry != nullptr)
    _c2i_inline_ro_entry += delta;
  if (_c2i_unverified_entry != nullptr)
    _c2i_unverified_entry += delta;
  if (_c2i_unverified_inline_entry != nullptr)
    _c2i_unverified_inline_entry += delta;
  if (_c2i_no_clinit_check_entry != nullptr)
    _c2i_no_clinit_check_entry += delta;
  assert(base_address() == new_base, "");
}


AdapterHandlerEntry::~AdapterHandlerEntry() {
  delete _fingerprint;
  if (_sig_cc != nullptr) {
    delete _sig_cc;
  }
#ifdef ASSERT
  FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
#endif
}


#ifdef ASSERT
// Capture the code before relocation so that it can be compared
// against other versions. If the code is captured after relocation
// then relative instructions won't be equivalent.
void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
  _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
  _saved_code_length = length;
  memcpy(_saved_code, buffer, length);
}


bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
  assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");

  if (other->_saved_code_length != _saved_code_length) {
    return false;
  }

  return memcmp(other->_saved_code, _saved_code, _saved_code_length) == 0;
}
#endif


/**
 * Create a native wrapper for this native method. The wrapper converts the
 * Java-compiled calling convention to the native convention, handles
 * arguments, and transitions to native. On return from the native we
 * transition back to java blocking if a safepoint is in progress.
 */
void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
  ResourceMark rm;
  nmethod* nm = nullptr;

  // Check if memory should be freed before allocation
  CodeCache::gc_on_allocation();

  assert(method->is_native(), "must be native");
  assert(method->is_special_native_intrinsic() ||
         method->has_native_function(), "must have something valid to call!");

  {
    // Perform the work while holding the lock, but perform any printing outside the lock
    MutexLocker mu(AdapterHandlerLibrary_lock);
    // See if somebody beat us to it
    if (method->code() != nullptr) {
      return;
    }

    const int compile_id = CompileBroker::assign_compile_id(method, CompileBroker::standard_entry_bci);
    assert(compile_id > 0, "Must generate native wrapper");

    ResourceMark rm;
    BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
    if (buf != nullptr) {
      CodeBuffer buffer(buf);
      struct { double data[20]; } locs_buf;
      struct { double data[20]; } stubs_locs_buf;
      buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
#if defined(AARCH64) || defined(PPC64)
      // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
      // in the constant pool to ensure ordering between the barrier and oops
      // accesses. For native_wrappers we need a constant.
      // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
      // static java call that is resolved in the runtime.
      if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
        buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
      }
#endif
      buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
      MacroAssembler _masm(&buffer);

      // Fill in the signature array, for the calling-convention call.
      const int total_args_passed = method->size_of_parameters();

      BasicType stack_sig_bt[16];
      VMRegPair stack_regs[16];
      BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
      VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);

      int i = 0;
      if (!method->is_static()) {  // Pass in receiver first
        sig_bt[i++] = T_OBJECT;
      }
      SignatureStream ss(method->signature());
      for (; !ss.at_return_type(); ss.next()) {
        sig_bt[i++] = ss.type();  // Collect remaining bits of signature
        if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
          sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
        }
      }
      assert(i == total_args_passed, "");
      BasicType ret_type = ss.type();

      // Now get the compiled-Java arguments layout.
      SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);

      // Generate the compiled-to-native wrapper code
      nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);

      if (nm != nullptr) {
        {
          MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
          if (nm->make_in_use()) {
            method->set_code(method, nm);
          }
        }

        DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
        if (directive->PrintAssemblyOption) {
          nm->print_code();
        }
        DirectivesStack::release(directive);
      st->print("Adapter for signature: ");
      a->print_adapter_on(st);
      return true;
    } else {
      return false; // keep looking
    }
  };
  assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
  _adapter_handler_table->iterate(findblob);
  assert(found, "Should have found handler");
}

void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
  st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
  if (get_i2c_entry() != nullptr) {
    st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
  }
  if (get_c2i_entry() != nullptr) {
    st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
  }
  if (get_c2i_inline_entry() != nullptr) {
    st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
  }
  if (get_c2i_inline_ro_entry() != nullptr) {
    st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
  }
  if (get_c2i_unverified_entry() != nullptr) {
    st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
  }
  if (get_c2i_unverified_inline_entry() != nullptr) {
    st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
  }
  if (get_c2i_no_clinit_check_entry() != nullptr) {
    st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
  }
  st->cr();
}

#ifndef PRODUCT

void AdapterHandlerLibrary::print_statistics() {
  print_table_statistics();
}

#endif /* PRODUCT */

JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
  assert(current == JavaThread::current(), "pre-condition");
  StackOverflow* overflow_state = current->stack_overflow_state();
  overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
  overflow_state->set_reserved_stack_activation(current->stack_base());
        event.set_method(method);
        event.commit();
      }
    }
  }
  return activation;
}

void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
  // After any safepoint, just before going back to compiled code,
  // we inform the GC that we will be doing initializing writes to
  // this object in the future without emitting card-marks, so
  // GC may take any compensating steps.

  oop new_obj = current->vm_result();
  if (new_obj == nullptr) return;

  BarrierSet* bs = BarrierSet::barrier_set();
  bs->on_slowpath_allocation_exit(current, new_obj);
}

// We are at a compiled-code-to-interpreter call. We need backing
// buffers for all inline type arguments. Allocate an object array to
// hold them (convenient because once we're done with it we don't have
// to worry about freeing it).
oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS) {
  assert(InlineTypePassFieldsAsArgs, "no reason to call this");
  ResourceMark rm;

  int nb_slots = 0;
  InstanceKlass* holder = callee->method_holder();
  allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
  if (allocate_receiver) {
    nb_slots++;
  }
  int arg_num = callee->is_static() ? 0 : 1;
  for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
    BasicType bt = ss.type();
    if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
      nb_slots++;
    }
    if (bt != T_VOID) {
      arg_num++;
    }
  }
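  // The loop above only counted the scalarized arguments; now allocate one
  // backing instance per counted slot and store it in the array.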
  objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
  objArrayHandle array(THREAD, array_oop);
  arg_num = callee->is_static() ? 0 : 1;
  int i = 0;
  if (allocate_receiver) {
    InlineKlass* vk = InlineKlass::cast(holder);
    oop res = vk->allocate_instance(CHECK_NULL);
    array->obj_at_put(i++, res);
  }
  for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
    BasicType bt = ss.type();
    if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
      InlineKlass* vk = ss.as_inline_klass(holder);
      assert(vk != nullptr, "Unexpected klass");
      oop res = vk->allocate_instance(CHECK_NULL);
      array->obj_at_put(i++, res);
    }
    if (bt != T_VOID) {
      arg_num++;
    }
  }
  return array();
}

JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
  methodHandle callee(current, callee_method);
  oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, CHECK);
  current->set_vm_result(array);
  current->set_vm_result_2(callee()); // TODO: required to keep callee live?
JRT_END

// We're returning from an interpreted method: load each field into a
// register following the calling convention
JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
{
  assert(res->klass()->is_inline_klass(), "only inline types here");
  ResourceMark rm;
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stubFrame = current->last_frame();
  frame callerFrame = stubFrame.sender(&reg_map);
  assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");

  InlineKlass* vk = InlineKlass::cast(res->klass());

  const Array<SigEntry>* sig_vk = vk->extended_sig();
  const Array<VMRegPair>* regs = vk->return_regs();

  if (regs == nullptr) {
    // The fields of the inline klass don't fit in registers, bail out
    return;
  }

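  // regs->at(0) holds the oop of the inline type itself (checked by the
  // assert at the end of this function), so field values start at j == 1.
  // A T_VOID entry marks the second slot of a preceding long/double and only
  // advances j to stay in sync with the register array.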
  int j = 1;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first(), nullptr);
    switch (bt) {
    case T_BOOLEAN:
      *(jboolean*)loc = res->bool_field(off);
      break;
    case T_CHAR:
      *(jchar*)loc = res->char_field(off);
      break;
    case T_BYTE:
      *(jbyte*)loc = res->byte_field(off);
      break;
    case T_SHORT:
      *(jshort*)loc = res->short_field(off);
      break;
    case T_INT: {
      *(jint*)loc = res->int_field(off);
      break;
    }
    case T_LONG:
#ifdef _LP64
      *(intptr_t*)loc = res->long_field(off);
#else
      Unimplemented();
#endif
      break;
    case T_OBJECT:
    case T_ARRAY: {
      *(oop*)loc = res->obj_field(off);
      break;
    }
    case T_FLOAT:
      *(jfloat*)loc = res->float_field(off);
      break;
    case T_DOUBLE:
      *(jdouble*)loc = res->double_field(off);
      break;
    default:
      ShouldNotReachHere();
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");

#ifdef ASSERT
  VMRegPair pair = regs->at(0);
  address loc = reg_map.location(pair.first(), nullptr);
  assert(*(oopDesc**)loc == res, "overwritten object");
#endif

  current->set_vm_result(res);
}
JRT_END

// We've returned to an interpreted method, the interpreter needs a
// reference to an inline type instance. Allocate it and initialize it
// from the field values in registers.
JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
{
  ResourceMark rm;
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stubFrame = current->last_frame();
  frame callerFrame = stubFrame.sender(&reg_map);

#ifdef ASSERT
  InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
#endif

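  // The incoming value is tagged: if bit 0 is set, res is the InlineKlass* of
  // a value whose fields are still live in registers; if it is clear, res is
  // already an oop (or a pointer into the buffer area) and can be published
  // directly as the VM result.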
  if (!is_set_nth_bit(res, 0)) {
    // We're not returning with inline type fields in registers (the
    // calling convention didn't allow it for this inline klass)
    assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
    current->set_vm_result((oopDesc*)res);
    assert(verif_vk == nullptr, "broken calling convention");
    return;
  }

  clear_nth_bit(res, 0);
  InlineKlass* vk = (InlineKlass*)res;
  assert(verif_vk == vk, "broken calling convention");
  assert(Metaspace::contains((void*)res), "should be klass");

  // Allocate handles for every oop field so they are safe in case of
  // a safepoint when allocating
  GrowableArray<Handle> handles;
  vk->save_oop_fields(reg_map, handles);

  // Only now that the oop fields are preserved in handles is it safe to safepoint
  JRT_BLOCK;
  {
    JavaThread* THREAD = current;
    oop vt = vk->realloc_result(reg_map, handles, CHECK);
    current->set_vm_result(vt);
  }
  JRT_BLOCK_END;
}
JRT_END