26 #include "classfile/javaClasses.inline.hpp"
27 #include "classfile/stringTable.hpp"
28 #include "classfile/vmClasses.hpp"
29 #include "classfile/vmSymbols.hpp"
30 #include "code/codeCache.hpp"
31 #include "code/compiledIC.hpp"
32 #include "code/compiledMethod.inline.hpp"
33 #include "code/scopeDesc.hpp"
34 #include "code/vtableStubs.hpp"
35 #include "compiler/abstractCompiler.hpp"
36 #include "compiler/compileBroker.hpp"
37 #include "compiler/disassembler.hpp"
38 #include "gc/shared/barrierSet.hpp"
39 #include "gc/shared/collectedHeap.hpp"
40 #include "gc/shared/gcLocker.inline.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/interpreterRuntime.hpp"
43 #include "jvm.h"
44 #include "jfr/jfrEvents.hpp"
45 #include "logging/log.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "memory/universe.hpp"
48 #include "metaprogramming/primitiveConversions.hpp"
49 #include "oops/klass.hpp"
50 #include "oops/method.inline.hpp"
51 #include "oops/objArrayKlass.hpp"
52 #include "oops/oop.inline.hpp"
53 #include "prims/forte.hpp"
54 #include "prims/jvmtiExport.hpp"
55 #include "prims/jvmtiThreadState.hpp"
56 #include "prims/methodHandles.hpp"
57 #include "prims/nativeLookup.hpp"
58 #include "runtime/atomic.hpp"
59 #include "runtime/frame.inline.hpp"
60 #include "runtime/handles.inline.hpp"
61 #include "runtime/init.hpp"
62 #include "runtime/interfaceSupport.inline.hpp"
63 #include "runtime/java.hpp"
64 #include "runtime/javaCalls.hpp"
65 #include "runtime/jniHandles.inline.hpp"
66 #include "runtime/sharedRuntime.hpp"
67 #include "runtime/stackWatermarkSet.hpp"
68 #include "runtime/stubRoutines.hpp"
69 #include "runtime/synchronizer.hpp"
70 #include "runtime/vframe.inline.hpp"
71 #include "runtime/vframeArray.hpp"
72 #include "runtime/vm_version.hpp"
73 #include "utilities/copy.hpp"
74 #include "utilities/dtrace.hpp"
75 #include "utilities/events.hpp"
76 #include "utilities/resourceHash.hpp"
77 #include "utilities/macros.hpp"
78 #include "utilities/xmlstream.hpp"
79 #ifdef COMPILER1
80 #include "c1/c1_Runtime1.hpp"
81 #endif
82 #if INCLUDE_JFR
83 #include "jfr/jfr.hpp"
84 #endif
85
86 // Shared stub locations
87 RuntimeStub* SharedRuntime::_wrong_method_blob;
88 RuntimeStub* SharedRuntime::_wrong_method_abstract_blob;
89 RuntimeStub* SharedRuntime::_ic_miss_blob;
90 RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
91 RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
92 RuntimeStub* SharedRuntime::_resolve_static_call_blob;
93 address SharedRuntime::_resolve_static_call_entry;
94
95 DeoptimizationBlob* SharedRuntime::_deopt_blob;
96 SafepointBlob* SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
97 SafepointBlob* SharedRuntime::_polling_page_safepoint_handler_blob;
98 SafepointBlob* SharedRuntime::_polling_page_return_handler_blob;
99
100 #ifdef COMPILER2
101 UncommonTrapBlob* SharedRuntime::_uncommon_trap_blob;
102 #endif // COMPILER2
103
104 nmethod* SharedRuntime::_cont_doYield_stub;
105
106 //----------------------------generate_stubs-----------------------------------
107 void SharedRuntime::generate_stubs() {
108 _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method), "wrong_method_stub");
109 _wrong_method_abstract_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
110 _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss), "ic_miss_stub");
111 _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call");
112 _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call");
113 _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call");
114 _resolve_static_call_entry = _resolve_static_call_blob->entry_point();
115
116 AdapterHandlerLibrary::initialize();
117
118 #if COMPILER2_OR_JVMCI
119 // Vectors are generated only by C2 and JVMCI.
120 bool support_wide = is_wide_vector(MaxVectorSize);
121 if (support_wide) {
122 _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
123 }
124 #endif // COMPILER2_OR_JVMCI
125 _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
126 _polling_page_return_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);
127
128 generate_deopt_blob();
129
130 #ifdef COMPILER2
131 generate_uncommon_trap_blob();
132 #endif // COMPILER2
133 }
134
1123 // for a call currently in progress, i.e., arguments have been pushed on the stack
1124 // but callee has not been invoked yet. Caller frame must be compiled.
1125 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1126 CallInfo& callinfo, TRAPS) {
1127 Handle receiver;
1128 Handle nullHandle; // create a handy null handle for exception returns
1129 JavaThread* current = THREAD;
1130
1131 assert(!vfst.at_end(), "Java frame must exist");
1132
1133 // Find caller and bci from vframe
1134 methodHandle caller(current, vfst.method());
1135 int bci = vfst.bci();
1136
1137 if (caller->is_continuation_enter_intrinsic()) {
1138 bc = Bytecodes::_invokestatic;
1139 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1140 return receiver;
1141 }
1142
1143 Bytecode_invoke bytecode(caller, bci);
1144 int bytecode_index = bytecode.index();
1145 bc = bytecode.invoke_code();
1146
1147 methodHandle attached_method(current, extract_attached_method(vfst));
1148 if (attached_method.not_null()) {
1149 Method* callee = bytecode.static_target(CHECK_NH);
1150 vmIntrinsics::ID id = callee->intrinsic_id();
1151 // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual call,
1152 // it attaches the statically resolved method to the call site.
1153 if (MethodHandles::is_signature_polymorphic(id) &&
1154 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1155 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1156
1157 // Adjust invocation mode according to the attached method.
1158 switch (bc) {
1159 case Bytecodes::_invokevirtual:
1160 if (attached_method->method_holder()->is_interface()) {
1161 bc = Bytecodes::_invokeinterface;
1162 }
1163 break;
1164 case Bytecodes::_invokeinterface:
1165 if (!attached_method->method_holder()->is_interface()) {
1166 bc = Bytecodes::_invokevirtual;
1167 }
1168 break;
1169 case Bytecodes::_invokehandle:
1170 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1171 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1172 : Bytecodes::_invokevirtual;
1173 }
1174 break;
1175 default:
1176 break;
1177 }
1178 }
1179 }
1180
1181 assert(bc != Bytecodes::_illegal, "not initialized");
1182
1183 bool has_receiver = bc != Bytecodes::_invokestatic &&
1184 bc != Bytecodes::_invokedynamic &&
1185 bc != Bytecodes::_invokehandle;
1186
1187 // Find receiver for non-static call
1188 if (has_receiver) {
1189 // This register map must be updated since we need to find the receiver for
1190 // compiled frames. The receiver might be in a register.
1191 RegisterMap reg_map2(current,
1192 RegisterMap::UpdateMap::include,
1193 RegisterMap::ProcessFrames::include,
1194 RegisterMap::WalkContinuation::skip);
1195 frame stubFrame = current->last_frame();
1196 // Caller-frame is a compiled frame
1197 frame callerFrame = stubFrame.sender(&reg_map2);
1198
1199 if (attached_method.is_null()) {
1200 Method* callee = bytecode.static_target(CHECK_NH);
1201 if (callee == nullptr) {
1202 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1203 }
1204 }
1205
1206 // Retrieve from a compiled argument list
1207 receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1208 assert(oopDesc::is_oop_or_null(receiver()), "");
1209
1210 if (receiver.is_null()) {
1211 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1212 }
1213 }
1214
1215 // Resolve method
1216 if (attached_method.not_null()) {
1217 // Parameterized by attached method.
1218 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1219 } else {
1220 // Parameterized by bytecode.
1221 constantPoolHandle constants(current, caller->constants());
1222 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1223 }
1224
1225 #ifdef ASSERT
1226 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1227 if (has_receiver) {
1228 assert(receiver.not_null(), "should have thrown exception");
1229 Klass* receiver_klass = receiver->klass();
1230 Klass* rk = nullptr;
1231 if (attached_method.not_null()) {
1232 // In case there's resolved method attached, use its holder during the check.
1233 rk = attached_method->method_holder();
1234 } else {
1235 // Klass is already loaded.
1236 constantPoolHandle constants(current, caller->constants());
1237 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1238 }
1239 Klass* static_receiver_klass = rk;
1240 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1241 "actual receiver must be subclass of static receiver klass");
1242 if (receiver_klass->is_instance_klass()) {
1243 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1244 tty->print_cr("ERROR: Klass not yet initialized!!");
1245 receiver_klass->print();
1246 }
1247 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1248 }
1249 }
1250 #endif
1251
1252 return receiver;
1253 }
1254
1255 methodHandle SharedRuntime::find_callee_method(TRAPS) {
1256 JavaThread* current = THREAD;
1257 ResourceMark rm(current);
1258 // We first need to check whether any Java activations (compiled, interpreted)
1259 // exist on the stack since last JavaCall. If not, we need
1260 // to get the target method from the JavaCall wrapper.
1261 vframeStream vfst(current, true); // Do not skip any javaCalls
1262 methodHandle callee_method;
1263 if (vfst.at_end()) {
1264 // No Java frames were found on stack since we did the JavaCall.
1265 // Hence the stack can only contain an entry_frame. We need to
1266 // find the target method from the stub frame.
1267 RegisterMap reg_map(current,
1268 RegisterMap::UpdateMap::skip,
1269 RegisterMap::ProcessFrames::include,
1270 RegisterMap::WalkContinuation::skip);
1271 frame fr = current->last_frame();
1272 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1273 fr = fr.sender(&reg_map);
1274 assert(fr.is_entry_frame(), "must be");
1275 // fr is now pointing to the entry frame.
1276 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1277 } else {
1278 Bytecodes::Code bc;
1279 CallInfo callinfo;
1280 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1281 callee_method = methodHandle(current, callinfo.selected_method());
1282 }
1283 assert(callee_method()->is_method(), "must be");
1284 return callee_method;
1285 }
1286
1287 // Resolves a call.
1288 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
1289 JavaThread* current = THREAD;
1290 ResourceMark rm(current);
1291 RegisterMap cbl_map(current,
1292 RegisterMap::UpdateMap::skip,
1293 RegisterMap::ProcessFrames::include,
1294 RegisterMap::WalkContinuation::skip);
1295 frame caller_frame = current->last_frame().sender(&cbl_map);
1296
1297 CodeBlob* caller_cb = caller_frame.cb();
1298 guarantee(caller_cb != nullptr && caller_cb->is_compiled(), "must be called from compiled method");
1299 CompiledMethod* caller_nm = caller_cb->as_compiled_method();
1300
1301 // determine call info & receiver
1302 // note: a) receiver is null for static calls
1303 // b) an exception is thrown if receiver is null for non-static calls
1304 CallInfo call_info;
1305 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1306 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1307
1308 NoSafepointVerifier nsv;
1309
1310 methodHandle callee_method(current, call_info.selected_method());
1311
1312 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1313 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1314 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1315 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1316 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1317
1318 assert(!caller_nm->is_unloading(), "It should not be unloading");
1319
1320 #ifndef PRODUCT
1321 // tracing/debugging/statistics
1322 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1323 (is_virtual) ? (&_resolve_virtual_ctr) :
1324 (&_resolve_static_ctr);
1325 Atomic::inc(addr);
1326
1327 if (TraceCallFixup) {
1328 ResourceMark rm(current);
1329 tty->print("resolving %s%s (%s) call to",
1330 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1331 Bytecodes::name(invoke_code));
1332 callee_method->print_short_name(tty);
1333 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1334 p2i(caller_frame.pc()), p2i(callee_method->code()));
1335 }
1336 #endif
1337
1338 if (invoke_code == Bytecodes::_invokestatic) {
1339 assert(callee_method->method_holder()->is_initialized() ||
1340 callee_method->method_holder()->is_init_thread(current),
1341 "invalid class initialization state for invoke_static");
1342 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1343 // In order to keep the class initialization check, do not patch the call
1344 // site for a static call when the class is not fully initialized.
1345 // The proper check is enforced by call-site re-resolution on every invocation.
1346 //
1347 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1348 // an explicit class initialization check is put in the nmethod entry (VEP).
1349 assert(callee_method->method_holder()->is_linked(), "must be");
1350 return callee_method;
1351 }
1352 }
1353
1354
1355 // JSR 292 key invariant:
1356 // If the resolved method is a MethodHandle invoke target, the call
1357 // site must be a MethodHandle call site, because the lambda form might tail-call
1358 // leaving the stack in a state unknown to either caller or callee
1359
1360 // Compute entry points. The computation of the entry points is independent of
1361 // patching the call.
1362
1363 // Make sure the callee nmethod does not get deoptimized and removed before
1364 // we are done patching the code.
1365
1366
1367 CompiledICLocker ml(caller_nm);
1368 if (is_virtual && !is_optimized) {
1369 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1370 inline_cache->update(&call_info, receiver->klass());
1371 } else {
1372 // Callsite is a direct call - set it to the destination method
1373 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1374 callsite->set(callee_method);
1375 }
1376
1377 return callee_method;
1378 }
1379
1380 // Inline caches exist only in compiled code
1381 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1382 #ifdef ASSERT
1383 RegisterMap reg_map(current,
1384 RegisterMap::UpdateMap::skip,
1385 RegisterMap::ProcessFrames::include,
1386 RegisterMap::WalkContinuation::skip);
1387 frame stub_frame = current->last_frame();
1388 assert(stub_frame.is_runtime_frame(), "sanity check");
1389 frame caller_frame = stub_frame.sender(&reg_map);
1390 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1391 #endif /* ASSERT */
1392
1393 methodHandle callee_method;
1394 JRT_BLOCK
1395 callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1396 // Return Method* through TLS
1397 current->set_vm_result_2(callee_method());
1398 JRT_BLOCK_END
1399 // return compiled code entry point after potential safepoints
1400 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1401 return callee_method->verified_code_entry();
1402 JRT_END
1403
1404
1405 // Handle call site that has been made non-entrant
1406 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1407 // 6243940 We might end up in here if the callee is deoptimized
1408 // as we race to call it. We don't want to take a safepoint if
1409 // the caller was interpreted because the caller frame will look
1410 // interpreted to the stack walkers and arguments are now
1411 // "compiled" so it is much better to make this transition
1412 // invisible to the stack walking code. The i2c path will
1413 // place the callee method in the callee_target. It is stashed
1414 // there because if we try to find the callee by normal means a
1415 // safepoint is possible and we could have trouble GC'ing the compiled args.
1416 RegisterMap reg_map(current,
1417 RegisterMap::UpdateMap::skip,
1418 RegisterMap::ProcessFrames::include,
1419 RegisterMap::WalkContinuation::skip);
1420 frame stub_frame = current->last_frame();
1421 assert(stub_frame.is_runtime_frame(), "sanity check");
1422 frame caller_frame = stub_frame.sender(&reg_map);
1423
1424 if (caller_frame.is_interpreted_frame() ||
1425 caller_frame.is_entry_frame() ||
1426 caller_frame.is_upcall_stub_frame()) {
1427 Method* callee = current->callee_target();
1428 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1429 current->set_vm_result_2(callee);
1430 current->set_callee_target(nullptr);
1431 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1432 // Bypass class initialization checks in c2i when caller is in native.
1433 // JNI calls to static methods don't have class initialization checks.
1434 // Fast class initialization checks are present in c2i adapters and call into
1435 // SharedRuntime::handle_wrong_method() on the slow path.
1436 //
1437 // JVM upcalls may land here as well, but there's a proper check present in
1438 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1439 // so bypassing it in c2i adapter is benign.
1440 return callee->get_c2i_no_clinit_check_entry();
1441 } else {
1442 return callee->get_c2i_entry();
1443 }
1444 }
1445
1446 // Must be a compiled-to-compiled path, which is safe to stack walk
1447 methodHandle callee_method;
1448 JRT_BLOCK
1449 // Force resolving of caller (if we called from compiled frame)
1450 callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1451 current->set_vm_result_2(callee_method());
1452 JRT_BLOCK_END
1453 // return compiled code entry point after potential safepoints
1454 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1455 return callee_method->verified_code_entry();
1456 JRT_END
1457
1458 // Handle abstract method call
1459 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1460 // Verbose error message for AbstractMethodError.
1461 // Get the called method from the invoke bytecode.
1462 vframeStream vfst(current, true);
1463 assert(!vfst.at_end(), "Java frame must exist");
1464 methodHandle caller(current, vfst.method());
1465 Bytecode_invoke invoke(caller, vfst.bci());
1466 DEBUG_ONLY( invoke.verify(); )
1467
1468 // Find the compiled caller frame.
1469 RegisterMap reg_map(current,
1470 RegisterMap::UpdateMap::include,
1471 RegisterMap::ProcessFrames::include,
1472 RegisterMap::WalkContinuation::skip);
1473 frame stubFrame = current->last_frame();
1474 assert(stubFrame.is_runtime_frame(), "must be");
1475 frame callerFrame = stubFrame.sender(&reg_map);
1476 assert(callerFrame.is_compiled_frame(), "must be");
1477
1478 // Install exception and return forward entry.
1479 address res = StubRoutines::throw_AbstractMethodError_entry();
1480 JRT_BLOCK
1481 methodHandle callee(current, invoke.static_target(current));
1482 if (!callee.is_null()) {
1483 oop recv = callerFrame.retrieve_receiver(&reg_map);
1484 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1485 res = StubRoutines::forward_exception_entry();
1486 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1487 }
1488 JRT_BLOCK_END
1489 return res;
1490 JRT_END
1491
1492
1493 // resolve a static call and patch code
1494 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1495 methodHandle callee_method;
1496 bool enter_special = false;
1497 JRT_BLOCK
1498 callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1499 current->set_vm_result_2(callee_method());
1500
1501 if (current->is_interp_only_mode()) {
1502 RegisterMap reg_map(current,
1503 RegisterMap::UpdateMap::skip,
1504 RegisterMap::ProcessFrames::include,
1505 RegisterMap::WalkContinuation::skip);
1506 frame stub_frame = current->last_frame();
1507 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1508 frame caller = stub_frame.sender(&reg_map);
1509 enter_special = caller.cb() != nullptr && caller.cb()->is_compiled()
1510 && caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic();
1511 }
1512 JRT_BLOCK_END
1513
1514 if (current->is_interp_only_mode() && enter_special) {
1515 // enterSpecial is compiled and calls this method to resolve the call to Continuation::enter
1516 // but in interp_only_mode we need to go to the interpreted entry
1517 // The c2i won't patch in this mode -- see fixup_callers_callsite
1518 //
1519 // This should probably be done in all cases, not just enterSpecial (see JDK-8218403),
1520 // but that's part of a larger fix, and the situation is worse for enterSpecial, as it has no
1521 // interpreted version.
1522 return callee_method->get_c2i_entry();
1523 }
1524
1525 // return compiled code entry point after potential safepoints
1526 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1527 return callee_method->verified_code_entry();
1528 JRT_END
1529
1530
1531 // resolve virtual call and update inline cache to monomorphic
1532 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1533 methodHandle callee_method;
1534 JRT_BLOCK
1535 callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
1536 current->set_vm_result_2(callee_method());
1537 JRT_BLOCK_END
1538 // return compiled code entry point after potential safepoints
1539 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1540 return callee_method->verified_code_entry();
1541 JRT_END
1542
1543
1544 // Resolve a virtual call that can be statically bound (e.g., always
1545 // monomorphic, so it has no inline cache). Patch code to resolved target.
1546 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1547 methodHandle callee_method;
1548 JRT_BLOCK
1549 callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
1550 current->set_vm_result_2(callee_method());
1551 JRT_BLOCK_END
1552 // return compiled code entry point after potential safepoints
1553 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1554 return callee_method->verified_code_entry();
1555 JRT_END
1556
1557 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1558 JavaThread* current = THREAD;
1559 ResourceMark rm(current);
1560 CallInfo call_info;
1561 Bytecodes::Code bc;
1562
1563 // receiver is null for static calls. An exception is thrown for null
1564 // receivers for non-static calls
1565 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1566
1567 methodHandle callee_method(current, call_info.selected_method());
1568
1569 #ifndef PRODUCT
1570 Atomic::inc(&_ic_miss_ctr);
1571
1572 // Statistics & Tracing
1573 if (TraceCallFixup) {
1574 ResourceMark rm(current);
1575 tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1576 callee_method->print_short_name(tty);
1577 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1578 }
1579
1580 if (ICMissHistogram) {
1581 MutexLocker m(VMStatistic_lock);
1582 RegisterMap reg_map(current,
1583 RegisterMap::UpdateMap::skip,
1584 RegisterMap::ProcessFrames::include,
1585 RegisterMap::WalkContinuation::skip);
1586 frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1587 // produce statistics under the lock
1588 trace_ic_miss(f.pc());
1589 }
1590 #endif
1591
1592 // install an event collector so that when a vtable stub is created the
1593 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1594 // event can't be posted when the stub is created as locks are held
1595 // - instead the event will be deferred until the event collector goes
1596 // out of scope.
1597 JvmtiDynamicCodeEventCollector event_collector;
1598
1599 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1600 RegisterMap reg_map(current,
1601 RegisterMap::UpdateMap::skip,
1602 RegisterMap::ProcessFrames::include,
1603 RegisterMap::WalkContinuation::skip);
1604 frame caller_frame = current->last_frame().sender(&reg_map);
1605 CodeBlob* cb = caller_frame.cb();
1606 CompiledMethod* caller_nm = cb->as_compiled_method();
1607
1608 CompiledICLocker ml(caller_nm);
1609 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1610 inline_cache->update(&call_info, receiver()->klass());
1611
1612 return callee_method;
1613 }
1614
1615 //
1616 // Resets a call-site in compiled code so it will get resolved again.
1617 // This routine handles virtual call sites, optimized virtual call
1618 // sites, and static call sites. Typically used to change a call site's
1619 // destination from compiled to interpreted.
1620 //
1621 methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
1622 JavaThread* current = THREAD;
1623 ResourceMark rm(current);
1624 RegisterMap reg_map(current,
1625 RegisterMap::UpdateMap::skip,
1626 RegisterMap::ProcessFrames::include,
1627 RegisterMap::WalkContinuation::skip);
1628 frame stub_frame = current->last_frame();
1629 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1630 frame caller = stub_frame.sender(&reg_map);
1631
1632 // Do nothing if the frame isn't a live compiled frame.
1633 // The nmethod could be deoptimized by the time we get here,
1634 // so no update to the caller is needed.
1635
1636 if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1637 (caller.is_native_frame() && ((CompiledMethod*)caller.cb())->method()->is_continuation_enter_intrinsic())) {
1638
1639 address pc = caller.pc();
1640
1641 CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
1642
1643 // Default call_addr is the location of the "basic" call.
1644 // Determine the address of the call we are re-resolving. With
1645 // Inline Caches we will always find a recognizable call.
1646 // With Inline Caches disabled we may or may not find a
1647 // recognizable call. We will always find a call for static
1648 // calls and for optimized virtual calls. For vanilla virtual
1649 // calls it depends on the state of the UseInlineCaches switch.
1650 //
1651 // With Inline Caches disabled we can get here for a virtual call
1652 // for two reasons:
1653 // 1 - calling an abstract method. The vtable for abstract methods
1654 // will run us through handle_wrong_method and we will eventually
1655 // end up in the interpreter to throw the AbstractMethodError.
1656 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1657 // call and between the time we fetch the entry address and
1658 // we jump to it the target gets deoptimized. Similar to 1
1659 // we will wind up in the interpreter (through a c2i with C2).
1660 //
1661 CompiledICLocker ml(caller_nm);
1662 address call_addr = caller_nm->call_instruction_address(pc);
1663
1664 if (call_addr != nullptr) {
1665 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1666 // bytes back in the instruction stream so we must also check for reloc info.
1667 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1668 bool ret = iter.next(); // Get item
1669 if (ret) {
1670 switch (iter.type()) {
1671 case relocInfo::static_call_type:
1672 case relocInfo::opt_virtual_call_type: {
1673 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1674 cdc->set_to_clean();
1675 break;
1676 }
1677
1678 case relocInfo::virtual_call_type: {
1679 // compiled, dispatched call (which used to call an interpreted method)
1680 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1681 inline_cache->set_to_clean();
1682 break;
1683 }
1684 default:
1685 break;
1686 }
1687 }
1688 }
1689 }
1690
1691 methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1692
1693
1694 #ifndef PRODUCT
1695 Atomic::inc(&_wrong_method_ctr);
1696
1697 if (TraceCallFixup) {
1698 ResourceMark rm(current);
1699 tty->print("handle_wrong_method reresolving call to");
1700 callee_method->print_short_name(tty);
1701 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1702 }
1703 #endif
1704
1705 return callee_method;
1706 }
1707
1708 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1709 // The faulting unsafe accesses should be changed to throw the error
1710 // synchronously instead. Meanwhile the faulting instruction will be
1711 // skipped over (effectively turning it into a no-op) and an
1712 // asynchronous exception will be raised which the thread will
1713 // handle at a later point. If the instruction is a load it will
1714 // return garbage.
1715
1716 // Request an async exception.
1717 thread->set_pending_unsafe_access_error();
1718
1719 // Return address of next instruction to execute.
1884 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
1885
1886 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
1887 if (message == nullptr) {
1888 // Shouldn't happen, but don't cause even more problems if it does
1889 message = const_cast<char*>(caster_klass->external_name());
1890 } else {
1891 jio_snprintf(message,
1892 msglen,
1893 "class %s cannot be cast to class %s (%s%s%s)",
1894 caster_name,
1895 target_name,
1896 caster_klass_description,
1897 klass_separator,
1898 target_klass_description
1899 );
1900 }
1901 return message;
1902 }
1903
1904 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
1905 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
1906 JRT_END
1907
1908 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
1909 if (!SafepointSynchronize::is_synchronizing()) {
1910 // Only try quick_enter() if we're not trying to reach a safepoint
1911 // so that the calling thread reaches the safepoint more quickly.
1912 if (ObjectSynchronizer::quick_enter(obj, current, lock)) {
1913 return;
1914 }
1915 }
1916 // NO_ASYNC required because an async exception on the state transition destructor
1917 // would leave you with the lock held and it would never be released.
1918 // The normal monitorenter NullPointerException is thrown without acquiring a lock
1919 // and the model is that an exception implies the method failed.
1920 JRT_BLOCK_NO_ASYNC
1921 Handle h_obj(THREAD, obj);
1922 ObjectSynchronizer::enter(h_obj, lock, current);
1923 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2099 tty->print_cr(" %% in nested categories are relative to their category");
2100 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2101 tty->cr();
2102
2103 MethodArityHistogram h;
2104 }
2105 #endif
2106
2107 #ifndef PRODUCT
2108 static int _lookups; // number of calls to lookup
2109 static int _equals; // number of buckets checked with matching hash
2110 static int _hits; // number of successful lookups
2111 static int _compact; // number of equals calls with compact signature
2112 #endif
2113
2114 // A simple wrapper class around the calling convention information
2115 // that allows sharing of adapters for the same calling convention.
2116 class AdapterFingerPrint : public CHeapObj<mtCode> {
2117 private:
2118 enum {
2119 _basic_type_bits = 4,
2120 _basic_type_mask = right_n_bits(_basic_type_bits),
2121 _basic_types_per_int = BitsPerInt / _basic_type_bits,
2122 _compact_int_count = 3
2123 };
2124 // TO DO: Consider integrating this with a more global scheme for compressing signatures.
2125 // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2126
2127 union {
2128 int _compact[_compact_int_count];
2129 int* _fingerprint;
2130 } _value;
2131 int _length; // A negative length indicates the fingerprint is in the compact form;
2132 // otherwise _value._fingerprint is the array.
2133
2134 // Remap BasicTypes that are handled equivalently by the adapters.
2135 // These are correct for the current system but someday it might be
2136 // necessary to make this mapping platform dependent.
2137 static int adapter_encoding(BasicType in) {
2138 switch (in) {
2139 case T_BOOLEAN:
2140 case T_BYTE:
2141 case T_SHORT:
2142 case T_CHAR:
2143 // These are all promoted to T_INT in the calling convention
2144 return T_INT;
2145
2146 case T_OBJECT:
2147 case T_ARRAY:
2148 // In other words, we assume that any register good enough for
2149 // an int or long is good enough for a managed pointer.
2150 #ifdef _LP64
2151 return T_LONG;
2152 #else
2153 return T_INT;
2154 #endif
2155
2156 case T_INT:
2157 case T_LONG:
2158 case T_FLOAT:
2159 case T_DOUBLE:
2160 case T_VOID:
2161 return in;
2162
2163 default:
2164 ShouldNotReachHere();
2165 return T_CONFLICT;
2166 }
2167 }
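// For example (illustrative): with this remapping, signatures that differ only
// in sub-int or reference kinds share one adapter. On an LP64 system both
// (boolean, Object) and (char, int[]) encode as {T_INT, T_LONG} and therefore
// yield the same fingerprint.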
2168
2169 public:
2170 AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
2171 // The fingerprint is based on the BasicType signature encoded
2172 // into an array of ints with eight entries per int.
2173 int* ptr;
2174 int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2175 if (len <= _compact_int_count) {
2176 assert(_compact_int_count == 3, "else change next line");
2177 _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2178 // Storing the signature encoded as signed chars hits about 98%
2179 // of the time.
2180 _length = -len;
2181 ptr = _value._compact;
2182 } else {
2183 _length = len;
2184 _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2185 ptr = _value._fingerprint;
2186 }
2187
2188 // Now pack the BasicTypes with 8 per int
2189 int sig_index = 0;
2190 for (int index = 0; index < len; index++) {
2191 int value = 0;
2192 for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
2193 int bt = adapter_encoding(sig_bt[sig_index++]);
2194 assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2195 value = (value << _basic_type_bits) | bt;
2196 }
2197 ptr[index] = value;
2198 }
2199 }
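// Worked example (illustrative, not from the original sources): for a static
// method with signature (IJD)V, total_args_passed is 5 and sig_bt is
// {T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID}. One int suffices (len == 1), so
// the compact form is used (_length == -1) and the packed word is
//   (T_INT << 16) | (T_LONG << 12) | (T_VOID << 8) | (T_DOUBLE << 4) | T_VOID
// i.e. earlier arguments occupy higher bit positions than later ones.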
2200
2201 ~AdapterFingerPrint() {
2202 if (_length > 0) {
2203 FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2204 }
2205 }
2206
2207 int value(int index) {
2208 if (_length < 0) {
2209 return _value._compact[index];
2210 }
2211 return _value._fingerprint[index];
2212 }
2213 int length() {
2214 if (_length < 0) return -_length;
2215 return _length;
2216 }
2217
2218 bool is_compact() {
2243 const char* as_basic_args_string() {
2244 stringStream st;
2245 bool long_prev = false;
2246 for (int i = 0; i < length(); i++) {
2247 unsigned val = (unsigned)value(i);
2248 // args are packed so that first/lower arguments are in the highest
2249 // bits of each int value, so iterate from highest to the lowest
2250 for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2251 unsigned v = (val >> j) & _basic_type_mask;
2252 if (v == 0) {
2253 assert(i == length() - 1, "Only expect zeroes in the last word");
2254 continue;
2255 }
2256 if (long_prev) {
2257 long_prev = false;
2258 if (v == T_VOID) {
2259 st.print("J");
2260 } else {
2261 st.print("L");
2262 }
2263 }
2264 switch (v) {
2265 case T_INT: st.print("I"); break;
2266 case T_LONG: long_prev = true; break;
2267 case T_FLOAT: st.print("F"); break;
2268 case T_DOUBLE: st.print("D"); break;
2269 case T_VOID: break;
2270 default: ShouldNotReachHere();
2271 }
2272 }
2273 }
2274 if (long_prev) {
2275 st.print("L");
2276 }
2277 return st.as_string();
2278 }
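// For example (illustrative): a fingerprint packing {T_INT, T_LONG, T_VOID,
// T_DOUBLE, T_VOID} prints as "IJD": the leading zero nibbles of the single
// word are skipped, the T_LONG/T_VOID pair collapses to "J", and the trailing
// T_VOID after T_DOUBLE prints nothing.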
2279 #endif // !product
2280
2281 bool equals(AdapterFingerPrint* other) {
2282 if (other->_length != _length) {
2283 return false;
2284 }
2285 if (_length < 0) {
2286 assert(_compact_int_count == 3, "else change next line");
2287 return _value._compact[0] == other->_value._compact[0] &&
2288 _value._compact[1] == other->_value._compact[1] &&
2289 _value._compact[2] == other->_value._compact[2];
2290 } else {
2298 }
2299
2300 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2301 NOT_PRODUCT(_equals++);
2302 return fp1->equals(fp2);
2303 }
2304
2305 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2306 return fp->compute_hash();
2307 }
2308 };
2309
2310 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2311 using AdapterHandlerTable = ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2312 AnyObj::C_HEAP, mtCode,
2313 AdapterFingerPrint::compute_hash,
2314 AdapterFingerPrint::equals>;
2315 static AdapterHandlerTable* _adapter_handler_table;
2316
2317 // Find an entry with the same fingerprint, if it exists
2318 static AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
2319 NOT_PRODUCT(_lookups++);
2320 assert_lock_strong(AdapterHandlerLibrary_lock);
2321 AdapterFingerPrint fp(total_args_passed, sig_bt);
2322 AdapterHandlerEntry** entry = _adapter_handler_table->get(&fp);
2323 if (entry != nullptr) {
2324 #ifndef PRODUCT
2325 if (fp.is_compact()) _compact++;
2326 _hits++;
2327 #endif
2328 return *entry;
2329 }
2330 return nullptr;
2331 }
2332
2333 #ifndef PRODUCT
2334 static void print_table_statistics() {
2335 auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2336 return sizeof(*key) + sizeof(*a);
2337 };
2338 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2339 ts.print(tty, "AdapterHandlerTable");
2340 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2341 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2342 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2343 _lookups, _equals, _hits, _compact);
2344 }
2345 #endif
2346
2347 // ---------------------------------------------------------------------------
2348 // Implementation of AdapterHandlerLibrary
2349 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
2350 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2351 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2352 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2353 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2354 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2355 const int AdapterHandlerLibrary_size = 16*K;
2356 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2357
2358 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2359 return _buffer;
2360 }
2361
2362 static void post_adapter_creation(const AdapterBlob* new_adapter,
2363 const AdapterHandlerEntry* entry) {
2364 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2365 char blob_id[256];
2366 jio_snprintf(blob_id,
2367 sizeof(blob_id),
2368 "%s(%s)",
2369 new_adapter->name(),
2370 entry->fingerprint()->as_string());
2371 if (Forte::is_enabled()) {
2372 Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2373 }
2374
2375 if (JvmtiExport::should_post_dynamic_code_generated()) {
2378 }
2379 }
2380
2381 void AdapterHandlerLibrary::initialize() {
2382 ResourceMark rm;
2383 AdapterBlob* no_arg_blob = nullptr;
2384 AdapterBlob* int_arg_blob = nullptr;
2385 AdapterBlob* obj_arg_blob = nullptr;
2386 AdapterBlob* obj_int_arg_blob = nullptr;
2387 AdapterBlob* obj_obj_arg_blob = nullptr;
2388 {
2389 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2390 MutexLocker mu(AdapterHandlerLibrary_lock);
2391
2392 // Create a special handler for abstract methods. Abstract methods
2393 // are never compiled so an i2c entry is somewhat meaningless, but
2394 // throw AbstractMethodError just in case.
2395 // Pass wrong_method_abstract for the c2i transitions to return
2396 // AbstractMethodError for invalid invocations.
2397 address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2398 _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, nullptr),
2399 StubRoutines::throw_AbstractMethodError_entry(),
2400 wrong_method_abstract, wrong_method_abstract);
2401
2402 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2403 _no_arg_handler = create_adapter(no_arg_blob, 0, nullptr, true);
2404
2405 BasicType obj_args[] = { T_OBJECT };
2406 _obj_arg_handler = create_adapter(obj_arg_blob, 1, obj_args, true);
2407
2408 BasicType int_args[] = { T_INT };
2409 _int_arg_handler = create_adapter(int_arg_blob, 1, int_args, true);
2410
2411 BasicType obj_int_args[] = { T_OBJECT, T_INT };
2412 _obj_int_arg_handler = create_adapter(obj_int_arg_blob, 2, obj_int_args, true);
2413
2414 BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
2415 _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, 2, obj_obj_args, true);
2416
2417 assert(no_arg_blob != nullptr &&
2418 obj_arg_blob != nullptr &&
2419 int_arg_blob != nullptr &&
2420 obj_int_arg_blob != nullptr &&
2421 obj_obj_arg_blob != nullptr, "Initial adapters must be properly created");
2422 }
2423
2424 // Outside of the lock
2425 post_adapter_creation(no_arg_blob, _no_arg_handler);
2426 post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2427 post_adapter_creation(int_arg_blob, _int_arg_handler);
2428 post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2429 post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2430 }
2431
2432 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2433 address i2c_entry,
2434 address c2i_entry,
2435 address c2i_unverified_entry,
2436 address c2i_no_clinit_check_entry) {
2437 // Create a new entry; the caller inserts it into the table when appropriate
2438 return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry,
2439 c2i_no_clinit_check_entry);
2440 }
2441
2442 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2443 if (method->is_abstract()) {
2444 return _abstract_method_handler;
2445 }
2446 int total_args_passed = method->size_of_parameters(); // All args on stack
2447 if (total_args_passed == 0) {
2448 return _no_arg_handler;
2449 } else if (total_args_passed == 1) {
2450 if (!method->is_static()) {
2451 return _obj_arg_handler;
2452 }
2453 switch (method->signature()->char_at(1)) {
2454 case JVM_SIGNATURE_CLASS:
2455 case JVM_SIGNATURE_ARRAY:
2456 return _obj_arg_handler;
2457 case JVM_SIGNATURE_INT:
2458 case JVM_SIGNATURE_BOOLEAN:
2459 case JVM_SIGNATURE_CHAR:
2460 case JVM_SIGNATURE_BYTE:
2461 case JVM_SIGNATURE_SHORT:
2462 return _int_arg_handler;
2463 }
2464 } else if (total_args_passed == 2 &&
2465 !method->is_static()) {
2466 switch (method->signature()->char_at(1)) {
2467 case JVM_SIGNATURE_CLASS:
2468 case JVM_SIGNATURE_ARRAY:
2469 return _obj_obj_arg_handler;
2470 case JVM_SIGNATURE_INT:
2471 case JVM_SIGNATURE_BOOLEAN:
2472 case JVM_SIGNATURE_CHAR:
2473 case JVM_SIGNATURE_BYTE:
2474 case JVM_SIGNATURE_SHORT:
2475 return _obj_int_arg_handler;
2476 }
2477 }
2478 return nullptr;
2479 }
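// Illustrative mapping (hypothetical methods): a virtual "int size()" passes
// only the receiver (total_args_passed == 1) and gets _obj_arg_handler; a
// "static int abs(int)" gets _int_arg_handler; a virtual "boolean equals(Object)"
// passes the receiver plus one reference and gets _obj_obj_arg_handler.
// Anything not covered above falls through to the general fingerprint-based
// lookup and returns nullptr here.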
2480
2481 class AdapterSignatureIterator : public SignatureIterator {
2482 private:
2483 BasicType stack_sig_bt[16];
2484 BasicType* sig_bt;
2485 int index;
2486
2487 public:
2488 AdapterSignatureIterator(Symbol* signature,
2489 fingerprint_t fingerprint,
2490 bool is_static,
2491 int total_args_passed) :
2492 SignatureIterator(signature, fingerprint),
2493 index(0)
2494 {
2495 sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2496 if (!is_static) { // Pass in receiver first
2497 sig_bt[index++] = T_OBJECT;
2498 }
2499 do_parameters_on(this);
2500 }
2501
2502 BasicType* basic_types() {
2503 return sig_bt;
2504 }
2505
2506 #ifdef ASSERT
2507 int slots() {
2508 return index;
2509 }
2510 #endif
2511
2512 private:
2513
2514 friend class SignatureIterator; // so do_parameters_on can call do_type
2515 void do_type(BasicType type) {
2516 sig_bt[index++] = type;
2517 if (type == T_LONG || type == T_DOUBLE) {
2518 sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots
2519 }
2520 }
2521 };
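// Example (illustrative): for a non-static method with signature (IJ)V the
// iterator produces sig_bt = {T_OBJECT, T_INT, T_LONG, T_VOID}; the receiver
// is prepended and the long is followed by a T_VOID slot, so slots() == 4,
// matching Method::size_of_parameters().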
2522
2523 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2524 // Use customized signature handler. Need to lock around updates to
2525 // the _adapter_handler_table (it is not safe for concurrent readers
2526 // and a single writer: this could be fixed if it becomes a
2527 // problem).
2528
2529 // Fast-path for trivial adapters
2530 AdapterHandlerEntry* entry = get_simple_adapter(method);
2531 if (entry != nullptr) {
2532 return entry;
2533 }
2534
2535 ResourceMark rm;
2536 AdapterBlob* new_adapter = nullptr;
2537
2538 // Fill in the signature array, for the calling-convention call.
2539 int total_args_passed = method->size_of_parameters(); // All args on stack
2540
2541 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2542 method->is_static(), total_args_passed);
2543 assert(si.slots() == total_args_passed, "");
2544 BasicType* sig_bt = si.basic_types();
2545 {
2546 MutexLocker mu(AdapterHandlerLibrary_lock);
2547
2548 // Look up the method signature's fingerprint
2549 entry = lookup(total_args_passed, sig_bt);
2550
2551 if (entry != nullptr) {
2552 #ifdef ASSERT
2553 if (VerifyAdapterSharing) {
2554 AdapterBlob* comparison_blob = nullptr;
2555 AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, total_args_passed, sig_bt, false);
2556 assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison");
2557 assert(comparison_entry->compare_code(entry), "code must match");
2558 // Release the one just created and return the original
2559 delete comparison_entry;
2560 }
2561 #endif
2562 return entry;
2563 }
2564
2565 entry = create_adapter(new_adapter, total_args_passed, sig_bt, /* allocate_code_blob */ true);
2566 }
2567
2568 // Outside of the lock
2569 if (new_adapter != nullptr) {
2570 post_adapter_creation(new_adapter, entry);
2571 }
2572 return entry;
2573 }
2574
2575 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
2576 int total_args_passed,
2577 BasicType* sig_bt,
2578 bool allocate_code_blob) {
2579
2580 // StubRoutines::_final_stubs_code is initialized after this function can be called. As a result,
2581 // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated prior
2582 // to all StubRoutines::_final_stubs_code being set. Checks refer to runtime range checks generated
2583 // in an I2C stub that ensure that an I2C stub is called from an interpreter frame or stubs.
2584 bool contains_all_checks = StubRoutines::final_stubs_code() != nullptr;
2585
2586 VMRegPair stack_regs[16];
2587 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2588
2589 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2590 int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2591 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2592 CodeBuffer buffer(buf);
2593 short buffer_locs[20];
2594 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2595 sizeof(buffer_locs)/sizeof(relocInfo));
2596
2597 // Make a C heap allocated version of the fingerprint to store in the adapter
2598 AdapterFingerPrint* fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
2599 MacroAssembler _masm(&buffer);
2600 AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
2601 total_args_passed,
2602 comp_args_on_stack,
2603 sig_bt,
2604 regs,
2605 fingerprint);
2606
2607 #ifdef ASSERT
2608 if (VerifyAdapterSharing) {
2609 entry->save_code(buf->code_begin(), buffer.insts_size());
2610 if (!allocate_code_blob) {
2611 return entry;
2612 }
2613 }
2614 #endif
2615
2616 new_adapter = AdapterBlob::create(&buffer);
2617 NOT_PRODUCT(int insts_size = buffer.insts_size());
2618 if (new_adapter == nullptr) {
2619 // CodeCache is full, disable compilation
2620 // Ought to log this but compile log is only per compile thread
2621 // and we're some nondescript Java thread.
2622 return nullptr;
2623 }
2624 entry->relocate(new_adapter->content_begin());
2625 #ifndef PRODUCT
2626 // debugging support
2627 if (PrintAdapterHandlers || PrintStubCode) {
2628 ttyLocker ttyl;
2629 entry->print_adapter_on(tty);
2630 tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
2631 _adapter_handler_table->number_of_entries(), fingerprint->as_basic_args_string(),
2632 fingerprint->as_string(), insts_size);
2633 tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
2634 if (Verbose || PrintStubCode) {
2635 address first_pc = entry->base_address();
2636 if (first_pc != nullptr) {
2638 NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
2639 tty->cr();
2640 }
2641 }
2642 }
2643 #endif
2644
2645 // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
2646 // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
2647 if (contains_all_checks || !VerifyAdapterCalls) {
2648 assert_lock_strong(AdapterHandlerLibrary_lock);
2649 _adapter_handler_table->put(fingerprint, entry);
2650 }
2651 return entry;
2652 }
2653
2654 address AdapterHandlerEntry::base_address() {
2655 address base = _i2c_entry;
2656 if (base == nullptr) base = _c2i_entry;
2657 assert(base <= _c2i_entry || _c2i_entry == nullptr, "");
2658 assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, "");
2659 assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, "");
2660 return base;
2661 }
2662
2663 void AdapterHandlerEntry::relocate(address new_base) {
2664 address old_base = base_address();
2665 assert(old_base != nullptr, "");
2666 ptrdiff_t delta = new_base - old_base;
2667 if (_i2c_entry != nullptr)
2668 _i2c_entry += delta;
2669 if (_c2i_entry != nullptr)
2670 _c2i_entry += delta;
2671 if (_c2i_unverified_entry != nullptr)
2672 _c2i_unverified_entry += delta;
2673 if (_c2i_no_clinit_check_entry != nullptr)
2674 _c2i_no_clinit_check_entry += delta;
2675 assert(base_address() == new_base, "");
2676 }
2677
2678
2679 AdapterHandlerEntry::~AdapterHandlerEntry() {
2680 delete _fingerprint;
2681 #ifdef ASSERT
2682 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
2683 #endif
2684 }
2685
2686
2687 #ifdef ASSERT
2688 // Capture the code before relocation so that it can be compared
2689 // against other versions. If the code is captured after relocation
2690 // then relative instructions won't be equivalent.
2691 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
2692 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
2693 _saved_code_length = length;
2694 memcpy(_saved_code, buffer, length);
2695 }
2696
2697
2698 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
2699 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
2700
2747
2748 struct { double data[20]; } locs_buf;
2749 struct { double data[20]; } stubs_locs_buf;
2750 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
2751 #if defined(AARCH64) || defined(PPC64)
2752 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
2753 // in the constant pool to ensure ordering between the barrier and oops
2754 // accesses. For native_wrappers we need a constant.
2755 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
2756 // static java call that is resolved in the runtime.
2757 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
2758 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
2759 }
2760 #endif
2761 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
2762 MacroAssembler _masm(&buffer);
2763
2764 // Fill in the signature array, for the calling-convention call.
2765 const int total_args_passed = method->size_of_parameters();
2766
2767 VMRegPair stack_regs[16];
2768 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2769
2770 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2771 method->is_static(), total_args_passed);
2772 BasicType* sig_bt = si.basic_types();
2773 assert(si.slots() == total_args_passed, "");
2774 BasicType ret_type = si.return_type();
2775
2776 // Now get the compiled-Java arguments layout.
2777 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2778
2779 // Generate the compiled-to-native wrapper code
2780 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
2781
2782 if (nm != nullptr) {
2783 {
2784 MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
2785 if (nm->make_in_use()) {
2786 method->set_code(method, nm);
2787 }
2788 }
2789
2790 DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
2791 if (directive->PrintAssemblyOption) {
2792 nm->print_code();
2793 }
2794 DirectivesStack::release(directive);
2999 st->print("Adapter for signature: ");
3000 a->print_adapter_on(st);
3001 return true;
3002 } else {
3003 return false; // keep looking
3004 }
3005 };
3006 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3007 _adapter_handler_table->iterate(findblob);
3008 assert(found, "Should have found handler");
3009 }
3010
3011 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3012 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3013 if (get_i2c_entry() != nullptr) {
3014 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3015 }
3016 if (get_c2i_entry() != nullptr) {
3017 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3018 }
3019 if (get_c2i_unverified_entry() != nullptr) {
3020 st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3021 }
3022 if (get_c2i_no_clinit_check_entry() != nullptr) {
3023 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3024 }
3025 st->cr();
3026 }
3027
3028 #ifndef PRODUCT
3029
3030 void AdapterHandlerLibrary::print_statistics() {
3031 print_table_statistics();
3032 }
3033
3034 #endif /* PRODUCT */
3035
3036 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3037 assert(current == JavaThread::current(), "pre-condition");
3038 StackOverflow* overflow_state = current->stack_overflow_state();
3039 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3040 overflow_state->set_reserved_stack_activation(current->stack_base());
3089 event.set_method(method);
3090 event.commit();
3091 }
3092 }
3093 }
3094 return activation;
3095 }
3096
3097 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3098 // After any safepoint, just before going back to compiled code,
3099 // we inform the GC that we will be doing initializing writes to
3100 // this object in the future without emitting card-marks, so
3101 // GC may take any compensating steps.
3102
3103 oop new_obj = current->vm_result();
3104 if (new_obj == nullptr) return;
3105
3106 BarrierSet *bs = BarrierSet::barrier_set();
3107 bs->on_slowpath_allocation_exit(current, new_obj);
3108 }
118
119 AdapterHandlerLibrary::initialize();
120
121 #if COMPILER2_OR_JVMCI
122 // Vectors are generated only by C2 and JVMCI.
123 bool support_wide = is_wide_vector(MaxVectorSize);
124 if (support_wide) {
125 _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
126 }
127 #endif // COMPILER2_OR_JVMCI
128 _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
129 _polling_page_return_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);
130
131 generate_deopt_blob();
132
133 #ifdef COMPILER2
134 generate_uncommon_trap_blob();
135 #endif // COMPILER2
136 }
137
1126 // for a call currently in progress, i.e., the arguments have been pushed on the stack
1127 // but the callee has not been invoked yet. The caller frame must be compiled.
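// On return, 'bc' holds the invoke bytecode the call site corresponds to and
// 'callinfo' the resolution result; the returned Handle is the receiver oop,
// or a null handle for calls that take no receiver.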
1128 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1129 CallInfo& callinfo, TRAPS) {
1130 Handle receiver;
1131 Handle nullHandle; // create a handy null handle for exception returns
1132 JavaThread* current = THREAD;
1133
1134 assert(!vfst.at_end(), "Java frame must exist");
1135
1136 // Find caller and bci from vframe
1137 methodHandle caller(current, vfst.method());
1138 int bci = vfst.bci();
1139
1140 if (caller->is_continuation_enter_intrinsic()) {
1141 bc = Bytecodes::_invokestatic;
1142 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1143 return receiver;
1144 }
1145
1146 // The substitutability test implementation piggybacks on static call resolution
1147 Bytecodes::Code code = caller->java_code_at(bci);
1148 if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
1149 bc = Bytecodes::_invokestatic;
1150 methodHandle attached_method(THREAD, extract_attached_method(vfst));
1151 assert(attached_method.not_null(), "must have attached method");
1152 vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
1153 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
1154 #ifdef ASSERT
1155 Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(vmSymbols::isSubstitutable_name(), vmSymbols::object_object_boolean_signature());
1156 assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
1157 #endif
1158 return receiver;
1159 }
1160
1161 Bytecode_invoke bytecode(caller, bci);
1162 int bytecode_index = bytecode.index();
1163 bc = bytecode.invoke_code();
1164
1165 methodHandle attached_method(current, extract_attached_method(vfst));
1166 if (attached_method.not_null()) {
1167 Method* callee = bytecode.static_target(CHECK_NH);
1168 vmIntrinsics::ID id = callee->intrinsic_id();
1169 // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual call,
1170 // it attaches the statically resolved method to the call site.
1171 if (MethodHandles::is_signature_polymorphic(id) &&
1172 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1173 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1174
1175 // Adjust invocation mode according to the attached method.
1176 switch (bc) {
1177 case Bytecodes::_invokevirtual:
1178 if (attached_method->method_holder()->is_interface()) {
1179 bc = Bytecodes::_invokeinterface;
1180 }
1181 break;
1182 case Bytecodes::_invokeinterface:
1183 if (!attached_method->method_holder()->is_interface()) {
1184 bc = Bytecodes::_invokevirtual;
1185 }
1186 break;
1187 case Bytecodes::_invokehandle:
1188 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1189 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1190 : Bytecodes::_invokevirtual;
1191 }
1192 break;
1193 default:
1194 break;
1195 }
1196 } else {
1197 assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1198 if (!attached_method->method_holder()->is_inline_klass()) {
1199 // Ignore the attached method in this case so it does not confuse the code below
1200 attached_method = methodHandle(current, nullptr);
1201 }
1202 }
1203 }
1204
1205 assert(bc != Bytecodes::_illegal, "not initialized");
1206
1207 bool has_receiver = bc != Bytecodes::_invokestatic &&
1208 bc != Bytecodes::_invokedynamic &&
1209 bc != Bytecodes::_invokehandle;
1210 bool check_null_and_abstract = true;
1211
1212 // Find receiver for non-static call
1213 if (has_receiver) {
1214 // This register map must be updated since we need to find the receiver for
1215 // compiled frames. The receiver might be in a register.
1216 RegisterMap reg_map2(current,
1217 RegisterMap::UpdateMap::include,
1218 RegisterMap::ProcessFrames::include,
1219 RegisterMap::WalkContinuation::skip);
1220 frame stubFrame = current->last_frame();
1221 // Caller-frame is a compiled frame
1222 frame callerFrame = stubFrame.sender(&reg_map2);
1223
1224 Method* callee = attached_method();
1225 if (callee == nullptr) {
1226 callee = bytecode.static_target(CHECK_NH);
1227 if (callee == nullptr) {
1228 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1229 }
1230 }
1231 bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->is_compiled_by_c1();
1232 if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
1233 // If the receiver is an inline type that is passed as fields, no oop is available
1234 // Resolve the call without receiver null checking.
1235 assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
1236 assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
1237 if (bc == Bytecodes::_invokeinterface) {
1238 bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
1239 }
1240 check_null_and_abstract = false;
1241 } else {
1242 // Retrieve from a compiled argument list
1243 receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1244 assert(oopDesc::is_oop_or_null(receiver()), "");
1245 if (receiver.is_null()) {
1246 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1247 }
1248 }
1249 }
1250
1251 // Resolve method
1252 if (attached_method.not_null()) {
1253 // Parameterized by attached method.
1254 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
1255 } else {
1256 // Parameterized by bytecode.
1257 constantPoolHandle constants(current, caller->constants());
1258 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1259 }
1260
1261 #ifdef ASSERT
1262 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1263 if (has_receiver && check_null_and_abstract) {
1264 assert(receiver.not_null(), "should have thrown exception");
1265 Klass* receiver_klass = receiver->klass();
1266 Klass* rk = nullptr;
1267 if (attached_method.not_null()) {
1268 // In case there's resolved method attached, use its holder during the check.
1269 rk = attached_method->method_holder();
1270 } else {
1271 // Klass is already loaded.
1272 constantPoolHandle constants(current, caller->constants());
1273 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1274 }
1275 Klass* static_receiver_klass = rk;
1276 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1277 "actual receiver must be subclass of static receiver klass");
1278 if (receiver_klass->is_instance_klass()) {
1279 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1280 tty->print_cr("ERROR: Klass not yet initialized!!");
1281 receiver_klass->print();
1282 }
1283 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1284 }
1285 }
1286 #endif
1287
1288 return receiver;
1289 }
1290
1291 methodHandle SharedRuntime::find_callee_method(bool is_optimized, bool& caller_is_c1, TRAPS) {
1292 JavaThread* current = THREAD;
1293 ResourceMark rm(current);
1294 // We first need to check if any Java activations (compiled, interpreted)
1295 // exist on the stack since the last JavaCall. If not, we need
1296 // to get the target method from the JavaCall wrapper.
1297 vframeStream vfst(current, true); // Do not skip any javaCalls
1298 methodHandle callee_method;
1299 if (vfst.at_end()) {
1300 // No Java frames were found on stack since we did the JavaCall.
1301 // Hence the stack can only contain an entry_frame. We need to
1302 // find the target method from the stub frame.
1303 RegisterMap reg_map(current,
1304 RegisterMap::UpdateMap::skip,
1305 RegisterMap::ProcessFrames::include,
1306 RegisterMap::WalkContinuation::skip);
1307 frame fr = current->last_frame();
1308 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1309 fr = fr.sender(&reg_map);
1310 assert(fr.is_entry_frame(), "must be");
1311 // fr is now pointing to the entry frame.
1312 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1313 } else {
1314 Bytecodes::Code bc;
1315 CallInfo callinfo;
1316 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1317 // Calls via mismatching methods are always non-scalarized
1318 if (callinfo.resolved_method()->mismatch() && !is_optimized) {
1319 caller_is_c1 = true;
1320 }
1321 callee_method = methodHandle(current, callinfo.selected_method());
1322 }
1323 assert(callee_method()->is_method(), "must be");
1324 return callee_method;
1325 }
1326
1327 // Resolves a call.
1328 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_is_c1, TRAPS) {
1329 JavaThread* current = THREAD;
1330 ResourceMark rm(current);
1331 RegisterMap cbl_map(current,
1332 RegisterMap::UpdateMap::skip,
1333 RegisterMap::ProcessFrames::include,
1334 RegisterMap::WalkContinuation::skip);
1335 frame caller_frame = current->last_frame().sender(&cbl_map);
1336
1337 CodeBlob* caller_cb = caller_frame.cb();
1338 guarantee(caller_cb != nullptr && caller_cb->is_compiled(), "must be called from compiled method");
1339 CompiledMethod* caller_nm = caller_cb->as_compiled_method();
1340
1341 // determine call info & receiver
1342 // note: a) receiver is null for static calls
1343 // b) an exception is thrown if receiver is null for non-static calls
1344 CallInfo call_info;
1345 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1346 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1347
1348 NoSafepointVerifier nsv;
1349
1350 methodHandle callee_method(current, call_info.selected_method());
1351 // Calls via mismatching methods are always non-scalarized
1352 if (caller_nm->is_compiled_by_c1() || (call_info.resolved_method()->mismatch() && !is_optimized)) {
1353 caller_is_c1 = true;
1354 }
1355
1356 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1357 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1358 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1359 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1360 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1361
1362 assert(!caller_nm->is_unloading(), "It should not be unloading");
1363
1364 #ifndef PRODUCT
1365 // tracing/debugging/statistics
1366 uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1367 (is_virtual) ? (&_resolve_virtual_ctr) :
1368 (&_resolve_static_ctr);
1369 Atomic::inc(addr);
1370
1371 if (TraceCallFixup) {
1372 ResourceMark rm(current);
1373 tty->print("resolving %s%s (%s) call%s to",
1374 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1375 Bytecodes::name(invoke_code), (caller_is_c1) ? " from C1" : "");
1376 callee_method->print_short_name(tty);
1377 tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1378 p2i(caller_frame.pc()), p2i(callee_method->code()));
1379 }
1380 #endif
1381
1382 if (invoke_code == Bytecodes::_invokestatic) {
1383 assert(callee_method->method_holder()->is_initialized() ||
1384 callee_method->method_holder()->is_init_thread(current),
1385 "invalid class initialization state for invoke_static");
1386 if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1387 // In order to keep class initialization check, do not patch call
1388 // site for static call when the class is not fully initialized.
1389 // Proper check is enforced by call site re-resolution on every invocation.
1390 //
1391 // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1392 // explicit class initialization check is put in nmethod entry (VEP).
1393 assert(callee_method->method_holder()->is_linked(), "must be");
1394 return callee_method;
1395 }
1396 }
1397
1398
1399 // JSR 292 key invariant:
1400 // If the resolved method is a MethodHandle invoke target, the call
1401 // site must be a MethodHandle call site, because the lambda form might tail-call
1402 // leaving the stack in a state unknown to either caller or callee
1403
1404 // Compute entry points. The computation of the entry points is independent of
1405 // patching the call.
1406
1407 // Make sure the callee nmethod does not get deoptimized and removed before
1408 // we are done patching the code.
1409
1410
1411 CompiledICLocker ml(caller_nm);
1412 if (is_virtual && !is_optimized) {
1413 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1414 inline_cache->update(&call_info, receiver->klass(), caller_is_c1);
1415 } else {
1416 // Callsite is a direct call - set it to the destination method
1417 CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1418 callsite->set(callee_method, caller_is_c1);
1419 }
1420
1421 return callee_method;
1422 }
1423
1424 // Inline caches exist only in compiled code
1425 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1426 #ifdef ASSERT
1427 RegisterMap reg_map(current,
1428 RegisterMap::UpdateMap::skip,
1429 RegisterMap::ProcessFrames::include,
1430 RegisterMap::WalkContinuation::skip);
1431 frame stub_frame = current->last_frame();
1432 assert(stub_frame.is_runtime_frame(), "sanity check");
1433 frame caller_frame = stub_frame.sender(&reg_map);
1434 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1435 #endif /* ASSERT */
1436
1437 methodHandle callee_method;
1438 bool is_optimized = false;
1439 bool caller_is_c1 = false;
1440 JRT_BLOCK
1441 callee_method = SharedRuntime::handle_ic_miss_helper(is_optimized, caller_is_c1, CHECK_NULL);
1442 // Return Method* through TLS
1443 current->set_vm_result_2(callee_method());
1444 JRT_BLOCK_END
1445 // return compiled code entry point after potential safepoints
1446 return entry_for_handle_wrong_method(callee_method, false, is_optimized, caller_is_c1);
1447 JRT_END
1448
1449
1450 // Handle call site that has been made non-entrant
1451 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1452 // 6243940 We might end up in here if the callee is deoptimized
1453 // as we race to call it. We don't want to take a safepoint if
1454 // the caller was interpreted because the caller frame will look
1455 // interpreted to the stack walkers and arguments are now
1456 // "compiled" so it is much better to make this transition
1457 // invisible to the stack walking code. The i2c path will
1458 // place the callee method in the callee_target. It is stashed
1459 // there because if we try to find the callee by normal means a
1460 // safepoint is possible and we would have trouble gc'ing the compiled args.
1461 RegisterMap reg_map(current,
1462 RegisterMap::UpdateMap::skip,
1463 RegisterMap::ProcessFrames::include,
1464 RegisterMap::WalkContinuation::skip);
1465 frame stub_frame = current->last_frame();
1466 assert(stub_frame.is_runtime_frame(), "sanity check");
1467 frame caller_frame = stub_frame.sender(&reg_map);
1468
1469 if (caller_frame.is_interpreted_frame() ||
1470 caller_frame.is_entry_frame() ||
1471 caller_frame.is_upcall_stub_frame()) {
1472 Method* callee = current->callee_target();
1473 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1474 current->set_vm_result_2(callee);
1475 current->set_callee_target(nullptr);
1476 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1477 // Bypass class initialization checks in c2i when caller is in native.
1478 // JNI calls to static methods don't have class initialization checks.
1479 // Fast class initialization checks are present in c2i adapters and call into
1480 // SharedRuntime::handle_wrong_method() on the slow path.
1481 //
1482 // JVM upcalls may land here as well, but there's a proper check present in
1483 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1484 // so bypassing it in c2i adapter is benign.
1485 return callee->get_c2i_no_clinit_check_entry();
1486 } else {
1487 if (caller_frame.is_interpreted_frame()) {
1488 return callee->get_c2i_inline_entry();
1489 } else {
1490 return callee->get_c2i_entry();
1491 }
1492 }
1493 }
1494
1495 // Must be compiled to compiled path which is safe to stackwalk
1496 methodHandle callee_method;
1497 bool is_static_call = false;
1498 bool is_optimized = false;
1499 bool caller_is_c1 = false;
1500 JRT_BLOCK
1501 // Force resolving of caller (if we called from compiled frame)
1502 callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_is_c1, CHECK_NULL);
1503 current->set_vm_result_2(callee_method());
1504 JRT_BLOCK_END
1505 // return compiled code entry point after potential safepoints
1506 return entry_for_handle_wrong_method(callee_method, is_static_call, is_optimized, caller_is_c1);
1507 JRT_END
1508
1509 // Handle abstract method call
1510 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1511 // Verbose error message for AbstractMethodError.
1512 // Get the called method from the invoke bytecode.
1513 vframeStream vfst(current, true);
1514 assert(!vfst.at_end(), "Java frame must exist");
1515 methodHandle caller(current, vfst.method());
1516 Bytecode_invoke invoke(caller, vfst.bci());
1517 DEBUG_ONLY( invoke.verify(); )
1518
1519 // Find the compiled caller frame.
1520 RegisterMap reg_map(current,
1521 RegisterMap::UpdateMap::include,
1522 RegisterMap::ProcessFrames::include,
1523 RegisterMap::WalkContinuation::skip);
1524 frame stubFrame = current->last_frame();
1525 assert(stubFrame.is_runtime_frame(), "must be");
1526 frame callerFrame = stubFrame.sender(&reg_map);
1527 assert(callerFrame.is_compiled_frame(), "must be");
1528
1529 // Install exception and return forward entry.
1530 address res = StubRoutines::throw_AbstractMethodError_entry();
1531 JRT_BLOCK
1532 methodHandle callee(current, invoke.static_target(current));
1533 if (!callee.is_null()) {
1534 oop recv = callerFrame.retrieve_receiver(&reg_map);
1535 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1536 res = StubRoutines::forward_exception_entry();
1537 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1538 }
1539 JRT_BLOCK_END
1540 return res;
1541 JRT_END
1542
1543
1544 // resolve a static call and patch code
1545 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1546 methodHandle callee_method;
1547 bool caller_is_c1 = false;
1548 bool enter_special = false;
1549 JRT_BLOCK
1550 callee_method = SharedRuntime::resolve_helper(false, false, caller_is_c1, CHECK_NULL);
1551 current->set_vm_result_2(callee_method());
1552
1553 if (current->is_interp_only_mode()) {
1554 RegisterMap reg_map(current,
1555 RegisterMap::UpdateMap::skip,
1556 RegisterMap::ProcessFrames::include,
1557 RegisterMap::WalkContinuation::skip);
1558 frame stub_frame = current->last_frame();
1559 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1560 frame caller = stub_frame.sender(&reg_map);
1561 enter_special = caller.cb() != nullptr && caller.cb()->is_compiled()
1562 && caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic();
1563 }
1564 JRT_BLOCK_END
1565
1566 if (current->is_interp_only_mode() && enter_special) {
1567 // enterSpecial is compiled and calls this method to resolve the call to Continuation::enter
1568 // but in interp_only_mode we need to go to the interpreted entry
1569 // The c2i won't patch in this mode -- see fixup_callers_callsite
1570 //
1571 // This should probably be done in all cases, not just enterSpecial (see JDK-8218403),
1572 // but that's part of a larger fix, and the situation is worse for enterSpecial, as it has no
1573 // interpreted version.
1574 return callee_method->get_c2i_entry();
1575 }
1576
1577 // return compiled code entry point after potential safepoints
1578 address entry = caller_is_c1 ?
1579 callee_method->verified_inline_code_entry() : callee_method->verified_code_entry();
1580 assert(entry != nullptr, "Jump to zero!");
1581 return entry;
1582 JRT_END
1583
1584
1585 // resolve virtual call and update inline cache to monomorphic
1586 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1587 methodHandle callee_method;
1588 bool caller_is_c1 = false;
1589 JRT_BLOCK
1590 callee_method = SharedRuntime::resolve_helper(true, false, caller_is_c1, CHECK_NULL);
1591 current->set_vm_result_2(callee_method());
1592 JRT_BLOCK_END
1593 // return compiled code entry point after potential safepoints
1594 address entry = caller_is_c1 ?
1595 callee_method->verified_inline_code_entry() : callee_method->verified_inline_ro_code_entry();
1596 assert(entry != nullptr, "Jump to zero!");
1597 return entry;
1598 JRT_END
1599
1600
1601 // Resolve a virtual call that can be statically bound (e.g., always
1602 // monomorphic, so it has no inline cache). Patch code to resolved target.
1603 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1604 methodHandle callee_method;
1605 bool caller_is_c1 = false;
1606 JRT_BLOCK
1607 callee_method = SharedRuntime::resolve_helper(true, true, caller_is_c1, CHECK_NULL);
1608 current->set_vm_result_2(callee_method());
1609 JRT_BLOCK_END
1610 // return compiled code entry point after potential safepoints
1611 address entry = caller_is_c1 ?
1612 callee_method->verified_inline_code_entry() : callee_method->verified_code_entry();
1613 assert(entry != nullptr, "Jump to zero!");
1614 return entry;
1615 JRT_END
1616
1617
1618
1619 methodHandle SharedRuntime::handle_ic_miss_helper(bool& is_optimized, bool& caller_is_c1, TRAPS) {
1620 JavaThread* current = THREAD;
1621 ResourceMark rm(current);
1622 CallInfo call_info;
1623 Bytecodes::Code bc;
1624
1625 // receiver is null for static calls. An exception is thrown for null
1626 // receivers for non-static calls
1627 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1628
1629 methodHandle callee_method(current, call_info.selected_method());
1630
1631 #ifndef PRODUCT
1632 Atomic::inc(&_ic_miss_ctr);
1633
1634 // Statistics & Tracing
1635 if (TraceCallFixup) {
1636 ResourceMark rm(current);
1637 tty->print("IC miss (%s) call%s to", Bytecodes::name(bc), (caller_is_c1) ? " from C1" : "");
1638 callee_method->print_short_name(tty);
1639 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1640 }
1641
1642 if (ICMissHistogram) {
1643 MutexLocker m(VMStatistic_lock);
1644 RegisterMap reg_map(current,
1645 RegisterMap::UpdateMap::skip,
1646 RegisterMap::ProcessFrames::include,
1647 RegisterMap::WalkContinuation::skip);
1648 frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1649 // produce statistics under the lock
1650 trace_ic_miss(f.pc());
1651 }
1652 #endif
1653
1654 // install an event collector so that when a vtable stub is created the
1655 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1656 // event can't be posted when the stub is created as locks are held
1657 // - instead the event will be deferred until the event collector goes
1658 // out of scope.
1659 JvmtiDynamicCodeEventCollector event_collector;
1660
1661 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1662 RegisterMap reg_map(current,
1663 RegisterMap::UpdateMap::skip,
1664 RegisterMap::ProcessFrames::include,
1665 RegisterMap::WalkContinuation::skip);
1666 frame caller_frame = current->last_frame().sender(&reg_map);
1667 CodeBlob* cb = caller_frame.cb();
1668 CompiledMethod* caller_nm = cb->as_compiled_method();
1669 // Calls via mismatching methods are always non-scalarized
1670 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1671 caller_is_c1 = true;
1672 }
1673
1674 CompiledICLocker ml(caller_nm);
1675 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1676 inline_cache->update(&call_info, receiver()->klass(), caller_is_c1);
1677
1678 return callee_method;
1679 }
1680
1681 //
1682 // Resets a call site in compiled code so it will get resolved again.
1683 // This routine handles virtual call sites, optimized virtual call
1684 // sites, and static call sites. Typically used to change a call site's
1685 // destination from compiled to interpreted.
1686 //
1687 methodHandle SharedRuntime::reresolve_call_site(bool& is_static_call, bool& is_optimized, bool& caller_is_c1, TRAPS) {
1688 JavaThread* current = THREAD;
1689 ResourceMark rm(current);
1690 RegisterMap reg_map(current,
1691 RegisterMap::UpdateMap::skip,
1692 RegisterMap::ProcessFrames::include,
1693 RegisterMap::WalkContinuation::skip);
1694 frame stub_frame = current->last_frame();
1695 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1696 frame caller = stub_frame.sender(&reg_map);
1697 if (caller.is_compiled_frame()) {
1698 caller_is_c1 = caller.cb()->is_compiled_by_c1();
1699 }
1700
1701 // Do nothing if the frame isn't a live compiled frame.
1702 // nmethod could be deoptimized by the time we get here
1703 // so no update to the caller is needed.
1704
1705 if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1706 (caller.is_native_frame() && ((CompiledMethod*)caller.cb())->method()->is_continuation_enter_intrinsic())) {
1707
1708 address pc = caller.pc();
1709
1710 CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
1711
1712 // Default call_addr is the location of the "basic" call.
1713 // Determine the address of the call we are re-resolving. With
1714 // Inline Caches we will always find a recognizable call.
1715 // With Inline Caches disabled we may or may not find a
1716 // recognizable call. We will always find a call for static
1717 // calls and for optimized virtual calls. For vanilla virtual
1718 // calls it depends on the state of the UseInlineCaches switch.
1719 //
1720 // With Inline Caches disabled we can get here for a virtual call
1721 // for two reasons:
1722 // 1 - calling an abstract method. The vtable for abstract methods
1723 // will run us thru handle_wrong_method and we will eventually
1724 // end up in the interpreter to throw the AbstractMethodError.
1725 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1726 // call and between the time we fetch the entry address and
1727 // we jump to it the target gets deoptimized. Similar to 1
1728 // we will wind up in the interpreter (thru a c2i with c2).
1729 //
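// If no call instruction can be recognized at the return pc, call_addr stays
// null below, the call site is left untouched and we simply re-resolve and
// return the callee method.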
1730 CompiledICLocker ml(caller_nm);
1731 address call_addr = caller_nm->call_instruction_address(pc);
1732
1733 if (call_addr != nullptr) {
1734 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1735 // bytes back in the instruction stream so we must also check for reloc info.
1736 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1737 bool ret = iter.next(); // Get item
1738 if (ret) {
1739 is_static_call = false;
1740 is_optimized = false;
1741 switch (iter.type()) {
1742 case relocInfo::static_call_type:
1743 is_static_call = true;
1744 case relocInfo::opt_virtual_call_type: {
1745 is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
1746 CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1747 cdc->set_to_clean();
1748 break;
1749 }
1750 case relocInfo::virtual_call_type: {
1751 // compiled, dispatched call (which used to call an interpreted method)
1752 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1753 inline_cache->set_to_clean();
1754 break;
1755 }
1756 default:
1757 break;
1758 }
1759 }
1760 }
1761 }
1762
1763 methodHandle callee_method = find_callee_method(is_optimized, caller_is_c1, CHECK_(methodHandle()));
1764
1765 #ifndef PRODUCT
1766 Atomic::inc(&_wrong_method_ctr);
1767
1768 if (TraceCallFixup) {
1769 ResourceMark rm(current);
1770 tty->print("handle_wrong_method reresolving call%s to", (caller_is_c1) ? " from C1" : "");
1771 callee_method->print_short_name(tty);
1772 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1773 }
1774 #endif
1775
1776 return callee_method;
1777 }
1778
1779 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1780 // The faulting unsafe accesses should be changed to throw the error
1781 // synchronously instead. Meanwhile the faulting instruction will be
1782 // skipped over (effectively turning it into a no-op) and an
1783 // asynchronous exception will be raised which the thread will
1784 // handle at a later point. If the instruction is a load it will
1785 // return garbage.
1786
1787 // Request an async exception.
1788 thread->set_pending_unsafe_access_error();
1789
1790 // Return address of next instruction to execute.
1955 msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
1956
1957 char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
1958 if (message == nullptr) {
1959 // Shouldn't happen, but don't cause even more problems if it does
1960 message = const_cast<char*>(caster_klass->external_name());
1961 } else {
1962 jio_snprintf(message,
1963 msglen,
1964 "class %s cannot be cast to class %s (%s%s%s)",
1965 caster_name,
1966 target_name,
1967 caster_klass_description,
1968 klass_separator,
1969 target_klass_description
1970 );
1971 }
1972 return message;
1973 }
1974
1975 char* SharedRuntime::generate_identity_exception_message(JavaThread* current, Klass* klass) {
1976 assert(klass->is_inline_klass(), "Must be a concrete value class");
1977 const char* desc = "Cannot synchronize on an instance of value class ";
1978 const char* className = klass->external_name();
1979 size_t msglen = strlen(desc) + strlen(className) + 1;
1980 char* message = NEW_RESOURCE_ARRAY(char, msglen);
1981 if (nullptr == message) {
1982 // Out of memory: can't create detailed error message
1983 message = const_cast<char*>(klass->external_name());
1984 } else {
1985 jio_snprintf(message, msglen, "%s%s", desc, className);
1986 }
1987 return message;
1988 }
1989
1990 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
1991 (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
1992 JRT_END
1993
1994 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
1995 if (!SafepointSynchronize::is_synchronizing()) {
1996 // Only try quick_enter() if we're not trying to reach a safepoint
1997 // so that the calling thread reaches the safepoint more quickly.
1998 if (ObjectSynchronizer::quick_enter(obj, current, lock)) {
1999 return;
2000 }
2001 }
2002 // NO_ASYNC required because an async exception on the state transition destructor
2003 // would leave you with the lock held and it would never be released.
2004 // The normal monitorenter NullPointerException is thrown without acquiring a lock
2005 // and the model is that an exception implies the method failed.
2006 JRT_BLOCK_NO_ASYNC
2007 Handle h_obj(THREAD, obj);
2008 ObjectSynchronizer::enter(h_obj, lock, current);
2009 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2185 tty->print_cr(" %% in nested categories are relative to their category");
2186 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
2187 tty->cr();
2188
2189 MethodArityHistogram h;
2190 }
2191 #endif
2192
2193 #ifndef PRODUCT
2194 static int _lookups; // number of calls to lookup
2195 static int _equals; // number of buckets checked with matching hash
2196 static int _hits; // number of successful lookups
2197 static int _compact; // number of equals calls with compact signature
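// These counters are reported by print_table_statistics() further below.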
2198 #endif
2199
2200 // A simple wrapper class around the calling convention information
2201 // that allows sharing of adapters for the same calling convention.
2202 class AdapterFingerPrint : public CHeapObj<mtCode> {
2203 private:
2204 enum {
2205 _basic_type_bits = 5,
2206 _basic_type_mask = right_n_bits(_basic_type_bits),
2207 _basic_types_per_int = BitsPerInt / _basic_type_bits,
2208 _compact_int_count = 3
2209 };
2210 // TO DO: Consider integrating this with a more global scheme for compressing signatures.
2211 // For now, 5 bits per component (plus T_VOID gaps after double/long) is not excessive.
2212
2213 union {
2214 int _compact[_compact_int_count];
2215 int* _fingerprint;
2216 } _value;
2217 int _length; // A negative length indicates the fingerprint is in the compact form;
2218 // otherwise _value._fingerprint is the array.
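  // With 5-bit encodings (six per 32-bit int) and three compact ints, signatures
  // of up to 18 slots fit in the compact form; longer ones use a C-heap array.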
2219
2220 // Remap BasicTypes that are handled equivalently by the adapters.
2221 // These are correct for the current system but someday it might be
2222 // necessary to make this mapping platform dependent.
2223 static BasicType adapter_encoding(BasicType in) {
2224 switch (in) {
2225 case T_BOOLEAN:
2226 case T_BYTE:
2227 case T_SHORT:
2228 case T_CHAR:
2229 // They are all promoted to T_INT in the calling convention
2230 return T_INT;
2231
2232 case T_OBJECT:
2233 case T_ARRAY:
2234 // In other words, we assume that any register good enough for
2235 // an int or long is good enough for a managed pointer.
2236 #ifdef _LP64
2237 return T_LONG;
2238 #else
2239 return T_INT;
2240 #endif
2241
2242 case T_INT:
2243 case T_LONG:
2244 case T_FLOAT:
2245 case T_DOUBLE:
2246 case T_VOID:
2247 return in;
2248
2249 default:
2250 ShouldNotReachHere();
2251 return T_CONFLICT;
2252 }
2253 }
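  // As a consequence, adapters are shared between methods whose signatures
  // differ only in these widened types. For instance (illustrative names):
  //   static void foo(boolean b, short s)  // encodes as (T_INT, T_INT)
  //   static void bar(int x, int y)        // encodes as (T_INT, T_INT)
  // produce the same fingerprint and therefore use the same AdapterHandlerEntry.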
2254
2255 public:
2256 AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2257 // The fingerprint is based on the BasicType signature encoded
2258 // into an array of ints with _basic_types_per_int entries per int.
2259 int total_args_passed = (sig != nullptr) ? sig->length() : 0;
2260 int* ptr;
2261 int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2262 if (len <= _compact_int_count) {
2263 assert(_compact_int_count == 3, "else change next line");
2264 _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2265 // Storing the signature encoded as signed chars hits about 98%
2266 // of the time.
2267 _length = -len;
2268 ptr = _value._compact;
2269 } else {
2270 _length = len;
2271 _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2272 ptr = _value._fingerprint;
2273 }
2274
2275 // Now pack the BasicTypes, _basic_types_per_int per int
2276 int sig_index = 0;
2277 BasicType prev_bt = T_ILLEGAL;
2278 int vt_count = 0;
2279 for (int index = 0; index < len; index++) {
2280 int value = 0;
2281 for (int byte = 0; byte < _basic_types_per_int; byte++) {
2282 BasicType bt = T_ILLEGAL;
2283 if (sig_index < total_args_passed) {
2284 bt = sig->at(sig_index++)._bt;
2285 if (bt == T_METADATA) {
2286 // Found start of inline type in signature
2287 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2288 if (sig_index == 1 && has_ro_adapter) {
2289 // With a ro_adapter, replace receiver inline type delimiter by T_VOID to prevent matching
2290 // with other adapters that have the same inline type as first argument and no receiver.
2291 bt = T_VOID;
2292 }
2293 vt_count++;
2294 } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2295 // Found end of inline type in signature
2296 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2297 vt_count--;
2298 assert(vt_count >= 0, "invalid vt_count");
2299 } else if (vt_count == 0) {
2300 // Widen fields that are not part of a scalarized inline type argument
2301 bt = adapter_encoding(bt);
2302 }
2303 prev_bt = bt;
2304 }
2305 int bt_val = (bt == T_ILLEGAL) ? 0 : bt;
2306 assert((bt_val & _basic_type_mask) == bt_val, "must fit in _basic_type_bits bits");
2307 value = (value << _basic_type_bits) | bt_val;
2308 }
2309 ptr[index] = value;
2310 }
2311 assert(vt_count == 0, "invalid vt_count");
2312 }
2313
2314 ~AdapterFingerPrint() {
2315 if (_length > 0) {
2316 FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2317 }
2318 }
2319
2320 int value(int index) {
2321 if (_length < 0) {
2322 return _value._compact[index];
2323 }
2324 return _value._fingerprint[index];
2325 }
2326 int length() {
2327 if (_length < 0) return -_length;
2328 return _length;
2329 }
2330
2331 bool is_compact() {
2356 const char* as_basic_args_string() {
2357 stringStream st;
2358 bool long_prev = false;
2359 for (int i = 0; i < length(); i++) {
2360 unsigned val = (unsigned)value(i);
2361 // args are packed so that first/lower arguments are in the highest
2362 // bits of each int value, so iterate from highest to the lowest
2363 for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2364 unsigned v = (val >> j) & _basic_type_mask;
2365 if (v == 0) {
2366 assert(i == length() - 1, "Only expect zeroes in the last word");
2367 continue;
2368 }
2369 if (long_prev) {
2370 long_prev = false;
2371 if (v == T_VOID) {
2372 st.print("J");
2373 } else {
2374 st.print("L");
2375 }
2376 } else if (v == T_LONG) {
2377 long_prev = true;
2378 } else if (v != T_VOID){
2379 st.print("%c", type2char((BasicType)v));
2380 }
2381 }
2382 }
2383 if (long_prev) {
2384 st.print("L");
2385 }
2386 return st.as_string();
2387 }
2388 #endif // !PRODUCT
2389
2390 bool equals(AdapterFingerPrint* other) {
2391 if (other->_length != _length) {
2392 return false;
2393 }
2394 if (_length < 0) {
2395 assert(_compact_int_count == 3, "else change next line");
2396 return _value._compact[0] == other->_value._compact[0] &&
2397 _value._compact[1] == other->_value._compact[1] &&
2398 _value._compact[2] == other->_value._compact[2];
2399 } else {
2407 }
2408
2409 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2410 NOT_PRODUCT(_equals++);
2411 return fp1->equals(fp2);
2412 }
2413
2414 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2415 return fp->compute_hash();
2416 }
2417 };
2418
2419 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2420 using AdapterHandlerTable = ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2421 AnyObj::C_HEAP, mtCode,
2422 AdapterFingerPrint::compute_hash,
2423 AdapterFingerPrint::equals>;
2424 static AdapterHandlerTable* _adapter_handler_table;
2425
2426 // Find an entry with the same fingerprint, if it exists
2427 static AdapterHandlerEntry* lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2428 NOT_PRODUCT(_lookups++);
2429 assert_lock_strong(AdapterHandlerLibrary_lock);
2430 AdapterFingerPrint fp(sig, has_ro_adapter);
2431 AdapterHandlerEntry** entry = _adapter_handler_table->get(&fp);
2432 if (entry != nullptr) {
2433 #ifndef PRODUCT
2434 if (fp.is_compact()) _compact++;
2435 _hits++;
2436 #endif
2437 return *entry;
2438 }
2439 return nullptr;
2440 }
2441
2442 #ifndef PRODUCT
2443 static void print_table_statistics() {
2444 auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2445 return sizeof(*key) + sizeof(*a);
2446 };
2447 TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2448 ts.print(tty, "AdapterHandlerTable");
2449 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2450 _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2451 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2452 _lookups, _equals, _hits, _compact);
2453 }
2454 #endif
2455
2456 // ---------------------------------------------------------------------------
2457 // Implementation of AdapterHandlerLibrary
2458 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
2459 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2460 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2461 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2462 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2463 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2464 const int AdapterHandlerLibrary_size = 48*K;
2465 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2466
2467 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2468 return _buffer;
2469 }
2470
2471 static void post_adapter_creation(const AdapterBlob* new_adapter,
2472 const AdapterHandlerEntry* entry) {
2473 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2474 char blob_id[256];
2475 jio_snprintf(blob_id,
2476 sizeof(blob_id),
2477 "%s(%s)",
2478 new_adapter->name(),
2479 entry->fingerprint()->as_string());
2480 if (Forte::is_enabled()) {
2481 Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2482 }
2483
2484 if (JvmtiExport::should_post_dynamic_code_generated()) {
2487 }
2488 }
2489
2490 void AdapterHandlerLibrary::initialize() {
2491 ResourceMark rm;
2492 AdapterBlob* no_arg_blob = nullptr;
2493 AdapterBlob* int_arg_blob = nullptr;
2494 AdapterBlob* obj_arg_blob = nullptr;
2495 AdapterBlob* obj_int_arg_blob = nullptr;
2496 AdapterBlob* obj_obj_arg_blob = nullptr;
2497 {
2498 _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2499 MutexLocker mu(AdapterHandlerLibrary_lock);
2500
2501 // Create a special handler for abstract methods. Abstract methods
2502 // are never compiled so an i2c entry is somewhat meaningless, but
2503 // throw AbstractMethodError just in case.
2504 // Pass wrong_method_abstract for the c2i transitions to return
2505 // AbstractMethodError for invalid invocations.
2506 address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2507 _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(nullptr),
2508 StubRoutines::throw_AbstractMethodError_entry(),
2509 wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
2510 wrong_method_abstract, wrong_method_abstract);
2511 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2512
2513 CompiledEntrySignature no_args;
2514 no_args.compute_calling_conventions();
2515 _no_arg_handler = create_adapter(no_arg_blob, no_args, true);
2516
2517 CompiledEntrySignature obj_args;
2518 SigEntry::add_entry(obj_args.sig(), T_OBJECT, nullptr);
2519 obj_args.compute_calling_conventions();
2520 _obj_arg_handler = create_adapter(obj_arg_blob, obj_args, true);
2521
2522 CompiledEntrySignature int_args;
2523 SigEntry::add_entry(int_args.sig(), T_INT, nullptr);
2524 int_args.compute_calling_conventions();
2525 _int_arg_handler = create_adapter(int_arg_blob, int_args, true);
2526
2527 CompiledEntrySignature obj_int_args;
2528 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT, nullptr);
2529 SigEntry::add_entry(obj_int_args.sig(), T_INT, nullptr);
2530 obj_int_args.compute_calling_conventions();
2531 _obj_int_arg_handler = create_adapter(obj_int_arg_blob, obj_int_args, true);
2532
2533 CompiledEntrySignature obj_obj_args;
2534 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT, nullptr);
2535 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT, nullptr);
2536 obj_obj_args.compute_calling_conventions();
2537 _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, obj_obj_args, true);
2538
2539 assert(no_arg_blob != nullptr &&
2540 obj_arg_blob != nullptr &&
2541 int_arg_blob != nullptr &&
2542 obj_int_arg_blob != nullptr &&
2543 obj_obj_arg_blob != nullptr, "Initial adapters must be properly created");
2544 }
2546
2547 // Outside of the lock
2548 post_adapter_creation(no_arg_blob, _no_arg_handler);
2549 post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2550 post_adapter_creation(int_arg_blob, _int_arg_handler);
2551 post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2552 post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2553 }
2554
2555 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2556 address i2c_entry,
2557 address c2i_entry,
2558 address c2i_inline_entry,
2559 address c2i_inline_ro_entry,
2560 address c2i_unverified_entry,
2561 address c2i_unverified_inline_entry,
2562 address c2i_no_clinit_check_entry) {
2563 return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry,
2564 c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
2565 }
2566
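// Fast path: hand out one of the pre-generated adapters for the most common
// signature shapes (no args, a single int-like or object arg, receiver+int,
// receiver+object). Returns nullptr otherwise -- e.g. for abstract methods,
// longer signatures, or methods that may need a scalarized (inline type)
// calling convention -- in which case a full adapter lookup/creation is done.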
2567 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2568 if (method->is_abstract()) {
2569 return nullptr;
2570 }
2571 int total_args_passed = method->size_of_parameters(); // All args on stack
2572 if (total_args_passed == 0) {
2573 return _no_arg_handler;
2574 } else if (total_args_passed == 1) {
2575 if (!method->is_static()) {
2576 if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2577 return nullptr;
2578 }
2579 return _obj_arg_handler;
2580 }
2581 switch (method->signature()->char_at(1)) {
2582 case JVM_SIGNATURE_CLASS: {
2583 if (InlineTypePassFieldsAsArgs) {
2584 SignatureStream ss(method->signature());
2585 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2586 if (vk != nullptr) {
2587 return nullptr;
2588 }
2589 }
2590 return _obj_arg_handler;
2591 }
2592 case JVM_SIGNATURE_ARRAY:
2593 return _obj_arg_handler;
2594 case JVM_SIGNATURE_INT:
2595 case JVM_SIGNATURE_BOOLEAN:
2596 case JVM_SIGNATURE_CHAR:
2597 case JVM_SIGNATURE_BYTE:
2598 case JVM_SIGNATURE_SHORT:
2599 return _int_arg_handler;
2600 }
2601 } else if (total_args_passed == 2 &&
2602 !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2603 switch (method->signature()->char_at(1)) {
2604 case JVM_SIGNATURE_CLASS: {
2605 if (InlineTypePassFieldsAsArgs) {
2606 SignatureStream ss(method->signature());
2607 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2608 if (vk != nullptr) {
2609 return nullptr;
2610 }
2611 }
2612 return _obj_obj_arg_handler;
2613 }
2614 case JVM_SIGNATURE_ARRAY:
2615 return _obj_obj_arg_handler;
2616 case JVM_SIGNATURE_INT:
2617 case JVM_SIGNATURE_BOOLEAN:
2618 case JVM_SIGNATURE_CHAR:
2619 case JVM_SIGNATURE_BYTE:
2620 case JVM_SIGNATURE_SHORT:
2621 return _obj_int_arg_handler;
2622 }
2623 }
2624 return nullptr;
2625 }
2626
2627 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2628 _method(method), _num_inline_args(0), _has_inline_recv(false),
2629 _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2630 _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2631 _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2632 _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2633 _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2634 _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2635 }
2636
2637 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2638 // or the same entry for VEP and VIEP(RO).
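// Roughly: VIEP(RO) can share the VEP when there is nothing to scalarize, the
// method is static, or the scalarized stack layouts match anyway; it can share
// the VIEP when <this> is the only scalarized argument; otherwise it needs its
// own entry point.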
2639 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2640 if (!has_scalarized_args()) {
2641 // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2642 return CodeOffsets::Verified_Entry;
2643 }
2644 if (_method->is_static()) {
2645 // Static methods don't need VIEP(RO)
2646 return CodeOffsets::Verified_Entry;
2647 }
2648
2649 if (has_inline_recv()) {
2650 if (num_inline_args() == 1) {
2651 // Share same entry for VIEP and VIEP(RO).
2652 // This is quite common: we have an instance method in an InlineKlass that has
2653 // no inline type args other than <this>.
2654 return CodeOffsets::Verified_Inline_Entry;
2655 } else {
2656 assert(num_inline_args() > 1, "must be");
2657 // No sharing:
2658 // VIEP(RO) -- <this> is passed as object
2659 // VEP -- <this> is passed as fields
2660 return CodeOffsets::Verified_Inline_Entry_RO;
2661 }
2662 }
2663
2664 // Either a static method, or <this> is not an inline type
2665 if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2666 // No sharing:
2667 // Some arguments are passed on the stack, and we have inserted reserved entries
2668 // into the VEP, but we never insert reserved entries into the VIEP(RO).
2669 return CodeOffsets::Verified_Inline_Entry_RO;
2670 } else {
2671 // Share same entry for VEP and VIEP(RO).
2672 return CodeOffsets::Verified_Entry;
2673 }
2674 }
2675
2676 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
2677 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2678 if (_supers != nullptr) {
2679 return _supers;
2680 }
2681 _supers = new GrowableArray<Method*>();
2682 // Skip private, static, and <init> methods
2683 if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2684 return _supers;
2685 }
2686 Symbol* name = _method->name();
2687 Symbol* signature = _method->signature();
2688 const Klass* holder = _method->method_holder()->super();
2689 Symbol* holder_name = holder->name();
2690 ThreadInVMfromUnknown tiv;
2691 JavaThread* current = JavaThread::current();
2692 HandleMark hm(current);
2693 Handle loader(current, _method->method_holder()->class_loader());
2694
2695 // Walk up the class hierarchy and search for super methods
2696 while (holder != nullptr) {
2697 Method* super_method = holder->lookup_method(name, signature);
2698 if (super_method == nullptr) {
2699 break;
2700 }
2701 if (!super_method->is_static() && !super_method->is_private() &&
2702 (!super_method->is_package_private() ||
2703 super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
2704 _supers->push(super_method);
2705 }
2706 holder = super_method->method_holder()->super();
2707 }
2708 // Search interfaces for super methods
2709 Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
2710 for (int i = 0; i < interfaces->length(); ++i) {
2711 Method* m = interfaces->at(i)->lookup_method(name, signature);
2712 if (m != nullptr && !m->is_static() && m->is_public()) {
2713 _supers->push(m);
2714 }
2715 }
2716 return _supers;
2717 }
2718
2719 // Iterate over arguments and compute scalarized and non-scalarized signatures
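// _sig holds the unscalarized signature, _sig_cc the scalarized calling
// convention (inline type arguments expanded into their fields), and
// _sig_cc_ro the same except that the receiver is always left as an oop.
// When no argument ends up scalarized, all three alias the same signature.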
2720 void CompiledEntrySignature::compute_calling_conventions(bool init) {
2721 bool has_scalarized = false;
2722 if (_method != nullptr) {
2723 InstanceKlass* holder = _method->method_holder();
2724 int arg_num = 0;
2725 if (!_method->is_static()) {
2726 // We shouldn't scalarize 'this' in a value class constructor
2727 if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() && !_method->is_object_constructor() &&
2728 (init || _method->is_scalarized_arg(arg_num))) {
2729 _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
2730 has_scalarized = true;
2731 _has_inline_recv = true;
2732 _num_inline_args++;
2733 } else {
2734 SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
2735 }
2736 SigEntry::add_entry(_sig, T_OBJECT, holder->name());
2737 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
2738 arg_num++;
2739 }
2740 for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
2741 BasicType bt = ss.type();
2742 if (bt == T_OBJECT) {
2743 InlineKlass* vk = ss.as_inline_klass(holder);
2744 if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
2745 // Check for a calling convention mismatch with super method(s)
2746 bool scalar_super = false;
2747 bool non_scalar_super = false;
2748 GrowableArray<Method*>* supers = get_supers();
2749 for (int i = 0; i < supers->length(); ++i) {
2750 Method* super_method = supers->at(i);
2751 if (super_method->is_scalarized_arg(arg_num)) {
2752 scalar_super = true;
2753 } else {
2754 non_scalar_super = true;
2755 }
2756 }
2757 #ifdef ASSERT
2758 // Randomly enable below code paths for stress testing
2759 bool stress = init && StressCallingConvention;
2760 if (stress && (os::random() & 1) == 1) {
2761 non_scalar_super = true;
2762 if ((os::random() & 1) == 1) {
2763 scalar_super = true;
2764 }
2765 }
2766 #endif
2767 if (non_scalar_super) {
2768 // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
2769 if (scalar_super) {
2770 // Found non-scalar *and* scalar super methods. We can't handle both.
2771 // Mark the scalar method as mismatch and re-compile call sites to use non-scalarized calling convention.
2772 for (int i = 0; i < supers->length(); ++i) {
2773 Method* super_method = supers->at(i);
2774 if (super_method->is_scalarized_arg(arg_num) debug_only(|| (stress && (os::random() & 1) == 1))) {
2775 super_method->set_mismatch();
2776 MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
2777 JavaThread* thread = JavaThread::current();
2778 HandleMark hm(thread);
2779 methodHandle mh(thread, super_method);
2780 DeoptimizationScope deopt_scope;
2781 CodeCache::mark_for_deoptimization(&deopt_scope, mh());
2782 deopt_scope.deoptimize_marked();
2783 }
2784 }
2785 }
2786 // Fall back to non-scalarized calling convention
2787 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2788 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2789 } else {
2790 _num_inline_args++;
2791 has_scalarized = true;
2792 int last = _sig_cc->length();
2793 int last_ro = _sig_cc_ro->length();
2794 _sig_cc->appendAll(vk->extended_sig());
2795 _sig_cc_ro->appendAll(vk->extended_sig());
2796 if (bt == T_OBJECT) {
2797 // Nullable inline type argument, insert InlineTypeNode::IsInit field right after T_METADATA delimiter
2798 _sig_cc->insert_before(last+1, SigEntry(T_BOOLEAN, -1, nullptr));
2799 _sig_cc_ro->insert_before(last_ro+1, SigEntry(T_BOOLEAN, -1, nullptr));
2800 }
2801 }
2802 } else {
2803 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2804 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2805 }
2806 bt = T_OBJECT;
2807 } else {
2808 SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
2809 SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
2810 }
2811 SigEntry::add_entry(_sig, bt, ss.as_symbol());
2812 if (bt != T_VOID) {
2813 arg_num++;
2814 }
2815 }
2816 }
2817
2818 // Compute the non-scalarized calling convention
2819 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
2820 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
2821
2822 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
2823 if (has_scalarized && !_method->is_native()) {
2824 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
2825 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
2826
2827 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
2828 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
2829
2830 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
2831 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
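// As computed above, C1 needs stack repair when either scalarized convention uses fewer
// incoming stack slots than the non-scalarized one, and C2 when the fully scalarized
// convention uses more slots than the non-scalarized or the receiver-only variant.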
2832
2833 // Upper bound on stack arguments to avoid hitting the argument limit and
2834 // bailing out of compilation ("unsupported incoming calling sequence").
2835 // TODO we need a reasonable limit (flag?) here
2836 if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 60) {
2837 return; // Success
2838 }
2839 }
2840
2841 // No scalarized args
2842 _sig_cc = _sig;
2843 _regs_cc = _regs;
2844 _args_on_stack_cc = _args_on_stack;
2845
2846 _sig_cc_ro = _sig;
2847 _regs_cc_ro = _regs;
2848 _args_on_stack_cc_ro = _args_on_stack;
2849 }
2850
2851 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2852 // Use customized signature handler. Need to lock around updates to
2853 // the _adapter_handler_table (it is not safe for concurrent readers
2854 // and a single writer: this could be fixed if it becomes a
2855 // problem).
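// An AdapterHandlerEntry bundles the i2c (interpreter-to-compiled) and c2i
// (compiled-to-interpreter) adapters for one signature shape, so methods whose
// signatures reduce to the same fingerprint can share a single set of adapters.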
2856
2857 // Fast-path for trivial adapters
2858 AdapterHandlerEntry* entry = get_simple_adapter(method);
2859 if (entry != nullptr) {
2860 return entry;
2861 }
2862
2863 ResourceMark rm;
2864 AdapterBlob* new_adapter = nullptr;
2865
2866 CompiledEntrySignature ces(method());
2867 ces.compute_calling_conventions();
2868 if (ces.has_scalarized_args()) {
2869 if (!method->has_scalarized_args()) {
2870 method->set_has_scalarized_args();
2871 }
2872 if (ces.c1_needs_stack_repair()) {
2873 method->set_c1_needs_stack_repair();
2874 }
2875 if (ces.c2_needs_stack_repair() && !method->c2_needs_stack_repair()) {
2876 method->set_c2_needs_stack_repair();
2877 }
2878 } else if (method->is_abstract()) {
2879 return _abstract_method_handler;
2880 }
2881
2882 {
2883 MutexLocker mu(AdapterHandlerLibrary_lock);
2884
2885 if (ces.has_scalarized_args() && method->is_abstract()) {
2886 // Save a C heap allocated version of the signature for abstract methods with scalarized inline type arguments
2887 address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2888 entry = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(nullptr),
2889 StubRoutines::throw_AbstractMethodError_entry(),
2890 wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
2891 wrong_method_abstract, wrong_method_abstract);
2892 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro()->length(), mtInternal);
2893 heap_sig->appendAll(ces.sig_cc_ro());
2894 entry->set_sig_cc(heap_sig);
2895 return entry;
2896 }
2897
2898 // Lookup method signature's fingerprint
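// The lookup key is built from the scalarized signature plus the has_inline_recv() flag,
// matching the AdapterFingerPrint constructed in create_adapter() below.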
2899 entry = lookup(ces.sig_cc(), ces.has_inline_recv());
2900
2901 if (entry != nullptr) {
2902 #ifdef ASSERT
2903 if (VerifyAdapterSharing) {
2904 AdapterBlob* comparison_blob = nullptr;
2905 AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, ces, false);
2906 assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison");
2907 assert(comparison_entry->compare_code(entry), "code must match");
2908 // Release the one just created and return the original
2909 delete comparison_entry;
2910 }
2911 #endif
2912 return entry;
2913 }
2914
2915 entry = create_adapter(new_adapter, ces, /* allocate_code_blob */ true);
2916 }
2917
2918 // Outside of the lock
2919 if (new_adapter != nullptr) {
2920 post_adapter_creation(new_adapter, entry);
2921 }
2922 return entry;
2923 }
2924
2925 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
2926 CompiledEntrySignature& ces,
2927 bool allocate_code_blob) {
2928
2929 // StubRoutines::_final_stubs_code is initialized after this function can be called. As a result,
2930 // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated before
2931 // StubRoutines::_final_stubs_code was set. The checks refer to runtime range checks generated
2932 // in an I2C stub that ensure that the I2C stub is called from an interpreter frame or from other stubs.
2933 bool contains_all_checks = StubRoutines::final_stubs_code() != nullptr;
2934
2935 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2936 CodeBuffer buffer(buf);
2937 short buffer_locs[20];
2938 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2939 sizeof(buffer_locs)/sizeof(relocInfo));
2940
2941 // Make a C heap allocated version of the fingerprint to store in the adapter
2942 AdapterFingerPrint* fingerprint = new AdapterFingerPrint(ces.sig_cc(), ces.has_inline_recv());
2943 MacroAssembler _masm(&buffer);
2944 AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
2945 ces.args_on_stack(),
2946 ces.sig(),
2947 ces.regs(),
2948 ces.sig_cc(),
2949 ces.regs_cc(),
2950 ces.sig_cc_ro(),
2951 ces.regs_cc_ro(),
2952 fingerprint,
2953 new_adapter,
2954 allocate_code_blob);
2955
2956 if (ces.has_scalarized_args()) {
2957 // Save a C heap allocated version of the scalarized signature and store it in the adapter
2958 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
2959 heap_sig->appendAll(ces.sig_cc());
2960 entry->set_sig_cc(heap_sig);
2961 }
2962
2963 #ifdef ASSERT
2964 if (VerifyAdapterSharing) {
2965 entry->save_code(buf->code_begin(), buffer.insts_size());
2966 if (!allocate_code_blob) {
2967 return entry;
2968 }
2969 }
2970 #endif
2971
2972 NOT_PRODUCT(int insts_size = buffer.insts_size());
2973 if (new_adapter == nullptr) {
2974 // CodeCache is full, disable compilation.
2975 // We ought to log this, but the compile log is only per compile thread
2976 // and we're some nondescript Java thread.
2977 return nullptr;
2978 }
2979 entry->relocate(new_adapter->content_begin());
2980 #ifndef PRODUCT
2981 // debugging support
2982 if (PrintAdapterHandlers || PrintStubCode) {
2983 ttyLocker ttyl;
2984 entry->print_adapter_on(tty);
2985 tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
2986 _adapter_handler_table->number_of_entries(), fingerprint->as_basic_args_string(),
2987 fingerprint->as_string(), insts_size);
2988 tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
2989 if (Verbose || PrintStubCode) {
2990 address first_pc = entry->base_address();
2991 if (first_pc != nullptr) {
2992 Disassembler::decode(first_pc, first_pc + insts_size, tty
2993 NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
2994 tty->cr();
2995 }
2996 }
2997 }
2998 #endif
2999
3000 // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
3001 // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
3002 if (contains_all_checks || !VerifyAdapterCalls) {
3003 assert_lock_strong(AdapterHandlerLibrary_lock);
3004 _adapter_handler_table->put(fingerprint, entry);
3005 }
3006 return entry;
3007 }
3008
3009 address AdapterHandlerEntry::base_address() {
3010 address base = _i2c_entry;
3011 if (base == nullptr) base = _c2i_entry;
3012 assert(base <= _c2i_entry || _c2i_entry == nullptr, "");
3013 assert(base <= _c2i_inline_entry || _c2i_inline_entry == nullptr, "");
3014 assert(base <= _c2i_inline_ro_entry || _c2i_inline_ro_entry == nullptr, "");
3015 assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, "");
3016 assert(base <= _c2i_unverified_inline_entry || _c2i_unverified_inline_entry == nullptr, "");
3017 assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, "");
3018 return base;
3019 }
3020
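// The adapter code is generated into a temporary buffer and then copied into its final
// AdapterBlob, so every recorded entry point must be rebased by the distance between the
// old and the new code start.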
3021 void AdapterHandlerEntry::relocate(address new_base) {
3022 address old_base = base_address();
3023 assert(old_base != nullptr, "");
3024 ptrdiff_t delta = new_base - old_base;
3025 if (_i2c_entry != nullptr)
3026 _i2c_entry += delta;
3027 if (_c2i_entry != nullptr)
3028 _c2i_entry += delta;
3029 if (_c2i_inline_entry != nullptr)
3030 _c2i_inline_entry += delta;
3031 if (_c2i_inline_ro_entry != nullptr)
3032 _c2i_inline_ro_entry += delta;
3033 if (_c2i_unverified_entry != nullptr)
3034 _c2i_unverified_entry += delta;
3035 if (_c2i_unverified_inline_entry != nullptr)
3036 _c2i_unverified_inline_entry += delta;
3037 if (_c2i_no_clinit_check_entry != nullptr)
3038 _c2i_no_clinit_check_entry += delta;
3039 assert(base_address() == new_base, "");
3040 }
3041
3042
3043 AdapterHandlerEntry::~AdapterHandlerEntry() {
3044 delete _fingerprint;
3045 if (_sig_cc != nullptr) {
3046 delete _sig_cc;
3047 }
3048 #ifdef ASSERT
3049 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3050 #endif
3051 }
3052
3053
3054 #ifdef ASSERT
3055 // Capture the code before relocation so that it can be compared
3056 // against other versions. If the code is captured after relocation
3057 // then relative instructions won't be equivalent.
3058 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3059 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3060 _saved_code_length = length;
3061 memcpy(_saved_code, buffer, length);
3062 }
3063
3064
3065 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3066 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3067
3114
3115 struct { double data[20]; } locs_buf;
3116 struct { double data[20]; } stubs_locs_buf;
3117 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3118 #if defined(AARCH64) || defined(PPC64)
3119 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3120 // in the constant pool to ensure ordering between the barrier and oops
3121 // accesses. For native_wrappers we need a constant.
3122 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3123 // static java call that is resolved in the runtime.
3124 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3125 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3126 }
3127 #endif
3128 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3129 MacroAssembler _masm(&buffer);
3130
3131 // Fill in the signature array, for the calling-convention call.
3132 const int total_args_passed = method->size_of_parameters();
3133
3134 BasicType stack_sig_bt[16];
3135 VMRegPair stack_regs[16];
3136 BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3137 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3138
3139 int i = 0;
3140 if (!method->is_static()) { // Pass in receiver first
3141 sig_bt[i++] = T_OBJECT;
3142 }
3143 SignatureStream ss(method->signature());
3144 for (; !ss.at_return_type(); ss.next()) {
3145 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
3146 if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3147 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
3148 }
3149 }
3150 assert(i == total_args_passed, "");
3151 BasicType ret_type = ss.type();
3152
3153 // Now get the compiled-Java arguments layout.
3154 SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3155
3156 // Generate the compiled-to-native wrapper code
3157 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3158
3159 if (nm != nullptr) {
3160 {
3161 MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
3162 if (nm->make_in_use()) {
3163 method->set_code(method, nm);
3164 }
3165 }
3166
3167 DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
3168 if (directive->PrintAssemblyOption) {
3169 nm->print_code();
3170 }
3171 DirectivesStack::release(directive);
3376 st->print("Adapter for signature: ");
3377 a->print_adapter_on(st);
3378 return true;
3379 } else {
3380 return false; // keep looking
3381 }
3382 };
3383 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3384 _adapter_handler_table->iterate(findblob);
3385 assert(found, "Should have found handler");
3386 }
3387
3388 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3389 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3390 if (get_i2c_entry() != nullptr) {
3391 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3392 }
3393 if (get_c2i_entry() != nullptr) {
3394 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3395 }
3396 if (get_c2i_inline_entry() != nullptr) {
3397 st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3398 }
3399 if (get_c2i_inline_ro_entry() != nullptr) {
3400 st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3401 }
3402 if (get_c2i_unverified_entry() != nullptr) {
3403 st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3404 }
3405 if (get_c2i_unverified_inline_entry() != nullptr) {
3406 st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3407 }
3408 if (get_c2i_no_clinit_check_entry() != nullptr) {
3409 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3410 }
3411 st->cr();
3412 }
3413
3414 #ifndef PRODUCT
3415
3416 void AdapterHandlerLibrary::print_statistics() {
3417 print_table_statistics();
3418 }
3419
3420 #endif /* PRODUCT */
3421
3422 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3423 assert(current == JavaThread::current(), "pre-condition");
3424 StackOverflow* overflow_state = current->stack_overflow_state();
3425 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3426 overflow_state->set_reserved_stack_activation(current->stack_base());
3475 event.set_method(method);
3476 event.commit();
3477 }
3478 }
3479 }
3480 return activation;
3481 }
3482
3483 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3484 // After any safepoint, just before going back to compiled code,
3485 // we inform the GC that we will be doing initializing writes to
3486 // this object in the future without emitting card-marks, so
3487 // GC may take any compensating steps.
3488
3489 oop new_obj = current->vm_result();
3490 if (new_obj == nullptr) return;
3491
3492 BarrierSet *bs = BarrierSet::barrier_set();
3493 bs->on_slowpath_allocation_exit(current, new_obj);
3494 }
3495
3496 // We are at a call from compiled code into the interpreter. We need backing
3497 // buffers for all inline type arguments. Allocate an object array to
3498 // hold them (convenient because once we're done with it we don't have
3499 // to worry about freeing it).
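// The array is filled in argument order below: one freshly allocated buffer for the
// receiver (when it is scalarized) followed by one buffer per scalarized object argument.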
3500 oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS) {
3501 assert(InlineTypePassFieldsAsArgs, "no reason to call this");
3502 ResourceMark rm;
3503
3504 int nb_slots = 0;
3505 InstanceKlass* holder = callee->method_holder();
3506 allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
3507 if (allocate_receiver) {
3508 nb_slots++;
3509 }
3510 int arg_num = callee->is_static() ? 0 : 1;
3511 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3512 BasicType bt = ss.type();
3513 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
3514 nb_slots++;
3515 }
3516 if (bt != T_VOID) {
3517 arg_num++;
3518 }
3519 }
3520 objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
3521 objArrayHandle array(THREAD, array_oop);
3522 arg_num = callee->is_static() ? 0 : 1;
3523 int i = 0;
3524 if (allocate_receiver) {
3525 InlineKlass* vk = InlineKlass::cast(holder);
3526 oop res = vk->allocate_instance(CHECK_NULL);
3527 array->obj_at_put(i++, res);
3528 }
3529 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3530 BasicType bt = ss.type();
3531 if (bt == T_OBJECT && callee->is_scalarized_arg(arg_num)) {
3532 InlineKlass* vk = ss.as_inline_klass(holder);
3533 assert(vk != nullptr, "Unexpected klass");
3534 oop res = vk->allocate_instance(CHECK_NULL);
3535 array->obj_at_put(i++, res);
3536 }
3537 if (bt != T_VOID) {
3538 arg_num++;
3539 }
3540 }
3541 return array();
3542 }
3543
3544 JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
3545 methodHandle callee(current, callee_method);
3546 oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, CHECK);
3547 current->set_vm_result(array);
3548 current->set_vm_result_2(callee()); // TODO: required to keep callee live?
3549 JRT_END
3550
3551 // We're returning from an interpreted method: load each field into a
3552 // register following the calling convention
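// InlineKlass::return_regs() describes the expected location of each field; pair 0 holds
// the oop of the inline type itself (checked by the assert at the end) and the remaining
// pairs receive the field values copied below. If the klass has no register return
// convention there is nothing to spread into registers and we return immediately.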
3553 JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
3554 {
3555 assert(res->klass()->is_inline_klass(), "only inline types here");
3556 ResourceMark rm;
3557 RegisterMap reg_map(current,
3558 RegisterMap::UpdateMap::include,
3559 RegisterMap::ProcessFrames::include,
3560 RegisterMap::WalkContinuation::skip);
3561 frame stubFrame = current->last_frame();
3562 frame callerFrame = stubFrame.sender(&reg_map);
3563 assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
3564
3565 InlineKlass* vk = InlineKlass::cast(res->klass());
3566
3567 const Array<SigEntry>* sig_vk = vk->extended_sig();
3568 const Array<VMRegPair>* regs = vk->return_regs();
3569
3570 if (regs == nullptr) {
3571 // The fields of the inline klass don't fit in registers, bail out
3572 return;
3573 }
3574
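// j walks the return register pairs: pair 0 is reserved for the oop itself, T_METADATA
// delimiters are skipped, and the T_VOID entry following a T_LONG or T_DOUBLE accounts
// for the second half of that value's register pair.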
3575 int j = 1;
3576 for (int i = 0; i < sig_vk->length(); i++) {
3577 BasicType bt = sig_vk->at(i)._bt;
3578 if (bt == T_METADATA) {
3579 continue;
3580 }
3581 if (bt == T_VOID) {
3582 if (sig_vk->at(i-1)._bt == T_LONG ||
3583 sig_vk->at(i-1)._bt == T_DOUBLE) {
3584 j++;
3585 }
3586 continue;
3587 }
3588 int off = sig_vk->at(i)._offset;
3589 assert(off > 0, "offset in object should be positive");
3590 VMRegPair pair = regs->at(j);
3591 address loc = reg_map.location(pair.first(), nullptr);
3592 switch(bt) {
3593 case T_BOOLEAN:
3594 *(jboolean*)loc = res->bool_field(off);
3595 break;
3596 case T_CHAR:
3597 *(jchar*)loc = res->char_field(off);
3598 break;
3599 case T_BYTE:
3600 *(jbyte*)loc = res->byte_field(off);
3601 break;
3602 case T_SHORT:
3603 *(jshort*)loc = res->short_field(off);
3604 break;
3605 case T_INT: {
3606 *(jint*)loc = res->int_field(off);
3607 break;
3608 }
3609 case T_LONG:
3610 #ifdef _LP64
3611 *(intptr_t*)loc = res->long_field(off);
3612 #else
3613 Unimplemented();
3614 #endif
3615 break;
3616 case T_OBJECT:
3617 case T_ARRAY: {
3618 *(oop*)loc = res->obj_field(off);
3619 break;
3620 }
3621 case T_FLOAT:
3622 *(jfloat*)loc = res->float_field(off);
3623 break;
3624 case T_DOUBLE:
3625 *(jdouble*)loc = res->double_field(off);
3626 break;
3627 default:
3628 ShouldNotReachHere();
3629 }
3630 j++;
3631 }
3632 assert(j == regs->length(), "missed a field?");
3633
3634 #ifdef ASSERT
3635 VMRegPair pair = regs->at(0);
3636 address loc = reg_map.location(pair.first(), nullptr);
3637 assert(*(oopDesc**)loc == res, "overwritten object");
3638 #endif
3639
3640 current->set_vm_result(res);
3641 }
3642 JRT_END
3643
3644 // We've returned to an interpreted method, the interpreter needs a
3645 // reference to an inline type instance. Allocate it and initialize it
3646 // from the field values in registers
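// The incoming 'res' is tagged: if bit 0 is clear it is already an oop (or a pointer into
// the buffer area) and is simply installed as the result; if bit 0 is set the remaining
// bits are the InlineKlass pointer and the field values are still in registers, so a heap
// instance is allocated and filled from them.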
3647 JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
3648 {
3649 ResourceMark rm;
3650 RegisterMap reg_map(current,
3651 RegisterMap::UpdateMap::include,
3652 RegisterMap::ProcessFrames::include,
3653 RegisterMap::WalkContinuation::skip);
3654 frame stubFrame = current->last_frame();
3655 frame callerFrame = stubFrame.sender(&reg_map);
3656
3657 #ifdef ASSERT
3658 InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
3659 #endif
3660
3661 if (!is_set_nth_bit(res, 0)) {
3662 // We're not returning with inline type fields in registers (the
3663 // calling convention didn't allow it for this inline klass)
3664 assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
3665 current->set_vm_result((oopDesc*)res);
3666 assert(verif_vk == nullptr, "broken calling convention");
3667 return;
3668 }
3669
3670 clear_nth_bit(res, 0);
3671 InlineKlass* vk = (InlineKlass*)res;
3672 assert(verif_vk == vk, "broken calling convention");
3673 assert(Metaspace::contains((void*)res), "should be klass");
3674
3675 // Allocate handles for every oop field so they are safe in case of
3676 // a safepoint when allocating
3677 GrowableArray<Handle> handles;
3678 vk->save_oop_fields(reg_map, handles);
3679
3680 // It's unsafe to safepoint until we are here
3681 JRT_BLOCK;
3682 {
3683 JavaThread* THREAD = current;
3684 oop vt = vk->realloc_result(reg_map, handles, CHECK);
3685 current->set_vm_result(vt);
3686 }
3687 JRT_BLOCK_END;
3688 }
3689 JRT_END