27 #include "classfile/stringTable.hpp"
28 #include "classfile/vmClasses.hpp"
29 #include "classfile/vmSymbols.hpp"
30 #include "code/codeCache.hpp"
31 #include "code/compiledIC.hpp"
32 #include "code/icBuffer.hpp"
33 #include "code/compiledMethod.inline.hpp"
34 #include "code/scopeDesc.hpp"
35 #include "code/vtableStubs.hpp"
36 #include "compiler/abstractCompiler.hpp"
37 #include "compiler/compileBroker.hpp"
38 #include "compiler/disassembler.hpp"
39 #include "gc/shared/barrierSet.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "gc/shared/gcLocker.inline.hpp"
42 #include "interpreter/interpreter.hpp"
43 #include "interpreter/interpreterRuntime.hpp"
44 #include "jvm.h"
45 #include "jfr/jfrEvents.hpp"
46 #include "logging/log.hpp"
47 #include "memory/resourceArea.hpp"
48 #include "memory/universe.hpp"
49 #include "metaprogramming/primitiveConversions.hpp"
50 #include "oops/compiledICHolder.inline.hpp"
51 #include "oops/klass.hpp"
52 #include "oops/method.inline.hpp"
53 #include "oops/objArrayKlass.hpp"
54 #include "oops/oop.inline.hpp"
55 #include "prims/forte.hpp"
56 #include "prims/jvmtiExport.hpp"
57 #include "prims/jvmtiThreadState.hpp"
58 #include "prims/methodHandles.hpp"
59 #include "prims/nativeLookup.hpp"
60 #include "runtime/atomic.hpp"
61 #include "runtime/frame.inline.hpp"
62 #include "runtime/handles.inline.hpp"
63 #include "runtime/init.hpp"
64 #include "runtime/interfaceSupport.inline.hpp"
65 #include "runtime/java.hpp"
66 #include "runtime/javaCalls.hpp"
67 #include "runtime/jniHandles.inline.hpp"
68 #include "runtime/sharedRuntime.hpp"
69 #include "runtime/stackWatermarkSet.hpp"
70 #include "runtime/stubRoutines.hpp"
71 #include "runtime/synchronizer.hpp"
72 #include "runtime/vframe.inline.hpp"
73 #include "runtime/vframeArray.hpp"
74 #include "runtime/vm_version.hpp"
75 #include "utilities/copy.hpp"
76 #include "utilities/dtrace.hpp"
77 #include "utilities/events.hpp"
78 #include "utilities/resourceHash.hpp"
79 #include "utilities/macros.hpp"
80 #include "utilities/xmlstream.hpp"
81 #ifdef COMPILER1
82 #include "c1/c1_Runtime1.hpp"
83 #endif
84
85 // Shared stub locations
86 RuntimeStub* SharedRuntime::_wrong_method_blob;
87 RuntimeStub* SharedRuntime::_wrong_method_abstract_blob;
88 RuntimeStub* SharedRuntime::_ic_miss_blob;
89 RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
90 RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
91 RuntimeStub* SharedRuntime::_resolve_static_call_blob;
92 address SharedRuntime::_resolve_static_call_entry;
93
94 DeoptimizationBlob* SharedRuntime::_deopt_blob;
95 SafepointBlob* SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
96 SafepointBlob* SharedRuntime::_polling_page_safepoint_handler_blob;
97 SafepointBlob* SharedRuntime::_polling_page_return_handler_blob;
98
99 #ifdef COMPILER2
100 UncommonTrapBlob* SharedRuntime::_uncommon_trap_blob;
101 #endif // COMPILER2
102
103 nmethod* SharedRuntime::_cont_doYield_stub;
104
105 //----------------------------generate_stubs-----------------------------------
106 void SharedRuntime::generate_stubs() {
107 _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method), "wrong_method_stub");
108 _wrong_method_abstract_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
109 _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss), "ic_miss_stub");
110 _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call");
111 _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call");
112 _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call");
113 _resolve_static_call_entry = _resolve_static_call_blob->entry_point();
114
115 AdapterHandlerLibrary::initialize();
116
117 #if COMPILER2_OR_JVMCI
118 // Vectors are generated only by C2 and JVMCI.
119 bool support_wide = is_wide_vector(MaxVectorSize);
120 if (support_wide) {
121 _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
122 }
123 #endif // COMPILER2_OR_JVMCI
124 _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
125 _polling_page_return_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);
126
127 generate_deopt_blob();
128
129 #ifdef COMPILER2
130 generate_uncommon_trap_blob();
131 #endif // COMPILER2
132 }
133
1111 // for a call currently in progress, i.e., arguments have been pushed on the stack
1112 // but the callee has not been invoked yet. The caller frame must be compiled.
1113 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1114 CallInfo& callinfo, TRAPS) {
1115 Handle receiver;
1116 Handle nullHandle; // create a handy null handle for exception returns
1117 JavaThread* current = THREAD;
1118
1119 assert(!vfst.at_end(), "Java frame must exist");
1120
1121 // Find caller and bci from vframe
1122 methodHandle caller(current, vfst.method());
1123 int bci = vfst.bci();
1124
1125 if (caller->is_continuation_enter_intrinsic()) {
1126 bc = Bytecodes::_invokestatic;
1127 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1128 return receiver;
1129 }
1130
1131 Bytecode_invoke bytecode(caller, bci);
1132 int bytecode_index = bytecode.index();
1133 bc = bytecode.invoke_code();
1134
1135 methodHandle attached_method(current, extract_attached_method(vfst));
1136 if (attached_method.not_null()) {
1137 Method* callee = bytecode.static_target(CHECK_NH);
1138 vmIntrinsics::ID id = callee->intrinsic_id();
1139 // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual call,
1140 // it attaches the statically resolved method to the call site.
1141 if (MethodHandles::is_signature_polymorphic(id) &&
1142 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1143 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1144
1145 // Adjust invocation mode according to the attached method.
1146 switch (bc) {
1147 case Bytecodes::_invokevirtual:
1148 if (attached_method->method_holder()->is_interface()) {
1149 bc = Bytecodes::_invokeinterface;
1150 }
1151 break;
1152 case Bytecodes::_invokeinterface:
1153 if (!attached_method->method_holder()->is_interface()) {
1154 bc = Bytecodes::_invokevirtual;
1155 }
1156 break;
1157 case Bytecodes::_invokehandle:
1158 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1159 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1160 : Bytecodes::_invokevirtual;
1161 }
1162 break;
1163 default:
1164 break;
1165 }
1166 }
1167 }
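// Illustration (hypothetical case, for clarity only): if the JIT attached the resolved
// target of a MethodHandle.linkToVirtual intrinsic and that target's holder turns out
// to be an interface, bc is adjusted above from _invokevirtual to _invokeinterface so
// that receiver handling and resolution below match the actual dispatch kind.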
1168
1169 assert(bc != Bytecodes::_illegal, "not initialized");
1170
1171 bool has_receiver = bc != Bytecodes::_invokestatic &&
1172 bc != Bytecodes::_invokedynamic &&
1173 bc != Bytecodes::_invokehandle;
1174
1175 // Find receiver for non-static call
1176 if (has_receiver) {
1177 // This register map must be updated since we need to find the receiver for
1178 // compiled frames. The receiver might be in a register.
1179 RegisterMap reg_map2(current,
1180 RegisterMap::UpdateMap::include,
1181 RegisterMap::ProcessFrames::include,
1182 RegisterMap::WalkContinuation::skip);
1183 frame stubFrame = current->last_frame();
1184 // Caller-frame is a compiled frame
1185 frame callerFrame = stubFrame.sender(&reg_map2);
1186
1187 if (attached_method.is_null()) {
1188 Method* callee = bytecode.static_target(CHECK_NH);
1189 if (callee == nullptr) {
1190 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1191 }
1192 }
1193
1194 // Retrieve from a compiled argument list
1195 receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1196 assert(oopDesc::is_oop_or_null(receiver()), "");
1197
1198 if (receiver.is_null()) {
1199 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1200 }
1201 }
1202
1203 // Resolve method
1204 if (attached_method.not_null()) {
1205 // Parameterized by attached method.
1206 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1207 } else {
1208 // Parameterized by bytecode.
1209 constantPoolHandle constants(current, caller->constants());
1210 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1211 }
1212
1213 #ifdef ASSERT
1214 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1215 if (has_receiver) {
1216 assert(receiver.not_null(), "should have thrown exception");
1217 Klass* receiver_klass = receiver->klass();
1218 Klass* rk = nullptr;
1219 if (attached_method.not_null()) {
1220 // In case there's resolved method attached, use its holder during the check.
1221 rk = attached_method->method_holder();
1222 } else {
1223 // Klass is already loaded.
1224 constantPoolHandle constants(current, caller->constants());
1225 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1226 }
1227 Klass* static_receiver_klass = rk;
1228 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1229 "actual receiver must be subclass of static receiver klass");
1230 if (receiver_klass->is_instance_klass()) {
1231 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1232 tty->print_cr("ERROR: Klass not yet initialized!!");
1233 receiver_klass->print();
1234 }
1235 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1236 }
1237 }
1238 #endif
1239
1240 return receiver;
1241 }
1242
1243 methodHandle SharedRuntime::find_callee_method(TRAPS) {
1244 JavaThread* current = THREAD;
1245 ResourceMark rm(current);
1246 // We first need to check whether any Java activations (compiled or interpreted)
1247 // exist on the stack since the last JavaCall. If not, we need
1248 // to get the target method from the JavaCall wrapper.
1249 vframeStream vfst(current, true); // Do not skip any javaCalls
1250 methodHandle callee_method;
1251 if (vfst.at_end()) {
1252 // No Java frames were found on the stack since we did the JavaCall.
1253 // Hence the stack can only contain an entry_frame. We need to
1254 // find the target method from the stub frame.
1255 RegisterMap reg_map(current,
1256 RegisterMap::UpdateMap::skip,
1257 RegisterMap::ProcessFrames::include,
1258 RegisterMap::WalkContinuation::skip);
1259 frame fr = current->last_frame();
1260 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1261 fr = fr.sender(&reg_map);
1262 assert(fr.is_entry_frame(), "must be");
1263 // fr is now pointing to the entry frame.
1264 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1265 } else {
1266 Bytecodes::Code bc;
1267 CallInfo callinfo;
1268 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1269 callee_method = methodHandle(current, callinfo.selected_method());
1270 }
1271 assert(callee_method()->is_method(), "must be");
1272 return callee_method;
1273 }
1274
1275 // Resolves a call.
1276 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
1277 methodHandle callee_method;
1278 callee_method = resolve_sub_helper(is_virtual, is_optimized, THREAD);
1279 if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
1280 int retry_count = 0;
1281 while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
1282 callee_method->method_holder() != vmClasses::Object_klass()) {
1283 // If there is a pending exception then there is no need to re-try to
1284 // resolve this method.
1285 // If the method has been redefined, we need to try again.
1286 // Hack: we have no way to update the vtables of arrays, so don't
1287 // require that java.lang.Object has been updated.
1288
1289 // It is very unlikely that the method is redefined more than 100 times
1290 // in the middle of resolving. If we loop here more than 100 times,
1291 // there could be a bug.
1292 guarantee((retry_count++ < 100),
1293 "Could not resolve to latest version of redefined method");
1294 // method is redefined in the middle of resolve so re-try.
1295 callee_method = resolve_sub_helper(is_virtual, is_optimized, THREAD);
1296 }
1297 }
1298 return callee_method;
1299 }
1300
1301 // This fails if resolution required refilling of IC stubs
1302 bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
1303 CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
1304 Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
1305 StaticCallInfo static_call_info;
1306 CompiledICInfo virtual_call_info;
1307
1308 // Make sure the callee nmethod does not get deoptimized and removed before
1309 // we are done patching the code.
1310 CompiledMethod* callee = callee_method->code();
1311
1312 if (callee != nullptr) {
1313 assert(callee->is_compiled(), "must be nmethod for patching");
1314 }
1315
1316 if (callee != nullptr && !callee->is_in_use()) {
1317 // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1318 callee = nullptr;
1319 }
1320 #ifdef ASSERT
1321 address dest_entry_point = callee == nullptr ? 0 : callee->entry_point(); // used below
1322 #endif
1323
1324 bool is_nmethod = caller_nm->is_nmethod();
1325
1326 if (is_virtual) {
1327 assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1328 bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1329 Klass* klass = invoke_code == Bytecodes::_invokehandle ? nullptr : receiver->klass();
1330 CompiledIC::compute_monomorphic_entry(callee_method, klass,
1331 is_optimized, static_bound, is_nmethod, virtual_call_info,
1332 CHECK_false);
1333 } else {
1334 // static call
1335 CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
1336 }
1337
1338 // grab lock, check for deoptimization and potentially patch caller
1339 {
1340 CompiledICLocker ml(caller_nm);
1341
1342 // Lock blocks for safepoint during which both nmethods can change state.
1343
1344 // Now that we are ready to patch: if the Method* was redefined then
1345 // don't update the call site and let the caller retry.
1346 // Don't update call site if callee nmethod was unloaded or deoptimized.
1347 // Don't update call site if callee nmethod was replaced by another nmethod,
1348 // which may happen when multiple alive nmethods (tiered compilation)
1349 // are supported.
1350 if (!callee_method->is_old() &&
1351 (callee == nullptr || (callee->is_in_use() && callee_method->code() == callee))) {
1352 NoSafepointVerifier nsv;
1353 #ifdef ASSERT
1354 // We must not try to patch to jump to an already unloaded method.
1355 if (dest_entry_point != 0) {
1368 } else {
1369 if (VM_Version::supports_fast_class_init_checks() &&
1370 invoke_code == Bytecodes::_invokestatic &&
1371 callee_method->needs_clinit_barrier() &&
1372 callee != nullptr && callee->is_compiled_by_jvmci()) {
1373 return true; // skip patching for JVMCI
1374 }
1375 CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
1376 if (is_nmethod && caller_nm->method()->is_continuation_enter_intrinsic()) {
1377 ssc->compute_entry_for_continuation_entry(callee_method, static_call_info);
1378 }
1379 if (ssc->is_clean()) ssc->set(static_call_info);
1380 }
1381 }
1382 } // unlock CompiledICLocker
1383 return true;
1384 }
1385
1386 // Resolves a call. The compilers generate code for calls that go here
1387 // and are patched with the real destination of the call.
1388 methodHandle SharedRuntime::resolve_sub_helper(bool is_virtual, bool is_optimized, TRAPS) {
1389 JavaThread* current = THREAD;
1390 ResourceMark rm(current);
1391 RegisterMap cbl_map(current,
1392 RegisterMap::UpdateMap::skip,
1393 RegisterMap::ProcessFrames::include,
1394 RegisterMap::WalkContinuation::skip);
1395 frame caller_frame = current->last_frame().sender(&cbl_map);
1396
1397 CodeBlob* caller_cb = caller_frame.cb();
1398 guarantee(caller_cb != nullptr && caller_cb->is_compiled(), "must be called from compiled method");
1399 CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
1400
1401 // determine call info & receiver
1402 // note: a) receiver is null for static calls
1403 // b) an exception is thrown if receiver is null for non-static calls
1404 CallInfo call_info;
1405 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1406 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1407 methodHandle callee_method(current, call_info.selected_method());
1408
1409 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1410 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1411 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1412 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1413 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1414
1415 assert(!caller_nm->is_unloading(), "It should not be unloading");
1416
1417 #ifndef PRODUCT
1418 // tracing/debugging/statistics
1419 int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1420 (is_virtual) ? (&_resolve_virtual_ctr) :
1421 (&_resolve_static_ctr);
1422 Atomic::inc(addr);
1423
1424 if (TraceCallFixup) {
1425 ResourceMark rm(current);
1426 tty->print("resolving %s%s (%s) call to",
1427 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1452 // If the resolved method is a MethodHandle invoke target, the call
1453 // site must be a MethodHandle call site, because the lambda form might tail-call
1454 // leaving the stack in a state unknown to either caller or callee
1455 // TODO detune for now but we might need it again
1456 // assert(!callee_method->is_compiled_lambda_form() ||
1457 // caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
1458
1459 // Compute entry points. This might require generation of C2I converter
1460 // frames, so we cannot be holding any locks here. Furthermore, the
1461 // computation of the entry points is independent of patching the call. We
1462 // always return the entry-point, but we only patch the stub if the call has
1463 // not been deoptimized. Return values: For a virtual call this is a
1464 // (cached_oop, destination address) pair. For a static call/optimized
1465 // virtual this is just a destination address.
1466
1467 // Patching IC caches may fail if we run out of transition stubs.
1468 // We then refill the IC stubs and try again.
1469 for (;;) {
1470 ICRefillVerifier ic_refill_verifier;
1471 bool successful = resolve_sub_helper_internal(callee_method, caller_frame, caller_nm,
1472 is_virtual, is_optimized, receiver,
1473 call_info, invoke_code, CHECK_(methodHandle()));
1474 if (successful) {
1475 return callee_method;
1476 } else {
1477 InlineCacheBuffer::refill_ic_stubs();
1478 }
1479 }
1480
1481 }
1482
1483
1484 // Inline caches exist only in compiled code
1485 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1486 #ifdef ASSERT
1487 RegisterMap reg_map(current,
1488 RegisterMap::UpdateMap::skip,
1489 RegisterMap::ProcessFrames::include,
1490 RegisterMap::WalkContinuation::skip);
1491 frame stub_frame = current->last_frame();
1492 assert(stub_frame.is_runtime_frame(), "sanity check");
1493 frame caller_frame = stub_frame.sender(&reg_map);
1494 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1495 #endif /* ASSERT */
1496
1497 methodHandle callee_method;
1498 JRT_BLOCK
1499 callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1500 // Return Method* through TLS
1501 current->set_vm_result_2(callee_method());
1502 JRT_BLOCK_END
1503 // return compiled code entry point after potential safepoints
1504 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1505 return callee_method->verified_code_entry();
1506 JRT_END
1507
1508
1509 // Handle call site that has been made non-entrant
1510 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1511 // 6243940 We might end up in here if the callee is deoptimized
1512 // as we race to call it. We don't want to take a safepoint if
1513 // the caller was interpreted because the caller frame will look
1514 // interpreted to the stack walkers and arguments are now
1515 // "compiled" so it is much better to make this transition
1516 // invisible to the stack walking code. The i2c path will
1517 // place the callee method in the callee_target. It is stashed
1518 // there because if we try to find the callee by normal means a
1519 // safepoint is possible and we would have trouble GC'ing the compiled args.
1520 RegisterMap reg_map(current,
1521 RegisterMap::UpdateMap::skip,
1522 RegisterMap::ProcessFrames::include,
1523 RegisterMap::WalkContinuation::skip);
1524 frame stub_frame = current->last_frame();
1525 assert(stub_frame.is_runtime_frame(), "sanity check");
1526 frame caller_frame = stub_frame.sender(&reg_map);
1527
1528 if (caller_frame.is_interpreted_frame() ||
1529 caller_frame.is_entry_frame() ||
1530 caller_frame.is_upcall_stub_frame()) {
1531 Method* callee = current->callee_target();
1532 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1533 current->set_vm_result_2(callee);
1534 current->set_callee_target(nullptr);
1535 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1536 // Bypass class initialization checks in c2i when caller is in native.
1537 // JNI calls to static methods don't have class initialization checks.
1538 // Fast class initialization checks are present in c2i adapters and call into
1539 // SharedRuntime::handle_wrong_method() on the slow path.
1540 //
1541 // JVM upcalls may land here as well, but there's a proper check present in
1542 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1543 // so bypassing it in c2i adapter is benign.
1544 return callee->get_c2i_no_clinit_check_entry();
1545 } else {
1546 return callee->get_c2i_entry();
1547 }
1548 }
1549
1550 // Must be compiled to compiled path which is safe to stackwalk
1551 methodHandle callee_method;
1552 JRT_BLOCK
1553 // Force resolving of caller (if we called from compiled frame)
1554 callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1555 current->set_vm_result_2(callee_method());
1556 JRT_BLOCK_END
1557 // return compiled code entry point after potential safepoints
1558 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1559 return callee_method->verified_code_entry();
1560 JRT_END
1561
1562 // Handle abstract method call
1563 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1564 // Verbose error message for AbstractMethodError.
1565 // Get the called method from the invoke bytecode.
1566 vframeStream vfst(current, true);
1567 assert(!vfst.at_end(), "Java frame must exist");
1568 methodHandle caller(current, vfst.method());
1569 Bytecode_invoke invoke(caller, vfst.bci());
1570 DEBUG_ONLY( invoke.verify(); )
1571
1572 // Find the compiled caller frame.
1573 RegisterMap reg_map(current,
1574 RegisterMap::UpdateMap::include,
1575 RegisterMap::ProcessFrames::include,
1576 RegisterMap::WalkContinuation::skip);
1577 frame stubFrame = current->last_frame();
1578 assert(stubFrame.is_runtime_frame(), "must be");
1579 frame callerFrame = stubFrame.sender(&reg_map);
1580 assert(callerFrame.is_compiled_frame(), "must be");
1581
1582 // Install exception and return forward entry.
1583 address res = StubRoutines::throw_AbstractMethodError_entry();
1584 JRT_BLOCK
1585 methodHandle callee(current, invoke.static_target(current));
1586 if (!callee.is_null()) {
1587 oop recv = callerFrame.retrieve_receiver(&reg_map);
1588 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1589 res = StubRoutines::forward_exception_entry();
1590 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1591 }
1592 JRT_BLOCK_END
1593 return res;
1594 JRT_END
1595
1596
1597 // resolve a static call and patch code
1598 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1599 methodHandle callee_method;
1600 bool enter_special = false;
1601 JRT_BLOCK
1602 callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1603 current->set_vm_result_2(callee_method());
1604
1605 if (current->is_interp_only_mode()) {
1606 RegisterMap reg_map(current,
1607 RegisterMap::UpdateMap::skip,
1608 RegisterMap::ProcessFrames::include,
1609 RegisterMap::WalkContinuation::skip);
1610 frame stub_frame = current->last_frame();
1611 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1612 frame caller = stub_frame.sender(&reg_map);
1613 enter_special = caller.cb() != nullptr && caller.cb()->is_compiled()
1614 && caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic();
1615 }
1616 JRT_BLOCK_END
1617
1618 if (current->is_interp_only_mode() && enter_special) {
1619 // enterSpecial is compiled and calls this method to resolve the call to Continuation::enter
1620 // but in interp_only_mode we need to go to the interpreted entry
1621 // The c2i won't patch in this mode -- see fixup_callers_callsite
1622 //
1623 // This should probably be done in all cases, not just enterSpecial (see JDK-8218403),
1624 // but that's part of a larger fix, and the situation is worse for enterSpecial, as it has no
1625 // interpreted version.
1626 return callee_method->get_c2i_entry();
1627 }
1628
1629 // return compiled code entry point after potential safepoints
1630 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1631 return callee_method->verified_code_entry();
1632 JRT_END
1633
1634
1635 // resolve virtual call and update inline cache to monomorphic
1636 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1637 methodHandle callee_method;
1638 JRT_BLOCK
1639 callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
1640 current->set_vm_result_2(callee_method());
1641 JRT_BLOCK_END
1642 // return compiled code entry point after potential safepoints
1643 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1644 return callee_method->verified_code_entry();
1645 JRT_END
1646
1647
1648 // Resolve a virtual call that can be statically bound (e.g., always
1649 // monomorphic, so it has no inline cache). Patch code to resolved target.
1650 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1651 methodHandle callee_method;
1652 JRT_BLOCK
1653 callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
1654 current->set_vm_result_2(callee_method());
1655 JRT_BLOCK_END
1656 // return compiled code entry point after potential safepoints
1657 assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1658 return callee_method->verified_code_entry();
1659 JRT_END
1660
1661 // handle_ic_miss_helper_internal returns false if it failed, either because it
1662 // ran out of vtable stubs or because it ran out of IC stubs while transitioning
1663 // the IC to a transitional state. needs_ic_stub_refill is set if the failure
1664 // was due to running out of IC stubs, in which case handle_ic_miss_helper
1665 // refills the IC stubs and tries again.
1666 bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
1667 const frame& caller_frame, methodHandle callee_method,
1668 Bytecodes::Code bc, CallInfo& call_info,
1669 bool& needs_ic_stub_refill, TRAPS) {
1670 CompiledICLocker ml(caller_nm);
1671 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1672 bool should_be_mono = false;
1673 if (inline_cache->is_optimized()) {
1674 if (TraceCallFixup) {
1675 ResourceMark rm(THREAD);
1676 tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1677 callee_method->print_short_name(tty);
1678 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1679 }
1680 should_be_mono = true;
1681 } else if (inline_cache->is_icholder_call()) {
1682 CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1683 if (ic_oop != nullptr) {
1684 if (!ic_oop->is_loader_alive()) {
1685 // Deferred IC cleaning due to concurrent class unloading
1686 if (!inline_cache->set_to_clean()) {
1687 needs_ic_stub_refill = true;
1688 return false;
1689 }
1690 } else if (receiver()->klass() == ic_oop->holder_klass()) {
1691 // This isn't a real miss. We must have seen that compiled code
1692 // is now available and we want the call site converted to a
1693 // monomorphic compiled call site.
1694 // We can't assert for callee_method->code() != nullptr because it
1695 // could have been deoptimized in the meantime
1696 if (TraceCallFixup) {
1697 ResourceMark rm(THREAD);
1698 tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1699 callee_method->print_short_name(tty);
1700 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1701 }
1702 should_be_mono = true;
1703 }
1704 }
1705 }
1706
1707 if (should_be_mono) {
1708 // We have a path that was monomorphic but was going interpreted
1709 // and now we have (or had) a compiled entry. We correct the IC
1710 // by using a new icBuffer.
1711 CompiledICInfo info;
1712 Klass* receiver_klass = receiver()->klass();
1713 inline_cache->compute_monomorphic_entry(callee_method,
1714 receiver_klass,
1715 inline_cache->is_optimized(),
1716 false, caller_nm->is_nmethod(),
1717 info, CHECK_false);
1718 if (!inline_cache->set_to_monomorphic(info)) {
1719 needs_ic_stub_refill = true;
1720 return false;
1721 }
1722 } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1723 // Potential change to megamorphic
1724
1725 bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false);
1726 if (needs_ic_stub_refill) {
1727 return false;
1728 }
1729 if (!successful) {
1730 if (!inline_cache->set_to_clean()) {
1731 needs_ic_stub_refill = true;
1732 return false;
1733 }
1734 }
1735 } else {
1736 // Either clean or megamorphic
1737 }
1738 return true;
1739 }
1740
1741 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1742 JavaThread* current = THREAD;
1743 ResourceMark rm(current);
1744 CallInfo call_info;
1745 Bytecodes::Code bc;
1746
1747 // receiver is null for static calls. An exception is thrown for null
1748 // receivers for non-static calls
1749 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1750 // Compiler1 can produce virtual call sites that can actually be statically bound.
1751 // If we fell through to below we would think that the site was going megamorphic
1752 // when in fact the site can never miss. Worse, because we'd think it was megamorphic
1753 // we'd try to do a vtable dispatch; however, methods that can be statically bound
1754 // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
1755 // reresolution of the call site (as if we did a handle_wrong_method and not a
1756 // plain ic_miss) and the site will be converted to an optimized virtual call site,
1757 // never to miss again. I don't believe C2 will produce code like this but if it
1758 // did this would still be the correct thing to do for it too, hence no ifdef.
1759 //
1760 if (call_info.resolved_method()->can_be_statically_bound()) {
1761 methodHandle callee_method = SharedRuntime::reresolve_call_site(CHECK_(methodHandle()));
1762 if (TraceCallFixup) {
1763 RegisterMap reg_map(current,
1764 RegisterMap::UpdateMap::skip,
1765 RegisterMap::ProcessFrames::include,
1766 RegisterMap::WalkContinuation::skip);
1767 frame caller_frame = current->last_frame().sender(&reg_map);
1768 ResourceMark rm(current);
1769 tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1770 callee_method->print_short_name(tty);
1771 tty->print_cr(" from pc: " INTPTR_FORMAT, p2i(caller_frame.pc()));
1772 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1773 }
1774 return callee_method;
1775 }
1776
1777 methodHandle callee_method(current, call_info.selected_method());
1778
1779 #ifndef PRODUCT
1780 Atomic::inc(&_ic_miss_ctr);
1781
1800 #endif
1801
1802 // install an event collector so that when a vtable stub is created the
1803 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1804 // event can't be posted when the stub is created as locks are held
1805 // - instead the event will be deferred until the event collector goes
1806 // out of scope.
1807 JvmtiDynamicCodeEventCollector event_collector;
1808
1809 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1810 // Transitioning IC caches may require transition stubs. If we run out
1811 // of transition stubs, we have to drop locks and perform a safepoint
1812 // that refills them.
1813 RegisterMap reg_map(current,
1814 RegisterMap::UpdateMap::skip,
1815 RegisterMap::ProcessFrames::include,
1816 RegisterMap::WalkContinuation::skip);
1817 frame caller_frame = current->last_frame().sender(&reg_map);
1818 CodeBlob* cb = caller_frame.cb();
1819 CompiledMethod* caller_nm = cb->as_compiled_method();
1820
1821 for (;;) {
1822 ICRefillVerifier ic_refill_verifier;
1823 bool needs_ic_stub_refill = false;
1824 bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1825 bc, call_info, needs_ic_stub_refill, CHECK_(methodHandle()));
1826 if (successful || !needs_ic_stub_refill) {
1827 return callee_method;
1828 } else {
1829 InlineCacheBuffer::refill_ic_stubs();
1830 }
1831 }
1832 }
1833
1834 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1835 CompiledICLocker ml(caller_nm);
1836 if (is_static_call) {
1837 CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1838 if (!ssc->is_clean()) {
1839 return ssc->set_to_clean();
1840 }
1841 } else {
1842 // compiled, dispatched call (which used to call an interpreted method)
1843 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1844 if (!inline_cache->is_clean()) {
1845 return inline_cache->set_to_clean();
1846 }
1847 }
1848 return true;
1849 }
1850
1851 //
1852 // Resets a call-site in compiled code so it will get resolved again.
1853 // This routine handles virtual call sites, optimized virtual call
1854 // sites, and static call sites. Typically used to change a call site's
1855 // destination from compiled to interpreted.
1856 //
1857 methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
1858 JavaThread* current = THREAD;
1859 ResourceMark rm(current);
1860 RegisterMap reg_map(current,
1861 RegisterMap::UpdateMap::skip,
1862 RegisterMap::ProcessFrames::include,
1863 RegisterMap::WalkContinuation::skip);
1864 frame stub_frame = current->last_frame();
1865 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1866 frame caller = stub_frame.sender(&reg_map);
1867
1868 // Do nothing if the frame isn't a live compiled frame.
1869 // nmethod could be deoptimized by the time we get here
1870 // so no update to the caller is needed.
1871
1872 if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
1873
1874 address pc = caller.pc();
1875
1876 // Check for static or virtual call
1877 bool is_static_call = false;
1878 CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
1879
1880 // Default call_addr is the location of the "basic" call.
1881 // Determine the address of the call we are reresolving. With
1882 // Inline Caches we will always find a recognizable call.
1883 // With Inline Caches disabled we may or may not find a
1884 // recognizable call. We will always find a call for static
1885 // calls and for optimized virtual calls. For vanilla virtual
1886 // calls it depends on the state of the UseInlineCaches switch.
1887 //
1888 // With Inline Caches disabled we can get here for a virtual call
1889 // for two reasons:
1890 // 1 - calling an abstract method. The vtable for abstract methods
1891 // will run us through handle_wrong_method and we will eventually
1892 // end up in the interpreter to throw the AbstractMethodError.
1893 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1894 // call and between the time we fetch the entry address and
1895 // we jump to it the target gets deoptimized. Similar to 1
1896 // we will wind up in the interpreter (through a c2i with c2).
1897 //
1898 address call_addr = nullptr;
1899 {
1900 // Get call instruction under lock because another thread may be
1901 // busy patching it.
1902 CompiledICLocker ml(caller_nm);
1903 // Location of call instruction
1904 call_addr = caller_nm->call_instruction_address(pc);
1905 }
1906
1907 // Check relocations for the matching call to 1) avoid false positives,
1908 // and 2) determine the type.
1909 if (call_addr != nullptr) {
1910 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1911 // bytes back in the instruction stream so we must also check for reloc info.
1912 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1913 bool ret = iter.next(); // Get item
1914 if (ret) {
1915 bool is_static_call = false;
1916 switch (iter.type()) {
1917 case relocInfo::static_call_type:
1918 is_static_call = true;
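// fall through: a static call is then cleaned the same way as the virtual cases below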
1919
1920 case relocInfo::virtual_call_type:
1921 case relocInfo::opt_virtual_call_type:
1922 // Cleaning the inline cache will force a new resolve. This is more robust
1923 // than directly setting it to the new destination, since resolving of calls
1924 // is always done through the same code path. (experience shows that it
1925 // leads to very hard to track down bugs, if an inline cache gets updated
1926 // to a wrong method). It should not be performance critical, since the
1927 // resolve is only done once.
1928 guarantee(iter.addr() == call_addr, "must find call");
1929 for (;;) {
1930 ICRefillVerifier ic_refill_verifier;
1931 if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
1932 InlineCacheBuffer::refill_ic_stubs();
1933 } else {
1934 break;
1935 }
1936 }
1937 break;
1938 default:
1939 break;
1940 }
1941 }
1942 }
1943 }
1944
1945 methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1946
1947
1948 #ifndef PRODUCT
1949 Atomic::inc(&_wrong_method_ctr);
1950
1951 if (TraceCallFixup) {
1952 ResourceMark rm(current);
1953 tty->print("handle_wrong_method reresolving call to");
1954 callee_method->print_short_name(tty);
1955 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1956 }
1957 #endif
1958
1959 return callee_method;
1960 }
1961
1962 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1963 // The faulting unsafe accesses should be changed to throw the error
1964 // synchronously instead. Meanwhile the faulting instruction will be
1965 // skipped over (effectively turning it into a no-op) and an
1966 // asynchronous exception will be raised which the thread will
2100 // for the rest of its life! Just another racing bug in the life of
2101 // fixup_callers_callsite ...
2102 //
2103 RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
2104 iter.next();
2105 assert(iter.has_current(), "must have a reloc at java call site");
2106 relocInfo::relocType typ = iter.reloc()->type();
2107 if (typ != relocInfo::static_call_type &&
2108 typ != relocInfo::opt_virtual_call_type &&
2109 typ != relocInfo::static_stub_type) {
2110 return;
2111 }
2112 if (nm->method()->is_continuation_enter_intrinsic()) {
2113 assert(ContinuationEntry::is_interpreted_call(call->instruction_address()) == JavaThread::current()->is_interp_only_mode(),
2114 "mode: %d", JavaThread::current()->is_interp_only_mode());
2115 if (ContinuationEntry::is_interpreted_call(call->instruction_address())) {
2116 return;
2117 }
2118 }
2119 address destination = call->destination();
2120 address entry_point = callee->verified_entry_point();
2121 if (should_fixup_call_destination(destination, entry_point, caller_pc, moop, cb)) {
2122 call->set_destination_mt_safe(entry_point);
2123 }
2124 }
2125 }
2126 JRT_END
2127
2128
2129 // same as JVM_Arraycopy, but called directly from compiled code
2130 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
2131 oopDesc* dest, jint dest_pos,
2132 jint length,
2133 JavaThread* current)) {
2134 #ifndef PRODUCT
2135 _slow_array_copy_ctr++;
2136 #endif
2137 // Check if we have null pointers
2138 if (src == nullptr || dest == nullptr) {
2139 THROW(vmSymbols::java_lang_NullPointerException());
2140 }
2430 private:
2431 enum {
2432 _basic_type_bits = 4,
2433 _basic_type_mask = right_n_bits(_basic_type_bits),
2434 _basic_types_per_int = BitsPerInt / _basic_type_bits,
2435 _compact_int_count = 3
2436 };
2437 // TO DO: Consider integrating this with a more global scheme for compressing signatures.
2438 // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2439
2440 union {
2441 int _compact[_compact_int_count];
2442 int* _fingerprint;
2443 } _value;
2444 int _length; // A negative length indicates the fingerprint is in the compact form;
2445 // otherwise _value._fingerprint is the array.
2446
2447 // Remap BasicTypes that are handled equivalently by the adapters.
2448 // These are correct for the current system but someday it might be
2449 // necessary to make this mapping platform dependent.
2450 static int adapter_encoding(BasicType in) {
2451 switch (in) {
2452 case T_BOOLEAN:
2453 case T_BYTE:
2454 case T_SHORT:
2455 case T_CHAR:
2456 // These are all promoted to T_INT in the calling convention
2457 return T_INT;
2458
2459 case T_OBJECT:
2460 case T_ARRAY:
2461 // In other words, we assume that any register good enough for
2462 // an int or long is good enough for a managed pointer.
2463 #ifdef _LP64
2464 return T_LONG;
2465 #else
2466 return T_INT;
2467 #endif
2468
2469 case T_INT:
2470 case T_LONG:
2471 case T_FLOAT:
2472 case T_DOUBLE:
2473 case T_VOID:
2474 return in;
2475
2476 default:
2477 ShouldNotReachHere();
2478 return T_CONFLICT;
2479 }
2480 }
2481
2482 public:
2483 AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
2484 // The fingerprint is based on the BasicType signature encoded
2485 // into an array of ints with eight entries per int.
2486 int* ptr;
2487 int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2488 if (len <= _compact_int_count) {
2489 assert(_compact_int_count == 3, "else change next line");
2490 _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2491 // Storing the signature encoded as signed chars hits about 98%
2492 // of the time.
2493 _length = -len;
2494 ptr = _value._compact;
2495 } else {
2496 _length = len;
2497 _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2498 ptr = _value._fingerprint;
2499 }
2500
2501 // Now pack the BasicTypes with 8 per int
2502 int sig_index = 0;
2503 for (int index = 0; index < len; index++) {
2504 int value = 0;
2505 for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
2506 int bt = adapter_encoding(sig_bt[sig_index++]);
2507 assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2508 value = (value << _basic_type_bits) | bt;
2509 }
2510 ptr[index] = value;
2511 }
2512 }
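// Worked example (illustrative only, assuming _LP64): for a virtual method taking
// (Object, int), total_args_passed is 3 (receiver + two args). adapter_encoding()
// maps both object slots to T_LONG (0xB) and the int slot to T_INT (0xA), so
// len == 1, the compact form is used, _length == -1 and _value._compact[0] == 0xBBA.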
2513
2514 ~AdapterFingerPrint() {
2515 if (_length > 0) {
2516 FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2517 }
2518 }
2519
2520 int value(int index) {
2521 if (_length < 0) {
2522 return _value._compact[index];
2523 }
2524 return _value._fingerprint[index];
2525 }
2526 int length() {
2527 if (_length < 0) return -_length;
2528 return _length;
2529 }
2530
2531 bool is_compact() {
2556 const char* as_basic_args_string() {
2557 stringStream st;
2558 bool long_prev = false;
2559 for (int i = 0; i < length(); i++) {
2560 unsigned val = (unsigned)value(i);
2561 // args are packed so that first/lower arguments are in the highest
2562 // bits of each int value, so iterate from highest to the lowest
2563 for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2564 unsigned v = (val >> j) & _basic_type_mask;
2565 if (v == 0) {
2566 assert(i == length() - 1, "Only expect zeroes in the last word");
2567 continue;
2568 }
2569 if (long_prev) {
2570 long_prev = false;
2571 if (v == T_VOID) {
2572 st.print("J");
2573 } else {
2574 st.print("L");
2575 }
2576 }
2577 switch (v) {
2578 case T_INT: st.print("I"); break;
2579 case T_LONG: long_prev = true; break;
2580 case T_FLOAT: st.print("F"); break;
2581 case T_DOUBLE: st.print("D"); break;
2582 case T_VOID: break;
2583 default: ShouldNotReachHere();
2584 }
2585 }
2586 }
2587 if (long_prev) {
2588 st.print("L");
2589 }
2590 return st.as_string();
2591 }
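// Decoding the example fingerprint above, 0xBBA prints as "LLI": each 0xB (T_LONG)
// is held back until the next nibble shows whether it was a real long (followed by
// T_VOID, printed "J") or a managed pointer (printed "L"); the trailing 0xA prints "I".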
2592 #endif // !PRODUCT
2593
2594 bool equals(AdapterFingerPrint* other) {
2595 if (other->_length != _length) {
2596 return false;
2597 }
2598 if (_length < 0) {
2599 assert(_compact_int_count == 3, "else change next line");
2600 return _value._compact[0] == other->_value._compact[0] &&
2601 _value._compact[1] == other->_value._compact[1] &&
2602 _value._compact[2] == other->_value._compact[2];
2603 } else {
2610 return true;
2611 }
2612
2613 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2614 NOT_PRODUCT(_equals++);
2615 return fp1->equals(fp2);
2616 }
2617
2618 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2619 return fp->compute_hash();
2620 }
2621 };
2622
2623 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2624 ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2625 AnyObj::C_HEAP, mtCode,
2626 AdapterFingerPrint::compute_hash,
2627 AdapterFingerPrint::equals> _adapter_handler_table;
2628
2629 // Find an entry with the same fingerprint if it exists
2630 static AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
2631 NOT_PRODUCT(_lookups++);
2632 assert_lock_strong(AdapterHandlerLibrary_lock);
2633 AdapterFingerPrint fp(total_args_passed, sig_bt);
2634 AdapterHandlerEntry** entry = _adapter_handler_table.get(&fp);
2635 if (entry != nullptr) {
2636 #ifndef PRODUCT
2637 if (fp.is_compact()) _compact++;
2638 _hits++;
2639 #endif
2640 return *entry;
2641 }
2642 return nullptr;
2643 }
2644
2645 #ifndef PRODUCT
2646 static void print_table_statistics() {
2647 auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2648 return sizeof(*key) + sizeof(*a);
2649 };
2650 TableStatistics ts = _adapter_handler_table.statistics_calculate(size);
2651 ts.print(tty, "AdapterHandlerTable");
2652 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2653 _adapter_handler_table.table_size(), _adapter_handler_table.number_of_entries());
2654 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2655 _lookups, _equals, _hits, _compact);
2656 }
2657 #endif
2658
2659 // ---------------------------------------------------------------------------
2660 // Implementation of AdapterHandlerLibrary
2661 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
2662 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2663 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2664 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2665 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2666 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2667 const int AdapterHandlerLibrary_size = 16*K;
2668 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2669
2670 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2671 return _buffer;
2672 }
2673
2674 static void post_adapter_creation(const AdapterBlob* new_adapter,
2675 const AdapterHandlerEntry* entry) {
2676 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2677 char blob_id[256];
2678 jio_snprintf(blob_id,
2679 sizeof(blob_id),
2680 "%s(%s)",
2681 new_adapter->name(),
2682 entry->fingerprint()->as_string());
2683 if (Forte::is_enabled()) {
2684 Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2685 }
2686
2687 if (JvmtiExport::should_post_dynamic_code_generated()) {
2689 }
2690 }
2691 }
2692
2693 void AdapterHandlerLibrary::initialize() {
2694 ResourceMark rm;
2695 AdapterBlob* no_arg_blob = nullptr;
2696 AdapterBlob* int_arg_blob = nullptr;
2697 AdapterBlob* obj_arg_blob = nullptr;
2698 AdapterBlob* obj_int_arg_blob = nullptr;
2699 AdapterBlob* obj_obj_arg_blob = nullptr;
2700 {
2701 MutexLocker mu(AdapterHandlerLibrary_lock);
2702
2703 // Create a special handler for abstract methods. Abstract methods
2704 // are never compiled so an i2c entry is somewhat meaningless, but
2705 // throw AbstractMethodError just in case.
2706 // Pass wrong_method_abstract for the c2i transitions to return
2707 // AbstractMethodError for invalid invocations.
2708 address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2709 _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, nullptr),
2710 StubRoutines::throw_AbstractMethodError_entry(),
2711 wrong_method_abstract, wrong_method_abstract);
2712
2713 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2714 _no_arg_handler = create_adapter(no_arg_blob, 0, nullptr, true);
2715
2716 BasicType obj_args[] = { T_OBJECT };
2717 _obj_arg_handler = create_adapter(obj_arg_blob, 1, obj_args, true);
2718
2719 BasicType int_args[] = { T_INT };
2720 _int_arg_handler = create_adapter(int_arg_blob, 1, int_args, true);
2721
2722 BasicType obj_int_args[] = { T_OBJECT, T_INT };
2723 _obj_int_arg_handler = create_adapter(obj_int_arg_blob, 2, obj_int_args, true);
2724
2725 BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
2726 _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, 2, obj_obj_args, true);
2727
2728 assert(no_arg_blob != nullptr &&
2729 obj_arg_blob != nullptr &&
2730 int_arg_blob != nullptr &&
2731 obj_int_arg_blob != nullptr &&
2732 obj_obj_arg_blob != nullptr, "Initial adapters must be properly created");
2733 }
2734
2735 // Outside of the lock
2736 post_adapter_creation(no_arg_blob, _no_arg_handler);
2737 post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2738 post_adapter_creation(int_arg_blob, _int_arg_handler);
2739 post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2740 post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2741 }
2742
2743 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2744 address i2c_entry,
2745 address c2i_entry,
2746 address c2i_unverified_entry,
2747 address c2i_no_clinit_check_entry) {
2748 // Insert an entry into the table
2749 return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry,
2750 c2i_no_clinit_check_entry);
2751 }
2752
2753 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2754 if (method->is_abstract()) {
2755 return _abstract_method_handler;
2756 }
2757 int total_args_passed = method->size_of_parameters(); // All args on stack
2758 if (total_args_passed == 0) {
2759 return _no_arg_handler;
2760 } else if (total_args_passed == 1) {
2761 if (!method->is_static()) {
2762 return _obj_arg_handler;
2763 }
2764 switch (method->signature()->char_at(1)) {
2765 case JVM_SIGNATURE_CLASS:
2766 case JVM_SIGNATURE_ARRAY:
2767 return _obj_arg_handler;
2768 case JVM_SIGNATURE_INT:
2769 case JVM_SIGNATURE_BOOLEAN:
2770 case JVM_SIGNATURE_CHAR:
2771 case JVM_SIGNATURE_BYTE:
2772 case JVM_SIGNATURE_SHORT:
2773 return _int_arg_handler;
2774 }
2775 } else if (total_args_passed == 2 &&
2776 !method->is_static()) {
2777 switch (method->signature()->char_at(1)) {
2778 case JVM_SIGNATURE_CLASS:
2779 case JVM_SIGNATURE_ARRAY:
2780 return _obj_obj_arg_handler;
2781 case JVM_SIGNATURE_INT:
2782 case JVM_SIGNATURE_BOOLEAN:
2783 case JVM_SIGNATURE_CHAR:
2784 case JVM_SIGNATURE_BYTE:
2785 case JVM_SIGNATURE_SHORT:
2786 return _obj_int_arg_handler;
2787 }
2788 }
2789 return nullptr;
2790 }
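// Examples of this fast path (illustrative): a non-static no-arg method such as
// Object.hashCode() uses _obj_arg_handler (receiver only); a static method taking a
// single int, boolean, char, byte or short uses _int_arg_handler; a non-static method
// taking one Object, such as Object.equals(Object), uses _obj_obj_arg_handler.
// Anything else returns nullptr and falls back to the general lookup in get_adapter().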
2791
2792 class AdapterSignatureIterator : public SignatureIterator {
2793 private:
2794 BasicType stack_sig_bt[16];
2795 BasicType* sig_bt;
2796 int index;
2797
2798 public:
2799 AdapterSignatureIterator(Symbol* signature,
2800 fingerprint_t fingerprint,
2801 bool is_static,
2802 int total_args_passed) :
2803 SignatureIterator(signature, fingerprint),
2804 index(0)
2805 {
2806 sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2807 if (!is_static) { // Pass in receiver first
2808 sig_bt[index++] = T_OBJECT;
2809 }
2810 do_parameters_on(this);
2811 }
2812
2813 BasicType* basic_types() {
2814 return sig_bt;
2815 }
2816
2817 #ifdef ASSERT
2818 int slots() {
2819 return index;
2820 }
2821 #endif
2822
2823 private:
2824
2825 friend class SignatureIterator; // so do_parameters_on can call do_type
2826 void do_type(BasicType type) {
2827 sig_bt[index++] = type;
2828 if (type == T_LONG || type == T_DOUBLE) {
2829 sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots
2830 }
2831 }
2832 };
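// Sketch of the iterator's output (illustrative): for a static method with signature
// (JJ)J, e.g. Math.max(long, long), do_type() is called twice with T_LONG and each call
// also appends a T_VOID slot, so basic_types() yields { T_LONG, T_VOID, T_LONG, T_VOID }
// and slots() == 4, matching Method::size_of_parameters() as asserted in get_adapter().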
2833
2834 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2835 // Use customized signature handler. Need to lock around updates to
2836 // the _adapter_handler_table (it is not safe for concurrent readers
2837 // and a single writer: this could be fixed if it becomes a
2838 // problem).
2839
2840 // Fast-path for trivial adapters
2841 AdapterHandlerEntry* entry = get_simple_adapter(method);
2842 if (entry != nullptr) {
2843 return entry;
2844 }
2845
2846 ResourceMark rm;
2847 AdapterBlob* new_adapter = nullptr;
2848
2849 // Fill in the signature array, for the calling-convention call.
2850 int total_args_passed = method->size_of_parameters(); // All args on stack
2851
2852 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2853 method->is_static(), total_args_passed);
2854 assert(si.slots() == total_args_passed, "");
2855 BasicType* sig_bt = si.basic_types();
2856 {
2857 MutexLocker mu(AdapterHandlerLibrary_lock);
2858
2859 // Lookup method signature's fingerprint
2860 entry = lookup(total_args_passed, sig_bt);
2861
2862 if (entry != nullptr) {
2863 #ifdef ASSERT
2864 if (VerifyAdapterSharing) {
2865 AdapterBlob* comparison_blob = nullptr;
2866 AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, total_args_passed, sig_bt, false);
2867 assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison");
2868 assert(comparison_entry->compare_code(entry), "code must match");
2869 // Release the one just created and return the original
2870 delete comparison_entry;
2871 }
2872 #endif
2873 return entry;
2874 }
2875
2876 entry = create_adapter(new_adapter, total_args_passed, sig_bt, /* allocate_code_blob */ true);
2877 }
2878
2879 // Outside of the lock
2880 if (new_adapter != nullptr) {
2881 post_adapter_creation(new_adapter, entry);
2882 }
2883 return entry;
2884 }
2885
2886 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
2887 int total_args_passed,
2888 BasicType* sig_bt,
2889 bool allocate_code_blob) {
2890
2891 // StubRoutines::_final_stubs_code is initialized after this function can be called. As a result,
2892 // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated prior
2893 // to all StubRoutines::_final_stubs_code being set. Checks refer to runtime range checks generated
2894 // in an I2C stub that ensure that an I2C stub is called from an interpreter frame or stubs.
2895 bool contains_all_checks = StubRoutines::final_stubs_code() != nullptr;
2896
2897 VMRegPair stack_regs[16];
2898 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2899
2900 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2901 int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2902 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2903 CodeBuffer buffer(buf);
2904 short buffer_locs[20];
2905 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2906 sizeof(buffer_locs)/sizeof(relocInfo));
2907
2908 // Make a C heap allocated version of the fingerprint to store in the adapter
2909 AdapterFingerPrint* fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
2910 MacroAssembler _masm(&buffer);
2911 AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
2912 total_args_passed,
2913 comp_args_on_stack,
2914 sig_bt,
2915 regs,
2916 fingerprint);
2917
2918 #ifdef ASSERT
2919 if (VerifyAdapterSharing) {
2920 entry->save_code(buf->code_begin(), buffer.insts_size());
2921 if (!allocate_code_blob) {
2922 return entry;
2923 }
2924 }
2925 #endif
2926
2927 new_adapter = AdapterBlob::create(&buffer);
2928 NOT_PRODUCT(int insts_size = buffer.insts_size());
2929 if (new_adapter == nullptr) {
2930 // CodeCache is full, disable compilation
2931 // Ought to log this, but the compile log is only per compile thread
2932 // and we're some nondescript Java thread.
2933 return nullptr;
2934 }
2935 entry->relocate(new_adapter->content_begin());
2936 #ifndef PRODUCT
2937 // debugging support
2938 if (PrintAdapterHandlers || PrintStubCode) {
2939 ttyLocker ttyl;
2940 entry->print_adapter_on(tty);
2941 tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
2942 _adapter_handler_table.number_of_entries(), fingerprint->as_basic_args_string(),
2943 fingerprint->as_string(), insts_size);
2944 tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
2945 if (Verbose || PrintStubCode) {
2946 address first_pc = entry->base_address();
2947 if (first_pc != nullptr) {
2948 Disassembler::decode(first_pc, first_pc + insts_size, tty
2949 NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
2950 tty->cr();
2951 }
2952 }
2953 }
2954 #endif
2955
2956 // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
2957 // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
2958 if (contains_all_checks || !VerifyAdapterCalls) {
2959 assert_lock_strong(AdapterHandlerLibrary_lock);
2960 _adapter_handler_table.put(fingerprint, entry);
2961 }
2962 return entry;
2963 }
2964
2965 address AdapterHandlerEntry::base_address() {
2966 address base = _i2c_entry;
2967 if (base == nullptr) base = _c2i_entry;
2968 assert(base <= _c2i_entry || _c2i_entry == nullptr, "");
2969 assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, "");
2970 assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, "");
2971 return base;
2972 }
2973
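// Adapter code is generated into a temporary buffer and then copied into its
// AdapterBlob (see create_adapter above), so every recorded entry point must be
// shifted by the same delta. For example (illustrative addresses only): if
// _i2c_entry was 0x1000 and _c2i_entry 0x1040 in the buffer, relocating to a blob
// at 0x5000 preserves the 0x40 offset between them.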
2974 void AdapterHandlerEntry::relocate(address new_base) {
2975 address old_base = base_address();
2976 assert(old_base != nullptr, "");
2977 ptrdiff_t delta = new_base - old_base;
2978 if (_i2c_entry != nullptr)
2979 _i2c_entry += delta;
2980 if (_c2i_entry != nullptr)
2981 _c2i_entry += delta;
2982 if (_c2i_unverified_entry != nullptr)
2983 _c2i_unverified_entry += delta;
2984 if (_c2i_no_clinit_check_entry != nullptr)
2985 _c2i_no_clinit_check_entry += delta;
2986 assert(base_address() == new_base, "");
2987 }
2988
2989
2990 AdapterHandlerEntry::~AdapterHandlerEntry() {
2991 delete _fingerprint;
2992 #ifdef ASSERT
2993 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
2994 #endif
2995 }
2996
2997
2998 #ifdef ASSERT
2999 // Capture the code before relocation so that it can be compared
3000 // against other versions. If the code is captured after relocation
3001 // then relative instructions won't be equivalent.
3002 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3003 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3004 _saved_code_length = length;
3005 memcpy(_saved_code, buffer, length);
3006 }
3007
3008
3009 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3010 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3011
3058
3059 struct { double data[20]; } locs_buf;
3060 struct { double data[20]; } stubs_locs_buf;
3061 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3062 #if defined(AARCH64) || defined(PPC64)
3063 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3064 // in the constant pool to ensure ordering between the barrier and oops
3065 // accesses. For native_wrappers we need a constant.
3066 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3067 // static java call that is resolved in the runtime.
3068 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3069 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3070 }
3071 #endif
3072 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3073 MacroAssembler _masm(&buffer);
3074
3075 // Fill in the signature array, for the calling-convention call.
3076 const int total_args_passed = method->size_of_parameters();
3077
3078 VMRegPair stack_regs[16];
3079 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3080
3081 AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
3082 method->is_static(), total_args_passed);
3083 BasicType* sig_bt = si.basic_types();
3084 assert(si.slots() == total_args_passed, "");
3085 BasicType ret_type = si.return_type();
3086
3087 // Now get the compiled-Java arguments layout.
3088 int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3089
3090 // Generate the compiled-to-native wrapper code
3091 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3092
3093 if (nm != nullptr) {
3094 {
3095 MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
3096 if (nm->make_in_use()) {
3097 method->set_code(method, nm);
3098 }
3099 }
3100
3101 DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
3102 if (directive->PrintAssemblyOption) {
3103 nm->print_code();
3104 }
3105 DirectivesStack::release(directive);
3302 st->print("Adapter for signature: ");
3303 a->print_adapter_on(st);
3304 return true;
3305 } else {
3306 return false; // keep looking
3307 }
3308 };
3309 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3310 _adapter_handler_table.iterate(findblob);
3311 assert(found, "Should have found handler");
3312 }
3313
3314 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3315 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3316 if (get_i2c_entry() != nullptr) {
3317 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3318 }
3319 if (get_c2i_entry() != nullptr) {
3320 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3321 }
3322 if (get_c2i_unverified_entry() != nullptr) {
3323 st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3324 }
3325 if (get_c2i_no_clinit_check_entry() != nullptr) {
3326 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3327 }
3328 st->cr();
3329 }
3330
3331 #ifndef PRODUCT
3332
3333 void AdapterHandlerLibrary::print_statistics() {
3334 print_table_statistics();
3335 }
3336
3337 #endif /* PRODUCT */
3338
3339 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3340 assert(current == JavaThread::current(), "pre-condition");
3341 StackOverflow* overflow_state = current->stack_overflow_state();
3342 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3343 overflow_state->set_reserved_stack_activation(current->stack_base());
3392 event.set_method(method);
3393 event.commit();
3394 }
3395 }
3396 }
3397 return activation;
3398 }
3399
3400 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3401 // After any safepoint, just before going back to compiled code,
3402 // we inform the GC that we will be doing initializing writes to
3403 // this object in the future without emitting card-marks, so
3404 // GC may take any compensating steps.
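// Typically the BarrierSet hook is a no-op; card-table based collectors can use it
// (e.g. under -XX:+ReduceInitialCardMarks) to flush a deferred card mark for the
// newly allocated object.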
3405
3406 oop new_obj = current->vm_result();
3407 if (new_obj == nullptr) return;
3408
3409 BarrierSet *bs = BarrierSet::barrier_set();
3410 bs->on_slowpath_allocation_exit(current, new_obj);
3411 }
|
27 #include "classfile/stringTable.hpp"
28 #include "classfile/vmClasses.hpp"
29 #include "classfile/vmSymbols.hpp"
30 #include "code/codeCache.hpp"
31 #include "code/compiledIC.hpp"
32 #include "code/icBuffer.hpp"
33 #include "code/compiledMethod.inline.hpp"
34 #include "code/scopeDesc.hpp"
35 #include "code/vtableStubs.hpp"
36 #include "compiler/abstractCompiler.hpp"
37 #include "compiler/compileBroker.hpp"
38 #include "compiler/disassembler.hpp"
39 #include "gc/shared/barrierSet.hpp"
40 #include "gc/shared/collectedHeap.hpp"
41 #include "gc/shared/gcLocker.inline.hpp"
42 #include "interpreter/interpreter.hpp"
43 #include "interpreter/interpreterRuntime.hpp"
44 #include "jvm.h"
45 #include "jfr/jfrEvents.hpp"
46 #include "logging/log.hpp"
47 #include "memory/oopFactory.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "memory/universe.hpp"
50 #include "oops/access.hpp"
51 #include "oops/fieldStreams.inline.hpp"
52 #include "metaprogramming/primitiveConversions.hpp"
53 #include "oops/compiledICHolder.inline.hpp"
54 #include "oops/klass.hpp"
55 #include "oops/method.inline.hpp"
56 #include "oops/objArrayKlass.hpp"
57 #include "oops/objArrayOop.inline.hpp"
58 #include "oops/oop.inline.hpp"
59 #include "oops/inlineKlass.inline.hpp"
60 #include "prims/forte.hpp"
61 #include "prims/jvmtiExport.hpp"
62 #include "prims/jvmtiThreadState.hpp"
63 #include "prims/methodHandles.hpp"
64 #include "prims/nativeLookup.hpp"
65 #include "runtime/atomic.hpp"
66 #include "runtime/frame.inline.hpp"
67 #include "runtime/handles.inline.hpp"
68 #include "runtime/init.hpp"
69 #include "runtime/interfaceSupport.inline.hpp"
70 #include "runtime/java.hpp"
71 #include "runtime/javaCalls.hpp"
72 #include "runtime/jniHandles.inline.hpp"
73 #include "runtime/sharedRuntime.hpp"
74 #include "runtime/stackWatermarkSet.hpp"
75 #include "runtime/stubRoutines.hpp"
76 #include "runtime/synchronizer.hpp"
77 #include "runtime/vframe.inline.hpp"
78 #include "runtime/vframeArray.hpp"
79 #include "runtime/vm_version.hpp"
80 #include "utilities/copy.hpp"
81 #include "utilities/dtrace.hpp"
82 #include "utilities/events.hpp"
83 #include "utilities/resourceHash.hpp"
84 #include "utilities/macros.hpp"
85 #include "utilities/xmlstream.hpp"
86 #ifdef COMPILER1
87 #include "c1/c1_Runtime1.hpp"
88 #endif
89
90 // Shared stub locations
91 RuntimeStub* SharedRuntime::_wrong_method_blob;
92 RuntimeStub* SharedRuntime::_wrong_method_abstract_blob;
93 RuntimeStub* SharedRuntime::_ic_miss_blob;
94 RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
95 RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
96 RuntimeStub* SharedRuntime::_resolve_static_call_blob;
97
98 DeoptimizationBlob* SharedRuntime::_deopt_blob;
99 SafepointBlob* SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
100 SafepointBlob* SharedRuntime::_polling_page_safepoint_handler_blob;
101 SafepointBlob* SharedRuntime::_polling_page_return_handler_blob;
102
103 #ifdef COMPILER2
104 UncommonTrapBlob* SharedRuntime::_uncommon_trap_blob;
105 #endif // COMPILER2
106
107 nmethod* SharedRuntime::_cont_doYield_stub;
108
109 //----------------------------generate_stubs-----------------------------------
110 void SharedRuntime::generate_stubs() {
111 _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method), "wrong_method_stub");
112 _wrong_method_abstract_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
113 _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss), "ic_miss_stub");
114 _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call");
115 _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call");
116 _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call");
117
118 AdapterHandlerLibrary::initialize();
119
120 #if COMPILER2_OR_JVMCI
121 // Vectors are generated only by C2 and JVMCI.
122 bool support_wide = is_wide_vector(MaxVectorSize);
123 if (support_wide) {
124 _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
125 }
126 #endif // COMPILER2_OR_JVMCI
127 _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
128 _polling_page_return_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);
129
130 generate_deopt_blob();
131
132 #ifdef COMPILER2
133 generate_uncommon_trap_blob();
134 #endif // COMPILER2
135 }
136
1114 // for a call currently in progress, i.e., the arguments have been pushed on the stack
1115 // but the callee has not been invoked yet. The caller frame must be compiled.
1116 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1117 CallInfo& callinfo, TRAPS) {
1118 Handle receiver;
1119 Handle nullHandle; // create a handy null handle for exception returns
1120 JavaThread* current = THREAD;
1121
1122 assert(!vfst.at_end(), "Java frame must exist");
1123
1124 // Find caller and bci from vframe
1125 methodHandle caller(current, vfst.method());
1126 int bci = vfst.bci();
1127
1128 if (caller->is_continuation_enter_intrinsic()) {
1129 bc = Bytecodes::_invokestatic;
1130 LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1131 return receiver;
1132 }
1133
1134 // Substitutability test implementation piggybacks on static call resolution
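// (The compiled acmp on value objects ends up calling ValueObjectMethods::isSubstitutable,
// which is resolved here as if it were an invokestatic; see the assert below.)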
1135 Bytecodes::Code code = caller->java_code_at(bci);
1136 if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
1137 bc = Bytecodes::_invokestatic;
1138 methodHandle attached_method(THREAD, extract_attached_method(vfst));
1139 assert(attached_method.not_null(), "must have attached method");
1140 vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
1141 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
1142 #ifdef ASSERT
1143 Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(vmSymbols::isSubstitutable_name(), vmSymbols::object_object_boolean_signature());
1144 assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
1145 #endif
1146 return receiver;
1147 }
1148
1149 Bytecode_invoke bytecode(caller, bci);
1150 int bytecode_index = bytecode.index();
1151 bc = bytecode.invoke_code();
1152
1153 methodHandle attached_method(current, extract_attached_method(vfst));
1154 if (attached_method.not_null()) {
1155 Method* callee = bytecode.static_target(CHECK_NH);
1156 vmIntrinsics::ID id = callee->intrinsic_id();
1157 // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual call,
1158 // it attaches the statically resolved method to the call site.
1159 if (MethodHandles::is_signature_polymorphic(id) &&
1160 MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1161 bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1162
1163 // Adjust invocation mode according to the attached method.
1164 switch (bc) {
1165 case Bytecodes::_invokevirtual:
1166 if (attached_method->method_holder()->is_interface()) {
1167 bc = Bytecodes::_invokeinterface;
1168 }
1169 break;
1170 case Bytecodes::_invokeinterface:
1171 if (!attached_method->method_holder()->is_interface()) {
1172 bc = Bytecodes::_invokevirtual;
1173 }
1174 break;
1175 case Bytecodes::_invokehandle:
1176 if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1177 bc = attached_method->is_static() ? Bytecodes::_invokestatic
1178 : Bytecodes::_invokevirtual;
1179 }
1180 break;
1181 default:
1182 break;
1183 }
1184 } else {
1185 assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1186 if (!attached_method->method_holder()->is_inline_klass()) {
1187 // Ignore the attached method in this case to not confuse below code
1188 attached_method = methodHandle(current, nullptr);
1189 }
1190 }
1191 }
1192
1193 assert(bc != Bytecodes::_illegal, "not initialized");
1194
1195 bool has_receiver = bc != Bytecodes::_invokestatic &&
1196 bc != Bytecodes::_invokedynamic &&
1197 bc != Bytecodes::_invokehandle;
1198 bool check_null_and_abstract = true;
1199
1200 // Find receiver for non-static call
1201 if (has_receiver) {
1202 // This register map must be updated since we need to find the receiver for
1203 // compiled frames. The receiver might be in a register.
1204 RegisterMap reg_map2(current,
1205 RegisterMap::UpdateMap::include,
1206 RegisterMap::ProcessFrames::include,
1207 RegisterMap::WalkContinuation::skip);
1208 frame stubFrame = current->last_frame();
1209 // Caller-frame is a compiled frame
1210 frame callerFrame = stubFrame.sender(&reg_map2);
1211
1212 Method* callee = attached_method();
1213 if (callee == nullptr) {
1214 callee = bytecode.static_target(CHECK_NH);
1215 if (callee == nullptr) {
1216 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1217 }
1218 }
1219 bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->is_compiled_by_c1();
1220 if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
1221 // If the receiver is an inline type that is passed as fields, no oop is available
1222 // Resolve the call without receiver null checking.
1223 assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
1224 assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
1225 if (bc == Bytecodes::_invokeinterface) {
1226 bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
1227 }
1228 check_null_and_abstract = false;
1229 } else {
1230 // Retrieve from a compiled argument list
1231 receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1232 assert(oopDesc::is_oop_or_null(receiver()), "");
1233 if (receiver.is_null()) {
1234 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1235 }
1236 }
1237 }
1238
1239 // Resolve method
1240 if (attached_method.not_null()) {
1241 // Parameterized by attached method.
1242 LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
1243 } else {
1244 // Parameterized by bytecode.
1245 constantPoolHandle constants(current, caller->constants());
1246 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1247 }
1248
1249 #ifdef ASSERT
1250 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1251 if (has_receiver && check_null_and_abstract) {
1252 assert(receiver.not_null(), "should have thrown exception");
1253 Klass* receiver_klass = receiver->klass();
1254 Klass* rk = nullptr;
1255 if (attached_method.not_null()) {
1256 // In case there's resolved method attached, use its holder during the check.
1257 rk = attached_method->method_holder();
1258 } else {
1259 // Klass is already loaded.
1260 constantPoolHandle constants(current, caller->constants());
1261 rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1262 }
1263 Klass* static_receiver_klass = rk;
1264 assert(receiver_klass->is_subtype_of(static_receiver_klass),
1265 "actual receiver must be subclass of static receiver klass");
1266 if (receiver_klass->is_instance_klass()) {
1267 if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1268 tty->print_cr("ERROR: Klass not yet initialized!!");
1269 receiver_klass->print();
1270 }
1271 assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1272 }
1273 }
1274 #endif
1275
1276 return receiver;
1277 }
1278
1279 methodHandle SharedRuntime::find_callee_method(bool is_optimized, bool& caller_is_c1, TRAPS) {
1280 JavaThread* current = THREAD;
1281 ResourceMark rm(current);
1282 // We first need to check whether any Java activations (compiled, interpreted)
1283 // exist on the stack since the last JavaCall. If not, we need
1284 // to get the target method from the JavaCall wrapper.
1285 vframeStream vfst(current, true); // Do not skip any javaCalls
1286 methodHandle callee_method;
1287 if (vfst.at_end()) {
1288 // No Java frames were found on stack since we did the JavaCall.
1289 // Hence the stack can only contain an entry_frame. We need to
1290 // find the target method from the stub frame.
1291 RegisterMap reg_map(current,
1292 RegisterMap::UpdateMap::skip,
1293 RegisterMap::ProcessFrames::include,
1294 RegisterMap::WalkContinuation::skip);
1295 frame fr = current->last_frame();
1296 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1297 fr = fr.sender(&reg_map);
1298 assert(fr.is_entry_frame(), "must be");
1299 // fr is now pointing to the entry frame.
1300 callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1301 } else {
1302 Bytecodes::Code bc;
1303 CallInfo callinfo;
1304 find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1305 // Calls via mismatching methods are always non-scalarized
1306 if (callinfo.resolved_method()->mismatch() && !is_optimized) {
1307 caller_is_c1 = true;
1308 }
1309 callee_method = methodHandle(current, callinfo.selected_method());
1310 }
1311 assert(callee_method()->is_method(), "must be");
1312 return callee_method;
1313 }
1314
1315 // Resolves a call.
1316 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_is_c1, TRAPS) {
1317 methodHandle callee_method;
1318 callee_method = resolve_sub_helper(is_virtual, is_optimized, caller_is_c1, THREAD);
1319 if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
1320 int retry_count = 0;
1321 while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
1322 callee_method->method_holder() != vmClasses::Object_klass()) {
1323 // If there is a pending exception then there is no need to re-try
1324 // resolving this method.
1325 // If the method has been redefined, we need to try again.
1326 // Hack: we have no way to update the vtables of arrays, so don't
1327 // require that java.lang.Object has been updated.
1328
1329 // It is very unlikely that a method is redefined more than 100 times
1330 // in the middle of resolution. If we loop here more than 100 times,
1331 // there is probably a bug here.
1332 guarantee((retry_count++ < 100),
1333 "Could not resolve to latest version of redefined method");
1334 // method is redefined in the middle of resolve so re-try.
1335 callee_method = resolve_sub_helper(is_virtual, is_optimized, caller_is_c1, THREAD);
1336 }
1337 }
1338 return callee_method;
1339 }
1340
1341 // This fails if resolution required refilling of IC stubs
1342 bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
1343 CompiledMethod* caller_nm, bool is_virtual, bool is_optimized, bool& caller_is_c1,
1344 Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
1345 StaticCallInfo static_call_info;
1346 CompiledICInfo virtual_call_info;
1347
1348 // Make sure the callee nmethod does not get deoptimized and removed before
1349 // we are done patching the code.
1350 CompiledMethod* callee = callee_method->code();
1351
1352 if (callee != nullptr) {
1353 assert(callee->is_compiled(), "must be nmethod for patching");
1354 }
1355
1356 if (callee != nullptr && !callee->is_in_use()) {
1357 // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1358 callee = nullptr;
1359 }
1360 #ifdef ASSERT
1361 address dest_entry_point = callee == nullptr ? 0 : callee->entry_point(); // used below
1362 #endif
1363
1364 bool is_nmethod = caller_nm->is_nmethod();
1365
1366 if (is_virtual) {
1367 Klass* receiver_klass = nullptr;
1368 if (!caller_is_c1 && callee_method->is_scalarized_arg(0)) {
1369 // If the receiver is an inline type that is passed as fields, no oop is available
1370 receiver_klass = callee_method->method_holder();
1371 } else {
1372 assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1373 receiver_klass = invoke_code == Bytecodes::_invokehandle ? nullptr : receiver->klass();
1374 }
1375 bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1376 CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
1377 is_optimized, static_bound, is_nmethod, caller_is_c1, virtual_call_info,
1378 CHECK_false);
1379 } else {
1380 // static call
1381 CompiledStaticCall::compute_entry(callee_method, caller_nm, static_call_info);
1382 }
1383
1384 // grab lock, check for deoptimization and potentially patch caller
1385 {
1386 CompiledICLocker ml(caller_nm);
1387
1388 // Lock blocks for safepoint during which both nmethods can change state.
1389
1390 // Now that we are ready to patch: if the Method* was redefined then
1391 // don't update the call site and let the caller retry.
1392 // Don't update the call site if the callee nmethod was unloaded or deoptimized.
1393 // Don't update the call site if the callee nmethod was replaced by another
1394 // nmethod, which may happen once multiple alive nmethods per method
1395 // (tiered compilation) are supported.
1396 if (!callee_method->is_old() &&
1397 (callee == nullptr || (callee->is_in_use() && callee_method->code() == callee))) {
1398 NoSafepointVerifier nsv;
1399 #ifdef ASSERT
1400 // We must not try to patch to jump to an already unloaded method.
1401 if (dest_entry_point != 0) {
1414 } else {
1415 if (VM_Version::supports_fast_class_init_checks() &&
1416 invoke_code == Bytecodes::_invokestatic &&
1417 callee_method->needs_clinit_barrier() &&
1418 callee != nullptr && callee->is_compiled_by_jvmci()) {
1419 return true; // skip patching for JVMCI
1420 }
1421 CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
1422 if (is_nmethod && caller_nm->method()->is_continuation_enter_intrinsic()) {
1423 ssc->compute_entry_for_continuation_entry(callee_method, static_call_info);
1424 }
1425 if (ssc->is_clean()) ssc->set(static_call_info);
1426 }
1427 }
1428 } // unlock CompiledICLocker
1429 return true;
1430 }
1431
1432 // Resolves a call. The compilers generate code for calls that go here
1433 // and are patched with the real destination of the call.
1434 methodHandle SharedRuntime::resolve_sub_helper(bool is_virtual, bool is_optimized, bool& caller_is_c1, TRAPS) {
1435 JavaThread* current = THREAD;
1436 ResourceMark rm(current);
1437 RegisterMap cbl_map(current,
1438 RegisterMap::UpdateMap::skip,
1439 RegisterMap::ProcessFrames::include,
1440 RegisterMap::WalkContinuation::skip);
1441 frame caller_frame = current->last_frame().sender(&cbl_map);
1442
1443 CodeBlob* caller_cb = caller_frame.cb();
1444 guarantee(caller_cb != nullptr && caller_cb->is_compiled(), "must be called from compiled method");
1445 CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
1446
1447 // determine call info & receiver
1448 // note: a) receiver is null for static calls
1449 // b) an exception is thrown if receiver is null for non-static calls
1450 CallInfo call_info;
1451 Bytecodes::Code invoke_code = Bytecodes::_illegal;
1452 Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1453 methodHandle callee_method(current, call_info.selected_method());
1454 // Calls via mismatching methods are always non-scalarized
1455 if (caller_nm->is_compiled_by_c1() || (call_info.resolved_method()->mismatch() && !is_optimized)) {
1456 caller_is_c1 = true;
1457 }
1458
1459 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1460 (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1461 (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1462 (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1463 ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1464
1465 assert(!caller_nm->is_unloading(), "It should not be unloading");
1466
1467 #ifndef PRODUCT
1468 // tracing/debugging/statistics
1469 int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1470 (is_virtual) ? (&_resolve_virtual_ctr) :
1471 (&_resolve_static_ctr);
1472 Atomic::inc(addr);
1473
1474 if (TraceCallFixup) {
1475 ResourceMark rm(current);
1476 tty->print("resolving %s%s (%s) call to",
1477 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1502 // If the resolved method is a MethodHandle invoke target, the call
1503 // site must be a MethodHandle call site, because the lambda form might tail-call
1504 // leaving the stack in a state unknown to either caller or callee
1505 // TODO detune for now but we might need it again
1506 // assert(!callee_method->is_compiled_lambda_form() ||
1507 // caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
1508
1509 // Compute entry points. This might require generation of C2I converter
1510 // frames, so we cannot be holding any locks here. Furthermore, the
1511 // computation of the entry points is independent of patching the call. We
1512 // always return the entry-point, but we only patch the stub if the call has
1513 // not been deoptimized. Return values: For a virtual call this is an
1514 // (cached_oop, destination address) pair. For a static call/optimized
1515 // virtual this is just a destination address.
1516
1517 // Patching IC caches may fail if we run out of transition stubs.
1518 // We refill the ic stubs then and try again.
1519 for (;;) {
1520 ICRefillVerifier ic_refill_verifier;
1521 bool successful = resolve_sub_helper_internal(callee_method, caller_frame, caller_nm,
1522 is_virtual, is_optimized, caller_is_c1, receiver,
1523 call_info, invoke_code, CHECK_(methodHandle()));
1524 if (successful) {
1525 return callee_method;
1526 } else {
1527 InlineCacheBuffer::refill_ic_stubs();
1528 }
1529 }
1530
1531 }
1532
1533
1534 // Inline caches exist only in compiled code
1535 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1536 #ifdef ASSERT
1537 RegisterMap reg_map(current,
1538 RegisterMap::UpdateMap::skip,
1539 RegisterMap::ProcessFrames::include,
1540 RegisterMap::WalkContinuation::skip);
1541 frame stub_frame = current->last_frame();
1542 assert(stub_frame.is_runtime_frame(), "sanity check");
1543 frame caller_frame = stub_frame.sender(&reg_map);
1544 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1545 #endif /* ASSERT */
1546
1547 methodHandle callee_method;
1548 bool is_optimized = false;
1549 bool caller_is_c1 = false;
1550 JRT_BLOCK
1551 callee_method = SharedRuntime::handle_ic_miss_helper(is_optimized, caller_is_c1, CHECK_NULL);
1552 // Return Method* through TLS
1553 current->set_vm_result_2(callee_method());
1554 JRT_BLOCK_END
1555 // return compiled code entry point after potential safepoints
1556 return entry_for_handle_wrong_method(callee_method, false, is_optimized, caller_is_c1);
1557 JRT_END
1558
1559
1560 // Handle call site that has been made non-entrant
1561 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1562 // 6243940 We might end up in here if the callee is deoptimized
1563 // as we race to call it. We don't want to take a safepoint if
1564 // the caller was interpreted because the caller frame will look
1565 // interpreted to the stack walkers and arguments are now
1566 // "compiled" so it is much better to make this transition
1567 // invisible to the stack walking code. The i2c path will
1568 // place the callee method in the callee_target. It is stashed
1569 // there because if we try to find the callee by normal means a
1570 // safepoint is possible and we would have trouble GC'ing the compiled args.
1571 RegisterMap reg_map(current,
1572 RegisterMap::UpdateMap::skip,
1573 RegisterMap::ProcessFrames::include,
1574 RegisterMap::WalkContinuation::skip);
1575 frame stub_frame = current->last_frame();
1576 assert(stub_frame.is_runtime_frame(), "sanity check");
1577 frame caller_frame = stub_frame.sender(&reg_map);
1578
1579 if (caller_frame.is_interpreted_frame() ||
1580 caller_frame.is_entry_frame() ||
1581 caller_frame.is_upcall_stub_frame()) {
1582 Method* callee = current->callee_target();
1583 guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1584 current->set_vm_result_2(callee);
1585 current->set_callee_target(nullptr);
1586 if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1587 // Bypass class initialization checks in c2i when caller is in native.
1588 // JNI calls to static methods don't have class initialization checks.
1589 // Fast class initialization checks are present in c2i adapters and call into
1590 // SharedRuntime::handle_wrong_method() on the slow path.
1591 //
1592 // JVM upcalls may land here as well, but there's a proper check present in
1593 // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1594 // so bypassing it in c2i adapter is benign.
1595 return callee->get_c2i_no_clinit_check_entry();
1596 } else {
1597 if (caller_frame.is_interpreted_frame()) {
1598 return callee->get_c2i_inline_entry();
1599 } else {
1600 return callee->get_c2i_entry();
1601 }
1602 }
1603 }
1604
1605 // Must be compiled to compiled path which is safe to stackwalk
1606 methodHandle callee_method;
1607 bool is_static_call = false;
1608 bool is_optimized = false;
1609 bool caller_is_c1 = false;
1610 JRT_BLOCK
1611 // Force resolving of caller (if we called from compiled frame)
1612 callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_is_c1, CHECK_NULL);
1613 current->set_vm_result_2(callee_method());
1614 JRT_BLOCK_END
1615 // return compiled code entry point after potential safepoints
1616 return entry_for_handle_wrong_method(callee_method, is_static_call, is_optimized, caller_is_c1);
1617 JRT_END
1618
1619 // Handle abstract method call
1620 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1621 // Verbose error message for AbstractMethodError.
1622 // Get the called method from the invoke bytecode.
1623 vframeStream vfst(current, true);
1624 assert(!vfst.at_end(), "Java frame must exist");
1625 methodHandle caller(current, vfst.method());
1626 Bytecode_invoke invoke(caller, vfst.bci());
1627 DEBUG_ONLY( invoke.verify(); )
1628
1629 // Find the compiled caller frame.
1630 RegisterMap reg_map(current,
1631 RegisterMap::UpdateMap::include,
1632 RegisterMap::ProcessFrames::include,
1633 RegisterMap::WalkContinuation::skip);
1634 frame stubFrame = current->last_frame();
1635 assert(stubFrame.is_runtime_frame(), "must be");
1636 frame callerFrame = stubFrame.sender(&reg_map);
1637 assert(callerFrame.is_compiled_frame(), "must be");
1638
1639 // Install exception and return forward entry.
1640 address res = StubRoutines::throw_AbstractMethodError_entry();
1641 JRT_BLOCK
1642 methodHandle callee(current, invoke.static_target(current));
1643 if (!callee.is_null()) {
1644 oop recv = callerFrame.retrieve_receiver(&reg_map);
1645 Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1646 res = StubRoutines::forward_exception_entry();
1647 LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1648 }
1649 JRT_BLOCK_END
1650 return res;
1651 JRT_END
1652
1653
1654 // resolve a static call and patch code
1655 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1656 methodHandle callee_method;
1657 bool caller_is_c1 = false;
1658 bool enter_special = false;
1659 JRT_BLOCK
1660 callee_method = SharedRuntime::resolve_helper(false, false, caller_is_c1, CHECK_NULL);
1661 current->set_vm_result_2(callee_method());
1662
1663 if (current->is_interp_only_mode()) {
1664 RegisterMap reg_map(current,
1665 RegisterMap::UpdateMap::skip,
1666 RegisterMap::ProcessFrames::include,
1667 RegisterMap::WalkContinuation::skip);
1668 frame stub_frame = current->last_frame();
1669 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1670 frame caller = stub_frame.sender(&reg_map);
1671 enter_special = caller.cb() != nullptr && caller.cb()->is_compiled()
1672 && caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic();
1673 }
1674 JRT_BLOCK_END
1675
1676 if (current->is_interp_only_mode() && enter_special) {
1677 // enterSpecial is compiled and calls this method to resolve the call to Continuation::enter
1678 // but in interp_only_mode we need to go to the interpreted entry
1679 // The c2i won't patch in this mode -- see fixup_callers_callsite
1680 //
1681 // This should probably be done in all cases, not just enterSpecial (see JDK-8218403),
1682 // but that's part of a larger fix, and the situation is worse for enterSpecial, as it has no
1683 // interpreted version.
1684 return callee_method->get_c2i_entry();
1685 }
1686
1687 // return compiled code entry point after potential safepoints
1688 address entry = caller_is_c1 ?
1689 callee_method->verified_inline_code_entry() : callee_method->verified_code_entry();
1690 assert(entry != nullptr, "Jump to zero!");
1691 return entry;
1692 JRT_END
1693
1694
1695 // resolve virtual call and update inline cache to monomorphic
1696 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1697 methodHandle callee_method;
1698 bool caller_is_c1 = false;
1699 JRT_BLOCK
1700 callee_method = SharedRuntime::resolve_helper(true, false, caller_is_c1, CHECK_NULL);
1701 current->set_vm_result_2(callee_method());
1702 JRT_BLOCK_END
1703 // return compiled code entry point after potential safepoints
1704 address entry = caller_is_c1 ?
1705 callee_method->verified_inline_code_entry() : callee_method->verified_inline_ro_code_entry();
1706 assert(entry != nullptr, "Jump to zero!");
1707 return entry;
1708 JRT_END
1709
1710
1711 // Resolve a virtual call that can be statically bound (e.g., always
1712 // monomorphic, so it has no inline cache). Patch code to resolved target.
1713 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1714 methodHandle callee_method;
1715 bool caller_is_c1 = false;
1716 JRT_BLOCK
1717 callee_method = SharedRuntime::resolve_helper(true, true, caller_is_c1, CHECK_NULL);
1718 current->set_vm_result_2(callee_method());
1719 JRT_BLOCK_END
1720 // return compiled code entry point after potential safepoints
1721 address entry = caller_is_c1 ?
1722 callee_method->verified_inline_code_entry() : callee_method->verified_code_entry();
1723 assert(entry != nullptr, "Jump to zero!");
1724 return entry;
1725 JRT_END
1726
1727 // handle_ic_miss_helper_internal returns false if it failed because it ran out
1728 // of vtable stubs or IC stubs while transitioning the IC to a transitional state.
1729 // The needs_ic_stub_refill value is set if the failure was due to running
1730 // out of IC stubs, in which case handle_ic_miss_helper refills the IC stubs
1731 // and tries again.
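// The retry loop lives in handle_ic_miss_helper: it wraps each attempt in an
// ICRefillVerifier, calls InlineCacheBuffer::refill_ic_stubs() when this function
// reports needs_ic_stub_refill, and then calls back in here.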
1732 bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
1733 const frame& caller_frame, methodHandle callee_method,
1734 Bytecodes::Code bc, CallInfo& call_info,
1735 bool& needs_ic_stub_refill, bool& is_optimized, bool caller_is_c1, TRAPS) {
1736 CompiledICLocker ml(caller_nm);
1737 CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1738 bool should_be_mono = false;
1739 if (inline_cache->is_optimized()) {
1740 if (TraceCallFixup) {
1741 ResourceMark rm(THREAD);
1742 tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1743 callee_method->print_short_name(tty);
1744 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1745 }
1746 is_optimized = true;
1747 should_be_mono = true;
1748 } else if (inline_cache->is_icholder_call()) {
1749 CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1750 if (ic_oop != nullptr) {
1751 if (!ic_oop->is_loader_alive()) {
1752 // Deferred IC cleaning due to concurrent class unloading
1753 if (!inline_cache->set_to_clean()) {
1754 needs_ic_stub_refill = true;
1755 return false;
1756 }
1757 } else if (receiver()->klass() == ic_oop->holder_klass()) {
1758 // This isn't a real miss. We must have seen that compiled code
1759 // is now available and we want the call site converted to a
1760 // monomorphic compiled call site.
1761 // We can't assert for callee_method->code() != nullptr because it
1762 // could have been deoptimized in the meantime
1763 if (TraceCallFixup) {
1764 ResourceMark rm(THREAD);
1765 tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1766 callee_method->print_short_name(tty);
1767 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1768 }
1769 should_be_mono = true;
1770 }
1771 }
1772 }
1773
1774 if (should_be_mono) {
1775 // We have a path that was monomorphic but was going interpreted
1776 // and now we have (or had) a compiled entry. We correct the IC
1777 // by using a new icBuffer.
1778 CompiledICInfo info;
1779 Klass* receiver_klass = receiver()->klass();
1780 inline_cache->compute_monomorphic_entry(callee_method,
1781 receiver_klass,
1782 inline_cache->is_optimized(),
1783 false, caller_nm->is_nmethod(),
1784 caller_is_c1,
1785 info, CHECK_false);
1786 if (!inline_cache->set_to_monomorphic(info)) {
1787 needs_ic_stub_refill = true;
1788 return false;
1789 }
1790 } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1791 // Potential change to megamorphic
1792
1793 bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, caller_is_c1, CHECK_false);
1794 if (needs_ic_stub_refill) {
1795 return false;
1796 }
1797 if (!successful) {
1798 if (!inline_cache->set_to_clean()) {
1799 needs_ic_stub_refill = true;
1800 return false;
1801 }
1802 }
1803 } else {
1804 // Either clean or megamorphic
1805 }
1806 return true;
1807 }
1808
1809 methodHandle SharedRuntime::handle_ic_miss_helper(bool& is_optimized, bool& caller_is_c1, TRAPS) {
1810 JavaThread* current = THREAD;
1811 ResourceMark rm(current);
1812 CallInfo call_info;
1813 Bytecodes::Code bc;
1814
1815 // receiver is null for static calls. An exception is thrown for null
1816 // receivers for non-static calls
1817 Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1818 // Compiler1 can produce virtual call sites that can actually be statically bound.
1819 // If we fell through to the code below we would think that the site was going
1820 // megamorphic when in fact the site can never miss. Worse, because we'd think it
1821 // was megamorphic we'd try to do a vtable dispatch; however, methods that can be
1822 // statically bound don't have vtable entries (vtable_index < 0) and we'd blow up.
1823 // So we force a re-resolution of the call site (as if we did a handle_wrong_method
1824 // and not a plain ic_miss) and the site will be converted to an optimized virtual
1825 // call site, never to miss again. I don't believe C2 will produce code like this,
1826 // but if it did this would still be the correct thing to do for it too, hence no ifdef.
1827 //
1828 if (call_info.resolved_method()->can_be_statically_bound()) {
1829 bool is_static_call = false;
1830 methodHandle callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_is_c1, CHECK_(methodHandle()));
1831 assert(!is_static_call, "IC miss at static call?");
1832 if (TraceCallFixup) {
1833 RegisterMap reg_map(current,
1834 RegisterMap::UpdateMap::skip,
1835 RegisterMap::ProcessFrames::include,
1836 RegisterMap::WalkContinuation::skip);
1837 frame caller_frame = current->last_frame().sender(&reg_map);
1838 ResourceMark rm(current);
1839 tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1840 callee_method->print_short_name(tty);
1841 tty->print_cr(" from pc: " INTPTR_FORMAT, p2i(caller_frame.pc()));
1842 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1843 }
1844 return callee_method;
1845 }
1846
1847 methodHandle callee_method(current, call_info.selected_method());
1848
1849 #ifndef PRODUCT
1850 Atomic::inc(&_ic_miss_ctr);
1851
1870 #endif
1871
1872 // install an event collector so that when a vtable stub is created the
1873 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1874 // event can't be posted when the stub is created as locks are held
1875 // - instead the event will be deferred until the event collector goes
1876 // out of scope.
1877 JvmtiDynamicCodeEventCollector event_collector;
1878
1879 // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1880 // Transitioning IC caches may require transition stubs. If we run out
1881 // of transition stubs, we have to drop locks and perform a safepoint
1882 // that refills them.
1883 RegisterMap reg_map(current,
1884 RegisterMap::UpdateMap::skip,
1885 RegisterMap::ProcessFrames::include,
1886 RegisterMap::WalkContinuation::skip);
1887 frame caller_frame = current->last_frame().sender(&reg_map);
1888 CodeBlob* cb = caller_frame.cb();
1889 CompiledMethod* caller_nm = cb->as_compiled_method();
1890 // Calls via mismatching methods are always non-scalarized
1891 if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1892 caller_is_c1 = true;
1893 }
1894
1895 for (;;) {
1896 ICRefillVerifier ic_refill_verifier;
1897 bool needs_ic_stub_refill = false;
1898 bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1899 bc, call_info, needs_ic_stub_refill, is_optimized, caller_is_c1, CHECK_(methodHandle()));
1900 if (successful || !needs_ic_stub_refill) {
1901 return callee_method;
1902 } else {
1903 InlineCacheBuffer::refill_ic_stubs();
1904 }
1905 }
1906 }
1907
1908 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1909 CompiledICLocker ml(caller_nm);
1910 if (is_static_call) {
1911 CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1912 if (!ssc->is_clean()) {
1913 return ssc->set_to_clean();
1914 }
1915 } else {
1916 // compiled, dispatched call (which used to call an interpreted method)
1917 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1918 if (!inline_cache->is_clean()) {
1919 return inline_cache->set_to_clean();
1920 }
1921 }
1922 return true;
1923 }
1924
1925 //
1926 // Resets a call-site in compiled code so it will get resolved again.
1927 // This routine handles virtual call sites, optimized virtual call
1928 // sites, and static call sites. Typically used to change a call site's
1929 // destination from compiled to interpreted.
1930 //
1931 methodHandle SharedRuntime::reresolve_call_site(bool& is_static_call, bool& is_optimized, bool& caller_is_c1, TRAPS) {
1932 JavaThread* current = THREAD;
1933 ResourceMark rm(current);
1934 RegisterMap reg_map(current,
1935 RegisterMap::UpdateMap::skip,
1936 RegisterMap::ProcessFrames::include,
1937 RegisterMap::WalkContinuation::skip);
1938 frame stub_frame = current->last_frame();
1939 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1940 frame caller = stub_frame.sender(&reg_map);
1941 if (caller.is_compiled_frame()) {
1942 caller_is_c1 = caller.cb()->is_compiled_by_c1();
1943 }
1944
1945 // Do nothing if the frame isn't a live compiled frame.
1946 // nmethod could be deoptimized by the time we get here
1947 // so no update to the caller is needed.
1948
1949 if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
1950
1951 address pc = caller.pc();
1952
1953 // Check for static or virtual call
1954 CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
1955
1956 // Default call_addr is the location of the "basic" call.
1957 // Determine the address of the call we are re-resolving. With
1958 // Inline Caches we will always find a recognizable call.
1959 // With Inline Caches disabled we may or may not find a
1960 // recognizable call. We will always find a call for static
1961 // calls and for optimized virtual calls. For vanilla virtual
1962 // calls it depends on the state of the UseInlineCaches switch.
1963 //
1964 // With Inline Caches disabled we can get here for a virtual call
1965 // for two reasons:
1966 // 1 - calling an abstract method. The vtable for abstract methods
1967 // will run us thru handle_wrong_method and we will eventually
1968 //       end up in the interpreter to throw the AbstractMethodError.
1969 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1970 // call and between the time we fetch the entry address and
1971 // we jump to it the target gets deoptimized. Similar to 1
1972 //       we will wind up in the interpreter (thru a c2i with c2).
1973 //
1974 address call_addr = nullptr;
1975 {
1976 // Get call instruction under lock because another thread may be
1977 // busy patching it.
1978 CompiledICLocker ml(caller_nm);
1979 // Location of call instruction
1980 call_addr = caller_nm->call_instruction_address(pc);
1981 }
1982
1983 // Check relocations for the matching call to 1) avoid false positives,
1984 // and 2) determine the type.
1985 if (call_addr != nullptr) {
1986 // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1987 // bytes back in the instruction stream so we must also check for reloc info.
1988 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1989 bool ret = iter.next(); // Get item
1990 if (ret) {
1991 is_static_call = false;
1992 is_optimized = false;
1993 switch (iter.type()) {
1994 case relocInfo::static_call_type:
1995 is_static_call = true;
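// Fall through: a static call site is cleaned by the same code path as the
// virtual/opt-virtual cases below.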
1996
1997 case relocInfo::virtual_call_type:
1998 case relocInfo::opt_virtual_call_type:
1999 is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
2000 // Cleaning the inline cache will force a new resolve. This is more robust
2001 // than directly setting it to the new destination, since resolving of calls
2002 // is always done through the same code path. (experience shows that it
2003 // leads to very hard to track down bugs, if an inline cache gets updated
2004 // to a wrong method). It should not be performance critical, since the
2005 // resolve is only done once.
2006 guarantee(iter.addr() == call_addr, "must find call");
2007 for (;;) {
2008 ICRefillVerifier ic_refill_verifier;
2009 if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
2010 InlineCacheBuffer::refill_ic_stubs();
2011 } else {
2012 break;
2013 }
2014 }
2015 break;
2016 default:
2017 break;
2018 }
2019 }
2020 }
2021 }
2022
2023 methodHandle callee_method = find_callee_method(is_optimized, caller_is_c1, CHECK_(methodHandle()));
2024
2025 #ifndef PRODUCT
2026 Atomic::inc(&_wrong_method_ctr);
2027
2028 if (TraceCallFixup) {
2029 ResourceMark rm(current);
2030 tty->print("handle_wrong_method reresolving call to");
2031 callee_method->print_short_name(tty);
2032 tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
2033 }
2034 #endif
2035
2036 return callee_method;
2037 }
2038
2039 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
2040 // The faulting unsafe accesses should be changed to throw the error
2041 // synchronously instead. Meanwhile the faulting instruction will be
2042 // skipped over (effectively turning it into a no-op) and an
2043 // asynchronous exception will be raised which the thread will
2177 // for the rest of its life! Just another racing bug in the life of
2178 // fixup_callers_callsite ...
2179 //
2180 RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
2181 iter.next();
2182 assert(iter.has_current(), "must have a reloc at java call site");
2183 relocInfo::relocType typ = iter.reloc()->type();
2184 if (typ != relocInfo::static_call_type &&
2185 typ != relocInfo::opt_virtual_call_type &&
2186 typ != relocInfo::static_stub_type) {
2187 return;
2188 }
2189 if (nm->method()->is_continuation_enter_intrinsic()) {
2190 assert(ContinuationEntry::is_interpreted_call(call->instruction_address()) == JavaThread::current()->is_interp_only_mode(),
2191 "mode: %d", JavaThread::current()->is_interp_only_mode());
2192 if (ContinuationEntry::is_interpreted_call(call->instruction_address())) {
2193 return;
2194 }
2195 }
2196 address destination = call->destination();
2197 address entry_point = cb->is_compiled_by_c1() ? callee->verified_inline_entry_point() : callee->verified_entry_point();
2198 if (should_fixup_call_destination(destination, entry_point, caller_pc, moop, cb)) {
2199 call->set_destination_mt_safe(entry_point);
2200 }
2201 }
2202 }
2203 JRT_END
2204
2205
2206 // same as JVM_Arraycopy, but called directly from compiled code
2207 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
2208 oopDesc* dest, jint dest_pos,
2209 jint length,
2210 JavaThread* current)) {
2211 #ifndef PRODUCT
2212 _slow_array_copy_ctr++;
2213 #endif
2214 // Check if we have null pointers
2215 if (src == nullptr || dest == nullptr) {
2216 THROW(vmSymbols::java_lang_NullPointerException());
2217 }
2507 private:
2508 enum {
2509 _basic_type_bits = 4,
2510 _basic_type_mask = right_n_bits(_basic_type_bits),
2511 _basic_types_per_int = BitsPerInt / _basic_type_bits,
2512 _compact_int_count = 3
2513 };
2514 // TO DO: Consider integrating this with a more global scheme for compressing signatures.
2515 // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2516
2517 union {
2518 int _compact[_compact_int_count];
2519 int* _fingerprint;
2520 } _value;
2521 int _length; // A negative length indicates the fingerprint is in the compact form;
2522              // otherwise _value._fingerprint points to the C-heap-allocated array.
2523
2524 // Remap BasicTypes that are handled equivalently by the adapters.
2525 // These are correct for the current system but someday it might be
2526 // necessary to make this mapping platform dependent.
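// A consequence of this remapping is that adapters are shared between signatures
// that differ only in sub-int types: for example, a static (ZBSI)V and a static
// (IIII)V produce the same fingerprint and therefore reuse the same i2c/c2i adapter.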
2527 static BasicType adapter_encoding(BasicType in) {
2528 switch (in) {
2529 case T_BOOLEAN:
2530 case T_BYTE:
2531 case T_SHORT:
2532 case T_CHAR:
2533 // They are all promoted to T_INT in the calling convention
2534 return T_INT;
2535
2536 case T_OBJECT:
2537 case T_ARRAY:
2538 // In other words, we assume that any register good enough for
2539 // an int or long is good enough for a managed pointer.
2540 #ifdef _LP64
2541 return T_LONG;
2542 #else
2543 return T_INT;
2544 #endif
2545
2546 case T_INT:
2547 case T_LONG:
2548 case T_FLOAT:
2549 case T_DOUBLE:
2550 case T_VOID:
2551 return in;
2552
2553 default:
2554 ShouldNotReachHere();
2555 return T_CONFLICT;
2556 }
2557 }
2558
2559 public:
2560 AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2561 // The fingerprint is based on the BasicType signature encoded
2562 // into an array of ints with eight entries per int.
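// Illustrative packing (symbolic, not concrete bit patterns): for a signature
// reduced to [T_INT, T_LONG, T_VOID], the first word is built up as
//   value = ((((0 << 4 | T_INT) << 4 | T_LONG) << 4 | T_VOID) << 4 | 0) ...
// i.e. earlier arguments land in the higher-order 4-bit nibbles and unused trailing
// nibbles stay zero (which as_basic_args_string() relies on below).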
2563 int total_args_passed = (sig != nullptr) ? sig->length() : 0;
2564 int* ptr;
2565 int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2566 if (len <= _compact_int_count) {
2567 assert(_compact_int_count == 3, "else change next line");
2568 _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2569 // Storing the signature encoded as signed chars hits about 98%
2570 // of the time.
2571 _length = -len;
2572 ptr = _value._compact;
2573 } else {
2574 _length = len;
2575 _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2576 ptr = _value._fingerprint;
2577 }
2578
2579 // Now pack the BasicTypes with 8 per int
2580 int sig_index = 0;
2581 BasicType prev_bt = T_ILLEGAL;
2582 int vt_count = 0;
2583 for (int index = 0; index < len; index++) {
2584 int value = 0;
2585 for (int byte = 0; byte < _basic_types_per_int; byte++) {
2586 BasicType bt = T_ILLEGAL;
2587 if (sig_index < total_args_passed) {
2588 bt = sig->at(sig_index++)._bt;
2589 if (bt == T_PRIMITIVE_OBJECT) {
2590 // Found start of inline type in signature
2591 assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2592 if (sig_index == 1 && has_ro_adapter) {
2593 // With a ro_adapter, replace the receiver inline type delimiter with T_VOID to prevent matching
2594 // with other adapters that have the same inline type as their first argument and no receiver.
2595 bt = T_VOID;
2596 }
2597 vt_count++;
2598 } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2599 // Found end of inline type in signature
2600 assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2601 vt_count--;
2602 assert(vt_count >= 0, "invalid vt_count");
2603 } else if (vt_count == 0) {
2604 // Widen fields that are not part of a scalarized inline type argument
2605 bt = adapter_encoding(bt);
2606 }
2607 prev_bt = bt;
2608 }
2609 int bt_val = (bt == T_ILLEGAL) ? 0 : bt;
2610 assert((bt_val & _basic_type_mask) == bt_val, "must fit in 4 bits");
2611 value = (value << _basic_type_bits) | bt_val;
2612 }
2613 ptr[index] = value;
2614 }
2615 assert(vt_count == 0, "invalid vt_count");
2616 }
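// Minimal packing sketch: a static (IJ)V method yields the entries
// [T_INT, T_LONG, T_VOID]. Here len = (3 + 7) / 8 = 1 <= _compact_int_count,
// so the three 4-bit codes are packed MSB-first into _value._compact[0] and
// _length is set to -1 (compact form). Only signatures with more than
// 24 entries (3 ints * 8 codes per int) spill into a C-heap allocated array.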
2617
2618 ~AdapterFingerPrint() {
2619 if (_length > 0) {
2620 FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2621 }
2622 }
2623
2624 int value(int index) {
2625 if (_length < 0) {
2626 return _value._compact[index];
2627 }
2628 return _value._fingerprint[index];
2629 }
2630 int length() {
2631 if (_length < 0) return -_length;
2632 return _length;
2633 }
2634
2635 bool is_compact() {
2636 return _length <= 0;
2637 }
2660 const char* as_basic_args_string() {
2661 stringStream st;
2662 bool long_prev = false;
2663 for (int i = 0; i < length(); i++) {
2664 unsigned val = (unsigned)value(i);
2665 // args are packed so that first/lower arguments are in the highest
2666 // bits of each int value, so iterate from highest to the lowest
2667 for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2668 unsigned v = (val >> j) & _basic_type_mask;
2669 if (v == 0) {
2670 assert(i == length() - 1, "Only expect zeroes in the last word");
2671 continue;
2672 }
2673 if (long_prev) {
2674 long_prev = false;
2675 // The previous T_LONG was a real long only if it is followed by T_VOID;
2676 // otherwise it encoded an object pointer (see adapter_encoding()).
2677 st.print(v == T_VOID ? "J" : "L");
2678 }
2679 // Handle the current slot even if it closed out a pending long above.
2680 if (v == T_LONG) {
2681 long_prev = true;
2682 } else if (v != T_VOID) {
2683 st.print("%c", type2char((BasicType)v));
2684 }
2685 }
2686 }
2687 if (long_prev) {
2688 st.print("L");
2689 }
2690 return st.as_string();
2691 }
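// Decoding example: the fingerprint of a static (JLjava/lang/String;)V method
// prints as "JL". The T_VOID slot following the first T_LONG marks it as a
// real long ("J"), while the second T_LONG has no trailing T_VOID and is
// therefore decoded as a managed pointer ("L").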
2692 #endif // !PRODUCT
2693
2694 bool equals(AdapterFingerPrint* other) {
2695 if (other->_length != _length) {
2696 return false;
2697 }
2698 if (_length < 0) {
2699 assert(_compact_int_count == 3, "else change next line");
2700 return _value._compact[0] == other->_value._compact[0] &&
2701 _value._compact[1] == other->_value._compact[1] &&
2702 _value._compact[2] == other->_value._compact[2];
2703 } else {
2704 for (int i = 0; i < _length; i++) {
2705 if (_value._fingerprint[i] != other->_value._fingerprint[i]) {
2706 return false;
2707 }
2708 }
2709 }
2710 return true;
2711 }
2712
2713 static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2714 NOT_PRODUCT(_equals++);
2715 return fp1->equals(fp2);
2716 }
2717
2718 static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2719 return fp->compute_hash();
2720 }
2721 };
2722
2723 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2724 ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2725 AnyObj::C_HEAP, mtCode,
2726 AdapterFingerPrint::compute_hash,
2727 AdapterFingerPrint::equals> _adapter_handler_table;
2728
2729 // Find an entry with the same fingerprint if it exists
2730 static AdapterHandlerEntry* lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2731 NOT_PRODUCT(_lookups++);
2732 assert_lock_strong(AdapterHandlerLibrary_lock);
2733 AdapterFingerPrint fp(sig, has_ro_adapter);
2734 AdapterHandlerEntry** entry = _adapter_handler_table.get(&fp);
2735 if (entry != nullptr) {
2736 #ifndef PRODUCT
2737 if (fp.is_compact()) _compact++;
2738 _hits++;
2739 #endif
2740 return *entry;
2741 }
2742 return nullptr;
2743 }
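// lookup() relies on the caller already holding AdapterHandlerLibrary_lock
// (see the assert above); get_adapter() below takes the lock with a
// MutexLocker before calling in, so all table accesses happen under the lock.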
2744
2745 #ifndef PRODUCT
2746 static void print_table_statistics() {
2747 auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2748 return sizeof(*key) + sizeof(*a);
2749 };
2750 TableStatistics ts = _adapter_handler_table.statistics_calculate(size);
2751 ts.print(tty, "AdapterHandlerTable");
2752 tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2753 _adapter_handler_table.table_size(), _adapter_handler_table.number_of_entries());
2754 tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2755 _lookups, _equals, _hits, _compact);
2756 }
2757 #endif
2758
2759 // ---------------------------------------------------------------------------
2760 // Implementation of AdapterHandlerLibrary
2761 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
2762 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2763 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2764 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2765 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2766 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2767 const int AdapterHandlerLibrary_size = 48*K;
2768 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2769
2770 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2771 return _buffer;
2772 }
2773
2774 static void post_adapter_creation(const AdapterBlob* new_adapter,
2775 const AdapterHandlerEntry* entry) {
2776 if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2777 char blob_id[256];
2778 jio_snprintf(blob_id,
2779 sizeof(blob_id),
2780 "%s(%s)",
2781 new_adapter->name(),
2782 entry->fingerprint()->as_string());
2783 if (Forte::is_enabled()) {
2784 Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2785 }
2786
2787 if (JvmtiExport::should_post_dynamic_code_generated()) {
2788 JvmtiExport::post_dynamic_code_generated(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2789 }
2790 }
2791 }
2792
2793 void AdapterHandlerLibrary::initialize() {
2794 ResourceMark rm;
2795 AdapterBlob* no_arg_blob = nullptr;
2796 AdapterBlob* int_arg_blob = nullptr;
2797 AdapterBlob* obj_arg_blob = nullptr;
2798 AdapterBlob* obj_int_arg_blob = nullptr;
2799 AdapterBlob* obj_obj_arg_blob = nullptr;
2800 {
2801 MutexLocker mu(AdapterHandlerLibrary_lock);
2802
2803 // Create a special handler for abstract methods. Abstract methods
2804 // are never compiled so an i2c entry is somewhat meaningless, but
2805 // throw AbstractMethodError just in case.
2806 // Pass wrong_method_abstract for the c2i transitions to return
2807 // AbstractMethodError for invalid invocations.
2808 address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2809 _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(nullptr),
2810 StubRoutines::throw_AbstractMethodError_entry(),
2811 wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
2812 wrong_method_abstract, wrong_method_abstract);
2813 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2814
2815 CompiledEntrySignature no_args;
2816 no_args.compute_calling_conventions();
2817 _no_arg_handler = create_adapter(no_arg_blob, no_args, true);
2818
2819 CompiledEntrySignature obj_args;
2820 SigEntry::add_entry(obj_args.sig(), T_OBJECT, nullptr);
2821 obj_args.compute_calling_conventions();
2822 _obj_arg_handler = create_adapter(obj_arg_blob, obj_args, true);
2823
2824 CompiledEntrySignature int_args;
2825 SigEntry::add_entry(int_args.sig(), T_INT, nullptr);
2826 int_args.compute_calling_conventions();
2827 _int_arg_handler = create_adapter(int_arg_blob, int_args, true);
2828
2829 CompiledEntrySignature obj_int_args;
2830 SigEntry::add_entry(obj_int_args.sig(), T_OBJECT, nullptr);
2831 SigEntry::add_entry(obj_int_args.sig(), T_INT, nullptr);
2832 obj_int_args.compute_calling_conventions();
2833 _obj_int_arg_handler = create_adapter(obj_int_arg_blob, obj_int_args, true);
2834
2835 CompiledEntrySignature obj_obj_args;
2836 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT, nullptr);
2837 SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT, nullptr);
2838 obj_obj_args.compute_calling_conventions();
2839 _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, obj_obj_args, true);
2840
2841 assert(no_arg_blob != nullptr &&
2842 obj_arg_blob != nullptr &&
2843 int_arg_blob != nullptr &&
2844 obj_int_arg_blob != nullptr &&
2845 obj_obj_arg_blob != nullptr, "Initial adapters must be properly created");
2846 }
2848
2849 // Outside of the lock
2850 post_adapter_creation(no_arg_blob, _no_arg_handler);
2851 post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2852 post_adapter_creation(int_arg_blob, _int_arg_handler);
2853 post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2854 post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2855 }
2856
2857 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2858 address i2c_entry,
2859 address c2i_entry,
2860 address c2i_inline_entry,
2861 address c2i_inline_ro_entry,
2862 address c2i_unverified_entry,
2863 address c2i_unverified_inline_entry,
2864 address c2i_no_clinit_check_entry) {
2865 return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry,
2866 c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
2867 }
2868
2869 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2870 if (method->is_abstract()) {
2871 return nullptr;
2872 }
2873 int total_args_passed = method->size_of_parameters(); // All args on stack
2874 if (total_args_passed == 0) {
2875 return _no_arg_handler;
2876 } else if (total_args_passed == 1) {
2877 if (!method->is_static()) {
2878 if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2879 return nullptr;
2880 }
2881 return _obj_arg_handler;
2882 }
2883 switch (method->signature()->char_at(1)) {
2884 case JVM_SIGNATURE_CLASS: {
2885 if (InlineTypePassFieldsAsArgs) {
2886 SignatureStream ss(method->signature());
2887 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2888 if (vk != nullptr) {
2889 return nullptr;
2890 }
2891 }
2892 return _obj_arg_handler;
2893 }
2894 case JVM_SIGNATURE_ARRAY:
2895 return _obj_arg_handler;
2896 case JVM_SIGNATURE_INT:
2897 case JVM_SIGNATURE_BOOLEAN:
2898 case JVM_SIGNATURE_CHAR:
2899 case JVM_SIGNATURE_BYTE:
2900 case JVM_SIGNATURE_SHORT:
2901 return _int_arg_handler;
2902 }
2903 } else if (total_args_passed == 2 &&
2904 !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2905 switch (method->signature()->char_at(1)) {
2906 case JVM_SIGNATURE_CLASS: {
2907 if (InlineTypePassFieldsAsArgs) {
2908 SignatureStream ss(method->signature());
2909 InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2910 if (vk != nullptr) {
2911 return nullptr;
2912 }
2913 }
2914 return _obj_obj_arg_handler;
2915 }
2916 case JVM_SIGNATURE_ARRAY:
2917 return _obj_obj_arg_handler;
2918 case JVM_SIGNATURE_INT:
2919 case JVM_SIGNATURE_BOOLEAN:
2920 case JVM_SIGNATURE_CHAR:
2921 case JVM_SIGNATURE_BYTE:
2922 case JVM_SIGNATURE_SHORT:
2923 return _obj_int_arg_handler;
2924 }
2925 }
2926 return nullptr;
2927 }
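// A few examples of the fast path above: a virtual method with no further
// arguments maps to _obj_arg_handler (receiver only), a static (I)V method
// maps to _int_arg_handler, and a virtual (Ljava/lang/String;)V method maps
// to _obj_obj_arg_handler, unless InlineTypePassFieldsAsArgs forces the slow
// path because an inline type may need to be scalarized.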
2928
2929 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2930 _method(method), _num_inline_args(0), _has_inline_recv(false),
2931 _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2932 _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2933 _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2934 _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2935 _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2936 _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2937 }
2938
2939 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2940 // or the same entry for VEP and VIEP(RO).
2941 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2942 if (!has_scalarized_args()) {
2943 // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2944 return CodeOffsets::Verified_Entry;
2945 }
2946 if (_method->is_static()) {
2947 // Static methods don't need VIEP(RO)
2948 return CodeOffsets::Verified_Entry;
2949 }
2950
2951 if (has_inline_recv()) {
2952 if (num_inline_args() == 1) {
2953 // Share same entry for VIEP and VIEP(RO).
2954 // This is quite common: we have an instance method in an InlineKlass that has
2955 // no inline type args other than <this>.
2956 return CodeOffsets::Verified_Inline_Entry;
2957 } else {
2958 assert(num_inline_args() > 1, "must be");
2959 // No sharing:
2960 // VIEP(RO) -- <this> is passed as object
2961 // VEP -- <this> is passed as fields
2962 return CodeOffsets::Verified_Inline_Entry_RO;
2963 }
2964 }
2965
2966 // <this> is not (scalarized as) an inline type here; static methods already returned above
2967 if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2968 // No sharing:
2969 // Some arguments are passed on the stack, and we have inserted reserved entries
2970 // into the VEP, but we never insert reserved entries into the VIEP(RO).
2971 return CodeOffsets::Verified_Inline_Entry_RO;
2972 } else {
2973 // Share same entry for VEP and VIEP(RO).
2974 return CodeOffsets::Verified_Entry;
2975 }
2976 }
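// Summary of the sharing decisions above (VEP = Verified_Entry,
// VIEP = Verified_Inline_Entry, VIEP(RO) = Verified_Inline_Entry_RO):
// no scalarized args -> all three share VEP
// static method -> VIEP(RO) shares VEP
// <this> is the only inline arg -> VIEP(RO) shares VIEP
// <this> inline plus other inline args -> separate VIEP(RO)
// no inline <this>, CC and CC(RO) stack sizes differ -> separate VIEP(RO)
// otherwise -> VIEP(RO) shares VEP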
2977
2978 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
2979 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2980 if (_supers != nullptr) {
2981 return _supers;
2982 }
2983 _supers = new GrowableArray<Method*>();
2984 // Skip private, static, and <init> methods
2985 if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2986 return _supers;
2987 }
2988 Symbol* name = _method->name();
2989 Symbol* signature = _method->signature();
2990 const Klass* holder = _method->method_holder()->super();
2991 Symbol* holder_name = holder->name();
2992 ThreadInVMfromUnknown tiv;
2993 JavaThread* current = JavaThread::current();
2994 HandleMark hm(current);
2995 Handle loader(current, _method->method_holder()->class_loader());
2996
2997 // Walk up the class hierarchy and search for super methods
2998 while (holder != nullptr) {
2999 Method* super_method = holder->lookup_method(name, signature);
3000 if (super_method == nullptr) {
3001 break;
3002 }
3003 if (!super_method->is_static() && !super_method->is_private() &&
3004 (!super_method->is_package_private() ||
3005 super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
3006 _supers->push(super_method);
3007 }
3008 holder = super_method->method_holder()->super();
3009 }
3010 // Search interfaces for super methods
3011 Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
3012 for (int i = 0; i < interfaces->length(); ++i) {
3013 Method* m = interfaces->at(i)->lookup_method(name, signature);
3014 if (m != nullptr && !m->is_static() && m->is_public()) {
3015 _supers->push(m);
3016 }
3017 }
3018 return _supers;
3019 }
3020
3021 // Iterate over arguments and compute scalarized and non-scalarized signatures
3022 void CompiledEntrySignature::compute_calling_conventions(bool init) {
3023 bool has_scalarized = false;
3024 if (_method != nullptr) {
3025 InstanceKlass* holder = _method->method_holder();
3026 int arg_num = 0;
3027 if (!_method->is_static()) {
3028 if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() &&
3029 (init || _method->is_scalarized_arg(arg_num))) {
3030 _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
3031 has_scalarized = true;
3032 _has_inline_recv = true;
3033 _num_inline_args++;
3034 } else {
3035 SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
3036 }
3037 SigEntry::add_entry(_sig, T_OBJECT, holder->name());
3038 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
3039 arg_num++;
3040 }
3041 for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
3042 BasicType bt = ss.type();
3043 if (bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) {
3044 InlineKlass* vk = ss.as_inline_klass(holder);
3045 if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
3046 // Check for a calling convention mismatch with super method(s)
3047 bool scalar_super = false;
3048 bool non_scalar_super = false;
3049 GrowableArray<Method*>* supers = get_supers();
3050 for (int i = 0; i < supers->length(); ++i) {
3051 Method* super_method = supers->at(i);
3052 if (super_method->is_scalarized_arg(arg_num)) {
3053 scalar_super = true;
3054 } else {
3055 non_scalar_super = true;
3056 }
3057 }
3058 #ifdef ASSERT
3059 // Randomly enable below code paths for stress testing
3060 bool stress = init && StressCallingConvention;
3061 if (stress && (os::random() & 1) == 1) {
3062 non_scalar_super = true;
3063 if ((os::random() & 1) == 1) {
3064 scalar_super = true;
3065 }
3066 }
3067 #endif
3068 if (non_scalar_super) {
3069 // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
3070 if (scalar_super) {
3071 // Found non-scalar *and* scalar super methods. We can't handle both.
3072 // Mark the scalar method as mismatch and re-compile call sites to use non-scalarized calling convention.
3073 for (int i = 0; i < supers->length(); ++i) {
3074 Method* super_method = supers->at(i);
3075 if (super_method->is_scalarized_arg(arg_num) debug_only(|| (stress && (os::random() & 1) == 1))) {
3076 super_method->set_mismatch();
3077 MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
3078 JavaThread* thread = JavaThread::current();
3079 HandleMark hm(thread);
3080 methodHandle mh(thread, super_method);
3081 DeoptimizationScope deopt_scope;
3082 CodeCache::mark_for_deoptimization(&deopt_scope, mh());
3083 deopt_scope.deoptimize_marked();
3084 }
3085 }
3086 }
3087 // Fall back to non-scalarized calling convention
3088 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
3089 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
3090 } else {
3091 _num_inline_args++;
3092 has_scalarized = true;
3093 int last = _sig_cc->length();
3094 int last_ro = _sig_cc_ro->length();
3095 _sig_cc->appendAll(vk->extended_sig());
3096 _sig_cc_ro->appendAll(vk->extended_sig());
3097 if (bt == T_OBJECT) {
3098 // Nullable inline type argument, insert InlineTypeNode::IsInit field right after T_PRIMITIVE_OBJECT
3099 _sig_cc->insert_before(last+1, SigEntry(T_BOOLEAN, -1, nullptr));
3100 _sig_cc_ro->insert_before(last_ro+1, SigEntry(T_BOOLEAN, -1, nullptr));
3101 }
3102 }
3103 } else {
3104 SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
3105 SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
3106 }
3107 bt = T_OBJECT;
3108 } else {
3109 SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
3110 SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
3111 }
3112 SigEntry::add_entry(_sig, bt, ss.as_symbol());
3113 if (bt != T_VOID) {
3114 arg_num++;
3115 }
3116 }
3117 }
3118
3119 // Compute the non-scalarized calling convention
3120 _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3121 _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3122
3123 // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3124 if (has_scalarized && !_method->is_native()) {
3125 _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3126 _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3127
3128 _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3129 _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3130
3131 _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3132 _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3133
3134 // Upper bound on stack arguments to avoid hitting the argument limit and
3135 // bailing out of compilation ("unsupported incoming calling sequence").
3136 // TODO we need a reasonable limit (flag?) here
3137 if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 60) {
3138 return; // Success
3139 }
3140 }
3141
3142 // No scalarized args
3143 _sig_cc = _sig;
3144 _regs_cc = _regs;
3145 _args_on_stack_cc = _args_on_stack;
3146
3147 _sig_cc_ro = _sig;
3148 _regs_cc_ro = _regs;
3149 _args_on_stack_cc_ro = _args_on_stack;
3150 }
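// Hedged illustration of the three signatures built above for a virtual
// method void m(MyValue v), where MyValue is a hypothetical inline klass
// that can be passed as fields:
// _sig receiver plus MyValue as a single T_OBJECT (no scalarization)
// _sig_cc receiver plus MyValue's scalarized fields from extended_sig(),
// with an extra T_BOOLEAN "IsInit" slot because the argument is
// nullable (declared T_OBJECT rather than T_PRIMITIVE_OBJECT)
// _sig_cc_ro same as _sig_cc here, since <this> is not an inline type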
3151
3152 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
3153 // Use customized signature handler. Need to lock around updates to
3154 // the _adapter_handler_table (it is not safe for concurrent readers
3155 // and a single writer: this could be fixed if it becomes a
3156 // problem).
3157
3158 // Fast-path for trivial adapters
3159 AdapterHandlerEntry* entry = get_simple_adapter(method);
3160 if (entry != nullptr) {
3161 return entry;
3162 }
3163
3164 ResourceMark rm;
3165 AdapterBlob* new_adapter = nullptr;
3166
3167 CompiledEntrySignature ces(method());
3168 ces.compute_calling_conventions();
3169 if (ces.has_scalarized_args()) {
3170 method->set_has_scalarized_args();
3171 if (ces.c1_needs_stack_repair()) {
3172 method->set_c1_needs_stack_repair();
3173 }
3174 if (ces.c2_needs_stack_repair()) {
3175 method->set_c2_needs_stack_repair();
3176 }
3177 } else if (method->is_abstract()) {
3178 return _abstract_method_handler;
3179 }
3180
3181 {
3182 MutexLocker mu(AdapterHandlerLibrary_lock);
3183
3184 if (ces.has_scalarized_args() && method->is_abstract()) {
3185 // Save a C heap allocated version of the signature for abstract methods with scalarized inline type arguments
3186 address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
3187 entry = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(nullptr),
3188 StubRoutines::throw_AbstractMethodError_entry(),
3189 wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
3190 wrong_method_abstract, wrong_method_abstract);
3191 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro()->length(), mtInternal);
3192 heap_sig->appendAll(ces.sig_cc_ro());
3193 entry->set_sig_cc(heap_sig);
3194 return entry;
3195 }
3196
3197 // Lookup method signature's fingerprint
3198 entry = lookup(ces.sig_cc(), ces.has_inline_recv());
3199
3200 if (entry != nullptr) {
3201 #ifdef ASSERT
3202 if (VerifyAdapterSharing) {
3203 AdapterBlob* comparison_blob = nullptr;
3204 AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, ces, false);
3205 assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison");
3206 assert(comparison_entry->compare_code(entry), "code must match");
3207 // Release the one just created and return the original
3208 delete comparison_entry;
3209 }
3210 #endif
3211 return entry;
3212 }
3213
3214 entry = create_adapter(new_adapter, ces, /* allocate_code_blob */ true);
3215 }
3216
3217 // Outside of the lock
3218 if (new_adapter != nullptr) {
3219 post_adapter_creation(new_adapter, entry);
3220 }
3221 return entry;
3222 }
3223
3224 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
3225 CompiledEntrySignature& ces,
3226 bool allocate_code_blob) {
3227
3228 // StubRoutines::_final_stubs_code is initialized after this function can be called. As a result,
3229 // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated prior
3230 // to StubRoutines::_final_stubs_code being set. Checks refer to runtime range checks generated
3231 // in an I2C stub that ensure that an I2C stub is called from an interpreter frame or stubs.
3232 bool contains_all_checks = StubRoutines::final_stubs_code() != nullptr;
3233
3234 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3235 CodeBuffer buffer(buf);
3236 short buffer_locs[20];
3237 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3238 sizeof(buffer_locs)/sizeof(relocInfo));
3239
3240 // Make a C heap allocated version of the fingerprint to store in the adapter
3241 AdapterFingerPrint* fingerprint = new AdapterFingerPrint(ces.sig_cc(), ces.has_inline_recv());
3242 MacroAssembler _masm(&buffer);
3243 AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
3244 ces.args_on_stack(),
3245 ces.sig(),
3246 ces.regs(),
3247 ces.sig_cc(),
3248 ces.regs_cc(),
3249 ces.sig_cc_ro(),
3250 ces.regs_cc_ro(),
3251 fingerprint,
3252 new_adapter,
3253 allocate_code_blob);
3254
3255 if (ces.has_scalarized_args()) {
3256 // Save a C heap allocated version of the scalarized signature and store it in the adapter
3257 GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3258 heap_sig->appendAll(ces.sig_cc());
3259 entry->set_sig_cc(heap_sig);
3260 }
3261
3262 #ifdef ASSERT
3263 if (VerifyAdapterSharing) {
3264 entry->save_code(buf->code_begin(), buffer.insts_size());
3265 if (!allocate_code_blob) {
3266 return entry;
3267 }
3268 }
3269 #endif
3270
3271 NOT_PRODUCT(int insts_size = buffer.insts_size());
3272 if (new_adapter == nullptr) {
3273 // CodeCache is full, disable compilation
3274 // Ought to log this but compile log is only per compile thread
3275 // and we're some nondescript Java thread.
3276 return nullptr;
3277 }
3278 entry->relocate(new_adapter->content_begin());
3279 #ifndef PRODUCT
3280 // debugging support
3281 if (PrintAdapterHandlers || PrintStubCode) {
3282 ttyLocker ttyl;
3283 entry->print_adapter_on(tty);
3284 tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
3285 _adapter_handler_table.number_of_entries(), fingerprint->as_basic_args_string(),
3286 fingerprint->as_string(), insts_size);
3287 tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
3288 if (Verbose || PrintStubCode) {
3289 address first_pc = entry->base_address();
3290 if (first_pc != nullptr) {
3291 Disassembler::decode(first_pc, first_pc + insts_size, tty
3292 NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
3293 tty->cr();
3294 }
3295 }
3296 }
3297 #endif
3298
3299 // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
3300 // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
3301 if (contains_all_checks || !VerifyAdapterCalls) {
3302 assert_lock_strong(AdapterHandlerLibrary_lock);
3303 _adapter_handler_table.put(fingerprint, entry);
3304 }
3305 return entry;
3306 }
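// A note on the two ways create_adapter() is used above: get_adapter() calls
// it with allocate_code_blob = true to install a real AdapterBlob, while the
// VerifyAdapterSharing path calls it with allocate_code_blob = false so that
// only the generated instructions are captured (via save_code) and compared
// against the already shared entry, after which the throw-away entry is deleted.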
3307
3308 address AdapterHandlerEntry::base_address() {
3309 address base = _i2c_entry;
3310 if (base == nullptr) base = _c2i_entry;
3311 assert(base <= _c2i_entry || _c2i_entry == nullptr, "");
3312 assert(base <= _c2i_inline_entry || _c2i_inline_entry == nullptr, "");
3313 assert(base <= _c2i_inline_ro_entry || _c2i_inline_ro_entry == nullptr, "");
3314 assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, "");
3315 assert(base <= _c2i_unverified_inline_entry || _c2i_unverified_inline_entry == nullptr, "");
3316 assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, "");
3317 return base;
3318 }
3319
3320 void AdapterHandlerEntry::relocate(address new_base) {
3321 address old_base = base_address();
3322 assert(old_base != nullptr, "");
3323 ptrdiff_t delta = new_base - old_base;
3324 if (_i2c_entry != nullptr)
3325 _i2c_entry += delta;
3326 if (_c2i_entry != nullptr)
3327 _c2i_entry += delta;
3328 if (_c2i_inline_entry != nullptr)
3329 _c2i_inline_entry += delta;
3330 if (_c2i_inline_ro_entry != nullptr)
3331 _c2i_inline_ro_entry += delta;
3332 if (_c2i_unverified_entry != nullptr)
3333 _c2i_unverified_entry += delta;
3334 if (_c2i_unverified_inline_entry != nullptr)
3335 _c2i_unverified_inline_entry += delta;
3336 if (_c2i_no_clinit_check_entry != nullptr)
3337 _c2i_no_clinit_check_entry += delta;
3338 assert(base_address() == new_base, "");
3339 }
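// All entries of an AdapterHandlerEntry live inside a single code blob, so
// shifting every non-null entry by the same delta preserves their relative
// layout; base_address() (the lowest entry, normally _i2c_entry) serves as
// the anchor for computing that delta.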
3340
3341
3342 AdapterHandlerEntry::~AdapterHandlerEntry() {
3343 delete _fingerprint;
3344 if (_sig_cc != nullptr) {
3345 delete _sig_cc;
3346 }
3347 #ifdef ASSERT
3348 FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3349 #endif
3350 }
3351
3352
3353 #ifdef ASSERT
3354 // Capture the code before relocation so that it can be compared
3355 // against other versions. If the code is captured after relocation
3356 // then relative instructions won't be equivalent.
3357 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3358 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3359 _saved_code_length = length;
3360 memcpy(_saved_code, buffer, length);
3361 }
3362
3363
3364 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3365 assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3366
3413
3414 struct { double data[20]; } locs_buf;
3415 struct { double data[20]; } stubs_locs_buf;
3416 buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3417 #if defined(AARCH64) || defined(PPC64)
3418 // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3419 // in the constant pool to ensure ordering between the barrier and oops
3420 // accesses. For native_wrappers we need a constant.
3421 // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3422 // static java call that is resolved in the runtime.
3423 if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3424 buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3425 }
3426 #endif
3427 buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3428 MacroAssembler _masm(&buffer);
3429
3430 // Fill in the signature array, for the calling-convention call.
3431 const int total_args_passed = method->size_of_parameters();
3432
3433 BasicType stack_sig_bt[16];
3434 VMRegPair stack_regs[16];
3435 BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3436 VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3437
3438 int i = 0;
3439 if (!method->is_static()) { // Pass in receiver first
3440 sig_bt[i++] = T_OBJECT;
3441 }
3442 SignatureStream ss(method->signature());
3443 for (; !ss.at_return_type(); ss.next()) {
3444 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
3445 if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3446 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
3447 }
3448 }
3449 assert(i == total_args_passed, "");
3450 BasicType ret_type = ss.type();
3451
3452 // Now get the compiled-Java arguments layout.
3453 int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3454
3455 // Generate the compiled-to-native wrapper code
3456 nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3457
3458 if (nm != nullptr) {
3459 {
3460 MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
3461 if (nm->make_in_use()) {
3462 method->set_code(method, nm);
3463 }
3464 }
3465
3466 DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
3467 if (directive->PrintAssemblyOption) {
3468 nm->print_code();
3469 }
3470 DirectivesStack::release(directive);
3667 st->print("Adapter for signature: ");
3668 a->print_adapter_on(st);
3669 return true;
3670 } else {
3671 return false; // keep looking
3672 }
3673 };
3674 assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3675 _adapter_handler_table.iterate(findblob);
3676 assert(found, "Should have found handler");
3677 }
3678
3679 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3680 st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3681 if (get_i2c_entry() != nullptr) {
3682 st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3683 }
3684 if (get_c2i_entry() != nullptr) {
3685 st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3686 }
3687 if (get_c2i_inline_entry() != nullptr) {
3688 st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3689 }
3690 if (get_c2i_inline_ro_entry() != nullptr) {
3691 st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3692 }
3693 if (get_c2i_unverified_entry() != nullptr) {
3694 st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3695 }
3696 if (get_c2i_unverified_inline_entry() != nullptr) {
3697 st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3698 }
3699 if (get_c2i_no_clinit_check_entry() != nullptr) {
3700 st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3701 }
3702 st->cr();
3703 }
3704
3705 #ifndef PRODUCT
3706
3707 void AdapterHandlerLibrary::print_statistics() {
3708 print_table_statistics();
3709 }
3710
3711 #endif /* PRODUCT */
3712
3713 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3714 assert(current == JavaThread::current(), "pre-condition");
3715 StackOverflow* overflow_state = current->stack_overflow_state();
3716 overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3717 overflow_state->set_reserved_stack_activation(current->stack_base());
3766 event.set_method(method);
3767 event.commit();
3768 }
3769 }
3770 }
3771 return activation;
3772 }
3773
3774 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3775 // After any safepoint, just before going back to compiled code,
3776 // we inform the GC that we will be doing initializing writes to
3777 // this object in the future without emitting card-marks, so
3778 // GC may take any compensating steps.
3779
3780 oop new_obj = current->vm_result();
3781 if (new_obj == nullptr) return;
3782
3783 BarrierSet *bs = BarrierSet::barrier_set();
3784 bs->on_slowpath_allocation_exit(current, new_obj);
3785 }
3786
3787 // We are at a compiled code to interpreter call. We need backing
3788 // buffers for all inline type arguments. Allocate an object array to
3789 // hold them (convenient because once we're done with it we don't have
3790 // to worry about freeing it).
3791 oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS) {
3792 assert(InlineTypePassFieldsAsArgs, "no reason to call this");
3793 ResourceMark rm;
3794
3795 int nb_slots = 0;
3796 InstanceKlass* holder = callee->method_holder();
3797 allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
3798 if (allocate_receiver) {
3799 nb_slots++;
3800 }
3801 int arg_num = callee->is_static() ? 0 : 1;
3802 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3803 BasicType bt = ss.type();
3804 if ((bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) && callee->is_scalarized_arg(arg_num)) {
3805 nb_slots++;
3806 }
3807 if (bt != T_VOID) {
3808 arg_num++;
3809 }
3810 }
3811 objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
3812 objArrayHandle array(THREAD, array_oop);
3813 arg_num = callee->is_static() ? 0 : 1;
3814 int i = 0;
3815 if (allocate_receiver) {
3816 InlineKlass* vk = InlineKlass::cast(holder);
3817 oop res = vk->allocate_instance(CHECK_NULL);
3818 array->obj_at_put(i++, res);
3819 }
3820 for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3821 BasicType bt = ss.type();
3822 if ((bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) && callee->is_scalarized_arg(arg_num)) {
3823 InlineKlass* vk = ss.as_inline_klass(holder);
3824 assert(vk != nullptr, "Unexpected klass");
3825 oop res = vk->allocate_instance(CHECK_NULL);
3826 array->obj_at_put(i++, res);
3827 }
3828 if (bt != T_VOID) {
3829 arg_num++;
3830 }
3831 }
3832 return array();
3833 }
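// Hedged example of the slot counting above: for a non-static callee
// void m(MyValue a, int b, MyValue c) whose receiver is not scalarized but
// whose two MyValue arguments are, nb_slots is 2 and the returned array holds
// freshly allocated buffers for a and c in argument order (MyValue is a
// hypothetical inline klass used only for illustration).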
3834
3835 JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
3836 methodHandle callee(current, callee_method);
3837 oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, CHECK);
3838 current->set_vm_result(array);
3839 current->set_vm_result_2(callee()); // TODO: required to keep callee live?
3840 JRT_END
3841
3842 // We're returning from an interpreted method: load each field into a
3843 // register following the calling convention
3844 JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
3845 {
3846 assert(res->klass()->is_inline_klass(), "only inline types here");
3847 ResourceMark rm;
3848 RegisterMap reg_map(current,
3849 RegisterMap::UpdateMap::include,
3850 RegisterMap::ProcessFrames::include,
3851 RegisterMap::WalkContinuation::skip);
3852 frame stubFrame = current->last_frame();
3853 frame callerFrame = stubFrame.sender(&reg_map);
3854 assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
3855
3856 InlineKlass* vk = InlineKlass::cast(res->klass());
3857
3858 const Array<SigEntry>* sig_vk = vk->extended_sig();
3859 const Array<VMRegPair>* regs = vk->return_regs();
3860
3861 if (regs == nullptr) {
3862 // The fields of the inline klass don't fit in registers, bail out
3863 return;
3864 }
3865
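// regs->at(0) is the register carrying the inline type oop itself (checked
// by the ASSERT block at the end), so the field values start at index 1.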
3866 int j = 1;
3867 for (int i = 0; i < sig_vk->length(); i++) {
3868 BasicType bt = sig_vk->at(i)._bt;
3869 if (bt == T_PRIMITIVE_OBJECT) {
3870 continue;
3871 }
3872 if (bt == T_VOID) {
3873 if (sig_vk->at(i-1)._bt == T_LONG ||
3874 sig_vk->at(i-1)._bt == T_DOUBLE) {
3875 j++;
3876 }
3877 continue;
3878 }
3879 int off = sig_vk->at(i)._offset;
3880 assert(off > 0, "offset in object should be positive");
3881 VMRegPair pair = regs->at(j);
3882 address loc = reg_map.location(pair.first(), nullptr);
3883 switch(bt) {
3884 case T_BOOLEAN:
3885 *(jboolean*)loc = res->bool_field(off);
3886 break;
3887 case T_CHAR:
3888 *(jchar*)loc = res->char_field(off);
3889 break;
3890 case T_BYTE:
3891 *(jbyte*)loc = res->byte_field(off);
3892 break;
3893 case T_SHORT:
3894 *(jshort*)loc = res->short_field(off);
3895 break;
3896 case T_INT: {
3897 *(jint*)loc = res->int_field(off);
3898 break;
3899 }
3900 case T_LONG:
3901 #ifdef _LP64
3902 *(intptr_t*)loc = res->long_field(off);
3903 #else
3904 Unimplemented();
3905 #endif
3906 break;
3907 case T_OBJECT:
3908 case T_ARRAY: {
3909 *(oop*)loc = res->obj_field(off);
3910 break;
3911 }
3912 case T_FLOAT:
3913 *(jfloat*)loc = res->float_field(off);
3914 break;
3915 case T_DOUBLE:
3916 *(jdouble*)loc = res->double_field(off);
3917 break;
3918 default:
3919 ShouldNotReachHere();
3920 }
3921 j++;
3922 }
3923 assert(j == regs->length(), "missed a field?");
3924
3925 #ifdef ASSERT
3926 VMRegPair pair = regs->at(0);
3927 address loc = reg_map.location(pair.first(), nullptr);
3928 assert(*(oopDesc**)loc == res, "overwritten object");
3929 #endif
3930
3931 current->set_vm_result(res);
3932 }
3933 JRT_END
3934
3935 // We've returned to an interpreted method; the interpreter needs a
3936 // reference to an inline type instance. Allocate it and initialize it
3937 // from the field values in registers.
3938 JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
3939 {
3940 ResourceMark rm;
3941 RegisterMap reg_map(current,
3942 RegisterMap::UpdateMap::include,
3943 RegisterMap::ProcessFrames::include,
3944 RegisterMap::WalkContinuation::skip);
3945 frame stubFrame = current->last_frame();
3946 frame callerFrame = stubFrame.sender(&reg_map);
3947
3948 #ifdef ASSERT
3949 InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
3950 #endif
3951
3952 if (!is_set_nth_bit(res, 0)) {
3953 // We're not returning with inline type fields in registers (the
3954 // calling convention didn't allow it for this inline klass)
3955 assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
3956 current->set_vm_result((oopDesc*)res);
3957 assert(verif_vk == nullptr, "broken calling convention");
3958 return;
3959 }
3960
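// Bit 0 of res is a tag: when set, res is the InlineKlass* of the value being
// returned with its fields still in registers; when clear (handled above),
// res is an ordinary oop or a pointer into the buffer area.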
3961 clear_nth_bit(res, 0);
3962 InlineKlass* vk = (InlineKlass*)res;
3963 assert(verif_vk == vk, "broken calling convention");
3964 assert(Metaspace::contains((void*)res), "should be klass");
3965
3966 // Allocate handles for every oop field so they are safe in case of
3967 // a safepoint when allocating
3968 GrowableArray<Handle> handles;
3969 vk->save_oop_fields(reg_map, handles);
3970
3971 // It's unsafe to safepoint until we are here
3972 JRT_BLOCK;
3973 {
3974 JavaThread* THREAD = current;
3975 oop vt = vk->realloc_result(reg_map, handles, CHECK);
3976 current->set_vm_result(vt);
3977 }
3978 JRT_BLOCK_END;
3979 }
3980 JRT_END
3981
|