
src/hotspot/share/runtime/sharedRuntime.cpp


  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "classfile/stringTable.hpp"
  29 #include "classfile/vmClasses.hpp"
  30 #include "classfile/vmSymbols.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/compiledIC.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/compiledMethod.inline.hpp"
  35 #include "code/scopeDesc.hpp"
  36 #include "code/vtableStubs.hpp"
  37 #include "compiler/abstractCompiler.hpp"
  38 #include "compiler/compileBroker.hpp"
  39 #include "compiler/disassembler.hpp"
  40 #include "gc/shared/barrierSet.hpp"
  41 #include "gc/shared/collectedHeap.hpp"
  42 #include "gc/shared/gcLocker.inline.hpp"
  43 #include "interpreter/interpreter.hpp"
  44 #include "interpreter/interpreterRuntime.hpp"
  45 #include "jfr/jfrEvents.hpp"
  46 #include "logging/log.hpp"
  47 #include "memory/resourceArea.hpp"
  48 #include "memory/universe.hpp"
  49 #include "oops/compiledICHolder.inline.hpp"
  50 #include "oops/klass.hpp"
  51 #include "oops/method.inline.hpp"
  52 #include "oops/objArrayKlass.hpp"
  53 #include "oops/oop.inline.hpp"
  54 #include "prims/forte.hpp"
  55 #include "prims/jvmtiExport.hpp"
  56 #include "prims/methodHandles.hpp"
  57 #include "prims/nativeLookup.hpp"
  58 #include "runtime/atomic.hpp"
  59 #include "runtime/frame.inline.hpp"
  60 #include "runtime/handles.inline.hpp"
  61 #include "runtime/init.hpp"
  62 #include "runtime/interfaceSupport.inline.hpp"
  63 #include "runtime/java.hpp"
  64 #include "runtime/javaCalls.hpp"
  65 #include "runtime/sharedRuntime.hpp"
  66 #include "runtime/stackWatermarkSet.hpp"
  67 #include "runtime/stubRoutines.hpp"
  68 #include "runtime/synchronizer.hpp"
  69 #include "runtime/vframe.inline.hpp"
  70 #include "runtime/vframeArray.hpp"
  71 #include "runtime/vm_version.hpp"
  72 #include "utilities/copy.hpp"
  73 #include "utilities/dtrace.hpp"
  74 #include "utilities/events.hpp"
  75 #include "utilities/resourceHash.hpp"
  76 #include "utilities/macros.hpp"
  77 #include "utilities/xmlstream.hpp"
  78 #ifdef COMPILER1
  79 #include "c1/c1_Runtime1.hpp"
  80 #endif
  81 
  82 // Shared stub locations
  83 RuntimeStub*        SharedRuntime::_wrong_method_blob;
  84 RuntimeStub*        SharedRuntime::_wrong_method_abstract_blob;
  85 RuntimeStub*        SharedRuntime::_ic_miss_blob;
  86 RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
  87 RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
  88 RuntimeStub*        SharedRuntime::_resolve_static_call_blob;
  89 address             SharedRuntime::_resolve_static_call_entry;
  90 
  91 DeoptimizationBlob* SharedRuntime::_deopt_blob;
  92 SafepointBlob*      SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
  93 SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
  94 SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
  95 
  96 #ifdef COMPILER2
  97 UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
  98 #endif // COMPILER2
  99 
 100 nmethod*            SharedRuntime::_cont_doYield_stub;
 101 
 102 //----------------------------generate_stubs-----------------------------------
 103 void SharedRuntime::generate_stubs() {
 104   _wrong_method_blob                   = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),          "wrong_method_stub");
 105   _wrong_method_abstract_blob          = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
 106   _ic_miss_blob                        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),  "ic_miss_stub");
 107   _resolve_opt_virtual_call_blob       = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),   "resolve_opt_virtual_call");
 108   _resolve_virtual_call_blob           = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),       "resolve_virtual_call");
 109   _resolve_static_call_blob            = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),        "resolve_static_call");
 110   _resolve_static_call_entry           = _resolve_static_call_blob->entry_point();
 111 
 112   AdapterHandlerLibrary::initialize();
 113 
 114 #if COMPILER2_OR_JVMCI
 115   // Vectors are generated only by C2 and JVMCI.
 116   bool support_wide = is_wide_vector(MaxVectorSize);
 117   if (support_wide) {
 118     _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
 119   }
 120 #endif // COMPILER2_OR_JVMCI
 121   _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
 122   _polling_page_return_handler_blob    = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);
 123 
 124   generate_deopt_blob();
 125 
 126 #ifdef COMPILER2
 127   generate_uncommon_trap_blob();
 128 #endif // COMPILER2
 129 }
 130 

 979   // forwarded before we look at the return value.
 980   THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badAddress);
 981 }
 982 JNI_END
 983 
 984 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
 985   return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
 986 }
 987 
 988 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* current, oopDesc* obj))
 989 #if INCLUDE_JVMCI
 990   if (!obj->klass()->has_finalizer()) {
 991     return;
 992   }
 993 #endif // INCLUDE_JVMCI
 994   assert(oopDesc::is_oop(obj), "must be a valid oop");
 995   assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
 996   InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
 997 JRT_END
 998 
 999 jlong SharedRuntime::get_java_tid(Thread* thread) {
1000   if (thread != NULL && thread->is_Java_thread()) {
1001     Thread* current = Thread::current();
1002     guarantee(current != thread || JavaThread::cast(thread)->is_oop_safe(),
1003               "current cannot touch oops after its GC barrier is detached.");
1004     oop obj = JavaThread::cast(thread)->threadObj();
1005     return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
1006   }
1007   return 0;
1008 }
1009 
1010 /**
1011  * This function ought to be a void function, but cannot be because
1012  * it gets turned into a tail-call on sparc, which runs into dtrace bug
1013  * 6254741.  Once that is fixed we can remove the dummy return value.
1014  */
1015 int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
1016   return dtrace_object_alloc(Thread::current(), o, o->size());
1017 }
1018 

 1088 // for a call currently in progress, i.e., arguments have been pushed on stack
 1089 // but the callee has not been invoked yet.  Caller frame must be compiled.
1090 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1091                                               CallInfo& callinfo, TRAPS) {
1092   Handle receiver;
1093   Handle nullHandle;  // create a handy null handle for exception returns
1094   JavaThread* current = THREAD;
1095 
1096   assert(!vfst.at_end(), "Java frame must exist");
1097 
1098   // Find caller and bci from vframe
1099   methodHandle caller(current, vfst.method());
1100   int          bci   = vfst.bci();
1101 
1102   if (caller->is_continuation_enter_intrinsic()) {
1103     bc = Bytecodes::_invokestatic;
1104     LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1105     return receiver;
1106   }
1107 
1108   Bytecode_invoke bytecode(caller, bci);
1109   int bytecode_index = bytecode.index();
1110   bc = bytecode.invoke_code();
1111 
1112   methodHandle attached_method(current, extract_attached_method(vfst));
1113   if (attached_method.not_null()) {
1114     Method* callee = bytecode.static_target(CHECK_NH);
1115     vmIntrinsics::ID id = callee->intrinsic_id();
 1116     // When the VM replaces a MH.invokeBasic/linkTo* call with a direct/virtual call,
 1117     // it attaches the statically resolved method to the call site.
1118     if (MethodHandles::is_signature_polymorphic(id) &&
1119         MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1120       bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1121 
1122       // Adjust invocation mode according to the attached method.
1123       switch (bc) {
1124         case Bytecodes::_invokevirtual:
1125           if (attached_method->method_holder()->is_interface()) {
1126             bc = Bytecodes::_invokeinterface;
1127           }
1128           break;
1129         case Bytecodes::_invokeinterface:
1130           if (!attached_method->method_holder()->is_interface()) {
1131             bc = Bytecodes::_invokevirtual;
1132           }
1133           break;
1134         case Bytecodes::_invokehandle:
1135           if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1136             bc = attached_method->is_static() ? Bytecodes::_invokestatic
1137                                               : Bytecodes::_invokevirtual;
1138           }
1139           break;
1140         default:
1141           break;
1142       }
1143     }
1144   }
1145 
1146   assert(bc != Bytecodes::_illegal, "not initialized");
1147 
1148   bool has_receiver = bc != Bytecodes::_invokestatic &&
1149                       bc != Bytecodes::_invokedynamic &&
1150                       bc != Bytecodes::_invokehandle;
1151 
1152   // Find receiver for non-static call
1153   if (has_receiver) {
 1154     // This register map must be updated since we need to find the receiver for
1155     // compiled frames. The receiver might be in a register.
1156     RegisterMap reg_map2(current,
1157                          RegisterMap::UpdateMap::include,
1158                          RegisterMap::ProcessFrames::include,
1159                          RegisterMap::WalkContinuation::skip);
1160     frame stubFrame   = current->last_frame();
1161     // Caller-frame is a compiled frame
1162     frame callerFrame = stubFrame.sender(&reg_map2);
1163 
1164     if (attached_method.is_null()) {
1165       Method* callee = bytecode.static_target(CHECK_NH);
1166       if (callee == NULL) {
1167         THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1168       }
1169     }
1170 
1171     // Retrieve from a compiled argument list
1172     receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1173     assert(oopDesc::is_oop_or_null(receiver()), "");
1174 
1175     if (receiver.is_null()) {
1176       THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1177     }
1178   }
1179 
1180   // Resolve method
1181   if (attached_method.not_null()) {
1182     // Parameterized by attached method.
1183     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1184   } else {
1185     // Parameterized by bytecode.
1186     constantPoolHandle constants(current, caller->constants());
1187     LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1188   }
1189 
1190 #ifdef ASSERT
1191   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1192   if (has_receiver) {
1193     assert(receiver.not_null(), "should have thrown exception");
1194     Klass* receiver_klass = receiver->klass();
1195     Klass* rk = NULL;
1196     if (attached_method.not_null()) {
 1197       // In case there's a resolved method attached, use its holder during the check.
1198       rk = attached_method->method_holder();
1199     } else {
1200       // Klass is already loaded.
1201       constantPoolHandle constants(current, caller->constants());
1202       rk = constants->klass_ref_at(bytecode_index, CHECK_NH);
1203     }
1204     Klass* static_receiver_klass = rk;
1205     assert(receiver_klass->is_subtype_of(static_receiver_klass),
1206            "actual receiver must be subclass of static receiver klass");
1207     if (receiver_klass->is_instance_klass()) {
1208       if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1209         tty->print_cr("ERROR: Klass not yet initialized!!");
1210         receiver_klass->print();
1211       }
1212       assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");

1233                         RegisterMap::UpdateMap::skip,
1234                         RegisterMap::ProcessFrames::include,
1235                         RegisterMap::WalkContinuation::skip);
1236     frame fr = current->last_frame();
1237     assert(fr.is_runtime_frame(), "must be a runtimeStub");
1238     fr = fr.sender(&reg_map);
1239     assert(fr.is_entry_frame(), "must be");
1240     // fr is now pointing to the entry frame.
1241     callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1242   } else {
1243     Bytecodes::Code bc;
1244     CallInfo callinfo;
1245     find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1246     callee_method = methodHandle(current, callinfo.selected_method());
1247   }
1248   assert(callee_method()->is_method(), "must be");
1249   return callee_method;
1250 }
1251 
1252 // Resolves a call.
1253 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
1254   methodHandle callee_method;
1255   callee_method = resolve_sub_helper(is_virtual, is_optimized, THREAD);
1256   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
1257     int retry_count = 0;
1258     while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
1259            callee_method->method_holder() != vmClasses::Object_klass()) {
 1260       // If there is a pending exception then there is no need to re-try
 1261       // resolving this method.
1262       // If the method has been redefined, we need to try again.
1263       // Hack: we have no way to update the vtables of arrays, so don't
1264       // require that java.lang.Object has been updated.
1265 
 1266       // It is very unlikely that a method is redefined more than 100 times
 1267       // in the middle of resolve. If we loop here more than 100 times,
 1268       // there could be a bug.
1269       guarantee((retry_count++ < 100),
1270                 "Could not resolve to latest version of redefined method");
 1271       // The method was redefined in the middle of resolve, so re-try.
1272       callee_method = resolve_sub_helper(is_virtual, is_optimized, THREAD);
1273     }
1274   }
1275   return callee_method;
1276 }
1277 
1278 // This fails if resolution required refilling of IC stubs
1279 bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
1280                                                 CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
1281                                                 Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
1282   StaticCallInfo static_call_info;
1283   CompiledICInfo virtual_call_info;
1284 
1285   // Make sure the callee nmethod does not get deoptimized and removed before
1286   // we are done patching the code.
1287   CompiledMethod* callee = callee_method->code();
1288 
1289   if (callee != NULL) {
1290     assert(callee->is_compiled(), "must be nmethod for patching");
1291   }
1292 
1293   if (callee != NULL && !callee->is_in_use()) {
1294     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1295     callee = NULL;
1296   }
1297 #ifdef ASSERT
1298   address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
1299 #endif
1300 
1301   bool is_nmethod = caller_nm->is_nmethod();
1302 
1303   if (is_virtual) {
1304     assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1305     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1306     Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
1307     CompiledIC::compute_monomorphic_entry(callee_method, klass,
1308                      is_optimized, static_bound, is_nmethod, virtual_call_info,
1309                      CHECK_false);
1310   } else {
1311     // static call
1312     CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
1313   }
1314 
1315   // grab lock, check for deoptimization and potentially patch caller
1316   {
1317     CompiledICLocker ml(caller_nm);
1318 
1319     // Lock blocks for safepoint during which both nmethods can change state.
1320 
 1321     // Now that we are ready to patch: if the Method* was redefined then
 1322     // don't update the call site and let the caller retry.
 1323     // Don't update the call site if the callee nmethod was unloaded or deoptimized.
 1324     // Don't update the call site if the callee nmethod was replaced by another
 1325     // nmethod, which may happen once multiple alive nmethods per method
 1326     // (tiered compilation) are supported.
1327     if (!callee_method->is_old() &&
1328         (callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
1329       NoSafepointVerifier nsv;
1330 #ifdef ASSERT
1331       // We must not try to patch to jump to an already unloaded method.
1332       if (dest_entry_point != 0) {

1345       } else {
1346         if (VM_Version::supports_fast_class_init_checks() &&
1347             invoke_code == Bytecodes::_invokestatic &&
1348             callee_method->needs_clinit_barrier() &&
1349             callee != NULL && callee->is_compiled_by_jvmci()) {
1350           return true; // skip patching for JVMCI
1351         }
1352         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
1353         if (is_nmethod && caller_nm->method()->is_continuation_enter_intrinsic()) {
1354           ssc->compute_entry_for_continuation_entry(callee_method, static_call_info);
1355         }
1356         if (ssc->is_clean()) ssc->set(static_call_info);
1357       }
1358     }
1359   } // unlock CompiledICLocker
1360   return true;
1361 }
1362 
1363 // Resolves a call.  The compilers generate code for calls that go here
1364 // and are patched with the real destination of the call.
1365 methodHandle SharedRuntime::resolve_sub_helper(bool is_virtual, bool is_optimized, TRAPS) {
1366   JavaThread* current = THREAD;
1367   ResourceMark rm(current);
1368   RegisterMap cbl_map(current,
1369                       RegisterMap::UpdateMap::skip,
1370                       RegisterMap::ProcessFrames::include,
1371                       RegisterMap::WalkContinuation::skip);
1372   frame caller_frame = current->last_frame().sender(&cbl_map);
1373 
1374   CodeBlob* caller_cb = caller_frame.cb();
1375   guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
1376   CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
1377 
1378   // determine call info & receiver
1379   // note: a) receiver is NULL for static calls
1380   //       b) an exception is thrown if receiver is NULL for non-static calls
1381   CallInfo call_info;
1382   Bytecodes::Code invoke_code = Bytecodes::_illegal;
1383   Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1384   methodHandle callee_method(current, call_info.selected_method());
1385 
1386   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1387          (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1388          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1389          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1390          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1391 
1392   assert(!caller_nm->is_unloading(), "It should not be unloading");
1393 
1394 #ifndef PRODUCT
1395   // tracing/debugging/statistics
1396   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :

1455     }
1456   }
1457 
1458 }
1459 
1460 
1461 // Inline caches exist only in compiled code
1462 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1463 #ifdef ASSERT
1464   RegisterMap reg_map(current,
1465                       RegisterMap::UpdateMap::skip,
1466                       RegisterMap::ProcessFrames::include,
1467                       RegisterMap::WalkContinuation::skip);
1468   frame stub_frame = current->last_frame();
1469   assert(stub_frame.is_runtime_frame(), "sanity check");
1470   frame caller_frame = stub_frame.sender(&reg_map);
1471   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1472 #endif /* ASSERT */
1473 
1474   methodHandle callee_method;
1475   JRT_BLOCK
1476     callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1477     // Return Method* through TLS
1478     current->set_vm_result_2(callee_method());
1479   JRT_BLOCK_END
1480   // return compiled code entry point after potential safepoints
1481   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1482   return callee_method->verified_code_entry();
1483 JRT_END
1484 
1485 
1486 // Handle call site that has been made non-entrant
1487 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1488   // 6243940 We might end up in here if the callee is deoptimized
1489   // as we race to call it.  We don't want to take a safepoint if
1490   // the caller was interpreted because the caller frame will look
1491   // interpreted to the stack walkers and arguments are now
1492   // "compiled" so it is much better to make this transition
1493   // invisible to the stack walking code. The i2c path will
1494   // place the callee method in the callee_target. It is stashed
 1495     // there because if we try to find the callee by normal means a
 1496     // safepoint is possible and we would have trouble GC'ing the compiled args.
1497   RegisterMap reg_map(current,
1498                       RegisterMap::UpdateMap::skip,
1499                       RegisterMap::ProcessFrames::include,
1500                       RegisterMap::WalkContinuation::skip);
1501   frame stub_frame = current->last_frame();
1502   assert(stub_frame.is_runtime_frame(), "sanity check");

1509     guarantee(callee != NULL && callee->is_method(), "bad handshake");
1510     current->set_vm_result_2(callee);
1511     current->set_callee_target(NULL);
1512     if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1513       // Bypass class initialization checks in c2i when caller is in native.
1514       // JNI calls to static methods don't have class initialization checks.
1515       // Fast class initialization checks are present in c2i adapters and call into
1516       // SharedRuntime::handle_wrong_method() on the slow path.
1517       //
1518       // JVM upcalls may land here as well, but there's a proper check present in
1519       // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1520       // so bypassing it in c2i adapter is benign.
1521       return callee->get_c2i_no_clinit_check_entry();
1522     } else {
1523       return callee->get_c2i_entry();
1524     }
1525   }
1526 
 1527   // Must be a compiled-to-compiled path, which is safe to stackwalk
1528   methodHandle callee_method;
1529   JRT_BLOCK
1530     // Force resolving of caller (if we called from compiled frame)
1531     callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1532     current->set_vm_result_2(callee_method());
1533   JRT_BLOCK_END
1534   // return compiled code entry point after potential safepoints
1535   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1536   return callee_method->verified_code_entry();
1537 JRT_END
1538 
1539 // Handle abstract method call
1540 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1541   // Verbose error message for AbstractMethodError.
1542   // Get the called method from the invoke bytecode.
1543   vframeStream vfst(current, true);
1544   assert(!vfst.at_end(), "Java frame must exist");
1545   methodHandle caller(current, vfst.method());
1546   Bytecode_invoke invoke(caller, vfst.bci());
1547   DEBUG_ONLY( invoke.verify(); )
1548 
1549   // Find the compiled caller frame.
1550   RegisterMap reg_map(current,
1551                       RegisterMap::UpdateMap::include,
1552                       RegisterMap::ProcessFrames::include,
1553                       RegisterMap::WalkContinuation::skip);
1554   frame stubFrame = current->last_frame();
1555   assert(stubFrame.is_runtime_frame(), "must be");
1556   frame callerFrame = stubFrame.sender(&reg_map);
1557   assert(callerFrame.is_compiled_frame(), "must be");
1558 
1559   // Install exception and return forward entry.
1560   address res = StubRoutines::throw_AbstractMethodError_entry();
1561   JRT_BLOCK
1562     methodHandle callee(current, invoke.static_target(current));
1563     if (!callee.is_null()) {
1564       oop recv = callerFrame.retrieve_receiver(&reg_map);
1565       Klass *recv_klass = (recv != NULL) ? recv->klass() : NULL;
1566       res = StubRoutines::forward_exception_entry();
1567       LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1568     }
1569   JRT_BLOCK_END
1570   return res;
1571 JRT_END
1572 
1573 
1574 // resolve a static call and patch code
1575 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1576   methodHandle callee_method;
1577   bool enter_special = false;
1578   JRT_BLOCK
1579     callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1580     current->set_vm_result_2(callee_method());
1581 
1582     if (current->is_interp_only_mode()) {
1583       RegisterMap reg_map(current,
1584                           RegisterMap::UpdateMap::skip,
1585                           RegisterMap::ProcessFrames::include,
1586                           RegisterMap::WalkContinuation::skip);
1587       frame stub_frame = current->last_frame();
1588       assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1589       frame caller = stub_frame.sender(&reg_map);
1590       enter_special = caller.cb() != NULL && caller.cb()->is_compiled()
1591         && caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic();
1592     }
1593   JRT_BLOCK_END
1594 
1595   if (current->is_interp_only_mode() && enter_special) {
1596     // enterSpecial is compiled and calls this method to resolve the call to Continuation::enter
1597     // but in interp_only_mode we need to go to the interpreted entry
1598     // The c2i won't patch in this mode -- see fixup_callers_callsite
1599     //
1600     // This should probably be done in all cases, not just enterSpecial (see JDK-8218403),
1601     // but that's part of a larger fix, and the situation is worse for enterSpecial, as it has no
1602     // interpreted version.
1603     return callee_method->get_c2i_entry();
1604   }
1605 
1606   // return compiled code entry point after potential safepoints
1607   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1608   return callee_method->verified_code_entry();
1609 JRT_END
1610 
1611 
1612 // resolve virtual call and update inline cache to monomorphic
1613 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1614   methodHandle callee_method;
1615   JRT_BLOCK
1616     callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
1617     current->set_vm_result_2(callee_method());
1618   JRT_BLOCK_END
1619   // return compiled code entry point after potential safepoints
1620   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1621   return callee_method->verified_code_entry();
1622 JRT_END
1623 
1624 
1625 // Resolve a virtual call that can be statically bound (e.g., always
1626 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1627 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1628   methodHandle callee_method;
1629   JRT_BLOCK
1630     callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
1631     current->set_vm_result_2(callee_method());
1632   JRT_BLOCK_END
1633   // return compiled code entry point after potential safepoints
1634   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1635   return callee_method->verified_code_entry();
1636 JRT_END
1637 
 1638 // The handle_ic_miss_helper_internal function returns false if it failed due
 1639 // to running out of vtable stubs, or running out of IC stubs needed for
 1640 // transitions to transitional IC states. The needs_ic_stub_refill value is set if
 1641 // the failure was due to running out of IC stubs, in which case handle_ic_miss_helper
 1642 // refills the IC stubs and tries again.
1643 bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
1644                                                    const frame& caller_frame, methodHandle callee_method,
1645                                                    Bytecodes::Code bc, CallInfo& call_info,
1646                                                    bool& needs_ic_stub_refill, TRAPS) {
1647   CompiledICLocker ml(caller_nm);
1648   CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1649   bool should_be_mono = false;
1650   if (inline_cache->is_optimized()) {
1651     if (TraceCallFixup) {
1652       ResourceMark rm(THREAD);
1653       tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1654       callee_method->print_short_name(tty);
1655       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1656     }
1657     should_be_mono = true;
1658   } else if (inline_cache->is_icholder_call()) {
1659     CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1660     if (ic_oop != NULL) {
1661       if (!ic_oop->is_loader_alive()) {
1662         // Deferred IC cleaning due to concurrent class unloading
1663         if (!inline_cache->set_to_clean()) {
1664           needs_ic_stub_refill = true;
1665           return false;
1666         }
1667       } else if (receiver()->klass() == ic_oop->holder_klass()) {
1668         // This isn't a real miss. We must have seen that compiled code
1669         // is now available and we want the call site converted to a
1670         // monomorphic compiled call site.
1671         // We can't assert for callee_method->code() != NULL because it
1672         // could have been deoptimized in the meantime
1673         if (TraceCallFixup) {
1674           ResourceMark rm(THREAD);
1675           tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1676           callee_method->print_short_name(tty);
1677           tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1678         }
1679         should_be_mono = true;
1680       }
1681     }
1682   }
1683 
1684   if (should_be_mono) {
1685     // We have a path that was monomorphic but was going interpreted
1686     // and now we have (or had) a compiled entry. We correct the IC
1687     // by using a new icBuffer.
1688     CompiledICInfo info;
1689     Klass* receiver_klass = receiver()->klass();
1690     inline_cache->compute_monomorphic_entry(callee_method,
1691                                             receiver_klass,
1692                                             inline_cache->is_optimized(),
1693                                             false, caller_nm->is_nmethod(),
1694                                             info, CHECK_false);
1695     if (!inline_cache->set_to_monomorphic(info)) {
1696       needs_ic_stub_refill = true;
1697       return false;
1698     }
1699   } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1700     // Potential change to megamorphic
1701 
1702     bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false);
1703     if (needs_ic_stub_refill) {
1704       return false;
1705     }
1706     if (!successful) {
1707       if (!inline_cache->set_to_clean()) {
1708         needs_ic_stub_refill = true;
1709         return false;
1710       }
1711     }
1712   } else {
1713     // Either clean or megamorphic
1714   }
1715   return true;
1716 }
1717 
1718 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1719   JavaThread* current = THREAD;
1720   ResourceMark rm(current);
1721   CallInfo call_info;
1722   Bytecodes::Code bc;
1723 
1724   // receiver is NULL for static calls. An exception is thrown for NULL
1725   // receivers for non-static calls
1726   Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
 1727   // Compiler1 can produce virtual call sites that can actually be statically bound.
 1728   // If we fell through to below we would think that the site was going megamorphic
 1729   // when in fact the site can never miss. Worse, because we'd think it was megamorphic
 1730   // we'd try to do a vtable dispatch; however, methods that can be statically bound
 1731   // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
 1732   // reresolution of the call site (as if we did a handle_wrong_method and not a
 1733   // plain ic_miss) and the site will be converted to an optimized virtual call site
 1734   // never to miss again. I don't believe C2 will produce code like this, but if it
 1735   // did this would still be the correct thing to do for it too, hence no ifdef.
1736   //
1737   if (call_info.resolved_method()->can_be_statically_bound()) {
1738     methodHandle callee_method = SharedRuntime::reresolve_call_site(CHECK_(methodHandle()));
1739     if (TraceCallFixup) {
1740       RegisterMap reg_map(current,
1741                           RegisterMap::UpdateMap::skip,
1742                           RegisterMap::ProcessFrames::include,
1743                           RegisterMap::WalkContinuation::skip);
1744       frame caller_frame = current->last_frame().sender(&reg_map);
1745       ResourceMark rm(current);
1746       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1747       callee_method->print_short_name(tty);
1748       tty->print_cr(" from pc: " INTPTR_FORMAT, p2i(caller_frame.pc()));
1749       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1750     }
1751     return callee_method;
1752   }
1753 
1754   methodHandle callee_method(current, call_info.selected_method());
1755 
1756 #ifndef PRODUCT
1757   Atomic::inc(&_ic_miss_ctr);
1758 

1777 #endif
1778 
1779   // install an event collector so that when a vtable stub is created the
1780   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1781   // event can't be posted when the stub is created as locks are held
1782   // - instead the event will be deferred until the event collector goes
1783   // out of scope.
1784   JvmtiDynamicCodeEventCollector event_collector;
1785 
1786   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1787   // Transitioning IC caches may require transition stubs. If we run out
1788   // of transition stubs, we have to drop locks and perform a safepoint
1789   // that refills them.
1790   RegisterMap reg_map(current,
1791                       RegisterMap::UpdateMap::skip,
1792                       RegisterMap::ProcessFrames::include,
1793                       RegisterMap::WalkContinuation::skip);
1794   frame caller_frame = current->last_frame().sender(&reg_map);
1795   CodeBlob* cb = caller_frame.cb();
1796   CompiledMethod* caller_nm = cb->as_compiled_method();
1797 
1798   for (;;) {
1799     ICRefillVerifier ic_refill_verifier;
1800     bool needs_ic_stub_refill = false;
1801     bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1802                                                      bc, call_info, needs_ic_stub_refill, CHECK_(methodHandle()));
1803     if (successful || !needs_ic_stub_refill) {
1804       return callee_method;
1805     } else {
1806       InlineCacheBuffer::refill_ic_stubs();
1807     }
1808   }
1809 }
1810 
1811 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1812   CompiledICLocker ml(caller_nm);
1813   if (is_static_call) {
1814     CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1815     if (!ssc->is_clean()) {
1816       return ssc->set_to_clean();
1817     }
1818   } else {
1819     // compiled, dispatched call (which used to call an interpreted method)
1820     CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1821     if (!inline_cache->is_clean()) {
1822       return inline_cache->set_to_clean();
1823     }
1824   }
1825   return true;
1826 }
1827 
1828 //
1829 // Resets a call-site in compiled code so it will get resolved again.
 1830 // This routine handles virtual call sites, optimized virtual call
 1831 // sites, and static call sites. Typically used to change a call site's
1832 // destination from compiled to interpreted.
1833 //
1834 methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
1835   JavaThread* current = THREAD;
1836   ResourceMark rm(current);
1837   RegisterMap reg_map(current,
1838                       RegisterMap::UpdateMap::skip,
1839                       RegisterMap::ProcessFrames::include,
1840                       RegisterMap::WalkContinuation::skip);
1841   frame stub_frame = current->last_frame();
1842   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1843   frame caller = stub_frame.sender(&reg_map);
1844 
1845   // Do nothing if the frame isn't a live compiled frame.
1846   // nmethod could be deoptimized by the time we get here
1847   // so no update to the caller is needed.
1848 
1849   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
1850 
1851     address pc = caller.pc();
1852 
1853     // Check for static or virtual call
1854     bool is_static_call = false;
1855     CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
1856 
1857     // Default call_addr is the location of the "basic" call.
 1858     // Determine the address of the call we are reresolving. With
1859     // Inline Caches we will always find a recognizable call.
1860     // With Inline Caches disabled we may or may not find a
1861     // recognizable call. We will always find a call for static
1862     // calls and for optimized virtual calls. For vanilla virtual
1863     // calls it depends on the state of the UseInlineCaches switch.
1864     //
1865     // With Inline Caches disabled we can get here for a virtual call
1866     // for two reasons:
1867     //   1 - calling an abstract method. The vtable for abstract methods
 1868     //       will run us through handle_wrong_method and we will eventually
 1869     //       end up in the interpreter to throw the AbstractMethodError.
 1870     //   2 - a racing deoptimization. We could be doing a vanilla vtable
 1871     //       call and between the time we fetch the entry address and
 1872     //       we jump to it, the target gets deoptimized. Similar to 1,
 1873     //       we will wind up in the interpreter (through a c2i with C2).
1874     //
1875     address call_addr = NULL;
1876     {
1877       // Get call instruction under lock because another thread may be
1878       // busy patching it.
1879       CompiledICLocker ml(caller_nm);
1880       // Location of call instruction
1881       call_addr = caller_nm->call_instruction_address(pc);
1882     }
1883 
1884     // Check relocations for the matching call to 1) avoid false positives,
1885     // and 2) determine the type.
1886     if (call_addr != NULL) {
1887       // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1888       // bytes back in the instruction stream so we must also check for reloc info.
1889       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1890       bool ret = iter.next(); // Get item
1891       if (ret) {
1892         bool is_static_call = false;
1893         switch (iter.type()) {
1894           case relocInfo::static_call_type:
1895             is_static_call = true;
 1896             // fall through
1897           case relocInfo::virtual_call_type:
1898           case relocInfo::opt_virtual_call_type:
1899             // Cleaning the inline cache will force a new resolve. This is more robust
1900             // than directly setting it to the new destination, since resolving of calls
 1901             // is always done through the same code path. (Experience shows that it
 1902             // leads to very hard-to-track-down bugs if an inline cache gets updated
 1903             // to a wrong method.) It should not be performance critical, since the
1904             // resolve is only done once.
1905             guarantee(iter.addr() == call_addr, "must find call");
1906             for (;;) {
1907               ICRefillVerifier ic_refill_verifier;
1908               if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
1909                 InlineCacheBuffer::refill_ic_stubs();
1910               } else {
1911                 break;
1912               }
1913             }
1914             break;
1915           default:
1916             break;
1917         }
1918       }
1919     }
1920   }
1921 
1922   methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1923 
1924 
1925 #ifndef PRODUCT
1926   Atomic::inc(&_wrong_method_ctr);
1927 
1928   if (TraceCallFixup) {
1929     ResourceMark rm(current);
1930     tty->print("handle_wrong_method reresolving call to");
1931     callee_method->print_short_name(tty);
1932     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1933   }
1934 #endif
1935 
1936   return callee_method;
1937 }
1938 
1939 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1940   // The faulting unsafe accesses should be changed to throw the error
1941   // synchronously instead. Meanwhile the faulting instruction will be
1942   // skipped over (effectively turning it into a no-op) and an
1943   // asynchronous exception will be raised which the thread will
1944   // handle at a later point. If the instruction is a load it will

2000     if (TraceCallFixup) {
2001       tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
2002       moop->print_short_name(tty);
2003       tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
2004     }
2005   }
2006   return false;
2007 }
2008 
2009 // ---------------------------------------------------------------------------
2010 // We are calling the interpreter via a c2i. Normally this would mean that
2011 // we were called by a compiled method. However we could have lost a race
2012 // where we went int -> i2c -> c2i and so the caller could in fact be
2013 // interpreted. If the caller is compiled we attempt to patch the caller
 2014 // so it no longer calls into the interpreter.
2015 JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
2016   Method* moop(method);
2017 
2018   AARCH64_PORT_ONLY(assert(pauth_ptr_is_raw(caller_pc), "should be raw"));
2019 
2020   address entry_point = moop->from_compiled_entry_no_trampoline();
2021 
2022   // It's possible that deoptimization can occur at a call site which hasn't
2023   // been resolved yet, in which case this function will be called from
2024   // an nmethod that has been patched for deopt and we can ignore the
2025   // request for a fixup.
2026   // Also it is possible that we lost a race in that from_compiled_entry
 2027   // is now back to the i2c; in that case we don't need to patch, and if
 2028   // we did we'd leap into space because the callsite needs to use the
 2029   // "to interpreter" stub in order to load up the Method*. Don't
2030   // ask me how I know this...
2031 
2032   CodeBlob* cb = CodeCache::find_blob(caller_pc);
2033   if (cb == NULL || !cb->is_compiled() || entry_point == moop->get_c2i_entry()) {
2034     return;
2035   }
2036 
2037   // The check above makes sure this is a nmethod.
2038   CompiledMethod* nm = cb->as_compiled_method_or_null();
2039   assert(nm, "must be");
2040 
2041   // Get the return PC for the passed caller PC.
2042   address return_pc = caller_pc + frame::pc_return_offset;
2043 
2044   assert(!JavaThread::current()->is_interp_only_mode() || !nm->method()->is_continuation_enter_intrinsic()
2045     || ContinuationEntry::is_interpreted_call(return_pc), "interp_only_mode but not in enterSpecial interpreted entry");
2046 
2047   // There is a benign race here. We could be attempting to patch to a compiled
2048   // entry point at the same time the callee is being deoptimized. If that is
2049   // the case then entry_point may in fact point to a c2i and we'd patch the
2050   // call site with the same old data. clear_code will set code() to NULL
2051   // at the end of it. If we happen to see that NULL then we can skip trying
2052   // to patch. If we hit the window where the callee has a c2i in the
2053   // from_compiled_entry and the NULL isn't present yet then we lose the race

2406  private:
2407   enum {
2408     _basic_type_bits = 4,
2409     _basic_type_mask = right_n_bits(_basic_type_bits),
2410     _basic_types_per_int = BitsPerInt / _basic_type_bits,
2411     _compact_int_count = 3
2412   };
2413   // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
 2414   // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2415 
2416   union {
2417     int  _compact[_compact_int_count];
2418     int* _fingerprint;
2419   } _value;
 2420   int _length; // A negative length indicates the fingerprint is in the compact form;
 2421                // otherwise _value._fingerprint is the array.
2422 
2423   // Remap BasicTypes that are handled equivalently by the adapters.
2424   // These are correct for the current system but someday it might be
2425   // necessary to make this mapping platform dependent.
2426   static int adapter_encoding(BasicType in) {
2427     switch (in) {
2428       case T_BOOLEAN:
2429       case T_BYTE:
2430       case T_SHORT:
2431       case T_CHAR:
 2432         // These are all promoted to T_INT in the calling convention
2433         return T_INT;
2434 
2435       case T_OBJECT:
2436       case T_ARRAY:
2437         // In other words, we assume that any register good enough for
2438         // an int or long is good enough for a managed pointer.
2439 #ifdef _LP64
2440         return T_LONG;
2441 #else
2442         return T_INT;
2443 #endif
2444 
2445       case T_INT:
2446       case T_LONG:
2447       case T_FLOAT:
2448       case T_DOUBLE:
2449       case T_VOID:
2450         return in;
2451 
2452       default:
2453         ShouldNotReachHere();
2454         return T_CONFLICT;
2455     }
2456   }
2457 
2458  public:
2459   AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
2460     // The fingerprint is based on the BasicType signature encoded
2461     // into an array of ints with eight entries per int.
2462     int* ptr;
2463     int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2464     if (len <= _compact_int_count) {
2465       assert(_compact_int_count == 3, "else change next line");
2466       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2467       // Storing the signature encoded as signed chars hits about 98%
2468       // of the time.
2469       _length = -len;
2470       ptr = _value._compact;
2471     } else {
2472       _length = len;
2473       _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2474       ptr = _value._fingerprint;
2475     }
2476 
2477     // Now pack the BasicTypes with 8 per int
2478     int sig_index = 0;
2479     for (int index = 0; index < len; index++) {
2480       int value = 0;
2481       for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
2482         int bt = adapter_encoding(sig_bt[sig_index++]);
2483         assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2484         value = (value << _basic_type_bits) | bt;
2485       }
2486       ptr[index] = value;
2487     }
2488   }
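
(Editor's aside, not part of the webrev.) The constructor above packs one 4-bit code per argument, eight codes per 32-bit int, so the first arguments land in the higher bits of each full word; as_basic_args_string later decodes from the highest bits down, skipping zero padding. A minimal standalone sketch of that scheme, using stand-in values for the BasicType constants:

#include <cstdio>
#include <vector>

enum BT { BT_INT = 10, BT_LONG = 11, BT_VOID = 14 };  // stand-in values

const int kTypeBits      = 4;               // plays the role of _basic_type_bits
const int kTypesPerInt   = 32 / kTypeBits;  // plays the role of _basic_types_per_int (8)
const unsigned kTypeMask = (1u << kTypeBits) - 1;

// Pack 4-bit type codes, up to eight per 32-bit word. A partial last word is
// only shifted once per code consumed, so its codes sit in the low bits and
// the zero padding ends up in the high bits (as the assert in
// as_basic_args_string expects).
static std::vector<unsigned> pack(const std::vector<unsigned>& sig) {
  std::vector<unsigned> words((sig.size() + kTypesPerInt - 1) / kTypesPerInt, 0);
  size_t i = 0;
  for (unsigned& w : words) {
    for (int b = 0; i < sig.size() && b < kTypesPerInt; b++) {
      w = (w << kTypeBits) | (sig[i++] & kTypeMask);
    }
  }
  return words;
}

int main() {
  // Signature (int, long): I, J, plus the T_VOID slot that follows a long.
  for (unsigned w : pack({ BT_INT, BT_LONG, BT_VOID })) {
    printf("word 0x%08x:", w);
    // Decode from the highest bits down, skipping zero padding, mirroring
    // the iteration order used by as_basic_args_string.
    for (int shift = 32 - kTypeBits; shift >= 0; shift -= kTypeBits) {
      unsigned v = (w >> shift) & kTypeMask;
      if (v != 0) printf(" %u", v);
    }
    printf("\n");
  }
  return 0;
}
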
2489 
2490   ~AdapterFingerPrint() {
2491     if (_length > 0) {
2492       FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2493     }
2494   }
2495 
2496   int value(int index) {
2497     if (_length < 0) {
2498       return _value._compact[index];
2499     }
2500     return _value._fingerprint[index];
2501   }
2502   int length() {
2503     if (_length < 0) return -_length;
2504     return _length;
2505   }
2506 
2507   bool is_compact() {

2532   const char* as_basic_args_string() {
2533     stringStream st;
2534     bool long_prev = false;
2535     for (int i = 0; i < length(); i++) {
2536       unsigned val = (unsigned)value(i);
2537       // args are packed so that first/lower arguments are in the highest
2538       // bits of each int value, so iterate from highest to the lowest
2539       for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2540         unsigned v = (val >> j) & _basic_type_mask;
2541         if (v == 0) {
2542           assert(i == length() - 1, "Only expect zeroes in the last word");
2543           continue;
2544         }
2545         if (long_prev) {
2546           long_prev = false;
2547           if (v == T_VOID) {
2548             st.print("J");
2549           } else {
2550             st.print("L");
2551           }
2552         }
2553         switch (v) {
2554           case T_INT:    st.print("I");    break;
2555           case T_LONG:   long_prev = true; break;
2556           case T_FLOAT:  st.print("F");    break;
2557           case T_DOUBLE: st.print("D");    break;
2558           case T_VOID:   break;
2559           default: ShouldNotReachHere();
2560         }
2561       }
2562     }
2563     if (long_prev) {
2564       st.print("L");
2565     }
2566     return st.as_string();
2567   }
2568 #endif // !product
2569 
2570   bool equals(AdapterFingerPrint* other) {
2571     if (other->_length != _length) {
2572       return false;
2573     }
2574     if (_length < 0) {
2575       assert(_compact_int_count == 3, "else change next line");
2576       return _value._compact[0] == other->_value._compact[0] &&
2577              _value._compact[1] == other->_value._compact[1] &&
2578              _value._compact[2] == other->_value._compact[2];
2579     } else {

2586     return true;
2587   }
2588 
2589   static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2590     NOT_PRODUCT(_equals++);
2591     return fp1->equals(fp2);
2592   }
2593 
2594   static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2595     return fp->compute_hash();
2596   }
2597 };
2598 
2599 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2600 ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2601                   ResourceObj::C_HEAP, mtCode,
2602                   AdapterFingerPrint::compute_hash,
2603                   AdapterFingerPrint::equals> _adapter_handler_table;
2604 
 2605 // Find an entry with the same fingerprint if it exists
2606 static AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
2607   NOT_PRODUCT(_lookups++);
2608   assert_lock_strong(AdapterHandlerLibrary_lock);
2609   AdapterFingerPrint fp(total_args_passed, sig_bt);
2610   AdapterHandlerEntry** entry = _adapter_handler_table.get(&fp);
2611   if (entry != nullptr) {
2612 #ifndef PRODUCT
2613     if (fp.is_compact()) _compact++;
2614     _hits++;
2615 #endif
2616     return *entry;
2617   }
2618   return nullptr;
2619 }
2620 
2621 #ifndef PRODUCT
2622 static void print_table_statistics() {
2623   auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2624     return sizeof(*key) + sizeof(*a);
2625   };
2626   TableStatistics ts = _adapter_handler_table.statistics_calculate(size);
2627   ts.print(tty, "AdapterHandlerTable");
2628   tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2629                 _adapter_handler_table.table_size(), _adapter_handler_table.number_of_entries());
2630   tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2631                 _lookups, _equals, _hits, _compact);
2632 }
2633 #endif
2634 
2635 // ---------------------------------------------------------------------------
2636 // Implementation of AdapterHandlerLibrary
2637 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
2638 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = NULL;
2639 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = NULL;
2640 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = NULL;
2641 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = NULL;
2642 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = NULL;
2643 const int AdapterHandlerLibrary_size = 16*K;
2644 BufferBlob* AdapterHandlerLibrary::_buffer = NULL;
2645 
2646 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2647   return _buffer;
2648 }
2649 
2650 extern "C" void unexpected_adapter_call() {
2651   ShouldNotCallThis();
2652 }
2653 
2654 static void post_adapter_creation(const AdapterBlob* new_adapter,
2655                                   const AdapterHandlerEntry* entry) {
2656   if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2657     char blob_id[256];
2658     jio_snprintf(blob_id,
2659                  sizeof(blob_id),
2660                  "%s(%s)",
2661                  new_adapter->name(),
2662                  entry->fingerprint()->as_string());
2663     if (Forte::is_enabled()) {

2669     }
2670   }
2671 }
2672 
2673 void AdapterHandlerLibrary::initialize() {
2674   ResourceMark rm;
2675   AdapterBlob* no_arg_blob = NULL;
2676   AdapterBlob* int_arg_blob = NULL;
2677   AdapterBlob* obj_arg_blob = NULL;
2678   AdapterBlob* obj_int_arg_blob = NULL;
2679   AdapterBlob* obj_obj_arg_blob = NULL;
2680   {
2681     MutexLocker mu(AdapterHandlerLibrary_lock);
2682 
2683     // Create a special handler for abstract methods.  Abstract methods
2684     // are never compiled so an i2c entry is somewhat meaningless, but
2685     // throw AbstractMethodError just in case.
2686     // Pass wrong_method_abstract for the c2i transitions to return
2687     // AbstractMethodError for invalid invocations.
2688     address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2689     _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
2690                                                                 StubRoutines::throw_AbstractMethodError_entry(),
2691                                                                 wrong_method_abstract, wrong_method_abstract);
2692 
2693     _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2694     _no_arg_handler = create_adapter(no_arg_blob, 0, NULL, true);
2695 
2696     BasicType obj_args[] = { T_OBJECT };
2697     _obj_arg_handler = create_adapter(obj_arg_blob, 1, obj_args, true);
2698 
2699     BasicType int_args[] = { T_INT };
2700     _int_arg_handler = create_adapter(int_arg_blob, 1, int_args, true);
2701 
2702     BasicType obj_int_args[] = { T_OBJECT, T_INT };
2703     _obj_int_arg_handler = create_adapter(obj_int_arg_blob, 2, obj_int_args, true);
2704 
2705     BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
2706     _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, 2, obj_obj_args, true);
2707 
2708     assert(no_arg_blob != NULL &&
2709           obj_arg_blob != NULL &&
2710           int_arg_blob != NULL &&
2711           obj_int_arg_blob != NULL &&
2712           obj_obj_arg_blob != NULL, "Initial adapters must be properly created");
2713   }
2714 
2715   // Outside of the lock
2716   post_adapter_creation(no_arg_blob, _no_arg_handler);
2717   post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2718   post_adapter_creation(int_arg_blob, _int_arg_handler);
2719   post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2720   post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2721 }
2722 
2723 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2724                                                       address i2c_entry,
2725                                                       address c2i_entry,


2726                                                       address c2i_unverified_entry,

2727                                                       address c2i_no_clinit_check_entry) {
2728   // Insert an entry into the table
2729   return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry,
2730                                  c2i_no_clinit_check_entry);
2731 }
2732 
2733 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2734   if (method->is_abstract()) {
2735     return _abstract_method_handler;
2736   }
2737   int total_args_passed = method->size_of_parameters(); // All args on stack
2738   if (total_args_passed == 0) {
2739     return _no_arg_handler;
2740   } else if (total_args_passed == 1) {
2741     if (!method->is_static()) {
2742       return _obj_arg_handler;
2743     }
2744     switch (method->signature()->char_at(1)) {
2745       case JVM_SIGNATURE_CLASS:
2746       case JVM_SIGNATURE_ARRAY:
2747         return _obj_arg_handler;
2748       case JVM_SIGNATURE_INT:
2749       case JVM_SIGNATURE_BOOLEAN:
2750       case JVM_SIGNATURE_CHAR:
2751       case JVM_SIGNATURE_BYTE:
2752       case JVM_SIGNATURE_SHORT:
2753         return _int_arg_handler;
2754     }
2755   } else if (total_args_passed == 2 &&
2756              !method->is_static()) {
2757     switch (method->signature()->char_at(1)) {
2758       case JVM_SIGNATURE_CLASS:
2759       case JVM_SIGNATURE_ARRAY:
2760         return _obj_obj_arg_handler;
2761       case JVM_SIGNATURE_INT:
2762       case JVM_SIGNATURE_BOOLEAN:
2763       case JVM_SIGNATURE_CHAR:
2764       case JVM_SIGNATURE_BYTE:
2765       case JVM_SIGNATURE_SHORT:
2766         return _obj_int_arg_handler;
2767     }
2768   }
2769   return NULL;
2770 }
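
// Worked example (illustrative, not part of the source): for an instance method
// `int length(String s)` with signature (Ljava/lang/String;)I, total_args_passed
// is 2 (receiver + one parameter) and the method is non-static, so char_at(1) of
// the signature is 'L' (JVM_SIGNATURE_CLASS) and we return _obj_obj_arg_handler.
// A static `int abs(int)` with signature (I)I has one argument whose signature
// char is 'I', so it maps to _int_arg_handler. Shapes not covered above (e.g.
// long, double or float arguments) return NULL and fall back to the general
// get_adapter() path.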
2771 
2772 class AdapterSignatureIterator : public SignatureIterator {
2773  private:
2774   BasicType stack_sig_bt[16];
2775   BasicType* sig_bt;
2776   int index;
2777 
2778  public:
2779   AdapterSignatureIterator(Symbol* signature,
2780                            fingerprint_t fingerprint,
2781                            bool is_static,
2782                            int total_args_passed) :
2783     SignatureIterator(signature, fingerprint),
2784     index(0)
2785   {
2786     sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2787     if (!is_static) { // Pass in receiver first
2788       sig_bt[index++] = T_OBJECT;
2789     }
2790     do_parameters_on(this);
2791   }
2792 
2793   BasicType* basic_types() {
2794     return sig_bt;
2795   }
2796 
2797 #ifdef ASSERT
2798   int slots() {
2799     return index;
2800   }
2801 #endif
2802 
2803  private:
2804 
2805   friend class SignatureIterator;  // so do_parameters_on can call do_type
2806   void do_type(BasicType type) {
2807     sig_bt[index++] = type;
2808     if (type == T_LONG || type == T_DOUBLE) {
2809       sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots
2810     }
2811   }
2812 };
2813 
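// Worked example (illustrative, not part of the source): iterating the
// signature (JLjava/lang/String;)V for a non-static method produces
//   sig_bt = { T_OBJECT /* receiver */, T_LONG, T_VOID, T_OBJECT }
// where the T_VOID fills the long's second Java slot, so slots() == 4,
// matching size_of_parameters() (receiver + one slot per parameter, with
// longs and doubles taking two).
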
2814 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2815   // Use customized signature handler.  Need to lock around updates to
2816   // the _adapter_handler_table (it is not safe for concurrent readers
2817   // and a single writer: this could be fixed if it becomes a
2818   // problem).
2819 
2820   // Fast-path for trivial adapters
2821   AdapterHandlerEntry* entry = get_simple_adapter(method);
2822   if (entry != NULL) {
2823     return entry;
2824   }
2825 
2826   ResourceMark rm;
2827   AdapterBlob* new_adapter = NULL;
2828 
2829   // Fill in the signature array, for the calling-convention call.
2830   int total_args_passed = method->size_of_parameters(); // All args on stack
2831 
2832   AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2833                               method->is_static(), total_args_passed);
2834   assert(si.slots() == total_args_passed, "");
2835   BasicType* sig_bt = si.basic_types();
2836   {
2837     MutexLocker mu(AdapterHandlerLibrary_lock);
2838 
2839     // Lookup method signature's fingerprint
2840     entry = lookup(total_args_passed, sig_bt);
2841 
2842     if (entry != NULL) {
2843 #ifdef ASSERT
2844       if (VerifyAdapterSharing) {
2845         AdapterBlob* comparison_blob = NULL;
2846         AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, total_args_passed, sig_bt, false);
2847         assert(comparison_blob == NULL, "no blob should be created when creating an adapter for comparison");
2848         assert(comparison_entry->compare_code(entry), "code must match");
2849         // Release the one just created and return the original
2850         delete comparison_entry;
2851       }
2852 #endif
2853       return entry;
2854     }
2855 
2856     entry = create_adapter(new_adapter, total_args_passed, sig_bt, /* allocate_code_blob */ true);
2857   }
2858 
2859   // Outside of the lock
2860   if (new_adapter != NULL) {
2861     post_adapter_creation(new_adapter, entry);
2862   }
2863   return entry;
2864 }
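
// Summary of the lookup flow above: get_simple_adapter() serves a handful of
// pre-generated argument shapes without taking the lock; otherwise the
// signature is reduced to a BasicType array, its fingerprint is looked up in
// _adapter_handler_table under AdapterHandlerLibrary_lock, and only on a miss
// is a new i2c/c2i pair generated and installed. The JVMTI/Forte event for a
// newly created blob is posted after the lock is released, since it cannot be
// posted while locks are held.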
2865 
2866 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
2867                                                            int total_args_passed,
2868                                                            BasicType* sig_bt,
2869                                                            bool allocate_code_blob) {
2870 
2871   // StubRoutines::code2() is initialized after this function can be called. As a result,
2872   // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated
2873   // prior to StubRoutines::code2() being set. "Checks" refers to the checks generated in an I2C
2874   // stub that ensure that an I2C stub is called from an interpreter frame.
2875   bool contains_all_checks = StubRoutines::code2() != NULL;
2876 
2877   VMRegPair stack_regs[16];
2878   VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2879 
2880   // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2881   int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2882   BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2883   CodeBuffer buffer(buf);
2884   short buffer_locs[20];
2885   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2886                                           sizeof(buffer_locs)/sizeof(relocInfo));
2887 
2888   // Make a C heap allocated version of the fingerprint to store in the adapter
2889   AdapterFingerPrint* fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
2890   MacroAssembler _masm(&buffer);
2891   AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
2892                                                 total_args_passed,
2893                                                 comp_args_on_stack,
2894                                                 sig_bt,
2895                                                 regs,
2896                                                 fingerprint);
2897 
2898 #ifdef ASSERT
2899   if (VerifyAdapterSharing) {
2900     entry->save_code(buf->code_begin(), buffer.insts_size());
2901     if (!allocate_code_blob) {
2902       return entry;
2903     }
2904   }
2905 #endif
2906 
2907   new_adapter = AdapterBlob::create(&buffer);
2908   NOT_PRODUCT(int insts_size = buffer.insts_size());
2909   if (new_adapter == NULL) {
2910     // CodeCache is full, disable compilation
2911     // Ought to log this but compile log is only per compile thread
2912     // and we're some nondescript Java thread.
2913     return NULL;
2914   }
2915   entry->relocate(new_adapter->content_begin());
2916 #ifndef PRODUCT
2917   // debugging support
2918   if (PrintAdapterHandlers || PrintStubCode) {
2919     ttyLocker ttyl;
2920     entry->print_adapter_on(tty);
2921     tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
2922                   _adapter_handler_table.number_of_entries(), fingerprint->as_basic_args_string(),
2923                   fingerprint->as_string(), insts_size);
2924     tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
2925     if (Verbose || PrintStubCode) {
2926       address first_pc = entry->base_address();
2927       if (first_pc != NULL) {

2929                              NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
2930         tty->cr();
2931       }
2932     }
2933   }
2934 #endif
2935 
2936   // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
2937   // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
2938   if (contains_all_checks || !VerifyAdapterCalls) {
2939     assert_lock_strong(AdapterHandlerLibrary_lock);
2940     _adapter_handler_table.put(fingerprint, entry);
2941   }
2942   return entry;
2943 }
2944 
2945 address AdapterHandlerEntry::base_address() {
2946   address base = _i2c_entry;
2947   if (base == NULL)  base = _c2i_entry;
2948   assert(base <= _c2i_entry || _c2i_entry == NULL, "");
2949   assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, "");
2950   assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == NULL, "");
2951   return base;
2952 }
2953 
2954 void AdapterHandlerEntry::relocate(address new_base) {
2955   address old_base = base_address();
2956   assert(old_base != NULL, "");
2957   ptrdiff_t delta = new_base - old_base;
2958   if (_i2c_entry != NULL)
2959     _i2c_entry += delta;
2960   if (_c2i_entry != NULL)
2961     _c2i_entry += delta;
2962   if (_c2i_unverified_entry != NULL)
2963     _c2i_unverified_entry += delta;
2964   if (_c2i_no_clinit_check_entry != NULL)
2965     _c2i_no_clinit_check_entry += delta;
2966   assert(base_address() == new_base, "");
2967 }
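
// Relocation example (illustrative): if the adapter code was generated in the
// temporary buffer at base 0x7f0000001000 and the final AdapterBlob's
// content_begin() is 0x7f0000042000, delta is +0x41000 and every non-NULL
// entry (_i2c_entry, _c2i_entry, ...) is shifted by that amount, preserving
// each entry's offset relative to the blob's base.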
2968 
2969 
2970 AdapterHandlerEntry::~AdapterHandlerEntry() {
2971   delete _fingerprint;
2972 #ifdef ASSERT
2973   FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
2974 #endif
2975 }
2976 
2977 
2978 #ifdef ASSERT
2979 // Capture the code before relocation so that it can be compared
2980 // against other versions.  If the code is captured after relocation
2981 // then relative instructions won't be equivalent.
2982 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
2983   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
2984   _saved_code_length = length;
2985   memcpy(_saved_code, buffer, length);
2986 }
2987 
2988 
2989 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
2990   assert(_saved_code != NULL && other->_saved_code != NULL, "code not saved");
2991 

3034 
3035       if (method->is_continuation_enter_intrinsic()) {
3036         buffer.initialize_stubs_size(128);
3037       }
3038 
3039       struct { double data[20]; } locs_buf;
3040       struct { double data[20]; } stubs_locs_buf;
3041       buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3042 #if defined(AARCH64)
3043       // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3044       // in the constant pool to ensure ordering between the barrier and oops
3045       // accesses. For native_wrappers we need a constant.
3046       buffer.initialize_consts_size(8);
3047 #endif
3048       buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3049       MacroAssembler _masm(&buffer);
3050 
3051       // Fill in the signature array, for the calling-convention call.
3052       const int total_args_passed = method->size_of_parameters();
3053 

3054       VMRegPair stack_regs[16];

3055       VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3056 
3057       AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
3058                                   method->is_static(), total_args_passed);
3059       BasicType* sig_bt = si.basic_types();
3060       assert(si.slots() == total_args_passed, "");
3061       BasicType ret_type = si.return_type();
3062 
3063       // Now get the compiled-Java arguments layout.
3064       int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3065 
3066       // Generate the compiled-to-native wrapper code
3067       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3068 
3069       if (nm != NULL) {
3070         {
3071           MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
3072           if (nm->make_in_use()) {
3073             method->set_code(method, nm);
3074           }
3075         }
3076 
3077         DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
3078         if (directive->PrintAssemblyOption) {
3079           nm->print_code();
3080         }
3081         DirectivesStack::release(directive);

3276       st->print("Adapter for signature: ");
3277       a->print_adapter_on(st);
3278       return true;
3279     } else {
3280       return false; // keep looking
3281     }
3282   };
3283   assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3284   _adapter_handler_table.iterate(findblob);
3285   assert(found, "Should have found handler");
3286 }
3287 
3288 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3289   st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3290   if (get_i2c_entry() != NULL) {
3291     st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3292   }
3293   if (get_c2i_entry() != NULL) {
3294     st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3295   }
3296   if (get_c2i_unverified_entry() != NULL) {
3297     st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3298   }
3299   if (get_c2i_no_clinit_check_entry() != NULL) {
3300     st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3301   }
3302   st->cr();
3303 }
3304 
3305 #ifndef PRODUCT
3306 
3307 void AdapterHandlerLibrary::print_statistics() {
3308   print_table_statistics();
3309 }
3310 
3311 #endif /* PRODUCT */
3312 
3313 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3314   StackOverflow* overflow_state = current->stack_overflow_state();
3315   overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3316   overflow_state->set_reserved_stack_activation(current->stack_base());
3317 JRT_END

3365         event.set_method(method);
3366         event.commit();
3367       }
3368     }
3369   }
3370   return activation;
3371 }
3372 
3373 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3374   // After any safepoint, just before going back to compiled code,
3375   // we inform the GC that we will be doing initializing writes to
3376   // this object in the future without emitting card-marks, so
3377   // GC may take any compensating steps.
3378 
3379   oop new_obj = current->vm_result();
3380   if (new_obj == NULL) return;
3381 
3382   BarrierSet *bs = BarrierSet::barrier_set();
3383   bs->on_slowpath_allocation_exit(current, new_obj);
3384 }

  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "classfile/stringTable.hpp"
  29 #include "classfile/vmClasses.hpp"
  30 #include "classfile/vmSymbols.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/compiledIC.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/compiledMethod.inline.hpp"
  35 #include "code/scopeDesc.hpp"
  36 #include "code/vtableStubs.hpp"
  37 #include "compiler/abstractCompiler.hpp"
  38 #include "compiler/compileBroker.hpp"
  39 #include "compiler/disassembler.hpp"
  40 #include "gc/shared/barrierSet.hpp"
  41 #include "gc/shared/collectedHeap.hpp"
  42 #include "gc/shared/gcLocker.inline.hpp"
  43 #include "interpreter/interpreter.hpp"
  44 #include "interpreter/interpreterRuntime.hpp"
  45 #include "jfr/jfrEvents.hpp"
  46 #include "logging/log.hpp"
  47 #include "memory/oopFactory.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "memory/universe.hpp"
  50 #include "oops/access.hpp"
  51 #include "oops/fieldStreams.inline.hpp"
  52 #include "oops/compiledICHolder.inline.hpp"
  53 #include "oops/klass.hpp"
  54 #include "oops/method.inline.hpp"
  55 #include "oops/objArrayKlass.hpp"
  56 #include "oops/objArrayOop.inline.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "oops/inlineKlass.inline.hpp"
  59 #include "prims/forte.hpp"
  60 #include "prims/jvmtiExport.hpp"
  61 #include "prims/methodHandles.hpp"
  62 #include "prims/nativeLookup.hpp"
  63 #include "runtime/atomic.hpp"
  64 #include "runtime/frame.inline.hpp"
  65 #include "runtime/handles.inline.hpp"
  66 #include "runtime/init.hpp"
  67 #include "runtime/interfaceSupport.inline.hpp"
  68 #include "runtime/java.hpp"
  69 #include "runtime/javaCalls.hpp"
  70 #include "runtime/sharedRuntime.hpp"
  71 #include "runtime/stackWatermarkSet.hpp"
  72 #include "runtime/stubRoutines.hpp"
  73 #include "runtime/synchronizer.hpp"
  74 #include "runtime/vframe.inline.hpp"
  75 #include "runtime/vframeArray.hpp"
  76 #include "runtime/vm_version.hpp"
  77 #include "utilities/copy.hpp"
  78 #include "utilities/dtrace.hpp"
  79 #include "utilities/events.hpp"
  80 #include "utilities/resourceHash.hpp"
  81 #include "utilities/macros.hpp"
  82 #include "utilities/xmlstream.hpp"
  83 #ifdef COMPILER1
  84 #include "c1/c1_Runtime1.hpp"
  85 #endif
  86 
  87 // Shared stub locations
  88 RuntimeStub*        SharedRuntime::_wrong_method_blob;
  89 RuntimeStub*        SharedRuntime::_wrong_method_abstract_blob;
  90 RuntimeStub*        SharedRuntime::_ic_miss_blob;
  91 RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
  92 RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
  93 RuntimeStub*        SharedRuntime::_resolve_static_call_blob;

  94 
  95 DeoptimizationBlob* SharedRuntime::_deopt_blob;
  96 SafepointBlob*      SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
  97 SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
  98 SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
  99 
 100 #ifdef COMPILER2
 101 UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
 102 #endif // COMPILER2
 103 
 104 nmethod*            SharedRuntime::_cont_doYield_stub;
 105 
 106 //----------------------------generate_stubs-----------------------------------
 107 void SharedRuntime::generate_stubs() {
 108   _wrong_method_blob                   = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),          "wrong_method_stub");
 109   _wrong_method_abstract_blob          = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
 110   _ic_miss_blob                        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),  "ic_miss_stub");
 111   _resolve_opt_virtual_call_blob       = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),   "resolve_opt_virtual_call");
 112   _resolve_virtual_call_blob           = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),       "resolve_virtual_call");
 113   _resolve_static_call_blob            = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),        "resolve_static_call");

 114 
 115   AdapterHandlerLibrary::initialize();
 116 
 117 #if COMPILER2_OR_JVMCI
 118   // Vectors are generated only by C2 and JVMCI.
 119   bool support_wide = is_wide_vector(MaxVectorSize);
 120   if (support_wide) {
 121     _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
 122   }
 123 #endif // COMPILER2_OR_JVMCI
 124   _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
 125   _polling_page_return_handler_blob    = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);
 126 
 127   generate_deopt_blob();
 128 
 129 #ifdef COMPILER2
 130   generate_uncommon_trap_blob();
 131 #endif // COMPILER2
 132 }
 133 

 982   // forwarded before we look at the return value.
 983   THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badAddress);
 984 }
 985 JNI_END
 986 
 987 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
 988   return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
 989 }
 990 
 991 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* current, oopDesc* obj))
 992 #if INCLUDE_JVMCI
 993   if (!obj->klass()->has_finalizer()) {
 994     return;
 995   }
 996 #endif // INCLUDE_JVMCI
 997   assert(oopDesc::is_oop(obj), "must be a valid oop");
 998   assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
 999   InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1000 JRT_END
1001 
1002 
1003 jlong SharedRuntime::get_java_tid(Thread* thread) {
1004   if (thread != NULL && thread->is_Java_thread()) {
1005     Thread* current = Thread::current();
1006     guarantee(current != thread || JavaThread::cast(thread)->is_oop_safe(),
1007               "current cannot touch oops after its GC barrier is detached.");
1008     oop obj = JavaThread::cast(thread)->threadObj();
1009     return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
1010   }
1011   return 0;
1012 }
1013 
1014 /**
1015  * This function ought to be a void function, but cannot be because
1016  * it gets turned into a tail-call on SPARC, which runs into dtrace bug
1017  * 6254741.  Once that is fixed we can remove the dummy return value.
1018  */
1019 int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
1020   return dtrace_object_alloc(Thread::current(), o, o->size());
1021 }
1022 

1092 // for a call currently in progress, i.e., arguments have been pushed on the stack
1093 // but the callee has not been invoked yet.  Caller frame must be compiled.
1094 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1095                                               CallInfo& callinfo, TRAPS) {
1096   Handle receiver;
1097   Handle nullHandle;  // create a handy null handle for exception returns
1098   JavaThread* current = THREAD;
1099 
1100   assert(!vfst.at_end(), "Java frame must exist");
1101 
1102   // Find caller and bci from vframe
1103   methodHandle caller(current, vfst.method());
1104   int          bci   = vfst.bci();
1105 
1106   if (caller->is_continuation_enter_intrinsic()) {
1107     bc = Bytecodes::_invokestatic;
1108     LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1109     return receiver;
1110   }
1111 
1112   // The substitutability test implementation piggybacks on static call resolution
1113   Bytecodes::Code code = caller->java_code_at(bci);
1114   if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
1115     bc = Bytecodes::_invokestatic;
1116     methodHandle attached_method(THREAD, extract_attached_method(vfst));
1117     assert(attached_method.not_null(), "must have attached method");
1118     vmClasses::PrimitiveObjectMethods_klass()->initialize(CHECK_NH);
1119     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
1120 #ifdef ASSERT
1121     Method* is_subst = vmClasses::PrimitiveObjectMethods_klass()->find_method(vmSymbols::isSubstitutable_name(), vmSymbols::object_object_boolean_signature());
1122     assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
1123 #endif
1124     return receiver;
1125   }
1126 
1127   Bytecode_invoke bytecode(caller, bci);
1128   int bytecode_index = bytecode.index();
1129   bc = bytecode.invoke_code();
1130 
1131   methodHandle attached_method(current, extract_attached_method(vfst));
1132   if (attached_method.not_null()) {
1133     Method* callee = bytecode.static_target(CHECK_NH);
1134     vmIntrinsics::ID id = callee->intrinsic_id();
1135     // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual call,
1136     // it attaches the statically resolved method to the call site.
1137     if (MethodHandles::is_signature_polymorphic(id) &&
1138         MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1139       bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1140 
1141       // Adjust invocation mode according to the attached method.
1142       switch (bc) {
1143         case Bytecodes::_invokevirtual:
1144           if (attached_method->method_holder()->is_interface()) {
1145             bc = Bytecodes::_invokeinterface;
1146           }
1147           break;
1148         case Bytecodes::_invokeinterface:
1149           if (!attached_method->method_holder()->is_interface()) {
1150             bc = Bytecodes::_invokevirtual;
1151           }
1152           break;
1153         case Bytecodes::_invokehandle:
1154           if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1155             bc = attached_method->is_static() ? Bytecodes::_invokestatic
1156                                               : Bytecodes::_invokevirtual;
1157           }
1158           break;
1159         default:
1160           break;
1161       }
1162     } else {
1163       assert(attached_method->has_scalarized_args(), "invalid use of attached method");
1164       if (!attached_method->method_holder()->is_inline_klass()) {
1165         // Ignore the attached method in this case so as not to confuse the code below
1166         attached_method = methodHandle(current, NULL);
1167       }
1168     }
1169   }
1170 
1171   assert(bc != Bytecodes::_illegal, "not initialized");
1172 
1173   bool has_receiver = bc != Bytecodes::_invokestatic &&
1174                       bc != Bytecodes::_invokedynamic &&
1175                       bc != Bytecodes::_invokehandle;
1176   bool check_null_and_abstract = true;
1177 
1178   // Find receiver for non-static call
1179   if (has_receiver) {
1180     // This register map must be updated since we need to find the receiver for
1181     // compiled frames. The receiver might be in a register.
1182     RegisterMap reg_map2(current,
1183                          RegisterMap::UpdateMap::include,
1184                          RegisterMap::ProcessFrames::include,
1185                          RegisterMap::WalkContinuation::skip);
1186     frame stubFrame   = current->last_frame();
1187     // Caller-frame is a compiled frame
1188     frame callerFrame = stubFrame.sender(&reg_map2);
1189     bool caller_is_c1 = false;
1190 
1191     if (callerFrame.is_compiled_frame() && !callerFrame.is_deoptimized_frame()) {
1192       caller_is_c1 = callerFrame.cb()->is_compiled_by_c1();
1193     }
1194 
1195     Method* callee = attached_method();
1196     if (callee == NULL) {
1197       callee = bytecode.static_target(CHECK_NH);
1198       if (callee == NULL) {
1199         THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1200       }
1201     }
1202     if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
1203       // If the receiver is an inline type that is passed as fields, no oop is available
1204       // Resolve the call without receiver null checking.
1205       assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
1206       if (bc == Bytecodes::_invokeinterface) {
1207         bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
1208       }
1209       check_null_and_abstract = false;
1210     } else {
1211       // Retrieve from a compiled argument list
1212       receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1213       assert(oopDesc::is_oop_or_null(receiver()), "");
1214       if (receiver.is_null()) {
1215         THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1216       }
1217     }
1218   }
1219 
1220   // Resolve method
1221   if (attached_method.not_null()) {
1222     // Parameterized by attached method.
1223     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
1224   } else {
1225     // Parameterized by bytecode.
1226     constantPoolHandle constants(current, caller->constants());
1227     LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1228   }
1229 
1230 #ifdef ASSERT
1231   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1232   if (has_receiver && check_null_and_abstract) {
1233     assert(receiver.not_null(), "should have thrown exception");
1234     Klass* receiver_klass = receiver->klass();
1235     Klass* rk = NULL;
1236     if (attached_method.not_null()) {
1237       // In case there's a resolved method attached, use its holder during the check.
1238       rk = attached_method->method_holder();
1239     } else {
1240       // Klass is already loaded.
1241       constantPoolHandle constants(current, caller->constants());
1242       rk = constants->klass_ref_at(bytecode_index, CHECK_NH);
1243     }
1244     Klass* static_receiver_klass = rk;
1245     assert(receiver_klass->is_subtype_of(static_receiver_klass),
1246            "actual receiver must be subclass of static receiver klass");
1247     if (receiver_klass->is_instance_klass()) {
1248       if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1249         tty->print_cr("ERROR: Klass not yet initialized!!");
1250         receiver_klass->print();
1251       }
1252       assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");

1273                         RegisterMap::UpdateMap::skip,
1274                         RegisterMap::ProcessFrames::include,
1275                         RegisterMap::WalkContinuation::skip);
1276     frame fr = current->last_frame();
1277     assert(fr.is_runtime_frame(), "must be a runtimeStub");
1278     fr = fr.sender(&reg_map);
1279     assert(fr.is_entry_frame(), "must be");
1280     // fr is now pointing to the entry frame.
1281     callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1282   } else {
1283     Bytecodes::Code bc;
1284     CallInfo callinfo;
1285     find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1286     callee_method = methodHandle(current, callinfo.selected_method());
1287   }
1288   assert(callee_method()->is_method(), "must be");
1289   return callee_method;
1290 }
1291 
1292 // Resolves a call.
1293 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool* caller_is_c1, TRAPS) {
1294   methodHandle callee_method;
1295   callee_method = resolve_sub_helper(is_virtual, is_optimized, caller_is_c1, THREAD);
1296   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
1297     int retry_count = 0;
1298     while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
1299            callee_method->method_holder() != vmClasses::Object_klass()) {
1300       // If there is a pending exception then there is no need to re-try to
1301       // resolve this method.
1302       // If the method has been redefined, we need to try again.
1303       // Hack: we have no way to update the vtables of arrays, so don't
1304       // require that java.lang.Object has been updated.
1305 
1306       // It is very unlikely that a method is redefined more than 100 times
1307       // in the middle of resolution. If it loops here more than 100 times,
1308       // there could be a bug.
1309       guarantee((retry_count++ < 100),
1310                 "Could not resolve to latest version of redefined method");
1311       // The method was redefined in the middle of resolution, so re-try.
1312       callee_method = resolve_sub_helper(is_virtual, is_optimized, caller_is_c1, THREAD);
1313     }
1314   }
1315   return callee_method;
1316 }
1317 
1318 // This fails if resolution required refilling of IC stubs
1319 bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
1320                                                 CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
1321                                                 Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
1322   StaticCallInfo static_call_info;
1323   CompiledICInfo virtual_call_info;
1324 
1325   // Make sure the callee nmethod does not get deoptimized and removed before
1326   // we are done patching the code.
1327   CompiledMethod* callee = callee_method->code();
1328 
1329   if (callee != NULL) {
1330     assert(callee->is_compiled(), "must be nmethod for patching");
1331   }
1332 
1333   if (callee != NULL && !callee->is_in_use()) {
1334     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1335     callee = NULL;
1336   }
1337 #ifdef ASSERT
1338   address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
1339 #endif
1340 
1341   bool is_nmethod = caller_nm->is_nmethod();
1342   bool caller_is_c1 = caller_nm->is_compiled_by_c1();
1343 
1344   if (is_virtual) {
1345     Klass* receiver_klass = NULL;
1346     if (!caller_is_c1 && callee_method->is_scalarized_arg(0)) {
1347       // If the receiver is an inline type that is passed as fields, no oop is available
1348       receiver_klass = callee_method->method_holder();
1349     } else {
1350       assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1351       receiver_klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
1352     }
1353     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1354     CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
1355                      is_optimized, static_bound, is_nmethod, caller_is_c1, virtual_call_info,

1356                      CHECK_false);
1357   } else {
1358     // static call
1359     CompiledStaticCall::compute_entry(callee_method, caller_nm, static_call_info);
1360   }
1361 
1362   // grab lock, check for deoptimization and potentially patch caller
1363   {
1364     CompiledICLocker ml(caller_nm);
1365 
1366     // The lock blocks safepoints, during which both nmethods could change state.
1367 
1368     // Now that we are ready to patch: if the Method* was redefined, then
1369     // don't update the call site and let the caller retry.
1370     // Don't update the call site if the callee nmethod was unloaded or deoptimized,
1371     // or if the callee nmethod was replaced by another nmethod, which may happen
1372     // once multiple simultaneously alive nmethods per method (tiered compilation)
1373     // are supported.
1374     if (!callee_method->is_old() &&
1375         (callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
1376       NoSafepointVerifier nsv;
1377 #ifdef ASSERT
1378       // We must not try to patch to jump to an already unloaded method.
1379       if (dest_entry_point != 0) {

1392       } else {
1393         if (VM_Version::supports_fast_class_init_checks() &&
1394             invoke_code == Bytecodes::_invokestatic &&
1395             callee_method->needs_clinit_barrier() &&
1396             callee != NULL && callee->is_compiled_by_jvmci()) {
1397           return true; // skip patching for JVMCI
1398         }
1399         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
1400         if (is_nmethod && caller_nm->method()->is_continuation_enter_intrinsic()) {
1401           ssc->compute_entry_for_continuation_entry(callee_method, static_call_info);
1402         }
1403         if (ssc->is_clean()) ssc->set(static_call_info);
1404       }
1405     }
1406   } // unlock CompiledICLocker
1407   return true;
1408 }
1409 
1410 // Resolves a call.  The compilers generate code for calls that go here
1411 // and are patched with the real destination of the call.
1412 methodHandle SharedRuntime::resolve_sub_helper(bool is_virtual, bool is_optimized, bool* caller_is_c1, TRAPS) {
1413   JavaThread* current = THREAD;
1414   ResourceMark rm(current);
1415   RegisterMap cbl_map(current,
1416                       RegisterMap::UpdateMap::skip,
1417                       RegisterMap::ProcessFrames::include,
1418                       RegisterMap::WalkContinuation::skip);
1419   frame caller_frame = current->last_frame().sender(&cbl_map);
1420 
1421   CodeBlob* caller_cb = caller_frame.cb();
1422   guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
1423   CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
1424   *caller_is_c1 = caller_nm->is_compiled_by_c1();
1425 
1426   // determine call info & receiver
1427   // note: a) receiver is NULL for static calls
1428   //       b) an exception is thrown if receiver is NULL for non-static calls
1429   CallInfo call_info;
1430   Bytecodes::Code invoke_code = Bytecodes::_illegal;
1431   Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1432   methodHandle callee_method(current, call_info.selected_method());
1433 
1434   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1435          (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1436          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1437          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1438          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1439 
1440   assert(!caller_nm->is_unloading(), "It should not be unloading");
1441 
1442 #ifndef PRODUCT
1443   // tracing/debugging/statistics
1444   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :

1503     }
1504   }
1505 
1506 }
1507 
1508 
1509 // Inline caches exist only in compiled code
1510 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1511 #ifdef ASSERT
1512   RegisterMap reg_map(current,
1513                       RegisterMap::UpdateMap::skip,
1514                       RegisterMap::ProcessFrames::include,
1515                       RegisterMap::WalkContinuation::skip);
1516   frame stub_frame = current->last_frame();
1517   assert(stub_frame.is_runtime_frame(), "sanity check");
1518   frame caller_frame = stub_frame.sender(&reg_map);
1519   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1520 #endif /* ASSERT */
1521 
1522   methodHandle callee_method;
1523   bool is_optimized = false;
1524   bool caller_is_c1 = false;
1525   JRT_BLOCK
1526     callee_method = SharedRuntime::handle_ic_miss_helper(is_optimized, caller_is_c1, CHECK_NULL);
1527     // Return Method* through TLS
1528     current->set_vm_result_2(callee_method());
1529   JRT_BLOCK_END
1530   // return compiled code entry point after potential safepoints
1531   return entry_for_handle_wrong_method(callee_method, false, is_optimized, caller_is_c1);

1532 JRT_END
1533 
1534 
1535 // Handle call site that has been made non-entrant
1536 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1537   // 6243940 We might end up in here if the callee is deoptimized
1538   // as we race to call it.  We don't want to take a safepoint if
1539   // the caller was interpreted because the caller frame will look
1540   // interpreted to the stack walkers and arguments are now
1541   // "compiled" so it is much better to make this transition
1542   // invisible to the stack walking code. The i2c path will
1543   // place the callee method in the callee_target. It is stashed
1544   // there because if we tried to find the callee by normal means a
1545   // safepoint would be possible and we would have trouble GC'ing the compiled args.
1546   RegisterMap reg_map(current,
1547                       RegisterMap::UpdateMap::skip,
1548                       RegisterMap::ProcessFrames::include,
1549                       RegisterMap::WalkContinuation::skip);
1550   frame stub_frame = current->last_frame();
1551   assert(stub_frame.is_runtime_frame(), "sanity check");

1558     guarantee(callee != NULL && callee->is_method(), "bad handshake");
1559     current->set_vm_result_2(callee);
1560     current->set_callee_target(NULL);
1561     if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1562       // Bypass class initialization checks in c2i when caller is in native.
1563       // JNI calls to static methods don't have class initialization checks.
1564       // Fast class initialization checks are present in c2i adapters and call into
1565       // SharedRuntime::handle_wrong_method() on the slow path.
1566       //
1567       // JVM upcalls may land here as well, but there's a proper check present in
1568       // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1569       // so bypassing it in c2i adapter is benign.
1570       return callee->get_c2i_no_clinit_check_entry();
1571     } else {
1572       return callee->get_c2i_entry();
1573     }
1574   }
1575 
1576   // Must be compiled to compiled path which is safe to stackwalk
1577   methodHandle callee_method;
1578   bool is_static_call = false;
1579   bool is_optimized = false;
1580   bool caller_is_c1 = false;
1581   JRT_BLOCK
1582     // Force resolving of caller (if we called from compiled frame)
1583     callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_is_c1, CHECK_NULL);
1584     current->set_vm_result_2(callee_method());
1585   JRT_BLOCK_END
1586   // return compiled code entry point after potential safepoints
1587   return entry_for_handle_wrong_method(callee_method, is_static_call, is_optimized, caller_is_c1);

1588 JRT_END
1589 
1590 // Handle abstract method call
1591 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1592   // Verbose error message for AbstractMethodError.
1593   // Get the called method from the invoke bytecode.
1594   vframeStream vfst(current, true);
1595   assert(!vfst.at_end(), "Java frame must exist");
1596   methodHandle caller(current, vfst.method());
1597   Bytecode_invoke invoke(caller, vfst.bci());
1598   DEBUG_ONLY( invoke.verify(); )
1599 
1600   // Find the compiled caller frame.
1601   RegisterMap reg_map(current,
1602                       RegisterMap::UpdateMap::include,
1603                       RegisterMap::ProcessFrames::include,
1604                       RegisterMap::WalkContinuation::skip);
1605   frame stubFrame = current->last_frame();
1606   assert(stubFrame.is_runtime_frame(), "must be");
1607   frame callerFrame = stubFrame.sender(&reg_map);
1608   assert(callerFrame.is_compiled_frame(), "must be");
1609 
1610   // Install exception and return forward entry.
1611   address res = StubRoutines::throw_AbstractMethodError_entry();
1612   JRT_BLOCK
1613     methodHandle callee(current, invoke.static_target(current));
1614     if (!callee.is_null()) {
1615       oop recv = callerFrame.retrieve_receiver(&reg_map);
1616       Klass *recv_klass = (recv != NULL) ? recv->klass() : NULL;
1617       res = StubRoutines::forward_exception_entry();
1618       LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1619     }
1620   JRT_BLOCK_END
1621   return res;
1622 JRT_END
1623 
1624 
1625 // resolve a static call and patch code
1626 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1627   methodHandle callee_method;
1628   bool caller_is_c1;
1629   bool enter_special = false;
1630   JRT_BLOCK
1631     callee_method = SharedRuntime::resolve_helper(false, false, &caller_is_c1, CHECK_NULL);
1632     current->set_vm_result_2(callee_method());
1633 
1634     if (current->is_interp_only_mode()) {
1635       RegisterMap reg_map(current,
1636                           RegisterMap::UpdateMap::skip,
1637                           RegisterMap::ProcessFrames::include,
1638                           RegisterMap::WalkContinuation::skip);
1639       frame stub_frame = current->last_frame();
1640       assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1641       frame caller = stub_frame.sender(&reg_map);
1642       enter_special = caller.cb() != NULL && caller.cb()->is_compiled()
1643         && caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic();
1644     }
1645   JRT_BLOCK_END
1646 
1647   if (current->is_interp_only_mode() && enter_special) {
1648     // enterSpecial is compiled and calls this method to resolve the call to Continuation::enter,
1649     // but in interp_only_mode we need to go to the interpreted entry.
1650     // The c2i won't patch in this mode -- see fixup_callers_callsite.
1651     //
1652     // This should probably be done in all cases, not just enterSpecial (see JDK-8218403),
1653     // but that's part of a larger fix, and the situation is worse for enterSpecial, as it has no
1654     // interpreted version.
1655     return callee_method->get_c2i_entry();
1656   }
1657 
1658   // return compiled code entry point after potential safepoints
1659   address entry = caller_is_c1 ?
1660     callee_method->verified_inline_code_entry() : callee_method->verified_code_entry();
1661   assert(entry != NULL, "Jump to zero!");
1662   return entry;
1663 JRT_END
1664 
1665 
1666 // resolve virtual call and update inline cache to monomorphic
1667 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1668   methodHandle callee_method;
1669   bool caller_is_c1;
1670   JRT_BLOCK
1671     callee_method = SharedRuntime::resolve_helper(true, false, &caller_is_c1, CHECK_NULL);
1672     current->set_vm_result_2(callee_method());
1673   JRT_BLOCK_END
1674   // return compiled code entry point after potential safepoints
1675   address entry = caller_is_c1 ?
1676     callee_method->verified_inline_code_entry() : callee_method->verified_inline_ro_code_entry();
1677   assert(entry != NULL, "Jump to zero!");
1678   return entry;
1679 JRT_END
1680 
1681 
1682 // Resolve a virtual call that can be statically bound (e.g., always
1683 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1684 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1685   methodHandle callee_method;
1686   bool caller_is_c1;
1687   JRT_BLOCK
1688     callee_method = SharedRuntime::resolve_helper(true, true, &caller_is_c1, CHECK_NULL);
1689     current->set_vm_result_2(callee_method());
1690   JRT_BLOCK_END
1691   // return compiled code entry point after potential safepoints
1692   address entry = caller_is_c1 ?
1693     callee_method->verified_inline_code_entry() : callee_method->verified_code_entry();
1694   assert(entry != NULL, "Jump to zero!");
1695   return entry;
1696 JRT_END
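
// Note on the entry points selected above (a summary of the code, using the
// project's calling-convention terms): C1-compiled callers never scalarize
// inline-type arguments and therefore jump to verified_inline_code_entry(),
// whereas C2-compiled callers use verified_code_entry() for static and
// optimized virtual calls, and verified_inline_ro_code_entry() for regular
// virtual calls, where the receiver must stay an oop for dispatch while other
// inline-type arguments may be passed scalarized.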
1697 
1698 // The handle_ic_miss_helper_internal function returns false if it failed
1699 // because it ran out of vtable stubs, or out of IC stubs while transitioning
1700 // ICs to transitional states. The needs_ic_stub_refill value will be set if
1701 // the failure was due to running out of IC stubs, in which case handle_ic_miss_helper
1702 // refills the IC stubs and tries again.
1703 bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
1704                                                    const frame& caller_frame, methodHandle callee_method,
1705                                                    Bytecodes::Code bc, CallInfo& call_info,
1706                                                    bool& needs_ic_stub_refill, bool& is_optimized, bool caller_is_c1, TRAPS) {
1707   CompiledICLocker ml(caller_nm);
1708   CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1709   bool should_be_mono = false;
1710   if (inline_cache->is_optimized()) {
1711     if (TraceCallFixup) {
1712       ResourceMark rm(THREAD);
1713       tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1714       callee_method->print_short_name(tty);
1715       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1716     }
1717     is_optimized = true;
1718     should_be_mono = true;
1719   } else if (inline_cache->is_icholder_call()) {
1720     CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1721     if (ic_oop != NULL) {
1722       if (!ic_oop->is_loader_alive()) {
1723         // Deferred IC cleaning due to concurrent class unloading
1724         if (!inline_cache->set_to_clean()) {
1725           needs_ic_stub_refill = true;
1726           return false;
1727         }
1728       } else if (receiver()->klass() == ic_oop->holder_klass()) {
1729         // This isn't a real miss. We must have seen that compiled code
1730         // is now available and we want the call site converted to a
1731         // monomorphic compiled call site.
1732         // We can't assert for callee_method->code() != NULL because it
1733         // could have been deoptimized in the meantime
1734         if (TraceCallFixup) {
1735           ResourceMark rm(THREAD);
1736           tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1737           callee_method->print_short_name(tty);
1738           tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1739         }
1740         should_be_mono = true;
1741       }
1742     }
1743   }
1744 
1745   if (should_be_mono) {
1746     // We have a path that was monomorphic but was going interpreted
1747     // and now we have (or had) a compiled entry. We correct the IC
1748     // by using a new icBuffer.
1749     CompiledICInfo info;
1750     Klass* receiver_klass = receiver()->klass();
1751     inline_cache->compute_monomorphic_entry(callee_method,
1752                                             receiver_klass,
1753                                             inline_cache->is_optimized(),
1754                                             false, caller_nm->is_nmethod(),
1755                                             caller_nm->is_compiled_by_c1(),
1756                                             info, CHECK_false);
1757     if (!inline_cache->set_to_monomorphic(info)) {
1758       needs_ic_stub_refill = true;
1759       return false;
1760     }
1761   } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1762     // Potential change to megamorphic
1763 
1764     bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, caller_is_c1, CHECK_false);
1765     if (needs_ic_stub_refill) {
1766       return false;
1767     }
1768     if (!successful) {
1769       if (!inline_cache->set_to_clean()) {
1770         needs_ic_stub_refill = true;
1771         return false;
1772       }
1773     }
1774   } else {
1775     // Either clean or megamorphic
1776   }
1777   return true;
1778 }
1779 
1780 methodHandle SharedRuntime::handle_ic_miss_helper(bool& is_optimized, bool& caller_is_c1, TRAPS) {
1781   JavaThread* current = THREAD;
1782   ResourceMark rm(current);
1783   CallInfo call_info;
1784   Bytecodes::Code bc;
1785 
1786   // receiver is NULL for static calls. An exception is thrown for NULL
1787   // receivers for non-static calls
1788   Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1789   // Compiler1 can produce virtual call sites that can actually be statically bound.
1790   // If we fell through to the code below we would think that the site was going
1791   // megamorphic when in fact the site can never miss. Worse, because we'd think it
1792   // was megamorphic we'd try to do a vtable dispatch; however, methods that can be
1793   // statically bound don't have vtable entries (vtable_index < 0) and we'd blow up.
1794   // So we force a reresolution of the call site (as if we did a handle_wrong_method
1795   // and not a plain ic_miss) and the site will be converted to an optimized virtual
1796   // call site, never to miss again. I don't believe C2 will produce code like this,
1797   // but if it did this would still be the correct thing to do for it too, hence no ifdef.
1798   //
1799   if (call_info.resolved_method()->can_be_statically_bound()) {
1800     bool is_static_call = false;
1801     methodHandle callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_is_c1, CHECK_(methodHandle()));
1802     assert(!is_static_call, "IC miss at static call?");
1803     if (TraceCallFixup) {
1804       RegisterMap reg_map(current,
1805                           RegisterMap::UpdateMap::skip,
1806                           RegisterMap::ProcessFrames::include,
1807                           RegisterMap::WalkContinuation::skip);
1808       frame caller_frame = current->last_frame().sender(&reg_map);
1809       ResourceMark rm(current);
1810       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1811       callee_method->print_short_name(tty);
1812       tty->print_cr(" from pc: " INTPTR_FORMAT, p2i(caller_frame.pc()));
1813       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1814     }
1815     return callee_method;
1816   }
1817 
1818   methodHandle callee_method(current, call_info.selected_method());
1819 
1820 #ifndef PRODUCT
1821   Atomic::inc(&_ic_miss_ctr);
1822 

1841 #endif
1842 
1843   // install an event collector so that when a vtable stub is created the
1844   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1845   // event can't be posted when the stub is created as locks are held
1846   // - instead the event will be deferred until the event collector goes
1847   // out of scope.
1848   JvmtiDynamicCodeEventCollector event_collector;
1849 
1850   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1851   // Transitioning IC caches may require transition stubs. If we run out
1852   // of transition stubs, we have to drop locks and perform a safepoint
1853   // that refills them.
1854   RegisterMap reg_map(current,
1855                       RegisterMap::UpdateMap::skip,
1856                       RegisterMap::ProcessFrames::include,
1857                       RegisterMap::WalkContinuation::skip);
1858   frame caller_frame = current->last_frame().sender(&reg_map);
1859   CodeBlob* cb = caller_frame.cb();
1860   CompiledMethod* caller_nm = cb->as_compiled_method();
1861   caller_is_c1 = caller_nm->is_compiled_by_c1();
1862 
1863   for (;;) {
1864     ICRefillVerifier ic_refill_verifier;
1865     bool needs_ic_stub_refill = false;
1866     bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1867                                                      bc, call_info, needs_ic_stub_refill, is_optimized, caller_is_c1, CHECK_(methodHandle()));
1868     if (successful || !needs_ic_stub_refill) {
1869       return callee_method;
1870     } else {
1871       InlineCacheBuffer::refill_ic_stubs();
1872     }
1873   }
1874 }
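
// Inline-cache transitions exercised above (sketch, not an exhaustive state
// machine):
//
//   clean --(first resolve)--> monomorphic --(miss on new receiver)--> megamorphic
//     ^                             |
//     +--(set_to_clean, e.g. deferred cleaning on class unloading)----+
//
// Each transition may need a transition stub from the ICBuffer; when the
// buffer is exhausted, handle_ic_miss_helper_internal() sets
// needs_ic_stub_refill and the retry loop above calls
// InlineCacheBuffer::refill_ic_stubs() before trying again.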
1875 
1876 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1877   CompiledICLocker ml(caller_nm);
1878   if (is_static_call) {
1879     CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1880     if (!ssc->is_clean()) {
1881       return ssc->set_to_clean();
1882     }
1883   } else {
1884     // compiled, dispatched call (which used to call an interpreted method)
1885     CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1886     if (!inline_cache->is_clean()) {
1887       return inline_cache->set_to_clean();
1888     }
1889   }
1890   return true;
1891 }
1892 
1893 //
1894 // Resets a call-site in compiled code so it will get resolved again.
1895 // This routine handles virtual call sites, optimized virtual call
1896 // sites, and static call sites. It is typically used to change a call
1897 // site's destination from compiled to interpreted.
1898 //
1899 methodHandle SharedRuntime::reresolve_call_site(bool& is_static_call, bool& is_optimized, bool& caller_is_c1, TRAPS) {
1900   JavaThread* current = THREAD;
1901   ResourceMark rm(current);
1902   RegisterMap reg_map(current,
1903                       RegisterMap::UpdateMap::skip,
1904                       RegisterMap::ProcessFrames::include,
1905                       RegisterMap::WalkContinuation::skip);
1906   frame stub_frame = current->last_frame();
1907   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1908   frame caller = stub_frame.sender(&reg_map);
1909 
1910   // Do nothing if the frame isn't a live compiled frame.
1911   // nmethod could be deoptimized by the time we get here
1912   // so no update to the caller is needed.
1913 
1914   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
1915 
1916     address pc = caller.pc();
1917 
1918     // Check for static or virtual call

1919     CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
1920     caller_is_c1 = caller_nm->is_compiled_by_c1();
1921 
1922     // Default call_addr is the location of the "basic" call.
1923     // Determine the address of the call we are reresolving. With
1924     // Inline Caches we will always find a recognizable call.
1925     // With Inline Caches disabled we may or may not find a
1926     // recognizable call. We will always find a call for static
1927     // calls and for optimized virtual calls. For vanilla virtual
1928     // calls it depends on the state of the UseInlineCaches switch.
1929     //
1930     // With Inline Caches disabled we can get here for a virtual call
1931     // for two reasons:
1932     //   1 - calling an abstract method. The vtable for abstract methods
1933     //       will send us through handle_wrong_method and we will eventually
1934     //       end up in the interpreter to throw an AbstractMethodError.
1935     //   2 - a racing deoptimization. We could be doing a vanilla vtable
1936     //       call and, between the time we fetch the entry address and the
1937     //       time we jump to it, the target gets deoptimized. As in case 1,
1938     //       we will wind up in the interpreter (through a c2i with C2).
1939     //
1940     address call_addr = NULL;
1941     {
1942       // Get call instruction under lock because another thread may be
1943       // busy patching it.
1944       CompiledICLocker ml(caller_nm);
1945       // Location of call instruction
1946       call_addr = caller_nm->call_instruction_address(pc);
1947     }
1948 
1949     // Check relocations for the matching call to 1) avoid false positives,
1950     // and 2) determine the type.
1951     if (call_addr != NULL) {
1952       // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1953       // bytes back in the instruction stream so we must also check for reloc info.
1954       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1955       bool ret = iter.next(); // Get item
1956       if (ret) {
1957         is_static_call = false;
1958         is_optimized = false;
1959         switch (iter.type()) {
1960           case relocInfo::static_call_type:
1961             is_static_call = true;
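            // fall through: the static call site is cleaned by the shared code below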
1962 
1963           case relocInfo::virtual_call_type:
1964           case relocInfo::opt_virtual_call_type:
1965             is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
1966             // Cleaning the inline cache will force a new resolve. This is more robust
1967             // than directly setting it to the new destination, since resolving of calls
1968             // is always done through the same code path. (experience shows that it
1969             // leads to very hard to track down bugs, if an inline cache gets updated
1970             // to a wrong method). It should not be performance critical, since the
1971             // resolve is only done once.
1972             guarantee(iter.addr() == call_addr, "must find call");
1973             for (;;) {
1974               ICRefillVerifier ic_refill_verifier;
1975               if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
1976                 InlineCacheBuffer::refill_ic_stubs();
1977               } else {
1978                 break;
1979               }
1980             }
1981             break;
1982           default:
1983             break;
1984         }
1985       }
1986     }
1987   }
1988 
1989   methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1990 

1991 #ifndef PRODUCT
1992   Atomic::inc(&_wrong_method_ctr);
1993 
1994   if (TraceCallFixup) {
1995     ResourceMark rm(current);
1996     tty->print("handle_wrong_method reresolving call to");
1997     callee_method->print_short_name(tty);
1998     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1999   }
2000 #endif
2001 
2002   return callee_method;
2003 }
2004 
2005 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
2006   // The faulting unsafe accesses should be changed to throw the error
2007   // synchronously instead. Meanwhile the faulting instruction will be
2008   // skipped over (effectively turning it into a no-op) and an
2009   // asynchronous exception will be raised which the thread will
2010   // handle at a later point. If the instruction is a load it will

2066     if (TraceCallFixup) {
2067       tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
2068       moop->print_short_name(tty);
2069       tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
2070     }
2071   }
2072   return false;
2073 }
2074 
2075 // ---------------------------------------------------------------------------
2076 // We are calling the interpreter via a c2i. Normally this would mean that
2077 // we were called by a compiled method. However, we could have lost a race
2078 // where we went int -> i2c -> c2i, so the caller could in fact be
2079 // interpreted. If the caller is compiled, we attempt to patch it so
2080 // that it no longer calls into the interpreter.
2081 JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
2082   Method* moop(method);
2083 
2084   AARCH64_PORT_ONLY(assert(pauth_ptr_is_raw(caller_pc), "should be raw"));
2085 


2086   // It's possible that deoptimization can occur at a call site which hasn't
2087   // been resolved yet, in which case this function will be called from
2088   // an nmethod that has been patched for deopt and we can ignore the
2089   // request for a fixup.
2090   // Also, it is possible that we lost a race and from_compiled_entry is
2091   // now back to the c2i; in that case we don't need to patch, and if we
2092   // did we'd leap into space because the call site needs to use the
2093   // "to interpreter" stub in order to load up the Method*. Don't
2094   // ask me how I know this...
2095 
2096   CodeBlob* cb = CodeCache::find_blob(caller_pc);
2097   if (cb == NULL || !cb->is_compiled()) {
2098     return;
2099   }
2100   address entry_point = moop->from_compiled_entry_no_trampoline(cb->is_compiled_by_c1());
2101   if (entry_point == moop->get_c2i_entry()) {
2102     return;
2103   }
2104 
2105   // The check above makes sure this is an nmethod.
2106   CompiledMethod* nm = cb->as_compiled_method_or_null();
2107   assert(nm, "must be");
2108 
2109   // Get the return PC for the passed caller PC.
2110   address return_pc = caller_pc + frame::pc_return_offset;
2111 
2112   assert(!JavaThread::current()->is_interp_only_mode() || !nm->method()->is_continuation_enter_intrinsic()
2113     || ContinuationEntry::is_interpreted_call(return_pc), "interp_only_mode but not in enterSpecial interpreted entry");
2114 
2115   // There is a benign race here. We could be attempting to patch to a compiled
2116   // entry point at the same time the callee is being deoptimized. If that is
2117   // the case then entry_point may in fact point to a c2i and we'd patch the
2118   // call site with the same old data. clear_code will set code() to NULL
2119   // at the end of it. If we happen to see that NULL then we can skip trying
2120   // to patch. If we hit the window where the callee has a c2i in the
2121   // from_compiled_entry and the NULL isn't present yet then we lose the race

2474  private:
2475   enum {
2476     _basic_type_bits = 4,
2477     _basic_type_mask = right_n_bits(_basic_type_bits),
2478     _basic_types_per_int = BitsPerInt / _basic_type_bits,
2479     _compact_int_count = 3
2480   };
2481   // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
2482   // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2483 
2484   union {
2485     int  _compact[_compact_int_count];
2486     int* _fingerprint;
2487   } _value;
2488   int _length; // A negative length indicates the fingerprint is in the compact form;
2489                // otherwise _value._fingerprint points to the heap-allocated array.
2490 
2491   // Remap BasicTypes that are handled equivalently by the adapters.
2492   // These are correct for the current system but someday it might be
2493   // necessary to make this mapping platform dependent.
2494   static BasicType adapter_encoding(BasicType in) {
2495     switch (in) {
2496       case T_BOOLEAN:
2497       case T_BYTE:
2498       case T_SHORT:
2499       case T_CHAR:
2500         // They are all promoted to T_INT in the calling convention
2501         return T_INT;
2502 
2503       case T_OBJECT:
2504       case T_ARRAY:
2505         // In other words, we assume that any register good enough for
2506         // an int or long is good enough for a managed pointer.
2507 #ifdef _LP64
2508         return T_LONG;
2509 #else
2510         return T_INT;
2511 #endif
2512 
2513       case T_INT:
2514       case T_LONG:
2515       case T_FLOAT:
2516       case T_DOUBLE:
2517       case T_VOID:
2518         return in;
2519 
2520       default:
2521         ShouldNotReachHere();
2522         return T_CONFLICT;
2523     }
2524   }
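  // Illustrative example: with this remapping, on a 64-bit VM the signatures
  // (boolean, Object, long) and (int, Object[], long) both encode as
  // (T_INT, T_LONG, T_LONG) and can therefore share a single adapter.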
2525 
2526  public:
2527   AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2528     // The fingerprint is based on the BasicType signature encoded
2529     // into an array of ints with eight entries per int.
2530     int total_args_passed = (sig != NULL) ? sig->length() : 0;
2531     int* ptr;
2532     int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2533     if (len <= _compact_int_count) {
2534       assert(_compact_int_count == 3, "else change next line");
2535       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2536       // Storing the signature encoded as signed chars hits about 98%
2537       // of the time.
2538       _length = -len;
2539       ptr = _value._compact;
2540     } else {
2541       _length = len;
2542       _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2543       ptr = _value._fingerprint;
2544     }
2545 
2546     // Now pack the BasicTypes with 8 per int
2547     int sig_index = 0;
2548     BasicType prev_bt = T_ILLEGAL;
2549     int vt_count = 0;
2550     for (int index = 0; index < len; index++) {
2551       int value = 0;
2552       for (int byte = 0; byte < _basic_types_per_int; byte++) {
2553         BasicType bt = T_ILLEGAL;
2554         if (sig_index < total_args_passed) {
2555           bt = sig->at(sig_index++)._bt;
2556           if (bt == T_PRIMITIVE_OBJECT) {
2557             // Found start of inline type in signature
2558             assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2559             if (sig_index == 1 && has_ro_adapter) {
2560               // With a ro_adapter, replace receiver inline type delimiter by T_VOID to prevent matching
2561               // with other adapters that have the same inline type as first argument and no receiver.
2562               bt = T_VOID;
2563             }
2564             vt_count++;
2565           } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2566             // Found end of inline type in signature
2567             assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2568             vt_count--;
2569             assert(vt_count >= 0, "invalid vt_count");
2570           } else if (vt_count == 0) {
2571             // Widen fields that are not part of a scalarized inline type argument
2572             bt = adapter_encoding(bt);
2573           }
2574           prev_bt = bt;
2575         }
2576         int bt_val = (bt == T_ILLEGAL) ? 0 : bt;
2577         assert((bt_val & _basic_type_mask) == bt_val, "must fit in 4 bits");
2578         value = (value << _basic_type_bits) | bt_val;
2579       }
2580       ptr[index] = value;
2581     }
2582     assert(vt_count == 0, "invalid vt_count");
2583   }
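  // A worked example (assuming the standard BasicType values T_INT = 0xA,
  // T_LONG = 0xB, T_VOID = 0xE): a static method taking (int, long) yields
  // the SigEntry sequence [T_INT, T_LONG, T_VOID], which packs into the
  // single compact word 0xABE00000 with _length == -1.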
2584 
2585   ~AdapterFingerPrint() {
2586     if (_length > 0) {
2587       FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2588     }
2589   }
2590 
2591   int value(int index) {
2592     if (_length < 0) {
2593       return _value._compact[index];
2594     }
2595     return _value._fingerprint[index];
2596   }
2597   int length() {
2598     if (_length < 0) return -_length;
2599     return _length;
2600   }
2601 
2602   bool is_compact() {

2627   const char* as_basic_args_string() {
2628     stringStream st;
2629     bool long_prev = false;
2630     for (int i = 0; i < length(); i++) {
2631       unsigned val = (unsigned)value(i);
2632       // args are packed so that first/lower arguments are in the highest
2633       // bits of each int value, so iterate from highest to the lowest
2634       for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2635         unsigned v = (val >> j) & _basic_type_mask;
2636         if (v == 0) {
2637           assert(i == length() - 1, "Only expect zeroes in the last word");
2638           continue;
2639         }
2640         if (long_prev) {
2641           long_prev = false;
2642           if (v == T_VOID) { st.print("J"); continue; }  // a real long: T_LONG, T_VOID
2643           st.print("L");  // T_LONG with no trailing T_VOID encoded an object (LP64)
2644         }
2645         if (v == T_LONG) {
2646           long_prev = true;
2647         } else if (v != T_VOID) {
2648           st.print("%c", type2char((BasicType)v));
2649         }
2652       }
2653     }
2654     if (long_prev) {
2655       st.print("L");
2656     }
2657     return st.as_string();
2658   }
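  // For example, a fingerprint packed from (Object, int, long) on a 64-bit
  // VM prints as "LIJ": objects are encoded as T_LONG with no trailing
  // T_VOID, so a lone long decodes as "L" while a T_LONG/T_VOID pair
  // decodes as "J".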
2659 #endif // !product
2660 
2661   bool equals(AdapterFingerPrint* other) {
2662     if (other->_length != _length) {
2663       return false;
2664     }
2665     if (_length < 0) {
2666       assert(_compact_int_count == 3, "else change next line");
2667       return _value._compact[0] == other->_value._compact[0] &&
2668              _value._compact[1] == other->_value._compact[1] &&
2669              _value._compact[2] == other->_value._compact[2];
2670     } else {

2677     return true;
2678   }
2679 
2680   static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2681     NOT_PRODUCT(_equals++);
2682     return fp1->equals(fp2);
2683   }
2684 
2685   static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2686     return fp->compute_hash();
2687   }
2688 };
2689 
2690 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2691 ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2692                   ResourceObj::C_HEAP, mtCode,
2693                   AdapterFingerPrint::compute_hash,
2694                   AdapterFingerPrint::equals> _adapter_handler_table;
2695 
2696 // Find an entry with the same fingerprint, if one exists
2697 static AdapterHandlerEntry* lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2698   NOT_PRODUCT(_lookups++);
2699   assert_lock_strong(AdapterHandlerLibrary_lock);
2700   AdapterFingerPrint fp(sig, has_ro_adapter);
2701   AdapterHandlerEntry** entry = _adapter_handler_table.get(&fp);
2702   if (entry != nullptr) {
2703 #ifndef PRODUCT
2704     if (fp.is_compact()) _compact++;
2705     _hits++;
2706 #endif
2707     return *entry;
2708   }
2709   return nullptr;
2710 }
2711 
2712 #ifndef PRODUCT
2713 static void print_table_statistics() {
2714   auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2715     return sizeof(*key) + sizeof(*a);
2716   };
2717   TableStatistics ts = _adapter_handler_table.statistics_calculate(size);
2718   ts.print(tty, "AdapterHandlerTable");
2719   tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2720                 _adapter_handler_table.table_size(), _adapter_handler_table.number_of_entries());
2721   tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2722                 _lookups, _equals, _hits, _compact);
2723 }
2724 #endif
2725 
2726 // ---------------------------------------------------------------------------
2727 // Implementation of AdapterHandlerLibrary
2728 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
2729 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = NULL;
2730 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = NULL;
2731 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = NULL;
2732 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = NULL;
2733 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = NULL;
2734 const int AdapterHandlerLibrary_size = 48*K;
2735 BufferBlob* AdapterHandlerLibrary::_buffer = NULL;
2736 
2737 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2738   return _buffer;
2739 }
2740 
2741 extern "C" void unexpected_adapter_call() {
2742   ShouldNotCallThis();
2743 }
2744 
2745 static void post_adapter_creation(const AdapterBlob* new_adapter,
2746                                   const AdapterHandlerEntry* entry) {
2747   if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2748     char blob_id[256];
2749     jio_snprintf(blob_id,
2750                  sizeof(blob_id),
2751                  "%s(%s)",
2752                  new_adapter->name(),
2753                  entry->fingerprint()->as_string());
2754     if (Forte::is_enabled()) {

2760     }
2761   }
2762 }
2763 
2764 void AdapterHandlerLibrary::initialize() {
2765   ResourceMark rm;
2766   AdapterBlob* no_arg_blob = NULL;
2767   AdapterBlob* int_arg_blob = NULL;
2768   AdapterBlob* obj_arg_blob = NULL;
2769   AdapterBlob* obj_int_arg_blob = NULL;
2770   AdapterBlob* obj_obj_arg_blob = NULL;
2771   {
2772     MutexLocker mu(AdapterHandlerLibrary_lock);
2773 
2774     // Create a special handler for abstract methods.  Abstract methods
2775     // are never compiled so an i2c entry is somewhat meaningless, but
2776     // throw AbstractMethodError just in case.
2777     // Pass wrong_method_abstract for the c2i transitions to return
2778     // AbstractMethodError for invalid invocations.
2779     address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2780     _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(NULL),
2781                                                                 StubRoutines::throw_AbstractMethodError_entry(),
2782                                                                 wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
2783                                                                 wrong_method_abstract, wrong_method_abstract);

2784     _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);

2785 
2786     CompiledEntrySignature no_args;
2787     no_args.compute_calling_conventions();
2788     _no_arg_handler = create_adapter(no_arg_blob, no_args, true);
2789 
2790     CompiledEntrySignature obj_args;
2791     SigEntry::add_entry(&obj_args.sig(), T_OBJECT, NULL);
2792     obj_args.compute_calling_conventions();
2793     _obj_arg_handler = create_adapter(obj_arg_blob, obj_args, true);
2794 
2795     CompiledEntrySignature int_args;
2796     SigEntry::add_entry(&int_args.sig(), T_INT, NULL);
2797     int_args.compute_calling_conventions();
2798     _int_arg_handler = create_adapter(int_arg_blob, int_args, true);
2799 
2800     CompiledEntrySignature obj_int_args;
2801     SigEntry::add_entry(&obj_int_args.sig(), T_OBJECT, NULL);
2802     SigEntry::add_entry(&obj_int_args.sig(), T_INT, NULL);
2803     obj_int_args.compute_calling_conventions();
2804     _obj_int_arg_handler = create_adapter(obj_int_arg_blob, obj_int_args, true);
2805 
2806     CompiledEntrySignature obj_obj_args;
2807     SigEntry::add_entry(&obj_obj_args.sig(), T_OBJECT, NULL);
2808     SigEntry::add_entry(&obj_obj_args.sig(), T_OBJECT, NULL);
2809     obj_obj_args.compute_calling_conventions();
2810     _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, obj_obj_args, true);
2811 
2812     assert(no_arg_blob != NULL &&
2813           obj_arg_blob != NULL &&
2814           int_arg_blob != NULL &&
2815           obj_int_arg_blob != NULL &&
2816           obj_obj_arg_blob != NULL, "Initial adapters must be properly created");
2817   }
2819 
2820   // Outside of the lock
2821   post_adapter_creation(no_arg_blob, _no_arg_handler);
2822   post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2823   post_adapter_creation(int_arg_blob, _int_arg_handler);
2824   post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2825   post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2826 }
2827 
2828 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2829                                                       address i2c_entry,
2830                                                       address c2i_entry,
2831                                                       address c2i_inline_entry,
2832                                                       address c2i_inline_ro_entry,
2833                                                       address c2i_unverified_entry,
2834                                                       address c2i_unverified_inline_entry,
2835                                                       address c2i_no_clinit_check_entry) {
2836   return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry,
2837                               c2i_unverified_inline_entry, c2i_no_clinit_check_entry);

2838 }
2839 
2840 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2841   if (method->is_abstract()) {
2842     return NULL;
2843   }
2844   int total_args_passed = method->size_of_parameters(); // All args on stack
2845   if (total_args_passed == 0) {
2846     return _no_arg_handler;
2847   } else if (total_args_passed == 1) {
2848     if (!method->is_static()) {
2849       if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2850         return NULL;
2851       }
2852       return _obj_arg_handler;
2853     }
2854     switch (method->signature()->char_at(1)) {
2855       case JVM_SIGNATURE_CLASS: {
2856         if (InlineTypePassFieldsAsArgs) {
2857           SignatureStream ss(method->signature());
2858           InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2859           if (vk != NULL) {
2860             return NULL;
2861           }
2862         }
2863         return _obj_arg_handler;
2864       }
2865       case JVM_SIGNATURE_ARRAY:
2866         return _obj_arg_handler;
2867       case JVM_SIGNATURE_INT:
2868       case JVM_SIGNATURE_BOOLEAN:
2869       case JVM_SIGNATURE_CHAR:
2870       case JVM_SIGNATURE_BYTE:
2871       case JVM_SIGNATURE_SHORT:
2872         return _int_arg_handler;
2873     }
2874   } else if (total_args_passed == 2 &&
2875              !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2876     switch (method->signature()->char_at(1)) {
2877       case JVM_SIGNATURE_CLASS: {
2878         if (InlineTypePassFieldsAsArgs) {
2879           SignatureStream ss(method->signature());
2880           InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2881           if (vk != NULL) {
2882             return NULL;
2883           }
2884         }
2885         return _obj_obj_arg_handler;
2886       }
2887       case JVM_SIGNATURE_ARRAY:
2888         return _obj_obj_arg_handler;
2889       case JVM_SIGNATURE_INT:
2890       case JVM_SIGNATURE_BOOLEAN:
2891       case JVM_SIGNATURE_CHAR:
2892       case JVM_SIGNATURE_BYTE:
2893       case JVM_SIGNATURE_SHORT:
2894         return _obj_int_arg_handler;
2895     }
2896   }
2897   return NULL;
2898 }
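// For example: String::length() passes only the receiver and maps to
// _obj_arg_handler, a static int hash(int) maps to _int_arg_handler, and an
// instance method taking a single int maps to _obj_int_arg_handler.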
2899 
2900 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2901   _method(method), _num_inline_args(0), _has_inline_recv(false),
2902   _regs(NULL), _regs_cc(NULL), _regs_cc_ro(NULL),
2903   _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2904   _c1_needs_stack_repair(false), _c2_needs_stack_repair(false) {
2905   _sig = new GrowableArray<SigEntry>((method != NULL) ? method->size_of_parameters() : 1);
2906   _sig_cc = new GrowableArray<SigEntry>((method != NULL) ? method->size_of_parameters() : 1);
2907   _sig_cc_ro = new GrowableArray<SigEntry>((method != NULL) ? method->size_of_parameters() : 1);
2908 }
2909 
2910 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2911 // or the same entry for VEP and VIEP(RO).
2912 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2913   if (!has_scalarized_args()) {
2914     // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2915     return CodeOffsets::Verified_Entry;
2916   }
2917   if (_method->is_static()) {
2918     // Static methods don't need VIEP(RO)
2919     return CodeOffsets::Verified_Entry;



2920   }
2921 
2922   if (has_inline_recv()) {
2923     if (num_inline_args() == 1) {
2924       // Share same entry for VIEP and VIEP(RO).
2925       // This is quite common: we have an instance method in an InlineKlass that has
2926       // no inline type args other than <this>.
2927       return CodeOffsets::Verified_Inline_Entry;
2928     } else {
2929       assert(num_inline_args() > 1, "must be");
2930       // No sharing:
2931       //   VIEP(RO) -- <this> is passed as object
2932       //   VEP      -- <this> is passed as fields
2933       return CodeOffsets::Verified_Inline_Entry_RO;
2934     }
2935   }
2936 
2937   // Either a static method, or <this> is not an inline type
2938   if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2939     // No sharing:
2940     // Some arguments are passed on the stack, and we have inserted reserved entries
2941     // into the VEP, but we never insert reserved entries into the VIEP(RO).
2942     return CodeOffsets::Verified_Inline_Entry_RO;
2943   } else {
2944     // Share same entry for VEP and VIEP(RO).
2945     return CodeOffsets::Verified_Entry;
2946   }
2947 }
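// In short: an inline-klass instance method whose only inline-type argument
// is <this> shares one entry for VIEP and VIEP(RO); with more inline-type
// args the RO entry stays separate; otherwise VEP and VIEP(RO) share unless
// reserved stack entries make the two conventions differ.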
2948 
2949 void CompiledEntrySignature::compute_calling_conventions(bool init) {
2950   // Iterate over arguments and compute scalarized and non-scalarized signatures
2951   bool has_scalarized = false;
2952   if (_method != NULL) {
2953     InstanceKlass* holder = _method->method_holder();
2954     int arg_num = 0;
2955     if (!_method->is_static()) {
2956       if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() &&
2957           (init || _method->is_scalarized_arg(arg_num))) {
2958         _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
2959         has_scalarized = true;
2960         _has_inline_recv = true;
2961         _num_inline_args++;
2962       } else {
2963         SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
2964       }
2965       SigEntry::add_entry(_sig, T_OBJECT, holder->name());
2966       SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
2967       arg_num++;
2968     }
2969     for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
2970       BasicType bt = ss.type();
2971       if (bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) {
2972         InlineKlass* vk = ss.as_inline_klass(holder);
2973         // TODO 8284443 Mismatch handling, we need to check parent method args (look at klassVtable::needs_new_vtable_entry)
2974         if (vk != NULL && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
2975           _num_inline_args++;
2976           has_scalarized = true;
2977           int last = _sig_cc->length();
2978           int last_ro = _sig_cc_ro->length();
2979           _sig_cc->appendAll(vk->extended_sig());
2980           _sig_cc_ro->appendAll(vk->extended_sig());
2981           if (bt == T_OBJECT) {
2982             // Nullable inline type argument, insert InlineTypeBaseNode::IsInit field right after T_PRIMITIVE_OBJECT
2983             _sig_cc->insert_before(last+1, SigEntry(T_BOOLEAN, -1, NULL));
2984             _sig_cc_ro->insert_before(last_ro+1, SigEntry(T_BOOLEAN, -1, NULL));
2985           }
2986         } else {
2987           SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
2988           SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
2989         }
2990         bt = T_OBJECT;
2991       } else {
2992         SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
2993         SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
2994       }
2995       SigEntry::add_entry(_sig, bt, ss.as_symbol());
2996       if (bt != T_VOID) {
2997         arg_num++;
2998       }
2999     }
3000   }

3001 
3002   // Compute the non-scalarized calling convention
3003   _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3004   _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3005 
3006   // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3007   if (has_scalarized && !_method->is_native()) {
3008     _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3009     _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3010 
3011     _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3012     _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3013 
3014     _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3015     _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3016 
3017     // Upper bound on stack arguments to avoid hitting the argument limit and
3018     // bailing out of compilation ("unsupported incoming calling sequence").
3019     // TODO we need a reasonable limit (flag?) here
3020     if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 60) {
3021       return; // Success
3022     }
3023   }
3024 
3025   // No scalarized args
3026   _sig_cc = _sig;
3027   _regs_cc = _regs;
3028   _args_on_stack_cc = _args_on_stack;
3029 
3030   _sig_cc_ro = _sig;
3031   _regs_cc_ro = _regs;
3032   _args_on_stack_cc_ro = _args_on_stack;
3033 }
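// An illustrative sketch of the result: for an instance method sum(Point p)
// where Point is an inline class that can be passed as fields, _sig remains
// [this, Point] while _sig_cc replaces the Point entry with the klass's
// extended signature (its scalarized fields plus delimiters, and an IsInit
// entry when the argument is nullable); _sig_cc_ro additionally keeps an
// inline-type receiver as a plain object.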
3034 
3035 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
3036   // Use customized signature handler. We need to lock around updates to
3037   // the _adapter_handler_table (it is not safe for concurrent readers
3038   // running alongside a single writer; this could be fixed if it becomes
3039   // a problem).
3040 
3041   // Fast-path for trivial adapters
3042   AdapterHandlerEntry* entry = get_simple_adapter(method);
3043   if (entry != NULL) {
3044     return entry;
3045   }
3046 
3047   ResourceMark rm;
3048   AdapterBlob* new_adapter = NULL;
3049 
3050   CompiledEntrySignature ces(method());
3051   ces.compute_calling_conventions();
3052   if (ces.has_scalarized_args()) {
3053     method->set_has_scalarized_args(true);
3054     method->set_c1_needs_stack_repair(ces.c1_needs_stack_repair());
3055     method->set_c2_needs_stack_repair(ces.c2_needs_stack_repair());
3056   } else if (method->is_abstract()) {
3057     return _abstract_method_handler;
3058   }
3059 




3060   {
3061     MutexLocker mu(AdapterHandlerLibrary_lock);
3062 
3063     if (ces.has_scalarized_args() && method->is_abstract()) {
3064       // Save a C heap allocated version of the signature for abstract methods with scalarized inline type arguments
3065       address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
3066       entry = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(NULL),
3067                                                StubRoutines::throw_AbstractMethodError_entry(),
3068                                                wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
3069                                                wrong_method_abstract, wrong_method_abstract);
3070       GrowableArray<SigEntry>* heap_sig = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro().length(), mtInternal);
3071       heap_sig->appendAll(&ces.sig_cc_ro());
3072       entry->set_sig_cc(heap_sig);
3073       return entry;
3074     }
3075 
3076     // Lookup method signature's fingerprint
3077     entry = lookup(&ces.sig_cc(), ces.has_inline_recv());
3078 
3079     if (entry != NULL) {
3080 #ifdef ASSERT
3081       if (VerifyAdapterSharing) {
3082         AdapterBlob* comparison_blob = NULL;
3083         AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, ces, false);
3084         assert(comparison_blob == NULL, "no blob should be created when creating an adapter for comparison");
3085         assert(comparison_entry->compare_code(entry), "code must match");
3086         // Release the one just created and return the original
3087         delete comparison_entry;
3088       }
3089 #endif
3090       return entry;
3091     }
3092 
3093     entry = create_adapter(new_adapter, ces, /* allocate_code_blob */ true);
3094   }
3095 
3096   // Outside of the lock
3097   if (new_adapter != NULL) {
3098     post_adapter_creation(new_adapter, entry);
3099   }
3100   return entry;
3101 }
3102 
3103 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
3104                                                            CompiledEntrySignature& ces,

3105                                                            bool allocate_code_blob) {
3106 
3107   // StubRoutines::code2() is initialized after this function can be called. As a result,
3108   // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated
3109   // prior to StubRoutines::code2() being set. Checks refer to checks generated in an I2C
3110   // stub that ensure that an I2C stub is called from an interpreter frame.
3111   bool contains_all_checks = StubRoutines::code2() != NULL;
3112 





3113   BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3114   CodeBuffer buffer(buf);
3115   short buffer_locs[20];
3116   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3117                                           sizeof(buffer_locs)/sizeof(relocInfo));
3118 
3119   // Make a C heap allocated version of the fingerprint to store in the adapter
3120   AdapterFingerPrint* fingerprint = new AdapterFingerPrint(&ces.sig_cc(), ces.has_inline_recv());
3121   MacroAssembler _masm(&buffer);
3122   AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
3123                                                 ces.args_on_stack(),
3124                                                 &ces.sig(),
3125                                                 ces.regs(),
3126                                                 &ces.sig_cc(),
3127                                                 ces.regs_cc(),
3128                                                 &ces.sig_cc_ro(),
3129                                                 ces.regs_cc_ro(),
3130                                                 fingerprint,
3131                                                 new_adapter,
3132                                                 allocate_code_blob);
3133 
3134   if (ces.has_scalarized_args()) {
3135     // Save a C heap allocated version of the scalarized signature and store it in the adapter
3136     GrowableArray<SigEntry>* heap_sig = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<SigEntry>(ces.sig_cc().length(), mtInternal);
3137     heap_sig->appendAll(&ces.sig_cc());
3138     entry->set_sig_cc(heap_sig);
3139   }
3140 
3141 #ifdef ASSERT
3142   if (VerifyAdapterSharing) {
3143     entry->save_code(buf->code_begin(), buffer.insts_size());
3144     if (!allocate_code_blob) {
3145       return entry;
3146     }
3147   }
3148 #endif
3149 

3150   NOT_PRODUCT(int insts_size = buffer.insts_size());
3151   if (new_adapter == NULL) {
3152     // CodeCache is full, disable compilation
3153     // Ought to log this but compile log is only per compile thread
3154     // and we're some nondescript Java thread.
3155     return NULL;
3156   }
3157   entry->relocate(new_adapter->content_begin());
3158 #ifndef PRODUCT
3159   // debugging support
3160   if (PrintAdapterHandlers || PrintStubCode) {
3161     ttyLocker ttyl;
3162     entry->print_adapter_on(tty);
3163     tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
3164                   _adapter_handler_table.number_of_entries(), fingerprint->as_basic_args_string(),
3165                   fingerprint->as_string(), insts_size);
3166     tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
3167     if (Verbose || PrintStubCode) {
3168       address first_pc = entry->base_address();
3169       if (first_pc != NULL) {

3171                              NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
3172         tty->cr();
3173       }
3174     }
3175   }
3176 #endif
3177 
3178   // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
3179   // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
3180   if (contains_all_checks || !VerifyAdapterCalls) {
3181     assert_lock_strong(AdapterHandlerLibrary_lock);
3182     _adapter_handler_table.put(fingerprint, entry);
3183   }
3184   return entry;
3185 }
3186 
3187 address AdapterHandlerEntry::base_address() {
3188   address base = _i2c_entry;
3189   if (base == NULL)  base = _c2i_entry;
3190   assert(base <= _c2i_entry || _c2i_entry == NULL, "");
3191   assert(base <= _c2i_inline_entry || _c2i_inline_entry == NULL, "");
3192   assert(base <= _c2i_inline_ro_entry || _c2i_inline_ro_entry == NULL, "");
3193   assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, "");
3194   assert(base <= _c2i_unverified_inline_entry || _c2i_unverified_inline_entry == NULL, "");
3195   assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == NULL, "");
3196   return base;
3197 }
3198 
3199 void AdapterHandlerEntry::relocate(address new_base) {
3200   address old_base = base_address();
3201   assert(old_base != NULL, "");
3202   ptrdiff_t delta = new_base - old_base;
3203   if (_i2c_entry != NULL)
3204     _i2c_entry += delta;
3205   if (_c2i_entry != NULL)
3206     _c2i_entry += delta;
3207   if (_c2i_inline_entry != NULL)
3208     _c2i_inline_entry += delta;
3209   if (_c2i_inline_ro_entry != NULL)
3210     _c2i_inline_ro_entry += delta;
3211   if (_c2i_unverified_entry != NULL)
3212     _c2i_unverified_entry += delta;
3213   if (_c2i_unverified_inline_entry != NULL)
3214     _c2i_unverified_inline_entry += delta;
3215   if (_c2i_no_clinit_check_entry != NULL)
3216     _c2i_no_clinit_check_entry += delta;
3217   assert(base_address() == new_base, "");
3218 }
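// For example, if the adapter was generated with _i2c_entry at 0x1000 in the
// temporary buffer and is copied to an AdapterBlob whose content begins
// 0x4000 bytes higher, every non-NULL entry above is shifted by that delta.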
3219 
3220 
3221 AdapterHandlerEntry::~AdapterHandlerEntry() {
3222   delete _fingerprint;
3223   if (_sig_cc != NULL) {
3224     delete _sig_cc;
3225   }
3226 #ifdef ASSERT
3227   FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3228 #endif
3229 }
3230 
3231 
3232 #ifdef ASSERT
3233 // Capture the code before relocation so that it can be compared
3234 // against other versions.  If the code is captured after relocation
3235 // then relative instructions won't be equivalent.
3236 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3237   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3238   _saved_code_length = length;
3239   memcpy(_saved_code, buffer, length);
3240 }
3241 
3242 
3243 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3244   assert(_saved_code != NULL && other->_saved_code != NULL, "code not saved");
3245 

3288 
3289       if (method->is_continuation_enter_intrinsic()) {
3290         buffer.initialize_stubs_size(128);
3291       }
3292 
3293       struct { double data[20]; } locs_buf;
3294       struct { double data[20]; } stubs_locs_buf;
3295       buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3296 #if defined(AARCH64)
3297       // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3298       // in the constant pool to ensure ordering between the barrier and oops
3299       // accesses. For native_wrappers we need a constant.
3300       buffer.initialize_consts_size(8);
3301 #endif
3302       buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3303       MacroAssembler _masm(&buffer);
3304 
3305       // Fill in the signature array, for the calling-convention call.
3306       const int total_args_passed = method->size_of_parameters();
3307 
3308       BasicType stack_sig_bt[16];
3309       VMRegPair stack_regs[16];
3310       BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3311       VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3312 
3313       int i = 0;
3314       if (!method->is_static()) {  // Pass in receiver first
3315         sig_bt[i++] = T_OBJECT;
3316       }
3317       SignatureStream ss(method->signature());
3318       for (; !ss.at_return_type(); ss.next()) {
3319         sig_bt[i++] = ss.type();  // Collect remaining bits of signature
3320         if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3321           sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
3322         }
3323       }
3324       assert(i == total_args_passed, "");
3325       BasicType ret_type = ss.type();
3326 
3327       // Now get the compiled-Java arguments layout.
3328       int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3329 
3330       // Generate the compiled-to-native wrapper code
3331       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3332 
3333       if (nm != NULL) {
3334         {
3335           MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
3336           if (nm->make_in_use()) {
3337             method->set_code(method, nm);
3338           }
3339         }
3340 
3341         DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
3342         if (directive->PrintAssemblyOption) {
3343           nm->print_code();
3344         }
3345         DirectivesStack::release(directive);

3540       st->print("Adapter for signature: ");
3541       a->print_adapter_on(st);
3542       return true;
3543     } else {
3544       return false; // keep looking
3545     }
3546   };
3547   assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3548   _adapter_handler_table.iterate(findblob);
3549   assert(found, "Should have found handler");
3550 }
3551 
3552 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3553   st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3554   if (get_i2c_entry() != NULL) {
3555     st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3556   }
3557   if (get_c2i_entry() != NULL) {
3558     st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3559   }
3560   if (get_c2i_inline_entry() != NULL) {
3561     st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
3562   }
3563   if (get_c2i_inline_ro_entry() != NULL) {
3564     st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
3565   }
3566   if (get_c2i_unverified_entry() != NULL) {
3567     st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3568   }
3569   if (get_c2i_unverified_inline_entry() != NULL) {
3570     st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
3571   }
3572   if (get_c2i_no_clinit_check_entry() != NULL) {
3573     st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3574   }
3575   st->cr();
3576 }
3577 
3578 #ifndef PRODUCT
3579 
3580 void AdapterHandlerLibrary::print_statistics() {
3581   print_table_statistics();
3582 }
3583 
3584 #endif /* PRODUCT */
3585 
3586 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3587   StackOverflow* overflow_state = current->stack_overflow_state();
3588   overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3589   overflow_state->set_reserved_stack_activation(current->stack_base());
3590 JRT_END

3638         event.set_method(method);
3639         event.commit();
3640       }
3641     }
3642   }
3643   return activation;
3644 }
3645 
3646 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3647   // After any safepoint, just before going back to compiled code,
3648   // we inform the GC that we will be doing initializing writes to
3649   // this object in the future without emitting card-marks, so
3650   // GC may take any compensating steps.
3651 
3652   oop new_obj = current->vm_result();
3653   if (new_obj == NULL) return;
3654 
3655   BarrierSet *bs = BarrierSet::barrier_set();
3656   bs->on_slowpath_allocation_exit(current, new_obj);
3657 }
3658 
3659 // We are at a call from compiled code to the interpreter. We need backing
3660 // buffers for all inline type arguments. Allocate an object array to
3661 // hold them (convenient because once we're done with it we don't have
3662 // to worry about freeing it).
3663 oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS) {
3664   assert(InlineTypePassFieldsAsArgs, "no reason to call this");
3665   ResourceMark rm;
3666 
3667   int nb_slots = 0;
3668   InstanceKlass* holder = callee->method_holder();
3669   allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
3670   if (allocate_receiver) {
3671     nb_slots++;
3672   }
3673   int arg_num = callee->is_static() ? 0 : 1;
3674   for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3675     BasicType bt = ss.type();
3676     if ((bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) && callee->is_scalarized_arg(arg_num)) {
3677       nb_slots++;
3678     }
3679     if (bt != T_VOID) {
3680       arg_num++;
3681     }
3682   }
3683   objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
3684   objArrayHandle array(THREAD, array_oop);
3685   arg_num = callee->is_static() ? 0 : 1;
3686   int i = 0;
3687   if (allocate_receiver) {
3688     InlineKlass* vk = InlineKlass::cast(holder);
3689     oop res = vk->allocate_instance(CHECK_NULL);
3690     array->obj_at_put(i++, res);
3691   }
3692   for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
3693     BasicType bt = ss.type();
3694     if ((bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) && callee->is_scalarized_arg(arg_num)) {
3695       InlineKlass* vk = ss.as_inline_klass(holder);
3696       assert(vk != NULL, "Unexpected klass");
3697       oop res = vk->allocate_instance(CHECK_NULL);
3698       array->obj_at_put(i++, res);
3699     }
3700     if (bt != T_VOID) {
3701       arg_num++;
3702     }
3703   }
3704   return array();
3705 }
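// For example: for a scalarized instance method m(MyValue v, int i, MyValue w)
// on an inline klass, with the receiver also scalarized, nb_slots is 3 and
// the array holds pre-allocated buffers for [receiver, v, w], in that order.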
3706 
3707 JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
3708   methodHandle callee(current, callee_method);
3709   oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, CHECK);
3710   current->set_vm_result(array);
3711   current->set_vm_result_2(callee()); // TODO: required to keep callee live?
3712 JRT_END
3713 
3714 // We're returning from an interpreted method: load each field into a
3715 // register following the calling convention
3716 JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
3717 {
3718   assert(res->klass()->is_inline_klass(), "only inline types here");
3719   ResourceMark rm;
3720   RegisterMap reg_map(current,
3721                       RegisterMap::UpdateMap::include,
3722                       RegisterMap::ProcessFrames::include,
3723                       RegisterMap::WalkContinuation::skip);
3724   frame stubFrame = current->last_frame();
3725   frame callerFrame = stubFrame.sender(&reg_map);
3726   assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");
3727 
3728   InlineKlass* vk = InlineKlass::cast(res->klass());
3729 
3730   const Array<SigEntry>* sig_vk = vk->extended_sig();
3731   const Array<VMRegPair>* regs = vk->return_regs();
3732 
3733   if (regs == NULL) {
3734     // The fields of the inline klass don't fit in registers, bail out
3735     return;
3736   }
3737 
3738   int j = 1;
3739   for (int i = 0; i < sig_vk->length(); i++) {
3740     BasicType bt = sig_vk->at(i)._bt;
3741     if (bt == T_PRIMITIVE_OBJECT) {
3742       continue;
3743     }
3744     if (bt == T_VOID) {
3745       if (sig_vk->at(i-1)._bt == T_LONG ||
3746           sig_vk->at(i-1)._bt == T_DOUBLE) {
3747         j++;
3748       }
3749       continue;
3750     }
3751     int off = sig_vk->at(i)._offset;
3752     assert(off > 0, "offset in object should be positive");
3753     VMRegPair pair = regs->at(j);
3754     address loc = reg_map.location(pair.first(), nullptr);
3755     switch(bt) {
3756     case T_BOOLEAN:
3757       *(jboolean*)loc = res->bool_field(off);
3758       break;
3759     case T_CHAR:
3760       *(jchar*)loc = res->char_field(off);
3761       break;
3762     case T_BYTE:
3763       *(jbyte*)loc = res->byte_field(off);
3764       break;
3765     case T_SHORT:
3766       *(jshort*)loc = res->short_field(off);
3767       break;
3768     case T_INT: {
3769       *(jint*)loc = res->int_field(off);
3770       break;
3771     }
3772     case T_LONG:
3773 #ifdef _LP64
3774       *(intptr_t*)loc = res->long_field(off);
3775 #else
3776       Unimplemented();
3777 #endif
3778       break;
3779     case T_OBJECT:
3780     case T_ARRAY: {
3781       *(oop*)loc = res->obj_field(off);
3782       break;
3783     }
3784     case T_FLOAT:
3785       *(jfloat*)loc = res->float_field(off);
3786       break;
3787     case T_DOUBLE:
3788       *(jdouble*)loc = res->double_field(off);
3789       break;
3790     default:
3791       ShouldNotReachHere();
3792     }
3793     j++;
3794   }
3795   assert(j == regs->length(), "missed a field?");
3796 
3797 #ifdef ASSERT
3798   VMRegPair pair = regs->at(0);
3799   address loc = reg_map.location(pair.first(), nullptr);
3800   assert(*(oopDesc**)loc == res, "overwritten object");
3801 #endif
3802 
3803   current->set_vm_result(res);
3804 }
3805 JRT_END
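// Note on the convention used above: regs->at(0) holds the oop of the inline
// object itself (checked by the assert), field values start at regs->at(1),
// and the T_VOID entry following a long or double consumes a VMRegPair of
// its own, hence the extra j++.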
3806 
3807 // We've returned to an interpreted method; the interpreter needs a
3808 // reference to an inline type instance. Allocate it and initialize it
3809 // from the field values in registers.
3810 JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
3811 {
3812   ResourceMark rm;
3813   RegisterMap reg_map(current,
3814                       RegisterMap::UpdateMap::include,
3815                       RegisterMap::ProcessFrames::include,
3816                       RegisterMap::WalkContinuation::skip);
3817   frame stubFrame = current->last_frame();
3818   frame callerFrame = stubFrame.sender(&reg_map);
3819 
3820 #ifdef ASSERT
3821   InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
3822 #endif
3823 
3824   if (!is_set_nth_bit(res, 0)) {
3825     // We're not returning with inline type fields in registers (the
3826     // calling convention didn't allow it for this inline klass)
3827     assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
3828     current->set_vm_result((oopDesc*)res);
3829     assert(verif_vk == NULL, "broken calling convention");
3830     return;
3831   }
3832 
3833   clear_nth_bit(res, 0);
3834   InlineKlass* vk = (InlineKlass*)res;
3835   assert(verif_vk == vk, "broken calling convention");
3836   assert(Metaspace::contains((void*)res), "should be klass");
3837 
3838   // Allocate handles for every oop field so they are safe in case of
3839   // a safepoint when allocating
3840   GrowableArray<Handle> handles;
3841   vk->save_oop_fields(reg_map, handles);
3842 
3843   // It's unsafe to safepoint until we are here
3844   JRT_BLOCK;
3845   {
3846     JavaThread* THREAD = current;
3847     oop vt = vk->realloc_result(reg_map, handles, CHECK);
3848     current->set_vm_result(vt);
3849   }
3850   JRT_BLOCK_END;
3851 }
3852 JRT_END
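// Tagging protocol used above: when the fields are returned in registers the
// callee returns the InlineKlass* with bit 0 set; otherwise it returns a
// plain oop (or a pointer into the buffer area). Clearing bit 0 recovers the
// klass that describes which registers hold which fields.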
3853 
< prev index next >