< prev index next > src/hotspot/cpu/x86/methodHandles_x86.cpp
Print this page
jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
BLOCK_COMMENT("} jump_to_lambda_form");
}
+ // Tail-jump to the native invoker stub referenced by a NativeEntryPoint
+ // (jdk.internal.invoke.NativeEntryPoint) object, used for the
+ // linkToNative intrinsic path.
+ //   nep_reg:     the NativeEntryPoint oop; must not be noreg
+ //   temp_target: scratch register, distinct from nep_reg; receives the
+ //                invoker entry address and is clobbered by the jump
+ void MethodHandles::jump_to_native_invoker(MacroAssembler* _masm, Register nep_reg, Register temp_target) {
+ BLOCK_COMMENT("jump_to_native_invoker {");
+ assert_different_registers(nep_reg, temp_target);
+ assert(nep_reg != noreg, "required register");
+
+ // Load the invoker, as NEP -> .invoker
+ // (address-sized field, loaded via the barrier-aware access helper)
+ __ verify_oop(nep_reg);
+ __ access_load_at(T_ADDRESS, IN_HEAP, temp_target,
+ Address(nep_reg, NONZERO(jdk_internal_invoke_NativeEntryPoint::invoker_offset_in_bytes())),
+ noreg, noreg);
+
+ // Jump (not call) into the invoker stub — control does not return here.
+ __ jmp(temp_target);
+ BLOCK_COMMENT("} jump_to_native_invoker");
+ }
+
// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
vmIntrinsics::ID iid) {
const bool not_for_compiler_entry = false; // this is the interpreter entry
#ifdef _LP64
Register temp1 = rscratch1;
Register temp2 = rscratch2;
Register temp3 = rax;
if (for_compiler_entry) {
- assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
+ assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic || iid == vmIntrinsics::_linkToNative ? noreg : j_rarg0), "only valid assignment");
assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
}
#else
Register temp1 = (for_compiler_entry ? rsi : rdx);
Register temp2 = rdi;
Register temp3 = rax;
if (for_compiler_entry) {
- assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment");
+ assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic || iid == vmIntrinsics::_linkToNative ? noreg : rcx), "only valid assignment");
assert_different_registers(temp1, rcx, rdx);
assert_different_registers(temp2, rcx, rdx);
assert_different_registers(temp3, rcx, rdx);
}
#endif
assert_different_registers(temp1, temp2, temp3, saved_last_sp_register()); // don't trash lastSP
}
assert_different_registers(temp1, temp2, temp3, receiver_reg);
assert_different_registers(temp1, temp2, temp3, member_reg);
- if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
- if (iid == vmIntrinsics::_linkToNative) {
- assert(for_compiler_entry, "only compiler entry is supported");
- }
+ if (iid == vmIntrinsics::_invokeBasic) {
// indirect through MH.form.vmentry.vmtarget
jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry);
-
+ } else if (iid == vmIntrinsics::_linkToNative) {
+ assert(for_compiler_entry, "only compiler entry is supported");
+ jump_to_native_invoker(_masm, member_reg, temp1);
} else {
// The method is a member invoker used by direct method handles.
if (VerifyMethodHandles) {
// make sure the trailing argument really is a MemberName (caller responsibility)
verify_klass(_masm, member_reg, VM_CLASS_ID(java_lang_invoke_MemberName),
< prev index next >