src/hotspot/share/runtime/sharedRuntime.cpp

*** 1,7 **** /* ! * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. --- 1,7 ---- /* ! * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation.
*** 42,57 **** --- 42,62 ---- #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "jfr/jfrEvents.hpp" #include "logging/log.hpp" #include "memory/metaspaceShared.hpp" + #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" + #include "oops/access.hpp" + #include "oops/fieldStreams.hpp" #include "oops/klass.hpp" #include "oops/method.inline.hpp" #include "oops/objArrayKlass.hpp" + #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" + #include "oops/valueKlass.hpp" #include "prims/forte.hpp" #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" #include "prims/nativeLookup.hpp" #include "runtime/arguments.hpp"
*** 83,93 **** RuntimeStub* SharedRuntime::_wrong_method_abstract_blob; RuntimeStub* SharedRuntime::_ic_miss_blob; RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob; RuntimeStub* SharedRuntime::_resolve_virtual_call_blob; RuntimeStub* SharedRuntime::_resolve_static_call_blob; - address SharedRuntime::_resolve_static_call_entry; DeoptimizationBlob* SharedRuntime::_deopt_blob; SafepointBlob* SharedRuntime::_polling_page_vectors_safepoint_handler_blob; SafepointBlob* SharedRuntime::_polling_page_safepoint_handler_blob; SafepointBlob* SharedRuntime::_polling_page_return_handler_blob; --- 88,97 ----
*** 103,113 **** _wrong_method_abstract_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub"); _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss), "ic_miss_stub"); _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call"); _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call"); _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call"); - _resolve_static_call_entry = _resolve_static_call_blob->entry_point(); #if COMPILER2_OR_JVMCI // Vectors are generated only by C2 and JVMCI. bool support_wide = is_wide_vector(MaxVectorSize); if (support_wide) { --- 107,116 ----
*** 1085,1094 **** --- 1088,1103 ---- } break; default: break; } + } else { + assert(attached_method->has_scalarized_args(), "invalid use of attached method"); + if (!attached_method->method_holder()->is_value()) { + // Ignore the attached method in this case to not confuse below code + attached_method = NULL; + } } } assert(bc != Bytecodes::_illegal, "not initialized");
*** 1102,1124 **** // compiled frames. The receiver might be in a register. RegisterMap reg_map2(thread); frame stubFrame = thread->last_frame(); // Caller-frame is a compiled frame frame callerFrame = stubFrame.sender(&reg_map2); ! if (attached_method.is_null()) { ! methodHandle callee = bytecode.static_target(CHECK_NH); if (callee.is_null()) { THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle); } } ! // Retrieve from a compiled argument list ! receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2)); ! ! if (receiver.is_null()) { ! THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle); } } // Resolve method if (attached_method.not_null()) { --- 1111,1153 ---- // compiled frames. The receiver might be in a register. RegisterMap reg_map2(thread); frame stubFrame = thread->last_frame(); // Caller-frame is a compiled frame frame callerFrame = stubFrame.sender(&reg_map2); + bool caller_is_c1 = false; ! if (callerFrame.is_compiled_frame() && !callerFrame.is_deoptimized_frame()) { ! caller_is_c1 = callerFrame.cb()->is_compiled_by_c1(); ! } ! ! methodHandle callee = attached_method; ! if (callee.is_null()) { ! callee = bytecode.static_target(CHECK_NH); if (callee.is_null()) { THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle); } } + if (!caller_is_c1 && callee->has_scalarized_args() && callee->method_holder()->is_value()) { + // If the receiver is a value type that is passed as fields, no oop is available. + // Resolve the call without receiver null checking. + assert(!attached_method.is_null(), "must have attached method"); + if (bc == Bytecodes::_invokevirtual) { + LinkInfo link_info(attached_method->method_holder(), attached_method->name(), attached_method->signature()); + LinkResolver::resolve_virtual_call(callinfo, receiver, callee->method_holder(), link_info, /*check_null_and_abstract=*/ false, CHECK_NH); + } else { + assert(bc == Bytecodes::_invokeinterface, "anything else?"); + LinkInfo link_info(constantPoolHandle(THREAD, caller->constants()), bytecode_index, CHECK_NH); + LinkResolver::resolve_interface_call(callinfo, receiver, callee->method_holder(), link_info, /*check_null_and_abstract=*/ false, CHECK_NH); + } + return receiver; // is null + } else { + // Retrieve from a compiled argument list + receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2)); ! if (receiver.is_null()) { ! THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle); ! } } } // Resolve method if (attached_method.not_null()) {
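The receiver handling added in this hunk hinges on a single predicate: only a C2-compiled caller passes a scalarizable value-type receiver as fields, in which case there is no receiver oop to null-check and the call is resolved through the attached method instead. A minimal standalone sketch of that predicate (plain C++ with illustrative names, not HotSpot code):

    #include <cstdio>

    // Mirrors the condition guarding the "resolve without receiver null check" path above.
    static bool receiver_passed_as_fields(bool caller_is_c1,
                                          bool callee_has_scalarized_args,
                                          bool holder_is_value_klass) {
      return !caller_is_c1 && callee_has_scalarized_args && holder_is_value_klass;
    }

    int main() {
      // C2 caller invoking a method on a scalarizable value class: no receiver oop exists.
      std::printf("%d\n", receiver_passed_as_fields(false, true, true)); // prints 1
      // C1 callers always pass the receiver as an oop, so the normal null check applies.
      std::printf("%d\n", receiver_passed_as_fields(true, true, true));  // prints 0
      return 0;
    }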
*** 1190,1202 **** } // Resolves a call. methodHandle SharedRuntime::resolve_helper(JavaThread *thread, bool is_virtual, ! bool is_optimized, TRAPS) { methodHandle callee_method; ! callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD); if (JvmtiExport::can_hotswap_or_post_breakpoint()) { int retry_count = 0; while (!HAS_PENDING_EXCEPTION && callee_method->is_old() && callee_method->method_holder() != SystemDictionary::Object_klass()) { // If has a pending exception then there is no need to re-try to --- 1219,1232 ---- } // Resolves a call. methodHandle SharedRuntime::resolve_helper(JavaThread *thread, bool is_virtual, ! bool is_optimized, ! bool* caller_is_c1, TRAPS) { methodHandle callee_method; ! callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, caller_is_c1, THREAD); if (JvmtiExport::can_hotswap_or_post_breakpoint()) { int retry_count = 0; while (!HAS_PENDING_EXCEPTION && callee_method->is_old() && callee_method->method_holder() != SystemDictionary::Object_klass()) { // If has a pending exception then there is no need to re-try to
*** 1209,1219 **** // in the middle of resolve. If it is looping here more than 100 times // means then there could be a bug here. guarantee((retry_count++ < 100), "Could not resolve to latest version of redefined method"); // method is redefined in the middle of resolve so re-try. ! callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD); } } return callee_method; } --- 1239,1249 ---- // in the middle of resolve. If it is looping here more than 100 times // means then there could be a bug here. guarantee((retry_count++ < 100), "Could not resolve to latest version of redefined method"); // method is redefined in the middle of resolve so re-try. ! callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, caller_is_c1, THREAD); } } return callee_method; }
*** 1240,1260 **** #ifdef ASSERT address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below #endif bool is_nmethod = caller_nm->is_nmethod(); if (is_virtual) { ! assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check"); bool static_bound = call_info.resolved_method()->can_be_statically_bound(); ! Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass(); ! CompiledIC::compute_monomorphic_entry(callee_method, klass, ! is_optimized, static_bound, is_nmethod, virtual_call_info, CHECK_false); } else { // static call ! CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info); } // grab lock, check for deoptimization and potentially patch caller { CompiledICLocker ml(caller_nm); --- 1270,1297 ---- #ifdef ASSERT address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below #endif bool is_nmethod = caller_nm->is_nmethod(); + bool caller_is_c1 = caller_nm->is_compiled_by_c1(); if (is_virtual) { ! Klass* receiver_klass = NULL; ! if (ValueTypePassFieldsAsArgs && !caller_is_c1 && callee_method->method_holder()->is_value()) { ! // If the receiver is a value type that is passed as fields, no oop is available ! receiver_klass = callee_method->method_holder(); ! } else { ! assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check"); ! receiver_klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass(); ! } bool static_bound = call_info.resolved_method()->can_be_statically_bound(); ! CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass, ! is_optimized, static_bound, is_nmethod, caller_is_c1, virtual_call_info, CHECK_false); } else { // static call ! CompiledStaticCall::compute_entry(callee_method, caller_nm, static_call_info); } // grab lock, check for deoptimization and potentially patch caller { CompiledICLocker ml(caller_nm);
*** 1301,1325 **** // Resolves a call. The compilers generate code for calls that go here // and are patched with the real destination of the call. methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread, bool is_virtual, ! bool is_optimized, TRAPS) { ResourceMark rm(thread); RegisterMap cbl_map(thread, false); frame caller_frame = thread->last_frame().sender(&cbl_map); CodeBlob* caller_cb = caller_frame.cb(); guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method"); CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null(); // make sure caller is not getting deoptimized // and removed before we are done with it. // CLEANUP - with lazy deopt shouldn't need this lock nmethodLocker caller_lock(caller_nm); // determine call info & receiver // note: a) receiver is NULL for static calls // b) an exception is thrown if receiver is NULL for non-static calls CallInfo call_info; Bytecodes::Code invoke_code = Bytecodes::_illegal; --- 1338,1374 ---- // Resolves a call. The compilers generate code for calls that go here // and are patched with the real destination of the call. methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread, bool is_virtual, ! bool is_optimized, ! bool* caller_is_c1, TRAPS) { ResourceMark rm(thread); RegisterMap cbl_map(thread, false); frame caller_frame = thread->last_frame().sender(&cbl_map); CodeBlob* caller_cb = caller_frame.cb(); guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method"); CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null(); + *caller_is_c1 = caller_nm->is_compiled_by_c1(); // make sure caller is not getting deoptimized // and removed before we are done with it. // CLEANUP - with lazy deopt shouldn't need this lock nmethodLocker caller_lock(caller_nm); + if (!is_virtual && !is_optimized) { + SimpleScopeDesc ssd(caller_nm, caller_frame.pc()); + Bytecode bc(ssd.method(), ssd.method()->bcp_from(ssd.bci())); + // Substitutability test implementation piggy backs on static call resolution + if (bc.code() == Bytecodes::_if_acmpeq || bc.code() == Bytecodes::_if_acmpne) { + SystemDictionary::ValueBootstrapMethods_klass()->initialize(CHECK_NULL); + return SystemDictionary::ValueBootstrapMethods_klass()->find_method(vmSymbols::isSubstitutable_name(), vmSymbols::object_object_boolean_signature()); + } + } + // determine call info & receiver // note: a) receiver is NULL for static calls // b) an exception is thrown if receiver is NULL for non-static calls CallInfo call_info; Bytecodes::Code invoke_code = Bytecodes::_illegal;
*** 1411,1428 **** frame caller_frame = stub_frame.sender(&reg_map); assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame"); #endif /* ASSERT */ methodHandle callee_method; JRT_BLOCK ! callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL); // Return Method* through TLS thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints ! assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); ! return callee_method->verified_code_entry(); JRT_END // Handle call site that has been made non-entrant JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread)) --- 1460,1478 ---- frame caller_frame = stub_frame.sender(&reg_map); assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame"); #endif /* ASSERT */ methodHandle callee_method; + bool is_optimized = false; + bool caller_is_c1 = false; JRT_BLOCK ! callee_method = SharedRuntime::handle_ic_miss_helper(thread, is_optimized, caller_is_c1, CHECK_NULL); // Return Method* through TLS thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints ! return entry_for_handle_wrong_method(callee_method, false, is_optimized, caller_is_c1); JRT_END // Handle call site that has been made non-entrant JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
*** 1449,1466 **** return callee->get_c2i_entry(); } // Must be compiled to compiled path which is safe to stackwalk methodHandle callee_method; JRT_BLOCK // Force resolving of caller (if we called from compiled frame) ! callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL); thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints ! assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); ! return callee_method->verified_code_entry(); JRT_END // Handle abstract method call JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* thread)) // Verbose error message for AbstractMethodError. --- 1499,1518 ---- return callee->get_c2i_entry(); } // Must be compiled to compiled path which is safe to stackwalk methodHandle callee_method; + bool is_static_call = false; + bool is_optimized = false; + bool caller_is_c1 = false; JRT_BLOCK // Force resolving of caller (if we called from compiled frame) ! callee_method = SharedRuntime::reresolve_call_site(thread, is_static_call, is_optimized, caller_is_c1, CHECK_NULL); thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints ! return entry_for_handle_wrong_method(callee_method, is_static_call, is_optimized, caller_is_c1); JRT_END // Handle abstract method call JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* thread)) // Verbose error message for AbstractMethodError.
*** 1494,1558 **** // resolve a static call and patch code JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread )) methodHandle callee_method; JRT_BLOCK ! callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL); thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints ! assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); ! return callee_method->verified_code_entry(); JRT_END // resolve virtual call and update inline cache to monomorphic JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread )) methodHandle callee_method; JRT_BLOCK ! callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL); thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints ! assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); ! return callee_method->verified_code_entry(); JRT_END // Resolve a virtual call that can be statically bound (e.g., always // monomorphic, so it has no inline cache). Patch code to resolved target. JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread)) methodHandle callee_method; JRT_BLOCK ! callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL); thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints ! assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); ! return callee_method->verified_code_entry(); JRT_END // The handle_ic_miss_helper_internal function returns false if it failed due // to either running out of vtable stubs or ic stubs due to IC transitions // to transitional states. The needs_ic_stub_refill value will be set if // the failure was due to running out of IC stubs, in which case handle_ic_miss_helper // refills the IC stubs and tries again. bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm, const frame& caller_frame, methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info, ! bool& needs_ic_stub_refill, TRAPS) { CompiledICLocker ml(caller_nm); CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc()); bool should_be_mono = false; if (inline_cache->is_optimized()) { if (TraceCallFixup) { ResourceMark rm(THREAD); tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc)); callee_method->print_short_name(tty); tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code())); } should_be_mono = true; } else if (inline_cache->is_icholder_call()) { CompiledICHolder* ic_oop = inline_cache->cached_icholder(); if (ic_oop != NULL) { if (!ic_oop->is_loader_alive()) { --- 1546,1620 ---- // resolve a static call and patch code JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread )) methodHandle callee_method; + bool caller_is_c1; JRT_BLOCK ! callee_method = SharedRuntime::resolve_helper(thread, false, false, &caller_is_c1, CHECK_NULL); thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints ! address entry = caller_is_c1 ? ! callee_method->verified_value_code_entry() : callee_method->verified_code_entry(); ! assert(entry != NULL, "Jump to zero!"); ! 
return entry; JRT_END // resolve virtual call and update inline cache to monomorphic JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread )) methodHandle callee_method; + bool caller_is_c1; JRT_BLOCK ! callee_method = SharedRuntime::resolve_helper(thread, true, false, &caller_is_c1, CHECK_NULL); thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints ! address entry = caller_is_c1 ? ! callee_method->verified_value_code_entry() : callee_method->verified_value_ro_code_entry(); ! assert(entry != NULL, "Jump to zero!"); ! return entry; JRT_END // Resolve a virtual call that can be statically bound (e.g., always // monomorphic, so it has no inline cache). Patch code to resolved target. JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread)) methodHandle callee_method; + bool caller_is_c1; JRT_BLOCK ! callee_method = SharedRuntime::resolve_helper(thread, true, true, &caller_is_c1, CHECK_NULL); thread->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints ! address entry = caller_is_c1 ? ! callee_method->verified_value_code_entry() : callee_method->verified_code_entry(); ! assert(entry != NULL, "Jump to zero!"); ! return entry; JRT_END // The handle_ic_miss_helper_internal function returns false if it failed due // to either running out of vtable stubs or ic stubs due to IC transitions // to transitional states. The needs_ic_stub_refill value will be set if // the failure was due to running out of IC stubs, in which case handle_ic_miss_helper // refills the IC stubs and tries again. bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm, const frame& caller_frame, methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info, ! bool& needs_ic_stub_refill, bool& is_optimized, bool caller_is_c1, TRAPS) { CompiledICLocker ml(caller_nm); CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc()); bool should_be_mono = false; if (inline_cache->is_optimized()) { if (TraceCallFixup) { ResourceMark rm(THREAD); tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc)); callee_method->print_short_name(tty); tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code())); } + is_optimized = true; should_be_mono = true; } else if (inline_cache->is_icholder_call()) { CompiledICHolder* ic_oop = inline_cache->cached_icholder(); if (ic_oop != NULL) { if (!ic_oop->is_loader_alive()) {
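Taken together, the three resolvers above select one of three verified entry points depending on who the caller is and how the call is dispatched; the names below are a simplified stand-in for the CodeOffsets entries used elsewhere in this patch (Verified_Entry, Verified_Value_Entry, Verified_Value_Entry_RO), not HotSpot code:

    #include <cstdio>

    // VEP: fully scalarized entry; VVEP: all value arguments passed as oops (C1 callers);
    // VVEP_RO: receiver kept as an oop, remaining value arguments scalarized.
    enum Entry { VEP, VVEP, VVEP_RO };

    static Entry resolved_entry(bool is_virtual, bool is_optimized, bool caller_is_c1) {
      if (caller_is_c1)                return VVEP;    // resolve_*_call_C: verified_value_code_entry
      if (is_virtual && !is_optimized) return VVEP_RO; // inline-cached virtual: verified_value_ro_code_entry
      return VEP;                                      // static or optimized virtual: verified_code_entry
    }

    int main() {
      std::printf("%d %d %d\n",
                  resolved_entry(false, false, false),  // static call from C2   -> VEP
                  resolved_entry(true,  false, false),  // virtual call from C2  -> VVEP_RO
                  resolved_entry(true,  false, true));  // any call from C1      -> VVEP
      return 0;
    }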
*** 1586,1604 **** Klass* receiver_klass = receiver()->klass(); inline_cache->compute_monomorphic_entry(callee_method, receiver_klass, inline_cache->is_optimized(), false, caller_nm->is_nmethod(), info, CHECK_false); if (!inline_cache->set_to_monomorphic(info)) { needs_ic_stub_refill = true; return false; } } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) { // Potential change to megamorphic ! bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, CHECK_false); if (needs_ic_stub_refill) { return false; } if (!successful) { if (!inline_cache->set_to_clean()) { --- 1648,1667 ---- Klass* receiver_klass = receiver()->klass(); inline_cache->compute_monomorphic_entry(callee_method, receiver_klass, inline_cache->is_optimized(), false, caller_nm->is_nmethod(), + caller_nm->is_compiled_by_c1(), info, CHECK_false); if (!inline_cache->set_to_monomorphic(info)) { needs_ic_stub_refill = true; return false; } } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) { // Potential change to megamorphic ! bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, caller_is_c1, CHECK_false); if (needs_ic_stub_refill) { return false; } if (!successful) { if (!inline_cache->set_to_clean()) {
*** 1610,1620 **** // Either clean or megamorphic } return true; } ! methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) { ResourceMark rm(thread); CallInfo call_info; Bytecodes::Code bc; // receiver is NULL for static calls. An exception is thrown for NULL --- 1673,1683 ---- // Either clean or megamorphic } return true; } ! methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, bool& is_optimized, bool& caller_is_c1, TRAPS) { ResourceMark rm(thread); CallInfo call_info; Bytecodes::Code bc; // receiver is NULL for static calls. An exception is thrown for NULL
*** 1630,1640 **** // plain ic_miss) and the site will be converted to an optimized virtual call site // never to miss again. I don't believe C2 will produce code like this but if it // did this would still be the correct thing to do for it too, hence no ifdef. // if (call_info.resolved_method()->can_be_statically_bound()) { ! methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle())); if (TraceCallFixup) { RegisterMap reg_map(thread, false); frame caller_frame = thread->last_frame().sender(&reg_map); ResourceMark rm(thread); tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc)); --- 1693,1705 ---- // plain ic_miss) and the site will be converted to an optimized virtual call site // never to miss again. I don't believe C2 will produce code like this but if it // did this would still be the correct thing to do for it too, hence no ifdef. // if (call_info.resolved_method()->can_be_statically_bound()) { ! bool is_static_call = false; ! methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, is_static_call, is_optimized, caller_is_c1, CHECK_(methodHandle())); ! assert(!is_static_call, "IC miss at static call?"); if (TraceCallFixup) { RegisterMap reg_map(thread, false); frame caller_frame = thread->last_frame().sender(&reg_map); ResourceMark rm(thread); tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
*** 1680,1695 **** // that refills them. RegisterMap reg_map(thread, false); frame caller_frame = thread->last_frame().sender(&reg_map); CodeBlob* cb = caller_frame.cb(); CompiledMethod* caller_nm = cb->as_compiled_method(); for (;;) { ICRefillVerifier ic_refill_verifier; bool needs_ic_stub_refill = false; bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method, ! bc, call_info, needs_ic_stub_refill, CHECK_(methodHandle())); if (successful || !needs_ic_stub_refill) { return callee_method; } else { InlineCacheBuffer::refill_ic_stubs(); } --- 1745,1761 ---- // that refills them. RegisterMap reg_map(thread, false); frame caller_frame = thread->last_frame().sender(&reg_map); CodeBlob* cb = caller_frame.cb(); CompiledMethod* caller_nm = cb->as_compiled_method(); + caller_is_c1 = caller_nm->is_compiled_by_c1(); for (;;) { ICRefillVerifier ic_refill_verifier; bool needs_ic_stub_refill = false; bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method, ! bc, call_info, needs_ic_stub_refill, is_optimized, caller_is_c1, CHECK_(methodHandle())); if (successful || !needs_ic_stub_refill) { return callee_method; } else { InlineCacheBuffer::refill_ic_stubs(); }
*** 1717,1727 **** // Resets a call-site in compiled code so it will get resolved again. // This routines handles both virtual call sites, optimized virtual call // sites, and static call sites. Typically used to change a call sites // destination from compiled to interpreted. // ! methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) { ResourceMark rm(thread); RegisterMap reg_map(thread, false); frame stub_frame = thread->last_frame(); assert(stub_frame.is_runtime_frame(), "must be a runtimeStub"); frame caller = stub_frame.sender(&reg_map); --- 1783,1793 ---- // Resets a call-site in compiled code so it will get resolved again. // This routines handles both virtual call sites, optimized virtual call // sites, and static call sites. Typically used to change a call sites // destination from compiled to interpreted. // ! methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, bool& is_static_call, bool& is_optimized, bool& caller_is_c1, TRAPS) { ResourceMark rm(thread); RegisterMap reg_map(thread, false); frame stub_frame = thread->last_frame(); assert(stub_frame.is_runtime_frame(), "must be a runtimeStub"); frame caller = stub_frame.sender(&reg_map);
*** 1733,1744 **** if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) { address pc = caller.pc(); // Check for static or virtual call - bool is_static_call = false; CompiledMethod* caller_nm = CodeCache::find_compiled(pc); // Default call_addr is the location of the "basic" call. // Determine the address of the call we a reresolving. With // Inline Caches we will always find a recognizable call. // With Inline Caches disabled we may or may not find a --- 1799,1810 ---- if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) { address pc = caller.pc(); // Check for static or virtual call CompiledMethod* caller_nm = CodeCache::find_compiled(pc); + caller_is_c1 = caller_nm->is_compiled_by_c1(); // Default call_addr is the location of the "basic" call. // Determine the address of the call we a reresolving. With // Inline Caches we will always find a recognizable call. // With Inline Caches disabled we may or may not find a
*** 1778,1787 **** --- 1844,1854 ---- is_static_call = true; } else { assert(iter.type() == relocInfo::virtual_call_type || iter.type() == relocInfo::opt_virtual_call_type , "unexpected relocInfo. type"); + is_optimized = (iter.type() == relocInfo::opt_virtual_call_type); } } else { assert(!UseInlineCaches, "relocation info. must exist for this address"); }
*** 1803,1813 **** } } methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle())); - #ifndef PRODUCT Atomic::inc(&_wrong_method_ctr); if (TraceCallFixup) { ResourceMark rm(thread); --- 1870,1879 ----
*** 1898,1909 **** // interpreted. If the caller is compiled we attempt to patch the caller // so he no longer calls into the interpreter. JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc)) Method* moop(method); - address entry_point = moop->from_compiled_entry_no_trampoline(); - // It's possible that deoptimization can occur at a call site which hasn't // been resolved yet, in which case this function will be called from // an nmethod that has been patched for deopt and we can ignore the // request for a fixup. // Also it is possible that we lost a race in that from_compiled_entry --- 1964,1973 ----
*** 1911,1921 **** // we did we'd leap into space because the callsite needs to use // "to interpreter" stub in order to load up the Method*. Don't // ask me how I know this... CodeBlob* cb = CodeCache::find_blob(caller_pc); ! if (cb == NULL || !cb->is_compiled() || entry_point == moop->get_c2i_entry()) { return; } // The check above makes sure this is a nmethod. CompiledMethod* nm = cb->as_compiled_method_or_null(); --- 1975,1989 ---- // we did we'd leap into space because the callsite needs to use // "to interpreter" stub in order to load up the Method*. Don't // ask me how I know this... CodeBlob* cb = CodeCache::find_blob(caller_pc); ! if (cb == NULL || !cb->is_compiled()) { ! return; ! } ! address entry_point = moop->from_compiled_entry_no_trampoline(cb->is_compiled_by_c1()); ! if (entry_point == moop->get_c2i_entry()) { return; } // The check above makes sure this is a nmethod. CompiledMethod* nm = cb->as_compiled_method_or_null();
*** 2299,2316 **** // Otherwise _value._fingerprint is the array. // Remap BasicTypes that are handled equivalently by the adapters. // These are correct for the current system but someday it might be // necessary to make this mapping platform dependent. ! static int adapter_encoding(BasicType in) { switch (in) { case T_BOOLEAN: case T_BYTE: case T_SHORT: ! case T_CHAR: ! // There are all promoted to T_INT in the calling convention ! return T_INT; case T_OBJECT: case T_ARRAY: // In other words, we assume that any register good enough for // an int or long is good enough for a managed pointer. --- 2367,2397 ---- // Otherwise _value._fingerprint is the array. // Remap BasicTypes that are handled equivalently by the adapters. // These are correct for the current system but someday it might be // necessary to make this mapping platform dependent. ! static int adapter_encoding(BasicType in, bool is_valuetype) { switch (in) { case T_BOOLEAN: case T_BYTE: case T_SHORT: ! case T_CHAR: { ! if (is_valuetype) { ! // Do not widen value type field types ! assert(ValueTypePassFieldsAsArgs, "must be enabled"); ! return in; ! } else { ! // They are all promoted to T_INT in the calling convention ! return T_INT; ! } ! } ! ! case T_VALUETYPE: { ! // If value types are passed as fields, return 'in' to differentiate ! // between a T_VALUETYPE and a T_OBJECT in the signature. ! return ValueTypePassFieldsAsArgs ? in : adapter_encoding(T_OBJECT, false); ! } case T_OBJECT: case T_ARRAY: // In other words, we assume that any register good enough for // an int or long is good enough for a managed pointer.
*** 2332,2344 **** return T_CONFLICT; } } public: ! AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) { // The fingerprint is based on the BasicType signature encoded // into an array of ints with eight entries per int. int* ptr; int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int; if (len <= _compact_int_count) { assert(_compact_int_count == 3, "else change next line"); _value._compact[0] = _value._compact[1] = _value._compact[2] = 0; --- 2413,2426 ---- return T_CONFLICT; } } public: ! AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) { // The fingerprint is based on the BasicType signature encoded // into an array of ints with eight entries per int. + int total_args_passed = (sig != NULL) ? sig->length() : 0; int* ptr; int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int; if (len <= _compact_int_count) { assert(_compact_int_count == 3, "else change next line"); _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
*** 2352,2372 **** ptr = _value._fingerprint; } // Now pack the BasicTypes with 8 per int int sig_index = 0; for (int index = 0; index < len; index++) { int value = 0; for (int byte = 0; byte < _basic_types_per_int; byte++) { ! int bt = ((sig_index < total_args_passed) ! ? adapter_encoding(sig_bt[sig_index++]) ! : 0); assert((bt & _basic_type_mask) == bt, "must fit in 4 bits"); value = (value << _basic_type_bits) | bt; } ptr[index] = value; } } ~AdapterFingerPrint() { if (_length > 0) { FREE_C_HEAP_ARRAY(int, _value._fingerprint); --- 2434,2474 ---- ptr = _value._fingerprint; } // Now pack the BasicTypes with 8 per int int sig_index = 0; + BasicType prev_sbt = T_ILLEGAL; + int vt_count = 0; for (int index = 0; index < len; index++) { int value = 0; for (int byte = 0; byte < _basic_types_per_int; byte++) { ! int bt = 0; ! if (sig_index < total_args_passed) { ! BasicType sbt = sig->at(sig_index++)._bt; ! if (ValueTypePassFieldsAsArgs && sbt == T_VALUETYPE) { ! // Found start of value type in signature ! vt_count++; ! if (sig_index == 1 && has_ro_adapter) { ! // With a ro_adapter, replace receiver value type delimiter by T_VOID to prevent matching ! // with other adapters that have the same value type as first argument and no receiver. ! sbt = T_VOID; ! } ! } else if (ValueTypePassFieldsAsArgs && sbt == T_VOID && ! prev_sbt != T_LONG && prev_sbt != T_DOUBLE) { ! // Found end of value type in signature ! vt_count--; ! assert(vt_count >= 0, "invalid vt_count"); ! } ! bt = adapter_encoding(sbt, vt_count > 0); ! prev_sbt = sbt; ! } assert((bt & _basic_type_mask) == bt, "must fit in 4 bits"); value = (value << _basic_type_bits) | bt; } ptr[index] = value; } + assert(vt_count == 0, "invalid vt_count"); } ~AdapterFingerPrint() { if (_length > 0) { FREE_C_HEAP_ARRAY(int, _value._fingerprint);
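The fingerprint constructor above packs one 4-bit adapter_encoding() code per signature entry, eight codes per 32-bit word, and deliberately keeps value-type field types unwidened so that scalarized signatures get distinct adapters. A standalone sketch of just the packing step (plain C++ with illustrative type codes, not the HotSpot class):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum SimpleType : uint32_t { S_PAD = 0, S_INT = 1, S_LONG = 2, S_OBJECT = 3, S_VALUETYPE = 4, S_VOID = 5 };

    // Pack 4-bit type codes, 8 per 32-bit word; the tail of the last word is zero padded.
    static std::vector<uint32_t> pack_fingerprint(const std::vector<SimpleType>& sig) {
      const int bits_per_code  = 4;
      const int codes_per_word = 32 / bits_per_code;  // 8, as in _basic_types_per_int
      std::vector<uint32_t> words((sig.size() + codes_per_word - 1) / codes_per_word, 0);
      size_t idx = 0;
      for (size_t w = 0; w < words.size(); ++w) {
        uint32_t value = 0;
        for (int b = 0; b < codes_per_word; ++b) {
          uint32_t code = (idx < sig.size()) ? sig[idx++] : S_PAD;
          value = (value << bits_per_code) | (code & 0xF);
        }
        words[w] = value;
      }
      return words;
    }

    int main() {
      // Receiver oop, a flattened value-type span (start/end delimited), then an int.
      std::vector<SimpleType> sig = { S_OBJECT, S_VALUETYPE, S_INT, S_INT, S_VOID, S_INT };
      for (uint32_t w : pack_fingerprint(sig)) std::printf("0x%08x\n", w);
      return 0;
    }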
*** 2448,2460 **** public: AdapterHandlerTable() : BasicHashtable<mtCode>(293, (DumpSharedSpaces ? sizeof(CDSAdapterHandlerEntry) : sizeof(AdapterHandlerEntry))) { } // Create a new entry suitable for insertion in the table ! AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) { AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable<mtCode>::new_entry(fingerprint->compute_hash()); ! entry->init(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry); if (DumpSharedSpaces) { ((CDSAdapterHandlerEntry*)entry)->init(); } return entry; } --- 2550,2565 ---- public: AdapterHandlerTable() : BasicHashtable<mtCode>(293, (DumpSharedSpaces ? sizeof(CDSAdapterHandlerEntry) : sizeof(AdapterHandlerEntry))) { } // Create a new entry suitable for insertion in the table ! AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, ! address c2i_value_entry, address c2i_value_ro_entry, ! address c2i_unverified_entry, address c2i_unverified_value_entry) { AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable<mtCode>::new_entry(fingerprint->compute_hash()); ! entry->init(fingerprint, i2c_entry, c2i_entry, c2i_value_entry, c2i_value_ro_entry, ! c2i_unverified_entry, c2i_unverified_value_entry); if (DumpSharedSpaces) { ((CDSAdapterHandlerEntry*)entry)->init(); } return entry; }
*** 2469,2481 **** entry->deallocate(); BasicHashtable<mtCode>::free_entry(entry); } // Find a entry with the same fingerprint if it exists ! AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) { NOT_PRODUCT(_lookups++); ! AdapterFingerPrint fp(total_args_passed, sig_bt); unsigned int hash = fp.compute_hash(); int index = hash_to_index(hash); for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) { NOT_PRODUCT(_buckets++); if (e->hash() == hash) { --- 2574,2586 ---- entry->deallocate(); BasicHashtable<mtCode>::free_entry(entry); } // Find a entry with the same fingerprint if it exists ! AdapterHandlerEntry* lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) { NOT_PRODUCT(_lookups++); ! AdapterFingerPrint fp(sig, has_ro_adapter); unsigned int hash = fp.compute_hash(); int index = hash_to_index(hash); for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) { NOT_PRODUCT(_buckets++); if (e->hash() == hash) {
*** 2591,2610 **** // are never compiled so an i2c entry is somewhat meaningless, but // throw AbstractMethodError just in case. // Pass wrong_method_abstract for the c2i transitions to return // AbstractMethodError for invalid invocations. address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub(); ! _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL), StubRoutines::throw_AbstractMethodError_entry(), wrong_method_abstract, wrong_method_abstract); } AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, ! address c2i_unverified_entry) { ! return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry); } AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) { AdapterHandlerEntry* entry = get_adapter0(method); if (method->is_shared()) { --- 2696,2720 ---- // are never compiled so an i2c entry is somewhat meaningless, but // throw AbstractMethodError just in case. // Pass wrong_method_abstract for the c2i transitions to return // AbstractMethodError for invalid invocations. address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub(); ! _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(NULL), StubRoutines::throw_AbstractMethodError_entry(), + wrong_method_abstract, wrong_method_abstract, wrong_method_abstract, wrong_method_abstract, wrong_method_abstract); } AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, ! address c2i_value_entry, ! address c2i_value_ro_entry, ! address c2i_unverified_entry, ! address c2i_unverified_value_entry) { ! return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_value_entry, c2i_value_ro_entry, c2i_unverified_entry, ! c2i_unverified_value_entry); } AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) { AdapterHandlerEntry* entry = get_adapter0(method); if (method->is_shared()) {
*** 2627,2674 **** } return entry; } AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter0(const methodHandle& method) { // Use customized signature handler. Need to lock around updates to // the AdapterHandlerTable (it is not safe for concurrent readers // and a single writer: this could be fixed if it becomes a // problem). ResourceMark rm; ! NOT_PRODUCT(int insts_size); AdapterBlob* new_adapter = NULL; AdapterHandlerEntry* entry = NULL; AdapterFingerPrint* fingerprint = NULL; { MutexLocker mu(AdapterHandlerLibrary_lock); // make sure data structure is initialized initialize(); ! if (method->is_abstract()) { ! return _abstract_method_handler; } ! // Fill in the signature array, for the calling-convention call. ! int total_args_passed = method->size_of_parameters(); // All args on stack ! ! BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed); ! VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed); ! int i = 0; ! if (!method->is_static()) // Pass in receiver first ! sig_bt[i++] = T_OBJECT; ! for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) { ! sig_bt[i++] = ss.type(); // Collect remaining bits of signature ! if (ss.type() == T_LONG || ss.type() == T_DOUBLE) ! sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots } - assert(i == total_args_passed, ""); // Lookup method signature's fingerprint ! entry = _adapters->lookup(total_args_passed, sig_bt); #ifdef ASSERT AdapterHandlerEntry* shared_entry = NULL; // Start adapter sharing verification only after the VM is booted. if (VerifyAdapterSharing && (entry != NULL)) { --- 2737,2996 ---- } return entry; } + CompiledEntrySignature::CompiledEntrySignature(Method* method) : + _method(method), _num_value_args(0), _has_value_recv(false), + _sig_cc(NULL), _sig_cc_ro(NULL), _regs(NULL), _regs_cc(NULL), _regs_cc_ro(NULL), + _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0), + _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _has_scalarized_args(false) { + _has_reserved_entries = false; + _sig = new GrowableArray<SigEntry>(method->size_of_parameters()); + + } + + int CompiledEntrySignature::compute_scalarized_cc(GrowableArray<SigEntry>*& sig_cc, VMRegPair*& regs_cc, bool scalar_receiver) { + InstanceKlass* holder = _method->method_holder(); + sig_cc = new GrowableArray<SigEntry>(_method->size_of_parameters()); + if (!_method->is_static()) { + if (holder->is_value() && scalar_receiver && ValueKlass::cast(holder)->is_scalarizable()) { + sig_cc->appendAll(ValueKlass::cast(holder)->extended_sig()); + } else { + SigEntry::add_entry(sig_cc, T_OBJECT); + } + } + Thread* THREAD = Thread::current(); + for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) { + if (ss.type() == T_VALUETYPE) { + ValueKlass* vk = ss.as_value_klass(holder); + if (vk->is_scalarizable()) { + sig_cc->appendAll(vk->extended_sig()); + } else { + SigEntry::add_entry(sig_cc, T_OBJECT); + } + } else { + SigEntry::add_entry(sig_cc, ss.type()); + } + } + regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, sig_cc->length() + 2); + return SharedRuntime::java_calling_convention(sig_cc, regs_cc); + } + + int CompiledEntrySignature::insert_reserved_entry(int ret_off) { + // Find index in signature that belongs to return address slot + BasicType bt = T_ILLEGAL; + int i = 0; + for (uint off = 0; i < _sig_cc->length(); ++i) { + if (SigEntry::skip_value_delimiters(_sig_cc, i)) { + VMReg first = _regs_cc[off++].first(); + if (first->is_valid() && first->is_stack()) { + // Select a type for the 
reserved entry that will end up on the stack + bt = _sig_cc->at(i)._bt; + if (((int)first->reg2stack() + VMRegImpl::slots_per_word) == ret_off) { + break; // Index of the return address found + } + } + } + } + // Insert reserved entry and re-compute calling convention + SigEntry::insert_reserved_entry(_sig_cc, i, bt); + return SharedRuntime::java_calling_convention(_sig_cc, _regs_cc); + } + + // See if we can save space by sharing the same entry for VVEP and VVEP(RO), + // or the same entry for VEP and VVEP(RO). + CodeOffsets::Entries CompiledEntrySignature::c1_value_ro_entry_type() const { + if (!has_scalarized_args()) { + // VEP/VVEP/VVEP(RO) all share the same entry. There's no packing. + return CodeOffsets::Verified_Entry; + } + if (_method->is_static()) { + // Static methods don't need VVEP(RO) + return CodeOffsets::Verified_Entry; + } + + if (has_value_recv()) { + if (num_value_args() == 1) { + // Share same entry for VVEP and VVEP(RO). + // This is quite common: we have an instance method in a ValueKlass that has + // no value args other than <this>. + return CodeOffsets::Verified_Value_Entry; + } else { + assert(num_value_args() > 1, "must be"); + // No sharing: + // VVEP(RO) -- <this> is passed as object + // VEP -- <this> is passed as fields + return CodeOffsets::Verified_Value_Entry_RO; + } + } + + // Either a static method, or <this> is not a value type + if (args_on_stack_cc() != args_on_stack_cc_ro() || _has_reserved_entries) { + // No sharing: + // Some arguments are passed on the stack, and we have inserted reserved entries + // into the VEP, but we never insert reserved entries into the VVEP(RO). + return CodeOffsets::Verified_Value_Entry_RO; + } else { + // Share same entry for VEP and VVEP(RO). + return CodeOffsets::Verified_Entry; + } + } + + + void CompiledEntrySignature::compute_calling_conventions() { + // Get the (non-scalarized) signature and check for value type arguments + if (!_method->is_static()) { + if (_method->method_holder()->is_value() && ValueKlass::cast(_method->method_holder())->is_scalarizable()) { + _has_value_recv = true; + _num_value_args++; + } + SigEntry::add_entry(_sig, T_OBJECT); + } + for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) { + BasicType bt = ss.type(); + if (bt == T_VALUETYPE) { + if (ss.as_value_klass(_method->method_holder())->is_scalarizable()) { + _num_value_args++; + } + bt = T_OBJECT; + } + SigEntry::add_entry(_sig, bt); + } + if (_method->is_abstract() && !(ValueTypePassFieldsAsArgs && has_value_arg())) { + return; + } + + // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage + _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length()); + _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs); + + // Now compute the scalarized calling convention if there are value types in the signature + _sig_cc = _sig; + _sig_cc_ro = _sig; + _regs_cc = _regs; + _regs_cc_ro = _regs; + _args_on_stack_cc = _args_on_stack; + _args_on_stack_cc_ro = _args_on_stack; + + if (ValueTypePassFieldsAsArgs && has_value_arg() && !_method->is_native()) { + _args_on_stack_cc = compute_scalarized_cc(_sig_cc, _regs_cc, /* scalar_receiver = */ true); + + _sig_cc_ro = _sig_cc; + _regs_cc_ro = _regs_cc; + _args_on_stack_cc_ro = _args_on_stack_cc; + if (_has_value_recv || _args_on_stack_cc > _args_on_stack) { + // For interface calls, we need another entry point / adapter to unpack the receiver + _args_on_stack_cc_ro = compute_scalarized_cc(_sig_cc_ro, _regs_cc_ro, /* 
scalar_receiver = */ false); + } + + // Compute the stack extension that is required to convert between the calling conventions. + // The stack slots at these offsets are occupied by the return address with the unscalarized + // calling convention. Don't use them for arguments with the scalarized calling convention. + int ret_off = _args_on_stack_cc - _args_on_stack; + int ret_off_ro = _args_on_stack_cc - _args_on_stack_cc_ro; + assert(ret_off_ro <= 0 || ret_off > 0, "receiver unpacking requires more stack space than expected"); + + if (ret_off > 0) { + // Make sure the stack of the scalarized calling convention with the reserved + // entries (2 slots each) remains 16-byte (4 slots) aligned after stack extension. + int alignment = StackAlignmentInBytes / VMRegImpl::stack_slot_size; + if (ret_off_ro != ret_off && ret_off_ro >= 0) { + ret_off += 4; // Account for two reserved entries (4 slots) + ret_off_ro += 4; + ret_off = align_up(ret_off, alignment); + ret_off_ro = align_up(ret_off_ro, alignment); + // TODO can we avoid wasting a stack slot here? + //assert(ret_off != ret_off_ro, "fail"); + if (ret_off > ret_off_ro) { + swap(ret_off, ret_off_ro); // Sort by offset + } + _args_on_stack_cc = insert_reserved_entry(ret_off); + _args_on_stack_cc = insert_reserved_entry(ret_off_ro); + } else { + ret_off += 2; // Account for one reserved entry (2 slots) + ret_off = align_up(ret_off, alignment); + _args_on_stack_cc = insert_reserved_entry(ret_off); + } + + _has_reserved_entries = true; + } + + // Upper bound on stack arguments to avoid hitting the argument limit and + // bailing out of compilation ("unsupported incoming calling sequence"). + // TODO we need a reasonable limit (flag?) here + if (_args_on_stack_cc > 50) { + // Don't scalarize value type arguments + _sig_cc = _sig; + _sig_cc_ro = _sig; + _regs_cc = _regs; + _regs_cc_ro = _regs; + _args_on_stack_cc = _args_on_stack; + _args_on_stack_cc_ro = _args_on_stack; + } else { + _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack); + _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro); + _has_scalarized_args = true; + } + } + } + AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter0(const methodHandle& method) { // Use customized signature handler. Need to lock around updates to // the AdapterHandlerTable (it is not safe for concurrent readers // and a single writer: this could be fixed if it becomes a // problem). ResourceMark rm; ! NOT_PRODUCT(int insts_size = 0); AdapterBlob* new_adapter = NULL; AdapterHandlerEntry* entry = NULL; AdapterFingerPrint* fingerprint = NULL; + { MutexLocker mu(AdapterHandlerLibrary_lock); // make sure data structure is initialized initialize(); ! CompiledEntrySignature ces(method()); ! { ! MutexUnlocker mul(AdapterHandlerLibrary_lock); ! ces.compute_calling_conventions(); ! } ! GrowableArray<SigEntry>& sig = ces.sig(); ! GrowableArray<SigEntry>& sig_cc = ces.sig_cc(); ! GrowableArray<SigEntry>& sig_cc_ro = ces.sig_cc_ro(); ! VMRegPair* regs = ces.regs(); ! VMRegPair* regs_cc = ces.regs_cc(); ! VMRegPair* regs_cc_ro = ces.regs_cc_ro(); ! ! if (ces.has_scalarized_args()) { ! method->set_has_scalarized_args(true); ! method->set_c1_needs_stack_repair(ces.c1_needs_stack_repair()); ! method->set_c2_needs_stack_repair(ces.c2_needs_stack_repair()); } ! if (method->is_abstract()) { ! if (ces.has_scalarized_args()) { ! 
// Save a C heap allocated version of the signature for abstract methods with scalarized value type arguments ! address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub(); ! entry = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(NULL), ! StubRoutines::throw_AbstractMethodError_entry(), ! wrong_method_abstract, wrong_method_abstract, wrong_method_abstract, ! wrong_method_abstract, wrong_method_abstract); ! GrowableArray<SigEntry>* heap_sig = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<SigEntry>(sig_cc_ro.length(), true); ! heap_sig->appendAll(&sig_cc_ro); ! entry->set_sig_cc(heap_sig); ! return entry; ! } else { ! return _abstract_method_handler; ! } } // Lookup method signature's fingerprint ! entry = _adapters->lookup(&sig_cc, regs_cc != regs_cc_ro); #ifdef ASSERT AdapterHandlerEntry* shared_entry = NULL; // Start adapter sharing verification only after the VM is booted. if (VerifyAdapterSharing && (entry != NULL)) {
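The new CompiledEntrySignature computes up to three signatures per method: the declared one (sig), the fully scalarized one (sig_cc), and the receiver-as-oop variant (sig_cc_ro). The sketch below only illustrates the flattening convention those signatures share, where a scalarizable value-type argument becomes a T_VALUETYPE start delimiter, its field types, and a T_VOID end delimiter; it models a single value-type layout and omits the reserved return-address entries and the RO variant (plain C++, illustrative names, not HotSpot code):

    #include <cstdio>
    #include <vector>

    enum BT { BT_INT, BT_LONG, BT_OBJECT, BT_VALUETYPE, BT_VOID };

    struct ValueLayout { std::vector<BT> fields; };   // e.g. Point = { int x; int y; }

    // Build a scalarized ("_cc") signature from a declared one: each scalarizable value-type
    // argument is replaced by a T_VALUETYPE start delimiter, its field types, and a T_VOID
    // end delimiter, which is the shape the fingerprint and adapter code above expect.
    static std::vector<BT> scalarize(const std::vector<BT>& declared, const ValueLayout& vt) {
      std::vector<BT> cc;
      for (BT bt : declared) {
        if (bt == BT_VALUETYPE) {
          cc.push_back(BT_VALUETYPE);
          cc.insert(cc.end(), vt.fields.begin(), vt.fields.end());
          cc.push_back(BT_VOID);
        } else {
          cc.push_back(bt);
        }
      }
      return cc;
    }

    int main() {
      ValueLayout point = { { BT_INT, BT_INT } };
      // Receiver oop plus one Point argument plus one int: 3 declared entries, 6 scalarized.
      std::vector<BT> declared = { BT_OBJECT, BT_VALUETYPE, BT_INT };
      std::printf("declared=%zu scalarized=%zu\n", declared.size(), scalarize(declared, point).size());
      return 0;
    }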
*** 2679,2693 **** if (entry != NULL) { return entry; } - // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage - int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false); - // Make a C heap allocated version of the fingerprint to store in the adapter ! fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt); // StubRoutines::code2() is initialized after this function can be called. As a result, // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that generated // prior to StubRoutines::code2() being set. Checks refer to checks generated in an I2C // stub that ensure that an I2C stub is called from an interpreter frame. --- 3001,3012 ---- if (entry != NULL) { return entry; } // Make a C heap allocated version of the fingerprint to store in the adapter ! fingerprint = new AdapterFingerPrint(&sig_cc, regs_cc != regs_cc_ro); // StubRoutines::code2() is initialized after this function can be called. As a result, // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that generated // prior to StubRoutines::code2() being set. Checks refer to checks generated in an I2C // stub that ensure that an I2C stub is called from an interpreter frame.
*** 2701,2729 **** buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs, sizeof(buffer_locs)/sizeof(relocInfo)); MacroAssembler _masm(&buffer); entry = SharedRuntime::generate_i2c2i_adapters(&_masm, ! total_args_passed, ! comp_args_on_stack, ! sig_bt, regs, ! fingerprint); #ifdef ASSERT if (VerifyAdapterSharing) { if (shared_entry != NULL) { assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size()), "code must match"); // Release the one just created and return the original _adapters->free_entry(entry); return shared_entry; } else { entry->save_code(buf->code_begin(), buffer.insts_size()); } } #endif - new_adapter = AdapterBlob::create(&buffer); NOT_PRODUCT(insts_size = buffer.insts_size()); } if (new_adapter == NULL) { // CodeCache is full, disable compilation // Ought to log this but compile log is only per compile thread --- 3020,3062 ---- buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs, sizeof(buffer_locs)/sizeof(relocInfo)); MacroAssembler _masm(&buffer); entry = SharedRuntime::generate_i2c2i_adapters(&_masm, ! ces.args_on_stack(), ! &sig, regs, ! &sig_cc, ! regs_cc, ! &sig_cc_ro, ! regs_cc_ro, ! fingerprint, ! new_adapter); ! ! if (ces.has_scalarized_args()) { ! // Save a C heap allocated version of the scalarized signature and store it in the adapter ! GrowableArray<SigEntry>* heap_sig = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<SigEntry>(sig_cc.length(), true); ! heap_sig->appendAll(&sig_cc); ! entry->set_sig_cc(heap_sig); ! } ! #ifdef ASSERT if (VerifyAdapterSharing) { if (shared_entry != NULL) { + if (!shared_entry->compare_code(buf->code_begin(), buffer.insts_size())) { + method->print(); + } assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size()), "code must match"); // Release the one just created and return the original _adapters->free_entry(entry); return shared_entry; } else { entry->save_code(buf->code_begin(), buffer.insts_size()); } } #endif NOT_PRODUCT(insts_size = buffer.insts_size()); } if (new_adapter == NULL) { // CodeCache is full, disable compilation // Ought to log this but compile log is only per compile thread
*** 2775,2785 **** --- 3108,3121 ---- address AdapterHandlerEntry::base_address() { address base = _i2c_entry; if (base == NULL) base = _c2i_entry; assert(base <= _c2i_entry || _c2i_entry == NULL, ""); + assert(base <= _c2i_value_entry || _c2i_value_entry == NULL, ""); + assert(base <= _c2i_value_ro_entry || _c2i_value_ro_entry == NULL, ""); assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, ""); + assert(base <= _c2i_unverified_value_entry || _c2i_unverified_value_entry == NULL, ""); return base; } void AdapterHandlerEntry::relocate(address new_base) { address old_base = base_address();
*** 2787,2804 **** --- 3123,3149 ---- ptrdiff_t delta = new_base - old_base; if (_i2c_entry != NULL) _i2c_entry += delta; if (_c2i_entry != NULL) _c2i_entry += delta; + if (_c2i_value_entry != NULL) + _c2i_value_entry += delta; + if (_c2i_value_ro_entry != NULL) + _c2i_value_ro_entry += delta; if (_c2i_unverified_entry != NULL) _c2i_unverified_entry += delta; + if (_c2i_unverified_value_entry != NULL) + _c2i_unverified_value_entry += delta; assert(base_address() == new_base, ""); } void AdapterHandlerEntry::deallocate() { delete _fingerprint; + if (_sig_cc != NULL) { + delete _sig_cc; + } #ifdef ASSERT if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code); #endif }
*** 2866,2876 **** int i=0; if (!method->is_static()) // Pass in receiver first sig_bt[i++] = T_OBJECT; SignatureStream ss(method->signature()); for (; !ss.at_return_type(); ss.next()) { ! sig_bt[i++] = ss.type(); // Collect remaining bits of signature if (ss.type() == T_LONG || ss.type() == T_DOUBLE) sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots } assert(i == total_args_passed, ""); BasicType ret_type = ss.type(); --- 3211,3222 ---- int i=0; if (!method->is_static()) // Pass in receiver first sig_bt[i++] = T_OBJECT; SignatureStream ss(method->signature()); for (; !ss.at_return_type(); ss.next()) { ! BasicType bt = ss.type(); ! sig_bt[i++] = bt; // Collect remaining bits of signature if (ss.type() == T_LONG || ss.type() == T_DOUBLE) sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots } assert(i == total_args_passed, ""); BasicType ret_type = ss.type();
*** 2981,2996 **** case 'V': sig_bt[cnt++] = T_VOID; break; case 'L': // Oop while (*s++ != ';'); // Skip signature sig_bt[cnt++] = T_OBJECT; break; case '[': { // Array do { // Skip optional size while (*s >= '0' && *s <= '9') s++; } while (*s++ == '['); // Nested arrays? // Skip element type ! if (s[-1] == 'L') while (*s++ != ';'); // Skip signature sig_bt[cnt++] = T_ARRAY; break; } default : ShouldNotReachHere(); --- 3327,3346 ---- case 'V': sig_bt[cnt++] = T_VOID; break; case 'L': // Oop while (*s++ != ';'); // Skip signature sig_bt[cnt++] = T_OBJECT; break; + case 'Q': // Value type + while (*s++ != ';'); // Skip signature + sig_bt[cnt++] = T_VALUETYPE; + break; case '[': { // Array do { // Skip optional size while (*s >= '0' && *s <= '9') s++; } while (*s++ == '['); // Nested arrays? // Skip element type ! if (s[-1] == 'L' || s[-1] == 'Q') while (*s++ != ';'); // Skip signature sig_bt[cnt++] = T_ARRAY; break; } default : ShouldNotReachHere();
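The hunk above extends the signature scanner to accept the 'Q...;' descriptor form for value types and to skip it like an 'L...;' reference when it appears as an array element type. As a standalone illustration of the same descriptor grammar (plain C++, not the HotSpot parser, and without the optional array-size digits handled above):

    #include <cstdio>
    #include <string>

    // Count interpreter argument slots for a JVM method descriptor, treating the
    // 'Q...;' (value type) form exactly like 'L...;' for slot-counting purposes.
    static int count_arg_slots(const std::string& desc) {
      int slots = 0;
      size_t i = desc.find('(') + 1;
      while (i < desc.size() && desc[i] != ')') {
        char c = desc[i];
        if (c == 'J' || c == 'D') {                       // long/double take two slots
          slots += 2; ++i;
        } else if (c == 'L' || c == 'Q') {                // object or value type reference
          slots += 1; i = desc.find(';', i) + 1;
        } else if (c == '[') {                            // array: skip dimensions and element type
          slots += 1;
          while (desc[i] == '[') ++i;
          if (desc[i] == 'L' || desc[i] == 'Q') i = desc.find(';', i) + 1; else ++i;
        } else {                                          // remaining primitives take one slot
          slots += 1; ++i;
        }
      }
      return slots;
    }

    int main() {
      // int, String, a Point value type, an array of Points, and a double: 1+1+1+1+2 = 6 slots.
      std::printf("%d\n", count_arg_slots("(ILjava/lang/String;Qpkg/Point;[Qpkg/Point;D)V"));
      return 0;
    }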
*** 3127,3139 **** } assert(false, "Should have found handler"); } void AdapterHandlerEntry::print_adapter_on(outputStream* st) const { ! st->print_cr("AHE@" INTPTR_FORMAT ": %s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT, p2i(this), fingerprint()->as_string(), ! p2i(get_i2c_entry()), p2i(get_c2i_entry()), p2i(get_c2i_unverified_entry())); } #if INCLUDE_CDS --- 3477,3491 ---- } assert(false, "Should have found handler"); } void AdapterHandlerEntry::print_adapter_on(outputStream* st) const { ! st->print_cr("AHE@" INTPTR_FORMAT ": %s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iVE: " INTPTR_FORMAT ! " c2iVROE: " INTPTR_FORMAT " c2iUE: " INTPTR_FORMAT " c2iUVE: " INTPTR_FORMAT, p2i(this), fingerprint()->as_string(), ! p2i(get_i2c_entry()), p2i(get_c2i_entry()), p2i(get_c2i_value_entry()), ! p2i(get_c2i_value_ro_entry()), p2i(get_c2i_unverified_entry()), p2i(get_c2i_unverified_value_entry())); } #if INCLUDE_CDS
*** 3223,3227 ****
--- 3575,3784 ----
    if (new_obj == NULL) return;
    BarrierSet *bs = BarrierSet::barrier_set();
    bs->on_slowpath_allocation_exit(thread, new_obj);
  }
+ 
+ // We are at a compiled code to interpreter call. We need backing
+ // buffers for all value type arguments. Allocate an object array to
+ // hold them (convenient because once we're done with it we don't have
+ // to worry about freeing it).
+ oop SharedRuntime::allocate_value_types_impl(JavaThread* thread, methodHandle callee, bool allocate_receiver, TRAPS) {
+   assert(ValueTypePassFieldsAsArgs, "no reason to call this");
+   ResourceMark rm;
+ 
+   int nb_slots = 0;
+   InstanceKlass* holder = callee->method_holder();
+   allocate_receiver &= !callee->is_static() && holder->is_value();
+   if (allocate_receiver) {
+     nb_slots++;
+   }
+   for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
+     if (ss.type() == T_VALUETYPE) {
+       nb_slots++;
+     }
+   }
+   objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
+   objArrayHandle array(THREAD, array_oop);
+   int i = 0;
+   if (allocate_receiver) {
+     ValueKlass* vk = ValueKlass::cast(holder);
+     oop res = vk->allocate_instance(CHECK_NULL);
+     array->obj_at_put(i, res);
+     i++;
+   }
+   for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
+     if (ss.type() == T_VALUETYPE) {
+       ValueKlass* vk = ss.as_value_klass(holder);
+       oop res = vk->allocate_instance(CHECK_NULL);
+       array->obj_at_put(i, res);
+       i++;
+     }
+   }
+   return array();
+ }
+ 
+ JRT_ENTRY(void, SharedRuntime::allocate_value_types(JavaThread* thread, Method* callee_method, bool allocate_receiver))
+   methodHandle callee(callee_method);
+   oop array = SharedRuntime::allocate_value_types_impl(thread, callee, allocate_receiver, CHECK);
+   thread->set_vm_result(array);
+   thread->set_vm_result_2(callee()); // TODO: required to keep callee live?
+ JRT_END
+ 
+ // Iterate over the array of heap-allocated value types and apply the GC post barrier to all reference fields.
+ // This is called from the C2I adapter after value type arguments are heap allocated and initialized.
+ JRT_LEAF(void, SharedRuntime::apply_post_barriers(JavaThread* thread, objArrayOopDesc* array)) + { + assert(ValueTypePassFieldsAsArgs, "no reason to call this"); + assert(oopDesc::is_oop(array), "should be oop"); + for (int i = 0; i < array->length(); ++i) { + instanceOop valueOop = (instanceOop)array->obj_at(i); + ValueKlass* vk = ValueKlass::cast(valueOop->klass()); + if (vk->contains_oops()) { + const address dst_oop_addr = ((address) (void*) valueOop); + OopMapBlock* map = vk->start_of_nonstatic_oop_maps(); + OopMapBlock* const end = map + vk->nonstatic_oop_map_count(); + while (map != end) { + address doop_address = dst_oop_addr + map->offset(); + barrier_set_cast<ModRefBarrierSet>(BarrierSet::barrier_set())-> + write_ref_array((HeapWord*) doop_address, map->count()); + map++; + } + } + } + } + JRT_END + + // We're returning from an interpreted method: load each field into a + // register following the calling convention + JRT_LEAF(void, SharedRuntime::load_value_type_fields_in_regs(JavaThread* thread, oopDesc* res)) + { + assert(res->klass()->is_value(), "only value types here"); + ResourceMark rm; + RegisterMap reg_map(thread); + frame stubFrame = thread->last_frame(); + frame callerFrame = stubFrame.sender(&reg_map); + assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter"); + + ValueKlass* vk = ValueKlass::cast(res->klass()); + + const Array<SigEntry>* sig_vk = vk->extended_sig(); + const Array<VMRegPair>* regs = vk->return_regs(); + + if (regs == NULL) { + // The fields of the value klass don't fit in registers, bail out + return; + } + + int j = 1; + for (int i = 0; i < sig_vk->length(); i++) { + BasicType bt = sig_vk->at(i)._bt; + if (bt == T_VALUETYPE) { + continue; + } + if (bt == T_VOID) { + if (sig_vk->at(i-1)._bt == T_LONG || + sig_vk->at(i-1)._bt == T_DOUBLE) { + j++; + } + continue; + } + int off = sig_vk->at(i)._offset; + assert(off > 0, "offset in object should be positive"); + VMRegPair pair = regs->at(j); + address loc = reg_map.location(pair.first()); + switch(bt) { + case T_BOOLEAN: + *(jboolean*)loc = res->bool_field(off); + break; + case T_CHAR: + *(jchar*)loc = res->char_field(off); + break; + case T_BYTE: + *(jbyte*)loc = res->byte_field(off); + break; + case T_SHORT: + *(jshort*)loc = res->short_field(off); + break; + case T_INT: { + *(jint*)loc = res->int_field(off); + break; + } + case T_LONG: + #ifdef _LP64 + *(intptr_t*)loc = res->long_field(off); + #else + Unimplemented(); + #endif + break; + case T_OBJECT: + case T_ARRAY: { + *(oop*)loc = res->obj_field(off); + break; + } + case T_FLOAT: + *(jfloat*)loc = res->float_field(off); + break; + case T_DOUBLE: + *(jdouble*)loc = res->double_field(off); + break; + default: + ShouldNotReachHere(); + } + j++; + } + assert(j == regs->length(), "missed a field?"); + + #ifdef ASSERT + VMRegPair pair = regs->at(0); + address loc = reg_map.location(pair.first()); + assert(*(oopDesc**)loc == res, "overwritten object"); + #endif + + thread->set_vm_result(res); + } + JRT_END + + // We've returned to an interpreted method, the interpreter needs a + // reference to a value type instance. Allocate it and initialize it + // from field's values in registers. 
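Editor's note: apply_post_barriers() above tells the GC that reference fields inside the freshly initialized heap buffers were written without the usual store barriers. As a rough illustration of what such a post barrier typically amounts to, here is a simple card-marking sketch; the 512-byte card size, the dirty value, and the unbiased indexing are assumptions for the example, not details taken from this patch.

    #include <cstddef>
    #include <cstdint>

    // Sketch of a card-marking post barrier: after reference fields in the range
    // [start, start + count_words words) have been updated, dirty every card
    // covering that range so a later generational GC scan revisits those words.
    // Indexing is simplified: a real card table is biased so index 0 does not
    // correspond to address 0.
    static const std::size_t  kCardShift = 9;   // 512-byte cards (assumed)
    static const std::uint8_t kDirtyCard = 0;   // value meaning "card is dirty" (assumed)

    static void post_barrier_range(std::uint8_t* card_table,
                                   std::uintptr_t start, std::size_t count_words) {
      if (count_words == 0) return;
      std::uintptr_t end = start + count_words * sizeof(void*); // one past the last byte written
      for (std::uintptr_t card = start >> kCardShift; card <= (end - 1) >> kCardShift; card++) {
        card_table[card] = kDirtyCard;
      }
    }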
+ JRT_BLOCK_ENTRY(void, SharedRuntime::store_value_type_fields_to_buf(JavaThread* thread, intptr_t res)) + { + ResourceMark rm; + RegisterMap reg_map(thread); + frame stubFrame = thread->last_frame(); + frame callerFrame = stubFrame.sender(&reg_map); + + #ifdef ASSERT + ValueKlass* verif_vk = ValueKlass::returned_value_klass(reg_map); + #endif + + if (!is_set_nth_bit(res, 0)) { + // We're not returning with value type fields in registers (the + // calling convention didn't allow it for this value klass) + assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area"); + thread->set_vm_result((oopDesc*)res); + assert(verif_vk == NULL, "broken calling convention"); + return; + } + + clear_nth_bit(res, 0); + ValueKlass* vk = (ValueKlass*)res; + assert(verif_vk == vk, "broken calling convention"); + assert(Metaspace::contains((void*)res), "should be klass"); + + // Allocate handles for every oop field so they are safe in case of + // a safepoint when allocating + GrowableArray<Handle> handles; + vk->save_oop_fields(reg_map, handles); + + // It's unsafe to safepoint until we are here + JRT_BLOCK; + { + Thread* THREAD = thread; + oop vt = vk->realloc_result(reg_map, handles, CHECK); + thread->set_vm_result(vt); + } + JRT_BLOCK_END; + } + JRT_END +
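Editor's note: store_value_type_fields_to_buf() above relies on a low-bit tag in the returned word: because both oops and Klass pointers are well aligned, bit 0 is free to mean "the fields came back in registers and this word is the ValueKlass*" rather than "this word is a heap buffer/oop". A small standalone sketch of that tagging trick (types and helper names are illustrative, not the VM's utilities):

    #include <cassert>
    #include <cstdint>

    // Sketch: use bit 0 of an aligned pointer as a discriminator between two
    // kinds of return value, mirroring the is_set_nth_bit / clear_nth_bit use above.
    static inline std::intptr_t tag_as_klass(void* klass_ptr) {
      std::intptr_t v = reinterpret_cast<std::intptr_t>(klass_ptr);
      assert((v & 1) == 0 && "pointer must be at least 2-byte aligned");
      return v | 1;                   // set bit 0: "fields are in registers"
    }

    static inline bool is_klass_tagged(std::intptr_t v) {
      return (v & 1) != 0;            // bit 0 set => not an oop/buffer pointer
    }

    static inline void* untag(std::intptr_t v) {
      return reinterpret_cast<void*>(v & ~static_cast<std::intptr_t>(1));
    }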