src/hotspot/share/opto/graphKit.cpp

*** 23,32 **** --- 23,33 ---- */ #include "precompiled.hpp" #include "ci/ciUtilities.hpp" #include "compiler/compileLog.hpp" + #include "ci/ciValueKlass.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/c2/barrierSetC2.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" #include "opto/addnode.hpp"
*** 35,62 **** #include "opto/graphKit.hpp" #include "opto/idealKit.hpp" #include "opto/intrinsicnode.hpp" #include "opto/locknode.hpp" #include "opto/machnode.hpp" #include "opto/opaquenode.hpp" #include "opto/parse.hpp" #include "opto/rootnode.hpp" #include "opto/runtime.hpp" #include "runtime/deoptimization.hpp" #include "runtime/sharedRuntime.hpp" //----------------------------GraphKit----------------------------------------- // Main utility constructor. ! GraphKit::GraphKit(JVMState* jvms) : Phase(Phase::Parser), _env(C->env()), ! _gvn(*C->initial_gvn()), _barrier_set(BarrierSet::barrier_set()->barrier_set_c2()) { _exceptions = jvms->map()->next_exception(); if (_exceptions != NULL) jvms->map()->set_next_exception(NULL); set_jvms(jvms); } // Private constructor for parser. GraphKit::GraphKit() : Phase(Phase::Parser), --- 36,73 ---- #include "opto/graphKit.hpp" #include "opto/idealKit.hpp" #include "opto/intrinsicnode.hpp" #include "opto/locknode.hpp" #include "opto/machnode.hpp" + #include "opto/narrowptrnode.hpp" #include "opto/opaquenode.hpp" #include "opto/parse.hpp" #include "opto/rootnode.hpp" #include "opto/runtime.hpp" + #include "opto/valuetypenode.hpp" #include "runtime/deoptimization.hpp" #include "runtime/sharedRuntime.hpp" //----------------------------GraphKit----------------------------------------- // Main utility constructor. ! GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn) : Phase(Phase::Parser), _env(C->env()), ! _gvn((gvn != NULL) ? *gvn : *C->initial_gvn()), _barrier_set(BarrierSet::barrier_set()->barrier_set_c2()) { + assert(gvn == NULL || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled"); _exceptions = jvms->map()->next_exception(); if (_exceptions != NULL) jvms->map()->set_next_exception(NULL); set_jvms(jvms); + #ifdef ASSERT + if (_gvn.is_IterGVN() != NULL) { + assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used"); + // Save the initial size of _for_igvn worklist for verification (see ~GraphKit) + _worklist_size = _gvn.C->for_igvn()->size(); + } + #endif } // Private constructor for parser. GraphKit::GraphKit() : Phase(Phase::Parser),
*** 821,840 **** ciMethod* cur_method = jvms->method(); int cur_bci = jvms->bci(); if (cur_method != NULL && cur_bci != InvocationEntryBci) { Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci); return Interpreter::bytecode_should_reexecute(code) || ! (is_anewarray && code == Bytecodes::_multianewarray); // Reexecute _multianewarray bytecode which was replaced with // sequence of [a]newarray. See Parse::do_multianewarray(). // // Note: interpreter should not have it set since this optimization // is limited by dimensions and guarded by flag so in some cases // multianewarray() runtime calls will be generated and // the bytecode should not be reexecutes (stack will not be reset). ! } else return false; } // Helper function for adding JVMState and debug information to node void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) { // Add the safepoint edges to the call (or other safepoint). --- 832,852 ---- ciMethod* cur_method = jvms->method(); int cur_bci = jvms->bci(); if (cur_method != NULL && cur_bci != InvocationEntryBci) { Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci); return Interpreter::bytecode_should_reexecute(code) || ! (is_anewarray && (code == Bytecodes::_multianewarray)); // Reexecute _multianewarray bytecode which was replaced with // sequence of [a]newarray. See Parse::do_multianewarray(). // // Note: interpreter should not have it set since this optimization // is limited by dimensions and guarded by flag so in some cases // multianewarray() runtime calls will be generated and // the bytecode should not be reexecutes (stack will not be reset). ! } else { return false; + } } // Helper function for adding JVMState and debug information to node void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) { // Add the safepoint edges to the call (or other safepoint).
*** 1074,1083 **** --- 1086,1104 ---- assert(rsize == 1, ""); depth = rsize - inputs; } break; + case Bytecodes::_withfield: { + bool ignored_will_link; + ciField* field = method()->get_field_at_bci(bci(), ignored_will_link); + int size = field->type()->size(); + inputs = size+1; + depth = rsize - inputs; + break; + } + case Bytecodes::_ireturn: case Bytecodes::_lreturn: case Bytecodes::_freturn: case Bytecodes::_dreturn: case Bytecodes::_areturn:
*** 1199,1208 **** --- 1220,1230 ---- // Construct NULL check Node *chk = NULL; switch(type) { case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break; case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break; + case T_VALUETYPE : // fall through case T_ARRAY : // fall through type = T_OBJECT; // simplify further tests case T_OBJECT : { const Type *t = _gvn.type( value );
*** 1370,1383 **** --- 1392,1423 ---- } return value; } + Node* GraphKit::null2default(Node* value, ciValueKlass* vk) { + Node* null_ctl = top(); + value = null_check_oop(value, &null_ctl); + if (!null_ctl->is_top()) { + // Return default value if oop is null + Node* region = new RegionNode(3); + region->init_req(1, control()); + region->init_req(2, null_ctl); + value = PhiNode::make(region, value, TypeInstPtr::make(TypePtr::BotPTR, vk)); + value->set_req(2, ValueTypeNode::default_oop(gvn(), vk)); + set_control(gvn().transform(region)); + value = gvn().transform(value); + } + return value; + } //------------------------------cast_not_null---------------------------------- // Cast obj to not-null on this path Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) { + if (obj->is_ValueType()) { + return obj; + } const Type *t = _gvn.type(obj); const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL); // Object is already not-null? if( t == t_not_null ) return obj;
*** 1502,1512 **** ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe); } else { ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe); } ld = _gvn.transform(ld); ! if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) { // Improve graph before escape analysis and boxing elimination. record_for_igvn(ld); } return ld; } --- 1542,1553 ---- ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe); } else { ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe); } ld = _gvn.transform(ld); ! ! if (((bt == T_OBJECT || bt == T_VALUETYPE) && C->do_escape_analysis()) || C->eliminate_boxing()) { // Improve graph before escape analysis and boxing elimination. record_for_igvn(ld); } return ld; }
*** 1553,1563 **** Node* adr, const TypePtr* adr_type, Node* val, const Type* val_type, BasicType bt, ! DecoratorSet decorators) { // Transformation of a value which could be NULL pointer (CastPP #NULL) // could be delayed during Parse (for example, in adjust_map_after_if()). // Execute transformation here to avoid barrier generation in such case. if (_gvn.type(val) == TypePtr::NULL_PTR) { val = _gvn.makecon(TypePtr::NULL_PTR); --- 1594,1606 ---- Node* adr, const TypePtr* adr_type, Node* val, const Type* val_type, BasicType bt, ! DecoratorSet decorators, ! bool deoptimize_on_exception, ! bool safe_for_replace) { // Transformation of a value which could be NULL pointer (CastPP #NULL) // could be delayed during Parse (for example, in adjust_map_after_if()). // Execute transformation here to avoid barrier generation in such case. if (_gvn.type(val) == TypePtr::NULL_PTR) { val = _gvn.makecon(TypePtr::NULL_PTR);
*** 1566,1575 **** --- 1609,1622 ---- if (stopped()) { return top(); // Dead path ? } assert(val != NULL, "not dead path"); + if (val->is_ValueType()) { + // Allocate value type and get oop + val = val->as_ValueType()->allocate(this, deoptimize_on_exception, safe_for_replace)->get_oop(); + } C2AccessValuePtr addr(adr, adr_type); C2AccessValue value(val, val_type); C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr); if (access.is_raw()) {
*** 1582,1598 **** Node* GraphKit::access_load_at(Node* obj, // containing obj Node* adr, // actual adress to store val at const TypePtr* adr_type, const Type* val_type, BasicType bt, ! DecoratorSet decorators) { if (stopped()) { return top(); // Dead path ? } C2AccessValuePtr addr(adr, adr_type); ! C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr); if (access.is_raw()) { return _barrier_set->BarrierSetC2::load_at(access, val_type); } else { return _barrier_set->load_at(access, val_type); } --- 1629,1646 ---- Node* GraphKit::access_load_at(Node* obj, // containing obj Node* adr, // actual adress to store val at const TypePtr* adr_type, const Type* val_type, BasicType bt, ! DecoratorSet decorators, ! Node* ctl) { if (stopped()) { return top(); // Dead path ? } C2AccessValuePtr addr(adr, adr_type); ! C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr, ctl); if (access.is_raw()) { return _barrier_set->BarrierSetC2::load_at(access, val_type); } else { return _barrier_set->load_at(access, val_type); }
*** 1686,1697 **** } else { return _barrier_set->atomic_add_at(access, new_val, value_type); } } ! void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) { ! return _barrier_set->clone(this, src, dst, size, is_array); } Node* GraphKit::access_resolve(Node* n, DecoratorSet decorators) { // Use stronger ACCESS_WRITE|ACCESS_READ by default. if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) { --- 1734,1745 ---- } else { return _barrier_set->atomic_add_at(access, new_val, value_type); } } ! void GraphKit::access_clone(Node* src_base, Node* dst_base, Node* countx, bool is_array) { ! return _barrier_set->clone(this, src_base, dst_base, countx, is_array); } Node* GraphKit::access_resolve(Node* n, DecoratorSet decorators) { // Use stronger ACCESS_WRITE|ACCESS_READ by default. if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
*** 1702,1711 **** --- 1750,1764 ---- //-------------------------array_element_address------------------------- Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt, const TypeInt* sizetype, Node* ctrl) { uint shift = exact_log2(type2aelembytes(elembt)); + ciKlass* arytype_klass = _gvn.type(ary)->is_aryptr()->klass(); + if (arytype_klass != NULL && arytype_klass->is_value_array_klass()) { + ciValueArrayKlass* vak = arytype_klass->as_value_array_klass(); + shift = vak->log2_element_size(); + } uint header = arrayOopDesc::base_offset_in_bytes(elembt); // short-circuit a common case (saves lots of confusing waste motion) jint idx_con = find_int_con(idx, -1); if (idx_con >= 0) {
*** 1722,1747 **** //-------------------------load_array_element------------------------- Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) { const Type* elemtype = arytype->elem(); BasicType elembt = elemtype->array_element_basic_type(); Node* adr = array_element_address(ary, idx, elembt, arytype->size()); if (elembt == T_NARROWOOP) { elembt = T_OBJECT; // To satisfy switch in LoadNode::make() } Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered); return ld; } //-------------------------set_arguments_for_java_call------------------------- // Arguments (pre-popped from the stack) are taken from the JVMS. ! void GraphKit::set_arguments_for_java_call(CallJavaNode* call) { // Add the call arguments: ! uint nargs = call->method()->arg_size(); ! for (uint i = 0; i < nargs; i++) { ! Node* arg = argument(i); ! call->init_req(i + TypeFunc::Parms, arg); } } //---------------------------set_edges_for_java_call--------------------------- // Connect a newly created call into the current JVMS. --- 1775,1829 ---- //-------------------------load_array_element------------------------- Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) { const Type* elemtype = arytype->elem(); BasicType elembt = elemtype->array_element_basic_type(); + assert(elembt != T_VALUETYPE, "value types are not supported by this method"); Node* adr = array_element_address(ary, idx, elembt, arytype->size()); if (elembt == T_NARROWOOP) { elembt = T_OBJECT; // To satisfy switch in LoadNode::make() } Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered); return ld; } //-------------------------set_arguments_for_java_call------------------------- // Arguments (pre-popped from the stack) are taken from the JVMS. ! void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool incremental_inlining) { // Add the call arguments: ! const TypeTuple* domain = call->tf()->domain_sig(); ! ExtendedSignature sig_cc = ExtendedSignature(call->method()->get_sig_cc(), SigEntryFilter()); ! uint nargs = domain->cnt(); ! for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) { ! Node* arg = argument(i-TypeFunc::Parms); ! const Type* t = domain->field_at(i); ! if (call->method()->has_scalarized_args() && t->is_valuetypeptr() && !t->maybe_null()) { ! // We don't pass value type arguments by reference but instead pass each field of the value type ! ValueTypeNode* vt = arg->as_ValueType(); ! vt->pass_fields(this, call, sig_cc, idx); ! // If a value type argument is passed as fields, attach the Method* to the call site ! // to be able to access the extended signature later via attached_method_before_pc(). ! // For example, see CompiledMethod::preserve_callee_argument_oops(). ! call->set_override_symbolic_info(true); ! continue; ! } else if (arg->is_ValueType()) { ! // Pass value type argument via oop to callee ! if (!incremental_inlining) { ! arg = arg->as_ValueType()->allocate(this)->get_oop(); ! } else { ! arg = ValueTypePtrNode::make_from_value_type(this, arg->as_ValueType()); ! } ! } ! call->init_req(idx++, arg); ! // Skip reserved arguments ! BasicType bt = t->basic_type(); ! while (SigEntry::next_is_reserved(sig_cc, bt, true)) { ! call->init_req(idx++, top()); ! if (type2size[bt] == 2) { ! call->init_req(idx++, top()); ! } ! } } } //---------------------------set_edges_for_java_call--------------------------- // Connect a newly created call into the current JVMS.
*** 1775,1791 **** } Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) { if (stopped()) return top(); // maybe the call folded up? - // Capture the return value, if any. - Node* ret; - if (call->method() == NULL || - call->method()->return_type()->basic_type() == T_VOID) - ret = top(); - else ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms)); - // Note: Since any out-of-line call can produce an exception, // we always insert an I_O projection from the call into the result. make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize); --- 1857,1866 ----
*** 1794,1803 **** --- 1869,1897 ---- // through and exceptional paths, so replace the projections for // the fall through path. set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) )); set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) )); } + + // Capture the return value, if any. + Node* ret; + if (call->method() == NULL || call->method()->return_type()->basic_type() == T_VOID) { + ret = top(); + } else if (call->tf()->returns_value_type_as_fields()) { + // Return of multiple values (value type fields): we create a + // ValueType node, each field is a projection from the call. + ciValueKlass* vk = call->method()->return_type()->as_value_klass(); + const Array<SigEntry>* sig_array = vk->extended_sig(); + GrowableArray<SigEntry> sig = GrowableArray<SigEntry>(sig_array->length()); + sig.appendAll(sig_array); + ExtendedSignature sig_cc = ExtendedSignature(&sig, SigEntryFilter()); + uint base_input = TypeFunc::Parms + 1; + ret = ValueTypeNode::make_from_multi(this, call, sig_cc, vk, base_input, false); + } else { + ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms)); + } + return ret; } //--------------------set_predefined_input_for_runtime_call-------------------- // Reading and setting the memory state is way conservative here.
*** 1884,1963 **** Node* ex_ctl = top(); SafePointNode* final_state = stop(); // Find all the needed outputs of this call ! CallProjections callprojs; ! call->extract_projections(&callprojs, true); Unique_Node_List wl; Node* init_mem = call->in(TypeFunc::Memory); Node* final_mem = final_state->in(TypeFunc::Memory); Node* final_ctl = final_state->in(TypeFunc::Control); Node* final_io = final_state->in(TypeFunc::I_O); // Replace all the old call edges with the edges from the inlining result ! if (callprojs.fallthrough_catchproj != NULL) { ! C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl); } ! if (callprojs.fallthrough_memproj != NULL) { if (final_mem->is_MergeMem()) { // Parser's exits MergeMem was not transformed but may be optimized final_mem = _gvn.transform(final_mem); } ! C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem); add_mergemem_users_to_worklist(wl, final_mem); } ! if (callprojs.fallthrough_ioproj != NULL) { ! C->gvn_replace_by(callprojs.fallthrough_ioproj, final_io); } // Replace the result with the new result if it exists and is used ! if (callprojs.resproj != NULL && result != NULL) { ! C->gvn_replace_by(callprojs.resproj, result); } if (ejvms == NULL) { // No exception edges to simply kill off those paths ! if (callprojs.catchall_catchproj != NULL) { ! C->gvn_replace_by(callprojs.catchall_catchproj, C->top()); } ! if (callprojs.catchall_memproj != NULL) { ! C->gvn_replace_by(callprojs.catchall_memproj, C->top()); } ! if (callprojs.catchall_ioproj != NULL) { ! C->gvn_replace_by(callprojs.catchall_ioproj, C->top()); } // Replace the old exception object with top ! if (callprojs.exobj != NULL) { ! C->gvn_replace_by(callprojs.exobj, C->top()); } } else { GraphKit ekit(ejvms); // Load my combined exception state into the kit, with all phis transformed: SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states(); replaced_nodes_exception = ex_map->replaced_nodes(); Node* ex_oop = ekit.use_exception_state(ex_map); ! if (callprojs.catchall_catchproj != NULL) { ! C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control()); ex_ctl = ekit.control(); } ! if (callprojs.catchall_memproj != NULL) { Node* ex_mem = ekit.reset_memory(); ! C->gvn_replace_by(callprojs.catchall_memproj, ex_mem); add_mergemem_users_to_worklist(wl, ex_mem); } ! if (callprojs.catchall_ioproj != NULL) { ! C->gvn_replace_by(callprojs.catchall_ioproj, ekit.i_o()); } // Replace the old exception object with the newly created one ! if (callprojs.exobj != NULL) { ! C->gvn_replace_by(callprojs.exobj, ex_oop); } } // Disconnect the call from the graph call->disconnect_inputs(NULL, C); --- 1978,2057 ---- Node* ex_ctl = top(); SafePointNode* final_state = stop(); // Find all the needed outputs of this call ! CallProjections* callprojs = call->extract_projections(true); Unique_Node_List wl; Node* init_mem = call->in(TypeFunc::Memory); Node* final_mem = final_state->in(TypeFunc::Memory); Node* final_ctl = final_state->in(TypeFunc::Control); Node* final_io = final_state->in(TypeFunc::I_O); // Replace all the old call edges with the edges from the inlining result ! if (callprojs->fallthrough_catchproj != NULL) { ! C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl); } ! if (callprojs->fallthrough_memproj != NULL) { if (final_mem->is_MergeMem()) { // Parser's exits MergeMem was not transformed but may be optimized final_mem = _gvn.transform(final_mem); } ! C->gvn_replace_by(callprojs->fallthrough_memproj, final_mem); add_mergemem_users_to_worklist(wl, final_mem); } ! if (callprojs->fallthrough_ioproj != NULL) { ! C->gvn_replace_by(callprojs->fallthrough_ioproj, final_io); } // Replace the result with the new result if it exists and is used ! if (callprojs->resproj[0] != NULL && result != NULL) { ! assert(callprojs->nb_resproj == 1, "unexpected number of results"); ! C->gvn_replace_by(callprojs->resproj[0], result); } if (ejvms == NULL) { // No exception edges to simply kill off those paths ! if (callprojs->catchall_catchproj != NULL) { ! C->gvn_replace_by(callprojs->catchall_catchproj, C->top()); } ! if (callprojs->catchall_memproj != NULL) { ! C->gvn_replace_by(callprojs->catchall_memproj, C->top()); } ! if (callprojs->catchall_ioproj != NULL) { ! C->gvn_replace_by(callprojs->catchall_ioproj, C->top()); } // Replace the old exception object with top ! if (callprojs->exobj != NULL) { ! C->gvn_replace_by(callprojs->exobj, C->top()); } } else { GraphKit ekit(ejvms); // Load my combined exception state into the kit, with all phis transformed: SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states(); replaced_nodes_exception = ex_map->replaced_nodes(); Node* ex_oop = ekit.use_exception_state(ex_map); ! if (callprojs->catchall_catchproj != NULL) { ! C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control()); ex_ctl = ekit.control(); } ! if (callprojs->catchall_memproj != NULL) { Node* ex_mem = ekit.reset_memory(); ! C->gvn_replace_by(callprojs->catchall_memproj, ex_mem); add_mergemem_users_to_worklist(wl, ex_mem); } ! if (callprojs->catchall_ioproj != NULL) { ! C->gvn_replace_by(callprojs->catchall_ioproj, ekit.i_o()); } // Replace the old exception object with the newly created one ! if (callprojs->exobj != NULL) { ! C->gvn_replace_by(callprojs->exobj, ex_oop); } } // Disconnect the call from the graph call->disconnect_inputs(NULL, C);
*** 1967,1977 **** // optimizer doesn't like that. while (wl.size() > 0) { _gvn.transform(wl.pop()); } ! if (callprojs.fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) { replaced_nodes.apply(C, final_ctl); } if (!ex_ctl->is_top() && do_replaced_nodes) { replaced_nodes_exception.apply(C, ex_ctl); } --- 2061,2071 ---- // optimizer doesn't like that. while (wl.size() > 0) { _gvn.transform(wl.pop()); } ! if (callprojs->fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) { replaced_nodes.apply(C, final_ctl); } if (!ex_ctl->is_top() && do_replaced_nodes) { replaced_nodes_exception.apply(C, ex_ctl); }
*** 2144,2156 **** void GraphKit::round_double_arguments(ciMethod* dest_method) { // (Note: TypeFunc::make has a cache that makes this fast.) const TypeFunc* tf = TypeFunc::make(dest_method); ! int nargs = tf->domain()->cnt() - TypeFunc::Parms; for (int j = 0; j < nargs; j++) { ! const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms); if( targ->basic_type() == T_DOUBLE ) { // If any parameters are doubles, they must be rounded before // the call, dstore_rounding does gvn.transform Node *arg = argument(j); arg = dstore_rounding(arg); --- 2238,2250 ---- void GraphKit::round_double_arguments(ciMethod* dest_method) { // (Note: TypeFunc::make has a cache that makes this fast.) const TypeFunc* tf = TypeFunc::make(dest_method); ! int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms; for (int j = 0; j < nargs; j++) { ! const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms); if( targ->basic_type() == T_DOUBLE ) { // If any parameters are doubles, they must be rounded before // the call, dstore_rounding does gvn.transform Node *arg = argument(j); arg = dstore_rounding(arg);
*** 2203,2213 **** } if (speculative != current_type->speculative()) { // Build a type with a speculative type (what we think we know // about the type but will need a guard when we use it) ! const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative); // We're changing the type, we need a new CheckCast node to carry // the new type. The new type depends on the control: what // profiling tells us is only valid from here as far as we can // tell. Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type)); --- 2297,2307 ---- } if (speculative != current_type->speculative()) { // Build a type with a speculative type (what we think we know // about the type but will need a guard when we use it) ! const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative); // We're changing the type, we need a new CheckCast node to carry // the new type. The new type depends on the control: what // profiling tells us is only valid from here as far as we can // tell. Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
*** 2268,2282 **** void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) { if (!UseTypeSpeculation) { return; } const TypeFunc* tf = TypeFunc::make(dest_method); ! int nargs = tf->domain()->cnt() - TypeFunc::Parms; int skip = Bytecodes::has_receiver(bc) ? 1 : 0; for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) { ! const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms); ! if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) { ProfilePtrKind ptr_kind = ProfileMaybeNull; ciKlass* better_type = NULL; if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) { record_profile_for_speculation(argument(j), better_type, ptr_kind); } --- 2362,2376 ---- void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) { if (!UseTypeSpeculation) { return; } const TypeFunc* tf = TypeFunc::make(dest_method); ! int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms; int skip = Bytecodes::has_receiver(bc) ? 1 : 0; for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) { ! const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms); ! if (targ->isa_oopptr()) { ProfilePtrKind ptr_kind = ProfileMaybeNull; ciKlass* better_type = NULL; if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) { record_profile_for_speculation(argument(j), better_type, ptr_kind); }
*** 2787,2815 **** Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass, float prob, Node* *casted_receiver) { const TypeKlassPtr* tklass = TypeKlassPtr::make(klass); Node* recv_klass = load_object_klass(receiver); ! Node* want_klass = makecon(tklass); ! Node* cmp = _gvn.transform( new CmpPNode(recv_klass, want_klass) ); ! Node* bol = _gvn.transform( new BoolNode(cmp, BoolTest::eq) ); ! IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN); ! set_control( _gvn.transform( new IfTrueNode (iff) )); ! Node* fail = _gvn.transform( new IfFalseNode(iff) ); ! const TypeOopPtr* recv_xtype = tklass->as_instance_type(); assert(recv_xtype->klass_is_exact(), ""); // Subsume downstream occurrences of receiver with a cast to // recv_xtype, since now we know what the type will be. Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype); ! (*casted_receiver) = _gvn.transform(cast); // (User must make the replace_in_map call.) return fail; } //------------------------------subtype_check_receiver------------------------- Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass, Node** casted_receiver) { const TypeKlassPtr* tklass = TypeKlassPtr::make(klass); Node* recv_klass = load_object_klass(receiver); --- 2881,2920 ---- Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass, float prob, Node* *casted_receiver) { const TypeKlassPtr* tklass = TypeKlassPtr::make(klass); Node* recv_klass = load_object_klass(receiver); ! Node* fail = type_check(recv_klass, tklass, prob); const TypeOopPtr* recv_xtype = tklass->as_instance_type(); assert(recv_xtype->klass_is_exact(), ""); // Subsume downstream occurrences of receiver with a cast to // recv_xtype, since now we know what the type will be. Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype); ! Node* res = _gvn.transform(cast); ! if (recv_xtype->is_valuetypeptr() && recv_xtype->value_klass()->is_scalarizable()) { ! assert(!gvn().type(res)->maybe_null(), "receiver should never be null"); ! res = ValueTypeNode::make_from_oop(this, res, recv_xtype->value_klass()); ! } ! ! (*casted_receiver) = res; // (User must make the replace_in_map call.) return fail; } + Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass, + float prob) { + Node* want_klass = makecon(tklass); + Node* cmp = _gvn.transform( new CmpPNode(recv_klass, want_klass)); + Node* bol = _gvn.transform( new BoolNode(cmp, BoolTest::eq) ); + IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN); + set_control( _gvn.transform( new IfTrueNode (iff))); + Node* fail = _gvn.transform( new IfFalseNode(iff)); + return fail; + } + //------------------------------subtype_check_receiver------------------------- Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass, Node** casted_receiver) { const TypeKlassPtr* tklass = TypeKlassPtr::make(klass); Node* recv_klass = load_object_klass(receiver);
*** 3030,3043 **** data = method()->method_data()->bci_to_data(bci()); } bool speculative_not_null = false; bool never_see_null = (ProfileDynamicTypes // aggressive use of profile && seems_never_null(obj, data, speculative_not_null)); // Null check; get casted pointer; set region slot 3 Node* null_ctl = top(); ! Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); // If not_null_obj is dead, only null-path is taken if (stopped()) { // Doing instance-of on a NULL? set_control(null_ctl); return intcon(0); --- 3135,3149 ---- data = method()->method_data()->bci_to_data(bci()); } bool speculative_not_null = false; bool never_see_null = (ProfileDynamicTypes // aggressive use of profile && seems_never_null(obj, data, speculative_not_null)); + bool is_value = obj->is_ValueType(); // Null check; get casted pointer; set region slot 3 Node* null_ctl = top(); ! Node* not_null_obj = is_value ? obj : null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); // If not_null_obj is dead, only null-path is taken if (stopped()) { // Doing instance-of on a NULL? set_control(null_ctl); return intcon(0);
*** 3051,3089 **** region->del_req(_null_path); phi ->del_req(_null_path); } // Do we know the type check always succeed? ! bool known_statically = false; ! if (_gvn.type(superklass)->singleton()) { ! ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass(); ! ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass(); ! if (subk != NULL && subk->is_loaded()) { ! int static_res = C->static_subtype_check(superk, subk); ! known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false); } - } ! if (!known_statically) { ! const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); ! // We may not have profiling here or it may not help us. If we ! // have a speculative type use it to perform an exact cast. ! ciKlass* spec_obj_type = obj_type->speculative_type(); ! if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) { ! Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace); ! if (stopped()) { // Profile disagrees with this path. ! set_control(null_ctl); // Null is the only remaining possibility. ! return intcon(0); ! } ! if (cast_obj != NULL) { ! not_null_obj = cast_obj; } } } // Load the object's klass ! Node* obj_klass = load_object_klass(not_null_obj); // Generate the subtype check Node* not_subtype_ctrl = gen_subtype_check(obj_klass, superklass); // Plug in the success path to the general merge in slot 1. --- 3157,3205 ---- region->del_req(_null_path); phi ->del_req(_null_path); } // Do we know the type check always succeed? ! if (!is_value) { ! bool known_statically = false; ! if (_gvn.type(superklass)->singleton()) { ! ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass(); ! ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass(); ! if (subk != NULL && subk->is_loaded()) { ! int static_res = C->static_subtype_check(superk, subk); ! known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false); ! } } ! if (!known_statically) { ! const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); ! // We may not have profiling here or it may not help us. If we ! // have a speculative type use it to perform an exact cast. ! ciKlass* spec_obj_type = obj_type->speculative_type(); ! if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) { ! Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace); ! if (stopped()) { // Profile disagrees with this path. ! set_control(null_ctl); // Null is the only remaining possibility. ! return intcon(0); ! } ! if (cast_obj != NULL && ! // A value that's sometimes null is not something we can optimize well ! !(cast_obj->is_ValueType() && null_ctl != top())) { ! not_null_obj = cast_obj; ! is_value = not_null_obj->is_ValueType(); ! } } } } // Load the object's klass ! Node* obj_klass = NULL; ! if (is_value) { ! obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->value_klass())); ! } else { ! obj_klass = load_object_klass(not_null_obj); ! } // Generate the subtype check Node* not_subtype_ctrl = gen_subtype_check(obj_klass, superklass); // Plug in the success path to the general merge in slot 1.
*** 3099,3109 **** record_for_igvn(region); // If we know the type check always succeeds then we don't use the // profiling data at this bytecode. Don't lose it, feed it to the // type system as a speculative type. ! if (safe_for_replace) { Node* casted_obj = record_profiled_receiver_for_speculation(obj); replace_in_map(obj, casted_obj); } return _gvn.transform(phi); --- 3215,3225 ---- record_for_igvn(region); // If we know the type check always succeeds then we don't use the // profiling data at this bytecode. Don't lose it, feed it to the // type system as a speculative type. ! if (safe_for_replace && !is_value) { Node* casted_obj = record_profiled_receiver_for_speculation(obj); replace_in_map(obj, casted_obj); } return _gvn.transform(phi);
*** 3114,3148 **** // array store bytecode. Stack must be as-if BEFORE doing the bytecode so the // uncommon-trap paths work. Adjust stack after this call. // If failure_control is supplied and not null, it is filled in with // the control edge for the cast failure. Otherwise, an appropriate // uncommon trap or exception is thrown. ! Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, ! Node* *failure_control) { kill_dead_locals(); // Benefit all the uncommon traps ! const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr(); ! const Type *toop = TypeOopPtr::make_from_klass(tk->klass()); // Fast cutout: Check the case that the cast is vacuously true. // This detects the common cases where the test will short-circuit // away completely. We do this before we perform the null check, // because if the test is going to turn into zero code, we don't // want a residual null check left around. (Causes a slowdown, // for example, in some objArray manipulations, such as a[i]=a[j].) if (tk->singleton()) { ! const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr(); ! if (objtp != NULL && objtp->klass() != NULL) { ! switch (C->static_subtype_check(tk->klass(), objtp->klass())) { case Compile::SSC_always_true: // If we know the type check always succeed then we don't use // the profiling data at this bytecode. Don't lose it, feed it // to the type system as a speculative type. ! return record_profiled_receiver_for_speculation(obj); case Compile::SSC_always_false: ! // It needs a null check because a null will *pass* the cast check. ! // A non-null value will always produce an exception. ! return null_assert(obj); } } } ciProfileData* data = NULL; --- 3230,3290 ---- // array store bytecode. Stack must be as-if BEFORE doing the bytecode so the // uncommon-trap paths work. Adjust stack after this call. // If failure_control is supplied and not null, it is filled in with // the control edge for the cast failure. Otherwise, an appropriate // uncommon trap or exception is thrown. ! Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_control, bool never_null) { kill_dead_locals(); // Benefit all the uncommon traps ! const TypeKlassPtr* tk = _gvn.type(superklass)->is_klassptr(); ! const TypeOopPtr* toop = TypeOopPtr::make_from_klass(tk->klass()); ! assert(!never_null || toop->is_valuetypeptr(), "must be a value type pointer"); ! bool is_value = obj->is_ValueType(); // Fast cutout: Check the case that the cast is vacuously true. // This detects the common cases where the test will short-circuit // away completely. We do this before we perform the null check, // because if the test is going to turn into zero code, we don't // want a residual null check left around. (Causes a slowdown, // for example, in some objArray manipulations, such as a[i]=a[j].) if (tk->singleton()) { ! ciKlass* klass = NULL; ! if (is_value) { ! klass = _gvn.type(obj)->value_klass(); ! } else { ! const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr(); ! if (objtp != NULL) { ! klass = objtp->klass(); ! } ! } ! if (klass != NULL) { ! switch (C->static_subtype_check(tk->klass(), klass)) { case Compile::SSC_always_true: // If we know the type check always succeed then we don't use // the profiling data at this bytecode. Don't lose it, feed it // to the type system as a speculative type. ! if (!is_value) { ! obj = record_profiled_receiver_for_speculation(obj); ! if (never_null) { ! obj = null_check(obj); ! } ! if (toop->is_valuetypeptr() && toop->value_klass()->is_scalarizable() && !gvn().type(obj)->maybe_null()) { ! obj = ValueTypeNode::make_from_oop(this, obj, toop->value_klass()); ! } ! } ! return obj; case Compile::SSC_always_false: ! if (is_value || never_null) { ! if (!is_value) { ! null_check(obj); ! } ! // Value type is never null. Always throw an exception. ! builtin_throw(Deoptimization::Reason_class_check, makecon(TypeKlassPtr::make(klass))); ! return top(); ! } else { ! // It needs a null check because a null will *pass* the cast check. ! return null_assert(obj); ! } } } } ciProfileData* data = NULL;
*** 3157,3176 **** // Make the merge point enum { _obj_path = 1, _null_path, PATH_LIMIT }; RegionNode* region = new RegionNode(PATH_LIMIT); Node* phi = new PhiNode(region, toop); C->set_has_split_ifs(true); // Has chance for split-if optimization // Use null-cast information if it is available bool speculative_not_null = false; bool never_see_null = ((failure_control == NULL) // regular case only && seems_never_null(obj, data, speculative_not_null)); // Null check; get casted pointer; set region slot 3 Node* null_ctl = top(); ! Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); // If not_null_obj is dead, only null-path is taken if (stopped()) { // Doing instance-of on a NULL? set_control(null_ctl); return null(); --- 3299,3328 ---- // Make the merge point enum { _obj_path = 1, _null_path, PATH_LIMIT }; RegionNode* region = new RegionNode(PATH_LIMIT); Node* phi = new PhiNode(region, toop); + _gvn.set_type(region, Type::CONTROL); + _gvn.set_type(phi, toop); + C->set_has_split_ifs(true); // Has chance for split-if optimization // Use null-cast information if it is available bool speculative_not_null = false; bool never_see_null = ((failure_control == NULL) // regular case only && seems_never_null(obj, data, speculative_not_null)); // Null check; get casted pointer; set region slot 3 Node* null_ctl = top(); ! Node* not_null_obj = NULL; ! if (is_value) { ! not_null_obj = obj; ! } else if (never_null) { ! not_null_obj = null_check(obj); ! } else { ! not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); ! } // If not_null_obj is dead, only null-path is taken if (stopped()) { // Doing instance-of on a NULL? set_control(null_ctl); return null();
*** 3184,3204 **** region->del_req(_null_path); phi ->del_req(_null_path); } Node* cast_obj = NULL; ! if (tk->klass_is_exact()) { // The following optimization tries to statically cast the speculative type of the object // (for example obtained during profiling) to the type of the superklass and then do a // dynamic check that the type of the object is what we expect. To work correctly // for checkcast and aastore the type of superklass should be exact. const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); // We may not have profiling here or it may not help us. If we have // a speculative type use it to perform an exact cast. ciKlass* spec_obj_type = obj_type->speculative_type(); if (spec_obj_type != NULL || data != NULL) { cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace); if (cast_obj != NULL) { if (failure_control != NULL) // failure is now impossible (*failure_control) = top(); // adjust the type of the phi to the exact klass: phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR)); --- 3336,3363 ---- region->del_req(_null_path); phi ->del_req(_null_path); } Node* cast_obj = NULL; ! if (!is_value && tk->klass_is_exact()) { // The following optimization tries to statically cast the speculative type of the object // (for example obtained during profiling) to the type of the superklass and then do a // dynamic check that the type of the object is what we expect. To work correctly // for checkcast and aastore the type of superklass should be exact. const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); // We may not have profiling here or it may not help us. If we have // a speculative type use it to perform an exact cast. ciKlass* spec_obj_type = obj_type->speculative_type(); if (spec_obj_type != NULL || data != NULL) { cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace); + if (cast_obj != NULL && cast_obj->is_ValueType()) { + if (null_ctl != top()) { + cast_obj = NULL; // A value that's sometimes null is not something we can optimize well + } else { + return cast_obj; + } + } if (cast_obj != NULL) { if (failure_control != NULL) // failure is now impossible (*failure_control) = top(); // adjust the type of the phi to the exact klass: phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
*** 3206,3222 **** } } if (cast_obj == NULL) { // Load the object's klass ! Node* obj_klass = load_object_klass(not_null_obj); // Generate the subtype check Node* not_subtype_ctrl = gen_subtype_check( obj_klass, superklass ); // Plug in success path into the merge ! cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop)); // Failure path ends in uncommon trap (or may be dead - failure impossible) if (failure_control == NULL) { if (not_subtype_ctrl != top()) { // If failure is possible PreserveJVMState pjvms(this); set_control(not_subtype_ctrl); --- 3365,3386 ---- } } if (cast_obj == NULL) { // Load the object's klass ! Node* obj_klass = NULL; ! if (is_value) { ! obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->value_klass())); ! } else { ! obj_klass = load_object_klass(not_null_obj); ! } // Generate the subtype check Node* not_subtype_ctrl = gen_subtype_check( obj_klass, superklass ); // Plug in success path into the merge ! cast_obj = is_value ? not_null_obj : _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop)); // Failure path ends in uncommon trap (or may be dead - failure impossible) if (failure_control == NULL) { if (not_subtype_ctrl != top()) { // If failure is possible PreserveJVMState pjvms(this); set_control(not_subtype_ctrl);
*** 3245,3257 **** // Return final merged results set_control( _gvn.transform(region) ); record_for_igvn(region); ! return record_profiled_receiver_for_speculation(res); } //------------------------------next_monitor----------------------------------- // What number should be given to the next monitor? int GraphKit::next_monitor() { int current = jvms()->monitor_depth()* C->sync_stack_slots(); int next = current + C->sync_stack_slots(); --- 3409,3570 ---- // Return final merged results set_control( _gvn.transform(region) ); record_for_igvn(region); ! bool not_null_free = !toop->can_be_value_type(); ! bool not_flattenable = !ValueArrayFlatten || not_null_free || (toop->is_valuetypeptr() && !toop->value_klass()->flatten_array()); ! if (EnableValhalla && not_flattenable) { ! // Check if obj has been loaded from an array ! obj = obj->isa_DecodeN() ? obj->in(1) : obj; ! Node* array = NULL; ! if (obj->isa_Load()) { ! Node* address = obj->in(MemNode::Address); ! if (address->isa_AddP()) { ! array = address->as_AddP()->in(AddPNode::Base); ! } ! } else if (obj->is_Phi()) { ! Node* region = obj->in(0); ! if (region->req() == 3 && region->in(2) != NULL) { ! IfNode* iff = region->in(2)->in(0)->isa_If(); ! if (iff != NULL) { ! iff->is_flattened_array_check(&_gvn, array); ! } ! } ! } ! if (array != NULL) { ! const TypeAryPtr* ary_t = _gvn.type(array)->isa_aryptr(); ! if (ary_t != NULL) { ! if (!ary_t->is_not_null_free() && not_null_free) { ! // Casting array element to a non-inline-type, mark array as not null-free. ! Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_null_free())); ! replace_in_map(array, cast); ! } else if (!ary_t->is_not_flat()) { ! // Casting array element to a non-flattenable type, mark array as not flat. ! Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_flat())); ! replace_in_map(array, cast); ! } ! } ! } ! } ! ! if (!is_value) { ! res = record_profiled_receiver_for_speculation(res); ! if (toop->is_valuetypeptr() && toop->value_klass()->is_scalarizable() && !gvn().type(res)->maybe_null()) { ! res = ValueTypeNode::make_from_oop(this, res, toop->value_klass()); ! } ! } ! return res; ! } ! ! Node* GraphKit::is_always_locked(Node* obj) { ! Node* mark_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); ! Node* mark = make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); ! Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern); ! return _gvn.transform(new AndXNode(mark, value_mask)); ! } ! ! Node* GraphKit::is_value_mirror(Node* mirror) { ! Node* p = basic_plus_adr(mirror, java_lang_Class::inline_mirror_offset_in_bytes()); ! Node* inline_mirror = access_load_at(mirror, p, _gvn.type(p)->is_ptr(), TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR), T_OBJECT, IN_HEAP); ! Node* cmp = _gvn.transform(new CmpPNode(mirror, inline_mirror)); ! return _gvn.transform(new BoolNode(cmp, BoolTest::eq)); ! } ! ! // Deoptimize if 'obj' is a value type ! void GraphKit::gen_value_type_guard(Node* obj, int nargs) { ! assert(EnableValhalla, "should only be used if value types are enabled"); ! Node* bol = NULL; ! if (obj->is_ValueTypeBase()) { ! bol = intcon(0); ! } else { ! Node* is_value = is_always_locked(obj); ! Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern); ! Node* cmp = _gvn.transform(new CmpXNode(is_value, value_mask)); ! bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne)); ! } ! { BuildCutout unless(this, bol, PROB_MAX); ! inc_sp(nargs); ! uncommon_trap(Deoptimization::Reason_class_check, ! Deoptimization::Action_none); ! } ! } ! ! // Check if 'ary' is a null-free value type array ! Node* GraphKit::gen_null_free_array_check(Node* ary) { ! assert(EnableValhalla, "should only be used if value types are enabled"); ! // Extract null free property from klass pointer ! Node* k_adr = basic_plus_adr(ary, oopDesc::klass_offset_in_bytes()); ! const TypePtr* k_adr_type = k_adr->bottom_type()->isa_ptr(); ! Node* klass = NULL; ! if (k_adr_type->is_ptr_to_narrowklass()) { ! klass = _gvn.transform(new LoadNKlassNode(NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT->make_narrowklass(), MemNode::unordered)); ! } else { ! klass = _gvn.transform(new LoadKlassNode(NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT, MemNode::unordered)); ! } ! Node* null_free = _gvn.transform(new GetNullFreePropertyNode(klass)); ! Node* cmp = NULL; ! if (_gvn.type(klass)->isa_klassptr()) { ! cmp = _gvn.transform(new CmpLNode(null_free, zerocon(T_LONG))); ! } else { ! cmp = _gvn.transform(new CmpINode(null_free, zerocon(T_INT))); ! } ! return _gvn.transform(new BoolNode(cmp, BoolTest::eq)); ! } ! ! // Deoptimize if 'ary' is a null-free value type array and 'val' is null ! Node* GraphKit::gen_value_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace) { ! const Type* val_t = _gvn.type(val); ! if (val->is_ValueType() || !TypePtr::NULL_PTR->higher_equal(val_t)) { ! return ary; // Never null ! } ! RegionNode* region = new RegionNode(3); ! Node* null_ctl = top(); ! null_check_oop(val, &null_ctl); ! if (null_ctl != top()) { ! PreserveJVMState pjvms(this); ! set_control(null_ctl); ! // Deoptimize if null-free array ! Node* bol = gen_null_free_array_check(ary); ! { BuildCutout unless(this, bol, PROB_MAX); ! inc_sp(nargs); ! uncommon_trap(Deoptimization::Reason_null_check, ! Deoptimization::Action_none); ! } ! region->init_req(1, control()); ! } ! region->init_req(2, control()); ! set_control(_gvn.transform(region)); ! record_for_igvn(region); ! const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr(); ! if (val_t == TypePtr::NULL_PTR && !ary_t->is_not_null_free()) { ! // Since we were just successfully storing null, the array can't be null free. ! ary_t = ary_t->cast_to_not_null_free(); ! Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t)); ! if (safe_for_replace) { ! replace_in_map(ary, cast); ! } ! ary = cast; ! } ! return ary; ! } ! ! Node* GraphKit::load_lh_array_tag(Node* kls) { ! Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset())); ! Node* layout_val = _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), lhp, lhp->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered)); ! ! return _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift))); } + + Node* GraphKit::gen_lh_array_test(Node* kls, unsigned int lh_value) { + Node* layout_val = load_lh_array_tag(kls); + Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(lh_value))); + return cmp; + } + + //------------------------------next_monitor----------------------------------- // What number should be given to the next monitor? int GraphKit::next_monitor() { int current = jvms()->monitor_depth()* C->sync_stack_slots(); int next = current + C->sync_stack_slots();
*** 3325,3334 **** --- 3638,3648 ---- // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces assert(SynchronizationEntryBCI == InvocationEntryBci, ""); if( !GenerateSynchronizationCode ) return NULL; // Not locking things? + if (stopped()) // Dead monitor? return NULL; assert(dead_locals_are_killed(), "should kill locals before sync. point");
*** 3399,3408 **** --- 3713,3723 ---- return; if (stopped()) { // Dead monitor? map()->pop_monitor(); // Kill monitor from debug info return; } + assert(!obj->is_ValueTypeBase(), "should not unlock on value type"); // Memory barrier to avoid floating things down past the locked region insert_mem_bar(Op_MemBarReleaseLock); const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
*** 3439,3450 **** // almost always feature constant types. Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) { const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr(); if (!StressReflectiveCode && inst_klass != NULL) { ciKlass* klass = inst_klass->klass(); bool xklass = inst_klass->klass_is_exact(); ! if (xklass || klass->is_array_klass()) { jint lhelper = klass->layout_helper(); if (lhelper != Klass::_lh_neutral_value) { constant_value = lhelper; return (Node*) NULL; } --- 3754,3771 ---- // almost always feature constant types. Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) { const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr(); if (!StressReflectiveCode && inst_klass != NULL) { ciKlass* klass = inst_klass->klass(); + assert(klass != NULL, "klass should not be NULL"); bool xklass = inst_klass->klass_is_exact(); ! bool can_be_flattened = false; ! if (ValueArrayFlatten && klass->is_obj_array_klass()) { ! ciKlass* elem = klass->as_obj_array_klass()->element_klass(); ! can_be_flattened = elem->is_java_lang_Object() || elem->is_interface() || (elem->is_valuetype() && !klass->as_array_klass()->storage_properties().is_null_free()); ! } ! if (xklass || (klass->is_array_klass() && !can_be_flattened)) { jint lhelper = klass->layout_helper(); if (lhelper != Klass::_lh_neutral_value) { constant_value = lhelper; return (Node*) NULL; }
*** 3502,3522 **** // and link them properly (as a group) to the InitializeNode. assert(init->in(InitializeNode::Memory) == malloc, ""); MergeMemNode* minit_in = MergeMemNode::make(malloc); init->set_req(InitializeNode::Memory, minit_in); record_for_igvn(minit_in); // fold it up later, if possible Node* minit_out = memory(rawidx); assert(minit_out->is_Proj() && minit_out->in(0) == init, ""); // Add an edge in the MergeMem for the header fields so an access // to one of those has correct memory state set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes()))); set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes()))); if (oop_type->isa_aryptr()) { ! const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot); ! int elemidx = C->get_alias_index(telemref); ! hook_memory_on_init(*this, elemidx, minit_in, minit_out); } else if (oop_type->isa_instptr()) { ciInstanceKlass* ik = oop_type->klass()->as_instance_klass(); for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) { ciField* field = ik->nonstatic_field_at(i); if (field->offset() >= TrackedInitializationLimit * HeapWordSize) continue; // do not bother to track really large numbers of fields --- 3823,3868 ---- // and link them properly (as a group) to the InitializeNode. assert(init->in(InitializeNode::Memory) == malloc, ""); MergeMemNode* minit_in = MergeMemNode::make(malloc); init->set_req(InitializeNode::Memory, minit_in); record_for_igvn(minit_in); // fold it up later, if possible + _gvn.set_type(minit_in, Type::MEMORY); Node* minit_out = memory(rawidx); assert(minit_out->is_Proj() && minit_out->in(0) == init, ""); // Add an edge in the MergeMem for the header fields so an access // to one of those has correct memory state set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes()))); set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes()))); if (oop_type->isa_aryptr()) { ! const TypeAryPtr* arytype = oop_type->is_aryptr(); ! if (arytype->klass()->is_value_array_klass()) { ! // Initially all flattened array accesses share a single slice ! // but that changes after parsing. Prepare the memory graph so ! // it can optimize flattened array accesses properly once they ! // don't share a single slice. ! assert(C->flattened_accesses_share_alias(), "should be set at parse time"); ! C->set_flattened_accesses_share_alias(false); ! ciValueArrayKlass* vak = arytype->klass()->as_value_array_klass(); ! ciValueKlass* vk = vak->element_klass()->as_value_klass(); ! for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) { ! ciField* field = vk->nonstatic_field_at(i); ! if (field->offset() >= TrackedInitializationLimit * HeapWordSize) ! continue; // do not bother to track really large numbers of fields ! int off_in_vt = field->offset() - vk->first_field_offset(); ! const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot); ! int fieldidx = C->get_alias_index(adr_type, true); ! hook_memory_on_init(*this, fieldidx, minit_in, minit_out); ! } ! C->set_flattened_accesses_share_alias(true); ! hook_memory_on_init(*this, C->get_alias_index(TypeAryPtr::VALUES), minit_in, minit_out); ! } else { ! const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot); ! int elemidx = C->get_alias_index(telemref); ! hook_memory_on_init(*this, elemidx, minit_in, minit_out); ! } } else if (oop_type->isa_instptr()) { + set_memory(minit_out, C->get_alias_index(oop_type)); // mark word ciInstanceKlass* ik = oop_type->klass()->as_instance_klass(); for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) { ciField* field = ik->nonstatic_field_at(i); if (field->offset() >= TrackedInitializationLimit * HeapWordSize) continue; // do not bother to track really large numbers of fields
*** 3563,3580 **** // - If 'return_size_val', report the total object size to the caller. // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize) Node* GraphKit::new_instance(Node* klass_node, Node* extra_slow_test, Node* *return_size_val, ! bool deoptimize_on_exception) { // Compute size in doublewords // The size is always an integral number of doublewords, represented // as a positive bytewise size stored in the klass's layout_helper. // The layout_helper also encodes (in a low bit) the need for a slow path. jint layout_con = Klass::_lh_neutral_value; Node* layout_val = get_layout_helper(klass_node, layout_con); ! int layout_is_con = (layout_val == NULL); if (extra_slow_test == NULL) extra_slow_test = intcon(0); // Generate the initial go-slow test. It's either ALWAYS (return a // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective // case) a computed value derived from the layout_helper. --- 3909,3927 ---- // - If 'return_size_val', report the total object size to the caller. // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize) Node* GraphKit::new_instance(Node* klass_node, Node* extra_slow_test, Node* *return_size_val, ! bool deoptimize_on_exception, ! ValueTypeBaseNode* value_node) { // Compute size in doublewords // The size is always an integral number of doublewords, represented // as a positive bytewise size stored in the klass's layout_helper. // The layout_helper also encodes (in a low bit) the need for a slow path. jint layout_con = Klass::_lh_neutral_value; Node* layout_val = get_layout_helper(klass_node, layout_con); ! bool layout_is_con = (layout_val == NULL); if (extra_slow_test == NULL) extra_slow_test = intcon(0); // Generate the initial go-slow test. It's either ALWAYS (return a // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective // case) a computed value derived from the layout_helper.
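[Editor's note] As the comment in new_instance says, an instance klass's layout_helper stores the object size as a positive byte count and uses a low bit to flag the need for a slow allocation path. A small sketch of that decoding, assuming the low-bit flag; the constant is my reading of the encoding, not copied from klass.hpp:

    #include <cstdint>
    #include <cstdio>

    // Sketch of the instance layout_helper decoding described above: a positive
    // value is the instance size in bytes, and the lowest bit requests the slow
    // allocation path. Assumed constant, for illustration only.
    static const int kSlowPathBit = 0x1;

    int main() {
        int32_t layout_helper = 0x31;   // hypothetical: 48-byte instance, slow path required
        if (layout_helper > 0) {
            bool needs_slow_path = (layout_helper & kSlowPathBit) != 0;
            int  size_in_bytes   = layout_helper & ~kSlowPathBit;
            std::printf("size=%d bytes, slow path=%s\n",
                        size_in_bytes, needs_slow_path ? "yes" : "no");
        }
        return 0;
    }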
*** 3621,3654 **** const TypeOopPtr* oop_type = tklass->as_instance_type(); // Now generate allocation code // The entire memory state is needed for slow path of the allocation ! // since GC and deoptimization can happened. Node *mem = reset_memory(); set_all_memory(mem); // Create new memory state AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP), control(), mem, i_o(), size, klass_node, ! initial_slow_test); return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception); } //-------------------------------new_array------------------------------------- ! // helper for both newarray and anewarray // The 'length' parameter is (obviously) the length of the array. // See comments on new_instance for the meaning of the other arguments. Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) Node* length, // number of array elements int nargs, // number of arguments to push back for uncommon trap Node* *return_size_val, ! bool deoptimize_on_exception) { jint layout_con = Klass::_lh_neutral_value; Node* layout_val = get_layout_helper(klass_node, layout_con); ! int layout_is_con = (layout_val == NULL); if (!layout_is_con && !StressReflectiveCode && !too_many_traps(Deoptimization::Reason_class_check)) { // This is a reflective array creation site. // Optimistically assume that it is a subtype of Object[], --- 3968,4010 ---- const TypeOopPtr* oop_type = tklass->as_instance_type(); // Now generate allocation code // The entire memory state is needed for slow path of the allocation ! // since GC and deoptimization can happen. Node *mem = reset_memory(); set_all_memory(mem); // Create new memory state AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP), control(), mem, i_o(), size, klass_node, ! initial_slow_test, value_node); return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception); } + // With compressed oops, the 64 bit init value for non flattened value + // arrays is built from 2 32 bit compressed oops + static Node* raw_default_for_coops(Node* default_value, GraphKit& kit) { + Node* lower = kit.gvn().transform(new CastP2XNode(kit.control(), default_value)); + Node* upper = kit.gvn().transform(new LShiftLNode(lower, kit.intcon(32))); + return kit.gvn().transform(new OrLNode(lower, upper)); + } + //-------------------------------new_array------------------------------------- ! // helper for newarray and anewarray // The 'length' parameter is (obviously) the length of the array. // See comments on new_instance for the meaning of the other arguments. Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) Node* length, // number of array elements int nargs, // number of arguments to push back for uncommon trap Node* *return_size_val, ! bool deoptimize_on_exception, ! Node* elem_mirror) { jint layout_con = Klass::_lh_neutral_value; Node* layout_val = get_layout_helper(klass_node, layout_con); ! bool layout_is_con = (layout_val == NULL); if (!layout_is_con && !StressReflectiveCode && !too_many_traps(Deoptimization::Reason_class_check)) { // This is a reflective array creation site. // Optimistically assume that it is a subtype of Object[],
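[Editor's note] raw_default_for_coops widens a 32-bit compressed default oop into a 64-bit initialization word by OR-ing the value with itself shifted left by 32, so a non-flattened value array can be pre-filled with one 64-bit pattern covering two narrow-oop elements. A standalone sketch of the same bit manipulation, with plain integers standing in for the C2 nodes:

    #include <cstdint>
    #include <cstdio>

    // Replicate a 32-bit compressed oop into both halves of a 64-bit word, the
    // same pattern raw_default_for_coops builds with CastP2X/LShiftL/OrL above.
    static uint64_t raw_default_for_coops(uint32_t narrow_default_oop) {
        uint64_t lower = narrow_default_oop;
        return lower | (lower << 32);
    }

    int main() {
        uint32_t narrow = 0x0000BEEF;   // hypothetical compressed oop value
        std::printf("%016llx\n", (unsigned long long)raw_default_for_coops(narrow));
        // prints 0000beef0000beef
        return 0;
    }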
*** 3674,3684 **** int fast_size_limit = FastAllocateSizeLimit; if (layout_is_con) { assert(!StressReflectiveCode, "stress mode does not use these paths"); // Increase the size limit if we have exact knowledge of array type. int log2_esize = Klass::layout_helper_log2_element_size(layout_con); ! fast_size_limit <<= (LogBytesPerLong - log2_esize); } Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) ); Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) ); --- 4030,4040 ---- int fast_size_limit = FastAllocateSizeLimit; if (layout_is_con) { assert(!StressReflectiveCode, "stress mode does not use these paths"); // Increase the size limit if we have exact knowledge of array type. int log2_esize = Klass::layout_helper_log2_element_size(layout_con); ! fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0); } Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) ); Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
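[Editor's note] The MAX2 clamp keeps the fast-path size-limit shift from going negative: the limit is rescaled from 8-byte units to an element count, and with flattened value-type elements log2_esize can exceed LogBytesPerLong. A sketch of the clamped scaling with illustrative numbers:

    #include <algorithm>
    #include <cstdio>

    // Clamped rescaling of fast_size_limit as in the hunk above: small elements
    // scale the limit up, while elements wider than 8 bytes (possible with
    // flattened value-type elements) keep it unchanged instead of shifting by a
    // negative amount. The limit value is hypothetical.
    int main() {
        const int LogBytesPerLong = 3;       // 8-byte longs
        int fast_size_limit = 0x10000;       // stand-in for FastAllocateSizeLimit

        for (int log2_esize : {0, 2, 3, 4}) {   // 1-, 4-, 8-, 16-byte elements
            int scaled = fast_size_limit << std::max(LogBytesPerLong - log2_esize, 0);
            std::printf("log2_esize=%d -> limit=%d elements\n", log2_esize, scaled);
        }
        return 0;
    }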
*** 3692,3705 **** int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE); // (T_BYTE has the weakest alignment and size restrictions...) if (layout_is_con) { int hsize = Klass::layout_helper_header_size(layout_con); int eshift = Klass::layout_helper_log2_element_size(layout_con); ! BasicType etype = Klass::layout_helper_element_type(layout_con); if ((round_mask & ~right_n_bits(eshift)) == 0) round_mask = 0; // strength-reduce it if it goes away completely ! assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded"); assert(header_size_min <= hsize, "generic minimum is smallest"); header_size_min = hsize; header_size = intcon(hsize + round_mask); } else { Node* hss = intcon(Klass::_lh_header_size_shift); --- 4048,4061 ---- int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE); // (T_BYTE has the weakest alignment and size restrictions...) if (layout_is_con) { int hsize = Klass::layout_helper_header_size(layout_con); int eshift = Klass::layout_helper_log2_element_size(layout_con); ! bool is_value_array = Klass::layout_helper_is_valueArray(layout_con); if ((round_mask & ~right_n_bits(eshift)) == 0) round_mask = 0; // strength-reduce it if it goes away completely ! assert(is_value_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded"); assert(header_size_min <= hsize, "generic minimum is smallest"); header_size_min = hsize; header_size = intcon(hsize + round_mask); } else { Node* hss = intcon(Klass::_lh_header_size_shift);
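[Editor's note] The assert is relaxed for value arrays because a flattened element size need not divide the header size, so the header is no longer guaranteed to be pre-rounded to an element boundary. The retained round_mask strength reduction can be shown in isolation; right_n_bits is modeled locally and the mask value is hypothetical:

    #include <cstdio>

    // If every bit of the rounding mask is already covered by the element-size
    // alignment, rounding the header cannot change anything, so the mask is
    // dropped -- the same test as (round_mask & ~right_n_bits(eshift)) == 0.
    static int right_n_bits(int n) { return (1 << n) - 1; }

    int main() {
        const int round_mask = 7;   // round sizes to 8 bytes (hypothetical)
        for (int eshift = 0; eshift <= 4; eshift++) {
            bool can_drop = (round_mask & ~right_n_bits(eshift)) == 0;
            std::printf("element size %2d bytes -> rounding %s\n",
                        1 << eshift, can_drop ? "unnecessary" : "kept");
        }
        return 0;
    }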
*** 3779,3811 **** } // Now generate allocation code // The entire memory state is needed for slow path of the allocation ! // since GC and deoptimization can happened. Node *mem = reset_memory(); set_all_memory(mem); // Create new memory state if (initial_slow_test->is_Bool()) { // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick. initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn); } // Create the AllocateArrayNode and its result projections ! AllocateArrayNode* alloc ! = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT), ! control(), mem, i_o(), ! size, klass_node, ! initial_slow_test, ! length); // Cast to correct type. Note that the klass_node may be constant or not, // and in the latter case the actual array type will be inexact also. // (This happens via a non-constant argument to inline_native_newArray.) // In any case, the value of klass_node provides the desired array type. const TypeInt* length_type = _gvn.find_int_type(length); - const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type(); if (ary_type->isa_aryptr() && length_type != NULL) { // Try to get a better type than POS for the size ary_type = ary_type->is_aryptr()->cast_to_size(length_type); } --- 4135,4259 ---- } // Now generate allocation code // The entire memory state is needed for slow path of the allocation ! // since GC and deoptimization can happen. Node *mem = reset_memory(); set_all_memory(mem); // Create new memory state if (initial_slow_test->is_Bool()) { // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick. initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn); } + const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type(); + const TypeAryPtr* ary_ptr = ary_type->isa_aryptr(); + const Type* elem = NULL; + ciKlass* elem_klass = NULL; + + // Compute default value and storage properties for value type arrays: + // - null-ok: MyValue.box[] (ciObjArrayKlass "[LMyValue") + // - null-free: MyValue.val[] (ciObjArrayKlass "[QMyValue") + // - null-free, flattened: MyValue.val[] (ciValueArrayKlass "[QMyValue") + Node* storage_properties = NULL; + Node* default_value = NULL; + Node* raw_default_value = NULL; + int props_shift = UseCompressedClassPointers ? oopDesc::narrow_storage_props_shift : oopDesc::wide_storage_props_shift; + if (ary_ptr != NULL && ary_ptr->klass_is_exact()) { + // Array type is known + elem = ary_ptr->elem(); + ciArrayKlass* ary_klass = ary_ptr->klass()->as_array_klass(); + elem_klass = ary_klass->element_klass(); + + ArrayStorageProperties props = ary_klass->storage_properties(); + if (!props.is_empty() && elem_klass->is_valuetype()) { + if (props.is_null_free() && !props.is_flattened()) { + default_value = ValueTypeNode::default_oop(gvn(), elem_klass->as_value_klass()); + if (elem->isa_narrowoop()) { + default_value = _gvn.transform(new EncodePNode(default_value, elem)); + raw_default_value = raw_default_for_coops(default_value, *this); + } else { + raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value)); + } + } + storage_properties = MakeConX(props.encode<NOT_LP64(jint) LP64_ONLY(jlong)>(props_shift)); + } + } + + if (EnableValhalla && (elem == NULL || (elem_klass != NULL && (elem_klass->is_java_lang_Object() || elem_klass->is_valuetype()) && + !ary_type->klass_is_exact()))) { + // Array type is not known, compute default value and storage properties for initialization. 
+ assert(default_value == NULL && raw_default_value == NULL && storage_properties == NULL, "shouldn't be set yet"); + assert(elem_mirror != NULL, "should not be null"); + + Node* r = new RegionNode(4); + default_value = new PhiNode(r, TypeInstPtr::BOTTOM); + storage_properties = new PhiNode(r, TypeX_X); + + Node* empty = MakeConX(ArrayStorageProperties::empty.encode<NOT_LP64(jint) LP64_ONLY(jlong)>(props_shift)); + Node* null_free = MakeConX(ArrayStorageProperties::null_free.encode<NOT_LP64(jint) LP64_ONLY(jlong)>(props_shift)); + Node* flat = MakeConX(ArrayStorageProperties::flattened_and_null_free.encode<NOT_LP64(jint) LP64_ONLY(jlong)>(props_shift)); + + // Check if element mirror is a value mirror + IfNode* iff = create_and_map_if(control(), is_value_mirror(elem_mirror), PROB_FAIR, COUNT_UNKNOWN); + + // Not a value mirror but a box mirror or not a value type array, initialize with all zero + r->init_req(1, _gvn.transform(new IfFalseNode(iff))); + default_value->init_req(1, null()); + storage_properties->init_req(1, empty); + + // Value mirror (= null-free), check if flattened + set_control(_gvn.transform(new IfTrueNode(iff))); + Node* cmp = gen_lh_array_test(klass_node, Klass::_lh_array_tag_vt_value); + Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq)); + iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN); + + // Flattened, initialize with all zero + r->init_req(2, _gvn.transform(new IfTrueNode(iff))); + default_value->init_req(2, null()); + storage_properties->init_req(2, flat); + + // Non-flattened, initialize with the default value + set_control(_gvn.transform(new IfFalseNode(iff))); + Node* p = basic_plus_adr(klass_node, in_bytes(ArrayKlass::element_klass_offset())); + Node* eklass = _gvn.transform(LoadKlassNode::make(_gvn, control(), immutable_memory(), p, TypeInstPtr::KLASS)); + Node* adr_fixed_block_addr = basic_plus_adr(eklass, in_bytes(InstanceKlass::adr_valueklass_fixed_block_offset())); + Node* adr_fixed_block = make_load(control(), adr_fixed_block_addr, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered); + Node* default_value_offset_addr = basic_plus_adr(adr_fixed_block, in_bytes(ValueKlass::default_value_offset_offset())); + Node* default_value_offset = make_load(control(), default_value_offset_addr, TypeInt::INT, T_INT, MemNode::unordered); + Node* default_value_addr = basic_plus_adr(elem_mirror, ConvI2X(default_value_offset)); + Node* val = access_load_at(elem_mirror, default_value_addr, _gvn.type(default_value_addr)->is_ptr(), TypeInstPtr::BOTTOM, T_OBJECT, IN_HEAP); + r->init_req(3, control()); + default_value->init_req(3, val); + storage_properties->init_req(3, null_free); + + set_control(_gvn.transform(r)); + default_value = _gvn.transform(default_value); + storage_properties = _gvn.transform(storage_properties); + if (UseCompressedOops) { + default_value = _gvn.transform(new EncodePNode(default_value, default_value->bottom_type()->make_narrowoop())); + raw_default_value = raw_default_for_coops(default_value, *this); + } else { + raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value)); + } + } + // Create the AllocateArrayNode and its result projections ! AllocateArrayNode* alloc = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT), ! control(), mem, i_o(), ! size, klass_node, ! initial_slow_test, ! length, default_value, ! raw_default_value, ! storage_properties); // Cast to correct type. 
Note that the klass_node may be constant or not, // and in the latter case the actual array type will be inexact also. // (This happens via a non-constant argument to inline_native_newArray.) // In any case, the value of klass_node provides the desired array type. const TypeInt* length_type = _gvn.find_int_type(length); if (ary_type->isa_aryptr() && length_type != NULL) { // Try to get a better type than POS for the size ary_type = ary_type->is_aryptr()->cast_to_size(length_type); }
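[Editor's note] When the exact array type is unknown, the new IR branches three ways on the element mirror: a non-value mirror gets the usual zero default and empty storage properties; a value mirror with a flattened layout also zero-initializes but records flattened-and-null-free properties; and a value mirror with a non-flattened layout loads the default value oop from the value klass through the mirror and records null-free properties. A plain control-flow sketch of that Region/Phi selection; the enum values and loadDefaultOopFromMirror are hypothetical stand-ins for the runtime queries the generated IR performs:

    #include <cstdio>

    enum StorageProps { EMPTY, NULL_FREE, FLAT_AND_NULL_FREE };

    struct ArrayInit { const void* default_value; StorageProps props; };

    // Hypothetical stand-in for loading the default value oop via the mirror.
    static const void* loadDefaultOopFromMirror() { return (const void*)0x1000; }

    static ArrayInit choose_init(bool elem_is_value_mirror, bool layout_is_flattened) {
        if (!elem_is_value_mirror) {
            return { nullptr, EMPTY };                // path 1: zero-initialize as usual
        }
        if (layout_is_flattened) {
            return { nullptr, FLAT_AND_NULL_FREE };   // path 2: flattened, still all-zero payload
        }
        // path 3: non-flattened null-free array, pre-fill with the default value oop
        return { loadDefaultOopFromMirror(), NULL_FREE };
    }

    int main() {
        ArrayInit a = choose_init(true, false);
        std::printf("default=%p props=%d\n", a.default_value, (int)a.props);
        return 0;
    }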
*** 3956,3970 **** } Node* GraphKit::load_String_value(Node* str, bool set_ctrl) { int value_offset = java_lang_String::value_offset_in_bytes(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), ! false, NULL, 0); const TypePtr* value_field_type = string_type->add_offset(value_offset); const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull, ! TypeAry::make(TypeInt::BYTE, TypeInt::POS), ! ciTypeArrayKlass::make(T_BYTE), true, 0); Node* p = basic_plus_adr(str, str, value_offset); Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT, IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED); return load; } --- 4404,4418 ---- } Node* GraphKit::load_String_value(Node* str, bool set_ctrl) { int value_offset = java_lang_String::value_offset_in_bytes(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), ! false, NULL, Type::Offset(0)); const TypePtr* value_field_type = string_type->add_offset(value_offset); const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull, ! TypeAry::make(TypeInt::BYTE, TypeInt::POS, false, true, true), ! ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0)); Node* p = basic_plus_adr(str, str, value_offset); Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT, IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED); return load; }
*** 3973,3983 **** if (!CompactStrings) { return intcon(java_lang_String::CODER_UTF16); } int coder_offset = java_lang_String::coder_offset_in_bytes(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), ! false, NULL, 0); const TypePtr* coder_field_type = string_type->add_offset(coder_offset); Node* p = basic_plus_adr(str, str, coder_offset); Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE, IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED); --- 4421,4431 ---- if (!CompactStrings) { return intcon(java_lang_String::CODER_UTF16); } int coder_offset = java_lang_String::coder_offset_in_bytes(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), ! false, NULL, Type::Offset(0)); const TypePtr* coder_field_type = string_type->add_offset(coder_offset); Node* p = basic_plus_adr(str, str, coder_offset); Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE, IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
*** 3985,4005 **** } void GraphKit::store_String_value(Node* str, Node* value) { int value_offset = java_lang_String::value_offset_in_bytes(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), ! false, NULL, 0); const TypePtr* value_field_type = string_type->add_offset(value_offset); access_store_at(str, basic_plus_adr(str, value_offset), value_field_type, value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED); } void GraphKit::store_String_coder(Node* str, Node* value) { int coder_offset = java_lang_String::coder_offset_in_bytes(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), ! false, NULL, 0); const TypePtr* coder_field_type = string_type->add_offset(coder_offset); access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type, value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED); } --- 4433,4453 ---- } void GraphKit::store_String_value(Node* str, Node* value) { int value_offset = java_lang_String::value_offset_in_bytes(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), ! false, NULL, Type::Offset(0)); const TypePtr* value_field_type = string_type->add_offset(value_offset); access_store_at(str, basic_plus_adr(str, value_offset), value_field_type, value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED); } void GraphKit::store_String_coder(Node* str, Node* value) { int coder_offset = java_lang_String::coder_offset_in_bytes(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), ! false, NULL, Type::Offset(0)); const TypePtr* coder_field_type = string_type->add_offset(coder_offset); access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type, value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED); }
*** 4108,4116 **** } } const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(), /*is_unsigned_load=*/false); if (con_type != NULL) { ! return makecon(con_type); } return NULL; } --- 4556,4578 ---- } } const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(), /*is_unsigned_load=*/false); if (con_type != NULL) { ! Node* con = makecon(con_type); ! if (field->layout_type() == T_VALUETYPE && field->type()->as_value_klass()->is_scalarizable() && !con_type->maybe_null()) { ! // Load value type from constant oop ! con = ValueTypeNode::make_from_oop(this, con, field->type()->as_value_klass()); ! } ! return con; } return NULL; } + + //---------------------------load_mirror_from_klass---------------------------- + // Given a klass oop, load its java mirror (a java.lang.Class oop). + Node* GraphKit::load_mirror_from_klass(Node* klass) { + Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset())); + Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered); + // mirror = ((OopHandle)mirror)->resolve(); + return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE); + }
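[Editor's note] load_mirror_from_klass performs a two-step load: first the raw address of the OopHandle stored in the klass (the T_ADDRESS load), then the mirror oop it refers to through an IN_NATIVE access. A conceptual sketch of that handle indirection with stand-in types only:

    #include <cstdio>

    // The klass stores an OopHandle (a pointer to an oop slot); resolving it
    // takes a second load through that slot. Types and layout are illustrative.
    struct FakeOop   { int field; };
    struct FakeKlass { FakeOop** java_mirror_handle; };   // stand-in for the OopHandle

    static FakeOop* load_mirror(const FakeKlass* k) {
        FakeOop** handle = k->java_mirror_handle;   // load 1: the handle address
        return *handle;                             // load 2: resolve the handle
    }

    int main() {
        FakeOop mirror = { 42 };
        FakeOop* slot = &mirror;
        FakeKlass k = { &slot };
        std::printf("mirror field = %d\n", load_mirror(&k)->field);
        return 0;
    }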