src/hotspot/share/opto/callnode.cpp

*** 1,7 ****
  /*
! * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
! * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 40,49 ****
--- 40,51 ----
  #include "opto/parse.hpp"
  #include "opto/regalloc.hpp"
  #include "opto/regmask.hpp"
  #include "opto/rootnode.hpp"
  #include "opto/runtime.hpp"
+ #include "opto/valuetypenode.hpp"
+ #include "runtime/sharedRuntime.hpp"
  
  // Portions of code courtesy of Clifford Click
  
  // Optimization - Graph Style
*** 73,83 ****
    return RegMask::Empty;
  }
  
  //------------------------------match------------------------------------------
  // Construct projections for incoming parameters, and their RegMask info
! Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
    switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::I_O:
    case TypeFunc::Memory:
      return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
--- 75,85 ----
    return RegMask::Empty;
  }
  
  //------------------------------match------------------------------------------
  // Construct projections for incoming parameters, and their RegMask info
! Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
    switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::I_O:
    case TypeFunc::Memory:
      return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
*** 477,486 ****
--- 479,496 ----
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
+     } else if (cik->is_value_array_klass()) {
+       ciKlass* cie = cik->as_value_array_klass()->base_element_klass();
+       cie->print_name_on(st);
+       st->print("[%d]", spobj->n_fields());
+       int ndim = cik->as_array_klass()->dimension() - 1;
+       while (ndim-- > 0) {
+         st->print("[]");
+       }
      }
      st->print("={");
      uint nf = spobj->n_fields();
      if (nf > 0) {
        uint first_ind = spobj->first_index(mcall->jvms());
*** 686,734 ****
    if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
    if (jvms() != NULL)  jvms()->dump_spec(st);
  }
  #endif
  
! const Type *CallNode::bottom_type() const { return tf()->range(); }
  const Type* CallNode::Value(PhaseGVN* phase) const {
!   if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
!   return tf()->range();
  }
  
  //------------------------------calling_convention-----------------------------
! void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
    // Use the standard compiler calling convention
    Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
  }
  
  //------------------------------match------------------------------------------
  // Construct projections for control, I/O, memory-fields, ..., and
  // return result(s) along with their RegMask info
! Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
!   switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::I_O:
    case TypeFunc::Memory:
      return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  
-   case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
-     assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
-     // 2nd half of doubles and longs
-     return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
- 
-   case TypeFunc::Parms: {       // Normal returns
-     uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
-     OptoRegPair regs = is_CallRuntime()
-       ? match->c_return_value(ideal_reg,true)  // Calls into C runtime
-       : match->  return_value(ideal_reg,true); // Calls into compiled Java code
-     RegMask rm = RegMask(regs.first());
-     if( OptoReg::is_valid(regs.second()) )
-       rm.Insert( regs.second() );
-     return new MachProjNode(this,proj->_con,rm,ideal_reg);
-   }
- 
    case TypeFunc::ReturnAdr:
    case TypeFunc::FramePtr:
    default:
      ShouldNotReachHere();
    }
--- 696,763 ----
    if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
    if (jvms() != NULL)  jvms()->dump_spec(st);
  }
  #endif
  
! const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
  const Type* CallNode::Value(PhaseGVN* phase) const {
!   if (!in(0) || phase->type(in(0)) == Type::TOP) {
!     return Type::TOP;
!   }
!   return tf()->range_cc();
  }
  
  //------------------------------calling_convention-----------------------------
! void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
!   if (_entry_point == StubRoutines::store_value_type_fields_to_buf()) {
!     // The call to that stub is a special case: its inputs are
!     // multiple values returned from a call and so it should follow
!     // the return convention.
!     SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
!     return;
!   }
    // Use the standard compiler calling convention
    Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
  }
  
  //------------------------------match------------------------------------------
  // Construct projections for control, I/O, memory-fields, ..., and
  // return result(s) along with their RegMask info
! Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
!   uint con = proj->_con;
!   const TypeTuple *range_cc = tf()->range_cc();
!   if (con >= TypeFunc::Parms) {
!     if (is_CallRuntime()) {
!       if (con == TypeFunc::Parms) {
!         uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
!         OptoRegPair regs = match->c_return_value(ideal_reg,true);
!         RegMask rm = RegMask(regs.first());
!         if (OptoReg::is_valid(regs.second())) {
!           rm.Insert(regs.second());
!         }
!         return new MachProjNode(this,con,rm,ideal_reg);
!       } else {
!         assert(con == TypeFunc::Parms+1, "only one return value");
!         assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
!         return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
!       }
!     } else {
!       // The Call may return multiple values (value type fields): we
!       // create one projection per returned values.
!       assert(con <= TypeFunc::Parms+1 || ValueTypeReturnedAsFields, "only for multi value return");
!       uint ideal_reg = range_cc->field_at(con)->ideal_reg();
!       return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
!     }
!   }
! 
!   switch (con) {
    case TypeFunc::Control:
    case TypeFunc::I_O:
    case TypeFunc::Memory:
      return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  
    case TypeFunc::ReturnAdr:
    case TypeFunc::FramePtr:
    default:
      ShouldNotReachHere();
    }
*** 745,755 ****
  // instance at the specified offset.
  //
  bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
    assert((t_oop != NULL), "sanity");
    if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
!     const TypeTuple* args = _tf->domain();
      Node* dest = NULL;
      // Stubs that can be called once an ArrayCopyNode is expanded have
      // different signatures. Look for the second pointer argument,
      // that is the destination of the copy.
      for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
--- 774,784 ----
  // instance at the specified offset.
  //
  bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
    assert((t_oop != NULL), "sanity");
    if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
!     const TypeTuple* args = _tf->domain_sig();
      Node* dest = NULL;
      // Stubs that can be called once an ArrayCopyNode is expanded have
      // different signatures. Look for the second pointer argument,
      // that is the destination of the copy.
      for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
*** 794,804 ****
        if ((inst_t != NULL) && (!inst_t->klass_is_exact() || (inst_t->klass() == boxing_klass))) {
          return true;
        }
      }
  
!     const TypeTuple* d = tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != NULL) && (!inst_t->klass_is_exact() || (inst_t->klass() == boxing_klass))) {
          return true;
--- 823,833 ----
        if ((inst_t != NULL) && (!inst_t->klass_is_exact() || (inst_t->klass() == boxing_klass))) {
          return true;
        }
      }
  
!     const TypeTuple* d = tf()->domain_cc();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != NULL) && (!inst_t->klass_is_exact() || (inst_t->klass() == boxing_klass))) {
          return true;
*** 810,829 ****
      return true;
    }
  
  // Does this call have a direct reference to n other than debug information?
  bool CallNode::has_non_debug_use(Node *n) {
!   const TypeTuple * d = tf()->domain();
    for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
      Node *arg = in(i);
      if (arg == n) {
        return true;
      }
    }
    return false;
  }
  
  // Returns the unique CheckCastPP of a call
  // or 'this' if there are several CheckCastPP or unexpected uses
  // or returns NULL if there is no one.
  Node *CallNode::result_cast() {
    Node *cast = NULL;
--- 839,869 ----
      return true;
    }
  
  // Does this call have a direct reference to n other than debug information?
  bool CallNode::has_non_debug_use(Node *n) {
!   const TypeTuple * d = tf()->domain_cc();
    for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
      Node *arg = in(i);
      if (arg == n) {
        return true;
      }
    }
    return false;
  }
  
+ bool CallNode::has_debug_use(Node *n) {
+   assert(jvms() != NULL, "jvms should not be null");
+   for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
+     Node *arg = in(i);
+     if (arg == n) {
+       return true;
+     }
+   }
+   return false;
+ }
+ 
  // Returns the unique CheckCastPP of a call
  // or 'this' if there are several CheckCastPP or unexpected uses
  // or returns NULL if there is no one.
  Node *CallNode::result_cast() {
    Node *cast = NULL;
*** 851,870 ****
    }
    return cast;
  }
  
  
! void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
!   projs->fallthrough_proj      = NULL;
!   projs->fallthrough_catchproj = NULL;
!   projs->fallthrough_ioproj    = NULL;
!   projs->catchall_ioproj       = NULL;
!   projs->catchall_catchproj    = NULL;
!   projs->fallthrough_memproj   = NULL;
!   projs->catchall_memproj      = NULL;
!   projs->resproj               = NULL;
!   projs->exobj                 = NULL;
  
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      ProjNode *pn = fast_out(i)->as_Proj();
      if (pn->outcnt() == 0) continue;
      switch (pn->_con) {
--- 891,915 ----
    }
    return cast;
  }
  
  
! CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) {
!   uint max_res = TypeFunc::Parms-1;
!   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
!     ProjNode *pn = fast_out(i)->as_Proj();
!     max_res = MAX2(max_res, pn->_con);
!   }
! 
!   assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");
! 
!   uint projs_size = sizeof(CallProjections);
!   if (max_res > TypeFunc::Parms) {
!     projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
!   }
!   char* projs_storage = resource_allocate_bytes(projs_size);
!   CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);
  
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      ProjNode *pn = fast_out(i)->as_Proj();
      if (pn->outcnt() == 0) continue;
      switch (pn->_con) {
*** 907,936 ****
          projs->catchall_memproj = pn;
        else
          projs->fallthrough_memproj = pn;
        break;
      case TypeFunc::Parms:
!       projs->resproj = pn;
        break;
      default:
!       assert(false, "unexpected projection from allocation node.");
      }
    }
  
    // The resproj may not exist because the result could be ignored
    // and the exception object may not exist if an exception handler
    // swallows the exception but all the other must exist and be found.
-   assert(projs->fallthrough_proj      != NULL, "must be found");
    do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
    assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
    assert(!do_asserts || projs->fallthrough_memproj   != NULL, "must be found");
    assert(!do_asserts || projs->fallthrough_ioproj    != NULL, "must be found");
    assert(!do_asserts || projs->catchall_catchproj    != NULL, "must be found");
    if (separate_io_proj) {
      assert(!do_asserts || projs->catchall_memproj    != NULL, "must be found");
      assert(!do_asserts || projs->catchall_ioproj     != NULL, "must be found");
    }
  }
  
  Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
    CallGenerator* cg = generator();
    if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
--- 952,984 ----
          projs->catchall_memproj = pn;
        else
          projs->fallthrough_memproj = pn;
        break;
      case TypeFunc::Parms:
!       projs->resproj[0] = pn;
        break;
      default:
!       assert(pn->_con <= max_res, "unexpected projection from allocation node.");
!       projs->resproj[pn->_con-TypeFunc::Parms] = pn;
!       break;
      }
    }
  
    // The resproj may not exist because the result could be ignored
    // and the exception object may not exist if an exception handler
    // swallows the exception but all the other must exist and be found.
    do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
+   assert(!do_asserts || projs->fallthrough_proj      != NULL, "must be found");
    assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
    assert(!do_asserts || projs->fallthrough_memproj   != NULL, "must be found");
    assert(!do_asserts || projs->fallthrough_ioproj    != NULL, "must be found");
    assert(!do_asserts || projs->catchall_catchproj    != NULL, "must be found");
    if (separate_io_proj) {
      assert(!do_asserts || projs->catchall_memproj    != NULL, "must be found");
      assert(!do_asserts || projs->catchall_ioproj     != NULL, "must be found");
    }
+   return projs;
  }
  
  Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
    CallGenerator* cg = generator();
    if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
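
With this change, extract_projections() sizes and resource-allocates the CallProjections itself, and resproj[] holds one slot per result projection (nb_resproj of them) instead of a single resproj field. A minimal usage sketch, assuming a call node and an IGVN phase are already in hand (the call and igvn locals below are hypothetical; the pattern mirrors the AllocateNode::Ideal caller further down in this change):

  // Sketch only: consume the resource-allocated CallProjections returned by the
  // new API; resproj[] has nb_resproj entries, one per result projection.
  CallProjections* projs = call->extract_projections(true  /* separate_io_proj */,
                                                     false /* do_asserts */);
  for (uint i = 0; i < projs->nb_resproj; i++) {
    if (projs->resproj[i] != NULL) {
      igvn->replace_node(projs->resproj[i], igvn->C->top());
    }
  }
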
*** 970,979 ****
--- 1018,1031 ----
  #ifdef ASSERT
  bool CallJavaNode::validate_symbolic_info() const {
    if (method() == NULL) {
      return true; // call into runtime or uncommon trap
    }
+   Bytecodes::Code bc = jvms()->method()->java_code_at_bci(_bci);
+   if (EnableValhalla && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
+     return true;
+   }
    ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(_bci);
    ciMethod* callee = method();
    if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
      assert(override_symbolic_info(), "should be set");
    }
*** 1081,1090 ****
--- 1133,1149 ----
  }
  #endif
  
  //------------------------------calling_convention-----------------------------
  void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
+   if (_entry_point == NULL) {
+     // The call to that stub is a special case: its inputs are
+     // multiple values returned from a call and so it should follow
+     // the return convention.
+     SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
+     return;
+   }
    Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
  }
  
  //=============================================================================
  //------------------------------calling_convention-----------------------------
*** 1097,1106 ****
--- 1156,1171 ----
    st->print("%s", _name);
    CallNode::dump_spec(st);
  }
  #endif
  
+ uint CallLeafNoFPNode::match_edge(uint idx) const {
+   // Null entry point is a special case for which the target is in a
+   // register. Need to match that edge.
+   return entry_point() == NULL && idx == TypeFunc::Parms;
+ }
+ 
  //=============================================================================
  
  void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    int loc = jvms->locoff() + idx;
*** 1357,1374 ****
  //=============================================================================
  uint AllocateNode::size_of() const { return sizeof(*this); }
  
  AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                             Node *ctrl, Node *mem, Node *abio,
!                            Node *size, Node *klass_node, Node *initial_test)
    : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
  {
    init_class_id(Class_Allocate);
    init_flags(Flag_is_macro);
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
    _is_allocation_MemBar_redundant = false;
    Node *topnode = C->top();
  
    init_req( TypeFunc::Control  , ctrl );
    init_req( TypeFunc::I_O      , abio );
    init_req( TypeFunc::Memory   , mem );
--- 1422,1442 ----
  //=============================================================================
  uint AllocateNode::size_of() const { return sizeof(*this); }
  
  AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                             Node *ctrl, Node *mem, Node *abio,
!                            Node *size, Node *klass_node,
!                            Node* initial_test,
!                            ValueTypeBaseNode* value_node)
    : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
  {
    init_class_id(Class_Allocate);
    init_flags(Flag_is_macro);
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
    _is_allocation_MemBar_redundant = false;
+   _larval = false;
    Node *topnode = C->top();
  
    init_req( TypeFunc::Control  , ctrl );
    init_req( TypeFunc::I_O      , abio );
    init_req( TypeFunc::Memory   , mem );
*** 1376,1394 ****
    init_req( TypeFunc::FramePtr , topnode );
    init_req( AllocSize          , size);
    init_req( KlassNode          , klass_node);
    init_req( InitialTest        , initial_test);
    init_req( ALength            , topnode);
    C->add_macro_node(this);
  }
  
  void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
  {
    assert(initializer != NULL &&
!          initializer->is_initializer() &&
!          !initializer->is_static(),
           "unexpected initializer method");
    BCEscapeAnalyzer* analyzer = initializer->get_bcea();
    if (analyzer == NULL) {
      return;
    }
--- 1444,1465 ----
    init_req( TypeFunc::FramePtr , topnode );
    init_req( AllocSize          , size);
    init_req( KlassNode          , klass_node);
    init_req( InitialTest        , initial_test);
    init_req( ALength            , topnode);
+   init_req( ValueNode          , value_node);
+   // DefaultValue defaults to NULL
+   // RawDefaultValue defaults to NULL
+   // StorageProperties defaults to NULL
    C->add_macro_node(this);
  }
  
  void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
  {
    assert(initializer != NULL &&
!          initializer->is_object_constructor_or_class_initializer(),
           "unexpected initializer method");
    BCEscapeAnalyzer* analyzer = initializer->get_bcea();
    if (analyzer == NULL) {
      return;
    }
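
With the extra ValueTypeBaseNode* parameter, an allocation site now passes the value node (NULL for an ordinary object allocation) as the last constructor argument. A sketch of a call site, assuming the mainline GraphKit::new_instance shape; the locals below are illustrative and not taken from this patch:

  // Sketch only: constructing an AllocateNode with the new trailing argument.
  // value_node is NULL for a regular (non value type) object allocation.
  AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                                         control, mem, i_o,
                                         size_in_bytes, klass_node,
                                         initial_slow_test, value_node);
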
*** 1396,1408 ****
    if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
      _is_allocation_MemBar_redundant = true;
    }
  }
  
  //=============================================================================
  Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
!   if (remove_dead_region(phase, can_reshape))  return this;
    // Don't bother trying to transform a dead node
    if (in(0) && in(0)->is_top())  return NULL;
  
    const Type* type = phase->type(Ideal_length());
    if (type->isa_int() && type->is_int()->_hi < 0) {
--- 1467,1548 ----
    if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
      _is_allocation_MemBar_redundant = true;
    }
  }
  
+ Node* AllocateNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+   // Check for unused value type allocation
+   if (can_reshape && in(AllocateNode::ValueNode) != NULL &&
+       outcnt() != 0 && result_cast() == NULL) {
+     // Remove allocation by replacing the projection nodes with its inputs
+     InitializeNode* init = initialization();
+     PhaseIterGVN* igvn = phase->is_IterGVN();
+     CallProjections* projs = extract_projections(true, false);
+     assert(projs->nb_resproj <= 1, "unexpected number of results");
+     if (projs->fallthrough_catchproj != NULL) {
+       igvn->replace_node(projs->fallthrough_catchproj, in(TypeFunc::Control));
+     }
+     if (projs->fallthrough_memproj != NULL) {
+       igvn->replace_node(projs->fallthrough_memproj, in(TypeFunc::Memory));
+     }
+     if (projs->catchall_memproj != NULL) {
+       igvn->replace_node(projs->catchall_memproj, phase->C->top());
+     }
+     if (projs->fallthrough_ioproj != NULL) {
+       igvn->replace_node(projs->fallthrough_ioproj, in(TypeFunc::I_O));
+     }
+     if (projs->catchall_ioproj != NULL) {
+       igvn->replace_node(projs->catchall_ioproj, phase->C->top());
+     }
+     if (projs->catchall_catchproj != NULL) {
+       igvn->replace_node(projs->catchall_catchproj, phase->C->top());
+     }
+     if (projs->resproj[0] != NULL) {
+       igvn->replace_node(projs->resproj[0], phase->C->top());
+     }
+     igvn->replace_node(this, phase->C->top());
+     if (init != NULL) {
+       Node* ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
+       Node* mem_proj = init->proj_out_or_null(TypeFunc::Memory);
+       if (ctrl_proj != NULL) {
+         igvn->replace_node(ctrl_proj, init->in(TypeFunc::Control));
+       }
+       if (mem_proj != NULL) {
+         igvn->replace_node(mem_proj, init->in(TypeFunc::Memory));
+       }
+     }
+     return NULL;
+   }
+ 
+   return CallNode::Ideal(phase, can_reshape);
+ }
+ 
+ Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem, Node* klass_node) {
+   Node* mark_node = NULL;
+   // For now only enable fast locking for non-array types
+   if ((EnableValhalla || UseBiasedLocking) && Opcode() == Op_Allocate) {
+     if (klass_node == NULL) {
+       Node* k_adr = phase->transform(new AddPNode(obj, obj, phase->MakeConX(oopDesc::klass_offset_in_bytes())));
+       klass_node = phase->transform(LoadKlassNode::make(*phase, NULL, phase->C->immutable_memory(), k_adr, phase->type(k_adr)->is_ptr()));
+     }
+     Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
+     mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+   } else {
+     mark_node = phase->MakeConX((intptr_t)markOopDesc::prototype());
+   }
+   mark_node = phase->transform(mark_node);
+   // Avoid returning a constant (old node) here because this method is used by LoadNode::Ideal
+   return new OrXNode(mark_node, phase->MakeConX(_larval ? markOopDesc::larval_state_pattern : 0));
+ }
+ 
  //=============================================================================
  Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
!   Node* res = SafePointNode::Ideal(phase, can_reshape);
!   if (res != NULL) {
!     return res;
!   }
    // Don't bother trying to transform a dead node
    if (in(0) && in(0)->is_top())  return NULL;
  
    const Type* type = phase->type(Ideal_length());
    if (type->isa_int() && type->is_int()->_hi < 0) {
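
The new make_ideal_mark() centralizes computation of the initial mark word (loaded from the klass prototype header when Valhalla or biased locking is enabled, a constant otherwise) and ORs in the larval-state bit for in-construction value objects. A hedged sketch of how a caller might use it; the call site shown is hypothetical, since the actual callers are outside this file:

  // Sketch only: obtain the initial mark word for a newly allocated object.
  // Passing NULL for klass_node lets the helper load the klass from obj itself.
  Node* mark = alloc->make_ideal_mark(&igvn, new_obj, ctrl, mem, NULL);
  mark = igvn.transform(mark);  // the helper returns an untransformed OrX node
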
*** 1823,1833 ****
    // Now see if we can optimize away this lock.  We don't actually
    // remove the locking here, we simply set the _eliminate flag which
    // prevents macro expansion from expanding the lock.  Since we don't
    // modify the graph, the value returned from this function is the
    // one computed above.
!   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
      //
      // If we are locking an unescaped object, the lock/unlock is unnecessary
      //
      ConnectionGraph *cgr = phase->C->congraph();
      if (cgr != NULL && cgr->not_global_escape(obj_node())) {
--- 1963,1975 ----
    // Now see if we can optimize away this lock.  We don't actually
    // remove the locking here, we simply set the _eliminate flag which
    // prevents macro expansion from expanding the lock.  Since we don't
    // modify the graph, the value returned from this function is the
    // one computed above.
!   const Type* obj_type = phase->type(obj_node());
!   if (can_reshape && EliminateLocks && !is_non_esc_obj() &&
!       !obj_type->isa_valuetype() && !obj_type->is_valuetypeptr()) {
      //
      // If we are locking an unescaped object, the lock/unlock is unnecessary
      //
      ConnectionGraph *cgr = phase->C->congraph();
      if (cgr != NULL && cgr->not_global_escape(obj_node())) {
*** 1991,2001 ****
    // remove the unlocking here, we simply set the _eliminate flag which
    // prevents macro expansion from expanding the unlock.  Since we don't
    // modify the graph, the value returned from this function is the
    // one computed above.
    // Escape state is defined after Parse phase.
!   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
      //
      // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
      //
      ConnectionGraph *cgr = phase->C->congraph();
      if (cgr != NULL && cgr->not_global_escape(obj_node())) {
--- 2133,2145 ----
    // remove the unlocking here, we simply set the _eliminate flag which
    // prevents macro expansion from expanding the unlock.  Since we don't
    // modify the graph, the value returned from this function is the
    // one computed above.
    // Escape state is defined after Parse phase.
!   const Type* obj_type = phase->type(obj_node());
!   if (can_reshape && EliminateLocks && !is_non_esc_obj() &&
!       !obj_type->isa_valuetype() && !obj_type->is_valuetypeptr()) {
      //
      // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
      //
      ConnectionGraph *cgr = phase->C->congraph();
      if (cgr != NULL && cgr->not_global_escape(obj_node())) {
*** 2073,2083 ****
    if (elem == Type::BOTTOM) {
      // An array but we don't know what elements are
      return true;
    }
  
!   dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
    uint dest_alias = phase->C->get_alias_index(dest_t);
    uint t_oop_alias = phase->C->get_alias_index(t_oop);
  
    return dest_alias == t_oop_alias;
  }
--- 2217,2228 ----
    if (elem == Type::BOTTOM) {
      // An array but we don't know what elements are
      return true;
    }
  
!   dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
!   t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
    uint dest_alias = phase->C->get_alias_index(dest_t);
    uint t_oop_alias = phase->C->get_alias_index(t_oop);
  
    return dest_alias == t_oop_alias;
  }