11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/bcEscapeAnalyzer.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "gc/shared/barrierSet.hpp"
28 #include "gc/shared/c2/barrierSetC2.hpp"
29 #include "libadt/vectset.hpp"
30 #include "memory/allocation.hpp"
31 #include "memory/resourceArea.hpp"
32 #include "opto/arraycopynode.hpp"
33 #include "opto/c2compiler.hpp"
34 #include "opto/callnode.hpp"
35 #include "opto/castnode.hpp"
36 #include "opto/cfgnode.hpp"
37 #include "opto/compile.hpp"
38 #include "opto/escape.hpp"
39 #include "opto/locknode.hpp"
40 #include "opto/macro.hpp"
41 #include "opto/movenode.hpp"
42 #include "opto/narrowptrnode.hpp"
43 #include "opto/phaseX.hpp"
44 #include "opto/rootnode.hpp"
45 #include "utilities/macros.hpp"
46
47 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
48 // If ReduceAllocationMerges is enabled we might call split_through_phi during
49 // split_unique_types and that will create additional nodes that need to be
50 // pushed to the ConnectionGraph. The code below bumps the initial capacity of
51 // _nodes by 10% to account for these additional nodes. If capacity is exceeded
52 // the array will be reallocated.
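// For example (illustrative arithmetic): with C->unique() == 10000 and
// ReduceAllocationMerges enabled, _nodes starts with room for 11000
// entries; with the flag disabled it starts with exactly 10000.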
53 _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
54 _in_worklist(C->comp_arena()),
55 _next_pidx(0),
56 _collecting(true),
57 _verify(false),
58 _compile(C),
146 GrowableArray<SafePointNode*> sfn_worklist;
147 GrowableArray<MergeMemNode*> mergemem_worklist;
148 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
149
150 { Compile::TracePhase tp(Phase::_t_connectionGraph);
151
152 // 1. Populate Connection Graph (CG) with PointsTo nodes.
153 ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
154 // Initialize worklist
155 if (C->root() != nullptr) {
156 ideal_nodes.push(C->root());
157 }
158 // Processed ideal nodes are unique on the ideal_nodes list,
159 // but several ideal nodes may be mapped to the same phantom_obj.
160 // To avoid duplicate entries on the following worklists,
161 // add the phantom_obj to them only once.
162 ptnodes_worklist.append(phantom_obj);
163 java_objects_worklist.append(phantom_obj);
164 for( uint next = 0; next < ideal_nodes.size(); ++next ) {
165 Node* n = ideal_nodes.at(next);
166 // Create PointsTo nodes and add them to the Connection Graph. Called
167 // only once per ideal node since ideal_nodes is a Unique_Node list.
168 add_node_to_connection_graph(n, &delayed_worklist);
169 PointsToNode* ptn = ptnode_adr(n->_idx);
170 if (ptn != nullptr && ptn != phantom_obj) {
171 ptnodes_worklist.append(ptn);
172 if (ptn->is_JavaObject()) {
173 java_objects_worklist.append(ptn->as_JavaObject());
174 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
175 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
176 // Only the results of allocations and Java static calls are interesting.
177 non_escaped_allocs_worklist.append(ptn->as_JavaObject());
178 }
179 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
180 oop_fields_worklist.append(ptn->as_Field());
181 }
182 }
183 // Collect some interesting nodes for further use.
184 switch (n->Opcode()) {
185 case Op_MergeMem:
1235
1236 // The next two inputs are:
1237 // (1) A copy of the original pointer to NSR objects.
1238 // (2) A selector, used to decide if we need to rematerialize an object
1239 // or use the pointer to a NSR object.
1240 // See the declaration of SafePointScalarMergeNode for more details on these fields.
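// A sketch of how these two inputs are consumed during deoptimization,
// assuming the behavior documented for SafePointScalarMergeNode: if the
// selector value observed at runtime is -1, the NSR pointer input is used
// as-is; otherwise the selector picks which of the scalarized object
// candidates registered below must be rematerialized.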
1241 sfpt->add_req(nsr_merge_pointer);
1242 sfpt->add_req(selector);
1243
1244 for (uint i = 1; i < ophi->req(); i++) {
1245 Node* base = ophi->in(i);
1246 JavaObjectNode* ptn = unique_java_object(base);
1247
1248 // If the base is not scalar replaceable we don't need to register information about
1249 // it at this time.
1250 if (ptn == nullptr || !ptn->scalar_replaceable()) {
1251 continue;
1252 }
1253
1254 AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1255 SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);
1256 if (sobj == nullptr) {
1257 return false;
1258 }
1259
1260 // Now make a pass over the debug information replacing any references
1261 // to the allocated object with "sobj"
1262 Node* ccpp = alloc->result_cast();
1263 sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1264
1265 // Register the scalarized object as a candidate for reallocation
1266 smerge->add_req(sobj);
1267 }
1268
1269 // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
1270 sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1271
1272 // The call to 'replace_edges_in_range' above might have removed the
1273 // reference to ophi that we need at _merge_pointer_idx. The line below makes
1274 // sure the reference is maintained.
1275 sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1276 _igvn->_worklist.push(sfpt);
1277 }
1278
1279 return true;
1280 }
1281
1282 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) {
1283 bool delay = _igvn->delay_transform();
1284 _igvn->set_delay_transform(true);
1285 _igvn->hash_delete(ophi);
1286
1445 return false;
1446 }
1447
1448 // Returns true if at least one of the arguments to the call is an object
1449 // that does not escape globally.
1450 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1451 if (call->method() != nullptr) {
1452 uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1453 for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1454 Node* p = call->in(idx);
1455 if (not_global_escape(p)) {
1456 return true;
1457 }
1458 }
1459 } else {
1460 const char* name = call->as_CallStaticJava()->_name;
1461 assert(name != nullptr, "no name");
1462 // no arg escapes through uncommon traps
1463 if (strcmp(name, "uncommon_trap") != 0) {
1464 // process_call_arguments() assumes that all arguments escape globally
1465 const TypeTuple* d = call->tf()->domain();
1466 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1467 const Type* at = d->field_at(i);
1468 if (at->isa_oopptr() != nullptr) {
1469 return true;
1470 }
1471 }
1472 }
1473 }
1474 return false;
1475 }
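// For example (illustrative Java, assuming 'consume' is known not to let
// its argument escape):
//
//   StringBuilder sb = new StringBuilder(); // NoEscape allocation
//   consume(sb);                            // has_arg_escape() is true here
//
// because at least one argument points to an object that does not escape
// globally.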
1476
1477
1478
1479 // Utility function for nodes that load an object
1480 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1481 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1482 // ThreadLocal has RawPtr type.
1483 const Type* t = _igvn->type(n);
1484 if (t->make_ptr() != nullptr) {
1485 Node* adr = n->in(MemNode::Address);
1519 // first IGVN optimization when escape information is still available.
1520 record_for_optimizer(n);
1521 } else if (n->is_Allocate()) {
1522 add_call_node(n->as_Call());
1523 record_for_optimizer(n);
1524 } else {
1525 if (n->is_CallStaticJava()) {
1526 const char* name = n->as_CallStaticJava()->_name;
1527 if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1528 return; // Skip uncommon traps
1529 }
1530 }
1531 // Don't mark as processed since call's arguments have to be processed.
1532 delayed_worklist->push(n);
1533 // Check if a call returns an object.
1534 if ((n->as_Call()->returns_pointer() &&
1535 n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1536 (n->is_CallStaticJava() &&
1537 n->as_CallStaticJava()->is_boxing_method())) {
1538 add_call_node(n->as_Call());
1539 }
1540 }
1541 return;
1542 }
1543 // Put this check here to process call arguments since some call nodes
1544 // point to phantom_obj.
1545 if (n_ptn == phantom_obj || n_ptn == null_obj) {
1546 return; // Skip predefined nodes.
1547 }
1548 switch (opcode) {
1549 case Op_AddP: {
1550 Node* base = get_addp_base(n);
1551 PointsToNode* ptn_base = ptnode_adr(base->_idx);
1552 // Field nodes are created for all field types. They are used in
1553 // adjust_scalar_replaceable_state() and split_unique_types().
1554 // Note, non-oop fields will have only base edges in Connection
1555 // Graph because such fields are not used for oop loads and stores.
1556 int offset = address_offset(n, igvn);
1557 add_field(n, PointsToNode::NoEscape, offset);
1558 if (ptn_base == nullptr) {
1559 delayed_worklist->push(n); // Process it later.
1560 } else {
1561 n_ptn = ptnode_adr(n_idx);
1562 add_base(n_ptn->as_Field(), ptn_base);
1563 }
1564 break;
1565 }
1566 case Op_CastX2P: {
1567 map_ideal_node(n, phantom_obj);
1568 break;
1569 }
1570 case Op_CastPP:
1571 case Op_CheckCastPP:
1572 case Op_EncodeP:
1573 case Op_DecodeN:
1574 case Op_EncodePKlass:
1575 case Op_DecodeNKlass: {
1576 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1577 break;
1578 }
1579 case Op_CMoveP: {
1580 add_local_var(n, PointsToNode::NoEscape);
1581 // Do not add edges during the first iteration because some
1582 // inputs may not be defined yet.
1583 delayed_worklist->push(n);
1584 break;
1585 }
1586 case Op_ConP:
1587 case Op_ConN:
1588 case Op_ConNKlass: {
1589 // assume all oop constants globally escape except for null
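// For example, a ConP for a String literal can be referenced from anywhere
// (it lives in a shared constant pool), so it is treated as GlobalEscape;
// the null constant is the one exception.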
1621 case Op_PartialSubtypeCheck: {
1622 // Produces Null or notNull and is used only in CmpP, so
1623 // phantom_obj could be used.
1624 map_ideal_node(n, phantom_obj); // Result is unknown
1625 break;
1626 }
1627 case Op_Phi: {
1628 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1629 // ThreadLocal has RawPtr type.
1630 const Type* t = n->as_Phi()->type();
1631 if (t->make_ptr() != nullptr) {
1632 add_local_var(n, PointsToNode::NoEscape);
1633 // Do not add edges during the first iteration because some
1634 // inputs may not be defined yet.
1635 delayed_worklist->push(n);
1636 }
1637 break;
1638 }
1639 case Op_Proj: {
1640 // we are only interested in the oop result projection from a call
1641 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1642 n->in(0)->as_Call()->returns_pointer()) {
1643 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1644 }
1645 break;
1646 }
1647 case Op_Rethrow: // Exception object escapes
1648 case Op_Return: {
1649 if (n->req() > TypeFunc::Parms &&
1650 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1651 // Treat Return value as LocalVar with GlobalEscape escape state.
1652 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1653 }
1654 break;
1655 }
1656 case Op_CompareAndExchangeP:
1657 case Op_CompareAndExchangeN:
1658 case Op_GetAndSetP:
1659 case Op_GetAndSetN: {
1660 add_objload_to_connection_graph(n, delayed_worklist);
1661 // fall-through
1662 }
1724 if (n->is_Call()) {
1725 process_call_arguments(n->as_Call());
1726 return;
1727 }
1728 assert(n->is_Store() || n->is_LoadStore() ||
1729 ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1730 "node should be registered already");
1731 int opcode = n->Opcode();
1732 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1733 if (gc_handled) {
1734 return; // Ignore node if already handled by GC.
1735 }
1736 switch (opcode) {
1737 case Op_AddP: {
1738 Node* base = get_addp_base(n);
1739 PointsToNode* ptn_base = ptnode_adr(base->_idx);
1740 assert(ptn_base != nullptr, "field's base should be registered");
1741 add_base(n_ptn->as_Field(), ptn_base);
1742 break;
1743 }
1744 case Op_CastPP:
1745 case Op_CheckCastPP:
1746 case Op_EncodeP:
1747 case Op_DecodeN:
1748 case Op_EncodePKlass:
1749 case Op_DecodeNKlass: {
1750 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1751 break;
1752 }
1753 case Op_CMoveP: {
1754 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1755 Node* in = n->in(i);
1756 if (in == nullptr) {
1757 continue; // ignore null
1758 }
1759 Node* uncast_in = in->uncast();
1760 if (uncast_in->is_top() || uncast_in == n) {
1761 continue; // ignore top or inputs which go back this node
1762 }
1763 PointsToNode* ptn = ptnode_adr(in->_idx);
1778 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1779 // ThreadLocal has RawPtr type.
1780 assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1781 for (uint i = 1; i < n->req(); i++) {
1782 Node* in = n->in(i);
1783 if (in == nullptr) {
1784 continue; // ignore null
1785 }
1786 Node* uncast_in = in->uncast();
1787 if (uncast_in->is_top() || uncast_in == n) {
1788 continue; // ignore top or inputs which go back this node
1789 }
1790 PointsToNode* ptn = ptnode_adr(in->_idx);
1791 assert(ptn != nullptr, "node should be registered");
1792 add_edge(n_ptn, ptn);
1793 }
1794 break;
1795 }
1796 case Op_Proj: {
1797 // we are only interested in the oop result projection from a call
1798 assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1799 n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
1800 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1801 break;
1802 }
1803 case Op_Rethrow: // Exception object escapes
1804 case Op_Return: {
1805 assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1806 "Unexpected node type");
1807 // Treat Return value as LocalVar with GlobalEscape escape state.
1808 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1809 break;
1810 }
1811 case Op_CompareAndExchangeP:
1812 case Op_CompareAndExchangeN:
1813 case Op_GetAndSetP:
1814 case Op_GetAndSetN: {
1815 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1816 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1817 // fall-through
1818 }
1819 case Op_CompareAndSwapP:
1955 PointsToNode* ptn = ptnode_adr(val->_idx);
1956 assert(ptn != nullptr, "node should be registered");
1957 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1958 // Add edge to object for unsafe access with offset.
1959 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1960 assert(adr_ptn != nullptr, "node should be registered");
1961 if (adr_ptn->is_Field()) {
1962 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
1963 add_edge(adr_ptn, ptn);
1964 }
1965 return true;
1966 }
1967 #ifdef ASSERT
1968 n->dump(1);
1969 assert(false, "not unsafe");
1970 #endif
1971 return false;
1972 }
1973
1974 void ConnectionGraph::add_call_node(CallNode* call) {
1975 assert(call->returns_pointer(), "only for call which returns pointer");
1976 uint call_idx = call->_idx;
1977 if (call->is_Allocate()) {
1978 Node* k = call->in(AllocateNode::KlassNode);
1979 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
1980 assert(kt != nullptr, "TypeKlassPtr required.");
1981 PointsToNode::EscapeState es = PointsToNode::NoEscape;
1982 bool scalar_replaceable = true;
1983 NOT_PRODUCT(const char* nsr_reason = "");
1984 if (call->is_AllocateArray()) {
1985 if (!kt->isa_aryklassptr()) { // StressReflectiveCode
1986 es = PointsToNode::GlobalEscape;
1987 } else {
1988 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
1989 if (length < 0) {
1990 // Not scalar replaceable if the length is not constant.
1991 scalar_replaceable = false;
1992 NOT_PRODUCT(nsr_reason = "has a non-constant length");
1993 } else if (length > EliminateAllocationArraySizeLimit) {
1994 // Not scalar replaceable if the length is too big.
1995 scalar_replaceable = false;
2031 //
2032 // - all oop arguments are escaping globally;
2033 //
2034 // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2035 //
2036 // - the same as CallDynamicJavaNode if can't do bytecode analysis;
2037 //
2038 // - mapped to a GlobalEscape JavaObject node if an unknown oop is returned;
2039 // - mapped to a NoEscape JavaObject node if a non-escaping object allocated
2040 // during the call is returned;
2041 // - mapped to an ArgEscape LocalVar node pointing to object arguments
2042 // which are returned and do not escape during the call;
2043 //
2044 // - the escaping status of oop arguments is determined by bytecode analysis;
2045 //
2046 // For a static call, we know exactly what method is being called.
2047 // Use bytecode estimator to record whether the call's return value escapes.
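// For example (illustrative Java):
//
//   static Object identity(Object o) { return o; }  // result maps to an
//                                                    // ArgEscape LocalVar
//   static Object fresh() { return new Object(); }  // result maps to a
//                                                    // NoEscape JavaObject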
2048 ciMethod* meth = call->as_CallJava()->method();
2049 if (meth == nullptr) {
2050 const char* name = call->as_CallStaticJava()->_name;
2051 assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "TODO: add failed case check");
2052 // Returns a newly allocated non-escaped object.
2053 add_java_object(call, PointsToNode::NoEscape);
2054 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
2055 } else if (meth->is_boxing_method()) {
2056 // Returns boxing object
2057 PointsToNode::EscapeState es;
2058 vmIntrinsics::ID intr = meth->intrinsic_id();
2059 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2060 // It does not escape if object is always allocated.
2061 es = PointsToNode::NoEscape;
2062 } else {
2063 // It escapes globally if object could be loaded from cache.
2064 es = PointsToNode::GlobalEscape;
2065 }
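// For example, Integer.valueOf(42) may return an instance from the shared
// Integer cache (values in -128..127 are cached per the JLS), so its result
// can be visible globally; Float.valueOf() and Double.valueOf() have no
// such cache and always allocate a fresh object.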
2066 add_java_object(call, es);
2067 if (es == PointsToNode::GlobalEscape) {
2068 set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2069 }
2070 } else {
2071 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2072 call_analyzer->copy_dependencies(_compile->dependencies());
2073 if (call_analyzer->is_return_allocated()) {
2074 // Returns a newly allocated non-escaped object, simply
2075 // update dependency information.
2076 // Mark it as NoEscape so that objects referenced by
2077 // its fields will be marked as NoEscape at least.
2078 add_java_object(call, PointsToNode::NoEscape);
2079 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2080 } else {
2081 // Determine whether any arguments are returned.
2082 const TypeTuple* d = call->tf()->domain();
2083 bool ret_arg = false;
2084 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2085 if (d->field_at(i)->isa_ptr() != nullptr &&
2086 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2087 ret_arg = true;
2088 break;
2089 }
2090 }
2091 if (ret_arg) {
2092 add_local_var(call, PointsToNode::ArgEscape);
2093 } else {
2094 // Returns unknown object.
2095 map_ideal_node(call, phantom_obj);
2096 }
2097 }
2098 }
2099 } else {
2100 // Another type of call; assume the worst case:
2101 // the returned value is unknown and globally escapes.
2102 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
2110 #ifdef ASSERT
2111 case Op_Allocate:
2112 case Op_AllocateArray:
2113 case Op_Lock:
2114 case Op_Unlock:
2115 assert(false, "should be done already");
2116 break;
2117 #endif
2118 case Op_ArrayCopy:
2119 case Op_CallLeafNoFP:
2120 // Most array copies are ArrayCopy nodes at this point but there
2121 // are still a few direct calls to the copy subroutines (See
2122 // PhaseStringOpts::copy_string())
2123 is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2124 call->as_CallLeaf()->is_call_to_arraycopystub();
2125 // fall through
2126 case Op_CallLeafVector:
2127 case Op_CallLeaf: {
2128 // Stub calls: objects do not escape but they are not scalar replaceable.
2129 // Adjust escape state for outgoing arguments.
2130 const TypeTuple * d = call->tf()->domain();
2131 bool src_has_oops = false;
2132 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2133 const Type* at = d->field_at(i);
2134 Node *arg = call->in(i);
2135 if (arg == nullptr) {
2136 continue;
2137 }
2138 const Type *aat = _igvn->type(arg);
2139 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2140 continue;
2141 }
2142 if (arg->is_AddP()) {
2143 //
2144 // The inline_native_clone() case when the arraycopy stub is called
2145 // after the allocation before Initialize and CheckCastPP nodes.
2146 // Or normal arraycopy for object arrays case.
2147 //
2148 // Set AddP's base (Allocate) as not scalar replaceable since
2149 // pointer to the base (with offset) is passed as argument.
2150 //
2151 arg = get_addp_base(arg);
2152 }
2153 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2154 assert(arg_ptn != nullptr, "should be registered");
2155 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2156 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2157 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2158 aat->isa_ptr() != nullptr, "expecting a Ptr");
2159 bool arg_has_oops = aat->isa_oopptr() &&
2160 (aat->isa_instptr() ||
2161 (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
2162 if (i == TypeFunc::Parms) {
2163 src_has_oops = arg_has_oops;
2164 }
2165 //
2166 // src or dst could be j.l.Object when other is basic type array:
2167 //
2168 // arraycopy(char[],0,Object*,0,size);
2169 // arraycopy(Object*,0,char[],0,size);
2170 //
2171 // Don't add edges in such cases.
2172 //
2173 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2174 arg_has_oops && (i > TypeFunc::Parms);
2175 #ifdef ASSERT
2176 if (!(is_arraycopy ||
2177 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2178 (call->as_CallLeaf()->_name != nullptr &&
2179 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2180 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2181 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
2205 strcmp(call->as_CallLeaf()->_name, "dilithiumMontMulByConstant") == 0 ||
2206 strcmp(call->as_CallLeaf()->_name, "dilithiumDecomposePoly") == 0 ||
2207 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2208 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2209 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2210 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2211 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2212 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2213 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2214 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2215 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2216 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2217 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2218 strcmp(call->as_CallLeaf()->_name, "double_keccak") == 0 ||
2219 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2220 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2221 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2222 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2223 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2224 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2225 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2226 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2227 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2228 strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2229 strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2230 strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2231 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2232 strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2233 ))) {
2234 call->dump();
2235 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2236 }
2237 #endif
2238 // Always process arraycopy's destination object since
2239 // we need to add all possible edges to references in
2240 // source object.
2241 if (arg_esc >= PointsToNode::ArgEscape &&
2242 !arg_is_arraycopy_dest) {
2243 continue;
2244 }
2267 }
2268 }
2269 }
2270 break;
2271 }
2272 case Op_CallStaticJava: {
2273 // For a static call, we know exactly what method is being called.
2274 // Use bytecode estimator to record the call's escape effects
2275 #ifdef ASSERT
2276 const char* name = call->as_CallStaticJava()->_name;
2277 assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2278 #endif
2279 ciMethod* meth = call->as_CallJava()->method();
2280 if ((meth != nullptr) && meth->is_boxing_method()) {
2281 break; // Boxing methods do not modify any oops.
2282 }
2283 BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2284 // fall-through if not a Java method or no analyzer information
2285 if (call_analyzer != nullptr) {
2286 PointsToNode* call_ptn = ptnode_adr(call->_idx);
2287 const TypeTuple* d = call->tf()->domain();
2288 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2289 const Type* at = d->field_at(i);
2290 int k = i - TypeFunc::Parms;
2291 Node* arg = call->in(i);
2292 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2293 if (at->isa_ptr() != nullptr &&
2294 call_analyzer->is_arg_returned(k)) {
2295 // The call returns arguments.
2296 if (call_ptn != nullptr) { // Is call's result used?
2297 assert(call_ptn->is_LocalVar(), "node should be registered");
2298 assert(arg_ptn != nullptr, "node should be registered");
2299 add_edge(call_ptn, arg_ptn);
2300 }
2301 }
2302 if (at->isa_oopptr() != nullptr &&
2303 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2304 if (!call_analyzer->is_arg_stack(k)) {
2305 // The argument escapes globally
2306 set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2307 } else {
2311 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2312 }
2313 }
2314 }
2315 }
2316 if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2317 // The call returns arguments.
2318 assert(call_ptn->edge_count() > 0, "sanity");
2319 if (!call_analyzer->is_return_local()) {
2320 // The call also returns an unknown object.
2321 add_edge(call_ptn, phantom_obj);
2322 }
2323 }
2324 break;
2325 }
2326 }
2327 default: {
2328 // Fall through here if this is not a Java method, there is no analyzer
2329 // information, or it is some other type of call; assume the worst case:
2330 // all arguments globally escape.
2331 const TypeTuple* d = call->tf()->domain();
2332 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2333 const Type* at = d->field_at(i);
2334 if (at->isa_oopptr() != nullptr) {
2335 Node* arg = call->in(i);
2336 if (arg->is_AddP()) {
2337 arg = get_addp_base(arg);
2338 }
2339 assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2340 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2341 }
2342 }
2343 }
2344 }
2345 }
2346
2347
2348 // Finish Graph construction.
2349 bool ConnectionGraph::complete_connection_graph(
2350 GrowableArray<PointsToNode*>& ptnodes_worklist,
2351 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2724 PointsToNode* base = i.get();
2725 if (base->is_JavaObject()) {
2726 // Skip Allocate's fields which will be processed later.
2727 if (base->ideal_node()->is_Allocate()) {
2728 return 0;
2729 }
2730 assert(base == null_obj, "only null ptr base expected here");
2731 }
2732 }
2733 if (add_edge(field, phantom_obj)) {
2734 // New edge was added
2735 new_edges++;
2736 add_field_uses_to_worklist(field);
2737 }
2738 return new_edges;
2739 }
2740
2741 // Find fields initializing values for allocations.
2742 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2743 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2744 Node* alloc = pta->ideal_node();
2745
2746 // Do nothing for Allocate nodes since their field values are
2747 // "known" unless they are initialized by arraycopy/clone.
2748 if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2749 return 0;
2750 }
2751 assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
2752 #ifdef ASSERT
2753 if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
2754 const char* name = alloc->as_CallStaticJava()->_name;
2755 assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "sanity");
2756 }
2757 #endif
2758 // Non-escaped allocations returned from Java or runtime calls have unknown field values.
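// For example (illustrative): for 'A a = makeA();' where makeA() is a
// non-inlined call whose result is known not to escape, the values stored
// in a's oop fields are still unknown here, so each oop field gets an edge
// to phantom_obj below.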
2759 int new_edges = 0;
2760 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2761 PointsToNode* field = i.get();
2762 if (field->is_Field() && field->as_Field()->is_oop()) {
2763 if (add_edge(field, phantom_obj)) {
2764 // New edge was added
2765 new_edges++;
2766 add_field_uses_to_worklist(field->as_Field());
2767 }
2768 }
2769 }
2770 return new_edges;
2771 }
2772
2773 // Find fields initializing values for allocations.
2774 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2775 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2776 Node* alloc = pta->ideal_node();
2777 // Do nothing for Call nodes since their field values are unknown.
2778 if (!alloc->is_Allocate()) {
2779 return 0;
2780 }
2781 InitializeNode* ini = alloc->as_Allocate()->initialization();
2782 bool visited_bottom_offset = false;
2783 GrowableArray<int> offsets_worklist;
2784 int new_edges = 0;
2785
2786 // Check if an oop field's initializing value is recorded and add
2787 // a corresponding null value if it is not recorded.
2788 // Connection Graph does not record a default initialization by null
2789 // captured by Initialize node.
2790 //
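// For example (illustrative Java):
//
//   class A { Object f; }
//   A a = new A(); // 'f' keeps its default null; no explicit store for it
//                  // is captured by the Initialize node
//
// so the null_obj edge added below stands in for that default value.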
2791 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2792 PointsToNode* field = i.get(); // Field (AddP)
2793 if (!field->is_Field() || !field->as_Field()->is_oop()) {
2794 continue; // Not oop field
2795 }
2796 int offset = field->as_Field()->offset();
2797 if (offset == Type::OffsetBot) {
2798 if (!visited_bottom_offset) {
2844 } else {
2845 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2846 tty->print_cr("----------init store has invalid value -----");
2847 store->dump();
2848 val->dump();
2849 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2850 }
2851 for (EdgeIterator j(val); j.has_next(); j.next()) {
2852 PointsToNode* obj = j.get();
2853 if (obj->is_JavaObject()) {
2854 if (!field->points_to(obj->as_JavaObject())) {
2855 missed_obj = obj;
2856 break;
2857 }
2858 }
2859 }
2860 }
2861 if (missed_obj != nullptr) {
2862 tty->print_cr("----------field---------------------------------");
2863 field->dump();
2864 tty->print_cr("----------missed reference to object-----------");
2865 missed_obj->dump();
2866 tty->print_cr("----------object referenced by init store -----");
2867 store->dump();
2868 val->dump();
2869 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2870 }
2871 }
2872 #endif
2873 } else {
2874 // There could be initializing stores which follow allocation.
2875 // For example, a volatile field store is not collected
2876 // by Initialize node.
2877 //
2878 // Need to check for dependent loads to separate such stores from
2879 // stores which follow loads. For now, add the initial value null so
2880 // that the compare-pointers optimization works correctly.
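// For example (illustrative Java):
//
//   class A { volatile Object f; }
//   A a = new A();
//   a.f = o; // volatile store; not captured by the Initialize node, so it
//            // is one of the "stores which follow allocation"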
2881 }
2882 }
2883 if (value == nullptr) {
2884 // A field's initializing value was not recorded. Add null.
2885 if (add_edge(field, null_obj)) {
2886 // New edge was added
3202 assert(field->edge_count() > 0, "sanity");
3203 }
3204 }
3205 }
3206 }
3207 #endif
3208
3209 // Optimize ideal graph.
3210 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3211 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3212 Compile* C = _compile;
3213 PhaseIterGVN* igvn = _igvn;
3214 if (EliminateLocks) {
3215 // Mark locks before changing ideal graph.
3216 int cnt = C->macro_count();
3217 for (int i = 0; i < cnt; i++) {
3218 Node *n = C->macro_node(i);
3219 if (n->is_AbstractLock()) { // Lock and Unlock nodes
3220 AbstractLockNode* alock = n->as_AbstractLock();
3221 if (!alock->is_non_esc_obj()) {
3222 if (can_eliminate_lock(alock)) {
3223 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3224 // The lock could be marked eliminated by the lock coarsening
3225 // code during the first IGVN before EA. Replace the coarsened flag
3226 // so that all associated locks/unlocks are eliminated.
3227 #ifdef ASSERT
3228 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3229 #endif
3230 alock->set_non_esc_obj();
3231 }
3232 }
3233 }
3234 }
3235 }
3236
3237 if (OptimizePtrCompare) {
3238 for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3239 Node *n = ptr_cmp_worklist.at(i);
3240 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3241 const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3242 if (tcmp->singleton()) {
3243 Node* cmp = igvn->makecon(tcmp); // materialize the constant compare result
3244 #ifndef PRODUCT
3245 if (PrintOptimizePtrCompare) {
3246 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3247 if (Verbose) {
3248 n->dump(1);
3249 }
3250 }
3251 #endif
3252 igvn->replace_node(n, cmp);
3253 }
3254 }
3255 }
3256
3257 // For MemBarStoreStore nodes added in library_call.cpp, check
3258 // escape status of associated AllocateNode and optimize out
3259 // MemBarStoreStore node if the allocated object never escapes.
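// For example (illustrative): a MemBarStoreStore emitted to order the
// publication of a freshly allocated array can be relaxed to the plain
// MemBarCPUOrder created below when the array never escapes, since no
// other thread can ever observe it.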
3260 for (int i = 0; i < storestore_worklist.length(); i++) {
3261 Node* storestore = storestore_worklist.at(i);
3262 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3263 if (alloc->is_Allocate() && not_global_escape(alloc)) {
3264 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3265 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
3266 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3267 igvn->register_new_node_with_optimizer(mb);
3268 igvn->replace_node(storestore, mb);
3269 }
3270 }
3271 }
3272
3273 // Optimize object pointer compares.
3274 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3275 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0, 1]
3276 if (!OptimizePtrCompare) {
3277 return UNKNOWN;
3278 }
3279 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3280 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3281
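// For example, a comparison of two distinct non-escaping allocations can
// be folded to NE (a sketch of the cases handled below):
//
//   if (new Object() == new Object()) { ... } // never taken, folds to NE
//
// while only provably identical operands fold to EQ.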
3282 PointsToNode* ptn1 = ptnode_adr(left->_idx);
3283 PointsToNode* ptn2 = ptnode_adr(right->_idx);
3284 JavaObjectNode* jobj1 = unique_java_object(left);
3285 JavaObjectNode* jobj2 = unique_java_object(right);
3286
3287 // The use of this method during allocation merge reduction may cause 'left'
3288 // or 'right' to be something (e.g., a Phi) that isn't in the connection graph or
3412 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3413 assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3414 PointsToNode* ptadr = _nodes.at(n->_idx);
3415 if (ptadr != nullptr) {
3416 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3417 return;
3418 }
3419 Compile* C = _compile;
3420 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3421 map_ideal_node(n, ptadr);
3422 // Add edge from arraycopy node to source object.
3423 (void)add_edge(ptadr, src);
3424 src->set_arraycopy_src();
3425 // Add edge from destination object to arraycopy node.
3426 (void)add_edge(dst, ptadr);
3427 dst->set_arraycopy_dst();
3428 }
3429
3430 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3431 const Type* adr_type = n->as_AddP()->bottom_type();
3432 BasicType bt = T_INT;
3433 if (offset == Type::OffsetBot) {
3434 // Check only oop fields.
3435 if (!adr_type->isa_aryptr() ||
3436 adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3437 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3438 // OffsetBot is used to reference an array's element. Ignore the first AddP.
3439 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3440 bt = T_OBJECT;
3441 }
3442 }
3443 } else if (offset != oopDesc::klass_offset_in_bytes()) {
3444 if (adr_type->isa_instptr()) {
3445 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
3446 if (field != nullptr) {
3447 bt = field->layout_type();
3448 } else {
3449 // Check for unsafe oop field access
3450 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3451 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3452 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3453 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3454 bt = T_OBJECT;
3455 (*unsafe) = true;
3456 }
3457 }
3458 } else if (adr_type->isa_aryptr()) {
3459 if (offset == arrayOopDesc::length_offset_in_bytes()) {
3460 // Ignore array length load.
3461 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3462 // Ignore first AddP.
3463 } else {
3464 const Type* elemtype = adr_type->isa_aryptr()->elem();
3465 bt = elemtype->array_element_basic_type();
3466 }
3467 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3468 // Allocation initialization, ThreadLocal field access, unsafe access
3469 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3470 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3471 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3472 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3473 bt = T_OBJECT;
3474 }
3475 }
3476 }
3477 // Note: T_NARROWOOP is not classed as a real reference type
3478 return (is_reference_type(bt) || bt == T_NARROWOOP);
3479 }
3480
3481 // Returns the unique pointed-to Java object, or null.
3482 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3483 // If the node was created after the escape computation we can't answer.
3484 uint idx = n->_idx;
3485 if (idx >= nodes_size()) {
3642 return true;
3643 }
3644 }
3645 }
3646 }
3647 }
3648 return false;
3649 }
3650
3651 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3652 const Type *adr_type = phase->type(adr);
3653 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
3654 // We are computing a raw address for a store captured by an Initialize;
3655 // compute an appropriate address type. AddP cases #3 and #5 (see below).
3656 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3657 assert(offs != Type::OffsetBot ||
3658 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3659 "offset must be a constant or it is initialization of array");
3660 return offs;
3661 }
3662 const TypePtr *t_ptr = adr_type->isa_ptr();
3663 assert(t_ptr != nullptr, "must be a pointer type");
3664 return t_ptr->offset();
3665 }
3666
3667 Node* ConnectionGraph::get_addp_base(Node *addp) {
3668 assert(addp->is_AddP(), "must be AddP");
3669 //
3670 // AddP cases for Base and Address inputs:
3671 // case #1. Direct object's field reference:
3672 // Allocate
3673 // |
3674 // Proj #5 ( oop result )
3675 // |
3676 // CheckCastPP (cast to instance type)
3677 // | |
3678 // AddP ( base == address )
3679 //
3680 // case #2. Indirect object's field reference:
3681 // Phi
3682 // |
3683 // CastPP (cast to instance type)
3684 // | |
3798 }
3799 return nullptr;
3800 }
3801
3802 //
3803 // Adjust the type and inputs of an AddP which computes the
3804 // address of a field of an instance
3805 //
3806 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3807 PhaseGVN* igvn = _igvn;
3808 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3809 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3810 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3811 if (t == nullptr) {
3812 // We are computing a raw address for a store captured by an Initialize;
3813 // compute an appropriate address type (cases #3 and #5).
3814 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3815 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3816 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3817 assert(offs != Type::OffsetBot, "offset must be a constant");
3818 t = base_t->add_offset(offs)->is_oopptr();
3819 }
3820 int inst_id = base_t->instance_id();
3821 assert(!t->is_known_instance() || t->instance_id() == inst_id,
3822 "old type must be non-instance or match new type");
3823
3824 // The type 't' could be a subclass of 'base_t'.
3825 // As a result t->offset() could be larger than base_t's size, which will
3826 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3827 // constructor verifies the correctness of the offset.
3828 //
3829 // It could happen on a subclass's branch (from type profiling inlining)
3830 // which was not eliminated during parsing since the exactness of the
3831 // allocation type was not propagated to the subclass type check.
3832 //
3833 // Or the type 't' might not be related to 'base_t' at all.
3834 // It could happen when the CHA type differs from the MDO type on a dead
3835 // path (for example, from an instanceof check) which is not collapsed
3836 // during parsing.
3837 //
3838 // Do nothing for such an AddP node and don't process its users since
3839 // this code branch will go away.
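// For example (an illustrative Java sketch of the profiling case above):
//
//   Base b = new Base();       // exact allocation type is Base
//   if (b instanceof Sub) {    // dead path kept by the parser
//     use(((Sub) b).subField); // here 't' is Sub while 'base_t' is Base
//   }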
3840 if (!t->is_known_instance() &&
3841 !base_t->maybe_java_subtype_of(t)) {
3842 return false; // bail out
3843 }
3844 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
3845 // Do NOT remove the next line: ensure a new alias index is allocated
3846 // for the instance type. Note: C++ will not remove it since the call
3847 // has a side effect.
3848 int alias_idx = _compile->get_alias_index(tinst);
3849 igvn->set_type(addp, tinst);
3850 // record the allocation in the node map
3851 set_map(addp, get_map(base->_idx));
3852 // Set addp's Base and Address to 'base'.
3853 Node *abase = addp->in(AddPNode::Base);
3854 Node *adr = addp->in(AddPNode::Address);
3855 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3856 adr->in(0)->_idx == (uint)inst_id) {
3857 // Skip AddP cases #3 and #5.
3858 } else {
3859 assert(!abase->is_top(), "sanity"); // AddP case #3
3860 if (abase != base) {
3861 igvn->hash_delete(addp);
3862 addp->set_req(AddPNode::Base, base);
3863 if (abase == adr) {
3864 addp->set_req(AddPNode::Address, base);
4530 ptnode_adr(n->_idx)->dump();
4531 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4532 #endif
4533 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4534 return;
4535 } else {
4536 Node *val = get_map(jobj->idx()); // CheckCastPP node
4537 TypeNode *tn = n->as_Type();
4538 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4539 assert(tinst != nullptr && tinst->is_known_instance() &&
4540 tinst->instance_id() == jobj->idx(), "instance type expected.");
4541
4542 const Type *tn_type = igvn->type(tn);
4543 const TypeOopPtr *tn_t;
4544 if (tn_type->isa_narrowoop()) {
4545 tn_t = tn_type->make_ptr()->isa_oopptr();
4546 } else {
4547 tn_t = tn_type->isa_oopptr();
4548 }
4549 if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
4550 if (tn_type->isa_narrowoop()) {
4551 tn_type = tinst->make_narrowoop();
4552 } else {
4553 tn_type = tinst;
4554 }
4555 igvn->hash_delete(tn);
4556 igvn->set_type(tn, tn_type);
4557 tn->set_type(tn_type);
4558 igvn->hash_insert(tn);
4559 record_for_optimizer(n);
4560 } else {
4561 assert(tn_type == TypePtr::NULL_PTR ||
4562 (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4563 "unexpected type");
4564 continue; // Skip dead path with different type
4565 }
4566 }
4567 } else {
4568 DEBUG_ONLY(n->dump();)
4569 assert(false, "EA: unexpected node");
4570 continue;
4571 }
4572 // push allocation's users on appropriate worklist
4573 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4574 Node *use = n->fast_out(i);
4575 if (use->is_Mem() && use->in(MemNode::Address) == n) {
4576 // Load/store to instance's field
4577 memnode_worklist.append_if_missing(use);
4578 } else if (use->is_MemBar()) {
4579 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4580 memnode_worklist.append_if_missing(use);
4581 }
4582 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4583 Node* addp2 = find_second_addp(use, n);
4584 if (addp2 != nullptr) {
4585 alloc_worklist.append_if_missing(addp2);
4586 }
4587 alloc_worklist.append_if_missing(use);
4588 } else if (use->is_Phi() ||
4589 use->is_CheckCastPP() ||
4590 use->is_EncodeNarrowPtr() ||
4591 use->is_DecodeNarrowPtr() ||
4592 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4593 alloc_worklist.append_if_missing(use);
4594 #ifdef ASSERT
4595 } else if (use->is_Mem()) {
4596 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4597 } else if (use->is_MergeMem()) {
4598 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4599 } else if (use->is_SafePoint()) {
4600 // Look for MergeMem nodes for calls which reference unique allocation
4601 // (through CheckCastPP nodes) even for debug info.
4602 Node* m = use->in(TypeFunc::Memory);
4603 if (m->is_MergeMem()) {
4604 assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4605 }
4606 } else if (use->Opcode() == Op_EncodeISOArray) {
4607 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4608 // EncodeISOArray overwrites destination array
4609 memnode_worklist.append_if_missing(use);
4610 }
4611 } else {
4612 uint op = use->Opcode();
4613 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4614 (use->in(MemNode::Memory) == n)) {
4615 // They overwrite the memory edge corresponding to the destination array.
4616 memnode_worklist.append_if_missing(use);
4617 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4618 op == Op_CastP2X ||
4619 op == Op_FastLock || op == Op_AryEq ||
4620 op == Op_StrComp || op == Op_CountPositives ||
4621 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4622 op == Op_StrEquals || op == Op_VectorizedHashCode ||
4623 op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4624 op == Op_SubTypeCheck ||
4625 op == Op_ReinterpretS2HF ||
4626 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4627 n->dump();
4628 use->dump();
4629 assert(false, "EA: missing allocation reference path");
4630 }
4631 #endif
4632 }
4633 }
4634
4635 }
4636
4637 #ifdef ASSERT
4638 if (VerifyReduceAllocationMerges) {
4639 for (uint i = 0; i < reducible_merges.size(); i++) {
4640 Node* phi = reducible_merges.at(i);
4641
4642 if (!reduced_merges.member(phi)) {
4643 phi->dump(2);
4644 phi->dump(-2);
4708 // we don't need to do anything, but the users must be pushed
4709 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4710 if (n == nullptr) {
4711 continue;
4712 }
4713 } else if (n->is_CallLeaf()) {
4714 // Runtime calls with narrow memory input (no MergeMem node)
4715 // get the memory projection
4716 n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4717 if (n == nullptr) {
4718 continue;
4719 }
4720 } else if (n->Opcode() == Op_StrInflatedCopy) {
4721 // Check direct uses of StrInflatedCopy.
4722 // It is memory type Node - no special SCMemProj node.
4723 } else if (n->Opcode() == Op_StrCompressedCopy ||
4724 n->Opcode() == Op_EncodeISOArray) {
4725 // get the memory projection
4726 n = n->find_out_with(Op_SCMemProj);
4727 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4728 } else {
4729 #ifdef ASSERT
4730 if (!n->is_Mem()) {
4731 n->dump();
4732 }
4733 assert(n->is_Mem(), "memory node required.");
4734 #endif
4735 Node *addr = n->in(MemNode::Address);
4736 const Type *addr_t = igvn->type(addr);
4737 if (addr_t == Type::TOP) {
4738 continue;
4739 }
4740 assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4741 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4742 assert ((uint)alias_idx < new_index_end, "wrong alias index");
4743 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4744 if (_compile->failing()) {
4745 return;
4746 }
4747 if (mem != n->in(MemNode::Memory)) {
4752 if (n->is_Load()) {
4753 continue; // don't push users
4754 } else if (n->is_LoadStore()) {
4755 // get the memory projection
4756 n = n->find_out_with(Op_SCMemProj);
4757 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4758 }
4759 }
4760 // push user on appropriate worklist
4761 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4762 Node *use = n->fast_out(i);
4763 if (use->is_Phi() || use->is_ClearArray()) {
4764 memnode_worklist.append_if_missing(use);
4765 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4766 memnode_worklist.append_if_missing(use);
4767 } else if (use->is_MemBar() || use->is_CallLeaf()) {
4768 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4769 memnode_worklist.append_if_missing(use);
4770 }
4771 #ifdef ASSERT
4772 } else if (use->is_Mem()) {
4773 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4774 } else if (use->is_MergeMem()) {
4775 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4776 } else if (use->Opcode() == Op_EncodeISOArray) {
4777 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4778 // EncodeISOArray overwrites destination array
4779 memnode_worklist.append_if_missing(use);
4780 }
4781 } else {
4782 uint op = use->Opcode();
4783 if ((use->in(MemNode::Memory) == n) &&
4784 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4785 // They overwrite the memory edge corresponding to the destination array.
4786 memnode_worklist.append_if_missing(use);
4787 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4788 op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4789 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4790 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4791 n->dump();
4792 use->dump();
4793 assert(false, "EA: missing memory path");
4794 }
4795 #endif
4796 }
4797 }
4798 }
4799
4800 // Phase 3: Process MergeMem nodes from mergemem_worklist.
4801 // Walk each memory slice moving the first node encountered of each
4802 // instance type to the input corresponding to its alias index.
4803 uint length = mergemem_worklist.length();
4804 for( uint next = 0; next < length; ++next ) {
4805 MergeMemNode* nmm = mergemem_worklist.at(next);
4806 assert(!visited.test_set(nmm->_idx), "should not be visited before");
4807 // Note: we don't want to use MergeMemStream here because we only want to
4808 // scan inputs which exist at the start, not ones we add during processing.
4809 // Note 2: MergeMem may already contain instance memory slices added
4810 // during the find_inst_mem() call when memory nodes were processed above.
4871 if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
4872 if (_compile->do_reduce_allocation_merges()) {
4873 _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4874 } else if (_invocation > 0) {
4875 _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4876 } else {
4877 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4878 }
4879 return;
4880 }
4881
4882 igvn->hash_insert(nmm);
4883 record_for_optimizer(nmm);
4884 }
4885
4886 // Phase 4: Update the inputs of non-instance memory Phis and
4887 // the Memory input of memnodes
4888 // First update the inputs of any non-instance Phi's from
4889 // which we split out an instance Phi. Note we don't have
4890 // to recursively process Phi's encountered on the input memory
4891 // chains as is done in split_memory_phi() since they will
4892 // also be processed here.
4893 for (int j = 0; j < orig_phis.length(); j++) {
4894 PhiNode *phi = orig_phis.at(j);
4895 int alias_idx = _compile->get_alias_index(phi->adr_type());
4896 igvn->hash_delete(phi);
4897 for (uint i = 1; i < phi->req(); i++) {
4898 Node *mem = phi->in(i);
4899 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4900 if (_compile->failing()) {
4901 return;
4902 }
4903 if (mem != new_mem) {
4904 phi->set_req(i, new_mem);
4905 }
4906 }
4907 igvn->hash_insert(phi);
4908 record_for_optimizer(phi);
4909 }
4910
4911 // Update the memory inputs of MemNodes with the value we computed
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/bcEscapeAnalyzer.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "gc/shared/barrierSet.hpp"
28 #include "gc/shared/c2/barrierSetC2.hpp"
29 #include "libadt/vectset.hpp"
30 #include "memory/allocation.hpp"
31 #include "memory/metaspace.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "opto/arraycopynode.hpp"
34 #include "opto/c2compiler.hpp"
35 #include "opto/callnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/cfgnode.hpp"
38 #include "opto/compile.hpp"
39 #include "opto/escape.hpp"
40 #include "opto/inlinetypenode.hpp"
41 #include "opto/locknode.hpp"
42 #include "opto/macro.hpp"
43 #include "opto/movenode.hpp"
44 #include "opto/narrowptrnode.hpp"
45 #include "opto/phaseX.hpp"
46 #include "opto/rootnode.hpp"
47 #include "utilities/macros.hpp"
48
49 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
50 // If ReduceAllocationMerges is enabled we might call split_through_phi during
51 // split_unique_types and that will create additional nodes that need to be
52 // pushed to the ConnectionGraph. The code below bumps the initial capacity of
53 // _nodes by 10% to account for these additional nodes. If capacity is exceeded
54 // the array will be reallocated.
55 _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
56 _in_worklist(C->comp_arena()),
57 _next_pidx(0),
58 _collecting(true),
59 _verify(false),
60 _compile(C),
148 GrowableArray<SafePointNode*> sfn_worklist;
149 GrowableArray<MergeMemNode*> mergemem_worklist;
150 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
151
152 { Compile::TracePhase tp(Phase::_t_connectionGraph);
153
154 // 1. Populate Connection Graph (CG) with PointsTo nodes.
155 ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
156 // Initialize worklist
157 if (C->root() != nullptr) {
158 ideal_nodes.push(C->root());
159 }
160 // Processed ideal nodes are unique on ideal_nodes list
161 // but several ideal nodes are mapped to the phantom_obj.
162 // To avoid duplicated entries on the following worklists
163 // add the phantom_obj only once to them.
164 ptnodes_worklist.append(phantom_obj);
165 java_objects_worklist.append(phantom_obj);
166 for( uint next = 0; next < ideal_nodes.size(); ++next ) {
167 Node* n = ideal_nodes.at(next);
168 if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
169 !n->in(MemNode::Address)->is_AddP() &&
170 _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
171 // Load/Store at mark work address is at offset 0 so has no AddP which confuses EA
172 Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
173 _igvn->register_new_node_with_optimizer(addp);
174 _igvn->replace_input_of(n, MemNode::Address, addp);
175 ideal_nodes.push(addp);
176 _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
177 }
178 // Create PointsTo nodes and add them to Connection Graph. Called
179 // only once per ideal node since ideal_nodes is Unique_Node list.
180 add_node_to_connection_graph(n, &delayed_worklist);
181 PointsToNode* ptn = ptnode_adr(n->_idx);
182 if (ptn != nullptr && ptn != phantom_obj) {
183 ptnodes_worklist.append(ptn);
184 if (ptn->is_JavaObject()) {
185 java_objects_worklist.append(ptn->as_JavaObject());
186 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
187 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
188 // Only the results of allocations and Java static calls are interesting.
189 non_escaped_allocs_worklist.append(ptn->as_JavaObject());
190 }
191 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
192 oop_fields_worklist.append(ptn->as_Field());
193 }
194 }
195 // Collect some interesting nodes for further use.
196 switch (n->Opcode()) {
197 case Op_MergeMem:
1247
1248 // The next two inputs are:
1249 // (1) A copy of the original pointer to NSR objects.
1250 // (2) A selector, used to decide if we need to rematerialize an object
1251 // or use the pointer to a NSR object.
1252 // See the declaration of SafePointScalarMergeNode for more details about these fields.
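// Roughly: during deoptimization a selector value of -1 means "use the NSR
// pointer as is", while a value >= 0 selects which of the scalarized object
// descriptions added below must be rematerialized.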
1253 sfpt->add_req(nsr_merge_pointer);
1254 sfpt->add_req(selector);
1255
1256 for (uint i = 1; i < ophi->req(); i++) {
1257 Node* base = ophi->in(i);
1258 JavaObjectNode* ptn = unique_java_object(base);
1259
1260 // If the base is not scalar replaceable we don't need to register information about
1261 // it at this time.
1262 if (ptn == nullptr || !ptn->scalar_replaceable()) {
1263 continue;
1264 }
1265
1266 AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1267 Unique_Node_List value_worklist;
1268 #ifdef ASSERT
1269 const Type* res_type = alloc->result_cast()->bottom_type();
1270 if (res_type->is_inlinetypeptr() && !Compile::current()->has_circular_inline_type()) {
1271 PhiNode* phi = ophi->as_Phi();
1272 assert(!phi->can_push_inline_types_down(_igvn), "missed earlier scalarization opportunity");
1273 }
1274 #endif
1275 SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
1276 if (sobj == nullptr) {
1277 _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1278 return false;
1279 }
1280
1281 // Now make a pass over the debug information replacing any references
1282 // to the allocated object with "sobj"
1283 Node* ccpp = alloc->result_cast();
1284 sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1285
1286 // Register the scalarized object as a candidate for reallocation
1287 smerge->add_req(sobj);
1288
1289 // Scalarize inline types that were added to the safepoint.
1290 // Don't allow linking a constant oop (if available) for flat array elements
1291 // because Deoptimization::reassign_flat_array_elements needs field values.
1292 const bool allow_oop = !merge_t->is_flat();
1293 for (uint j = 0; j < value_worklist.size(); ++j) {
1294 InlineTypeNode* vt = value_worklist.at(j)->as_InlineType();
1295 vt->make_scalar_in_safepoints(_igvn, allow_oop);
1296 }
1297 }
1298
1299 // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
1300 sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1301
1302 // The call to 'replace_edges_in_range' above might have removed the
1303 // reference to ophi that we need at _merge_pointer_idx. The line below makes
1304 // sure the reference is maintained.
1305 sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1306 _igvn->_worklist.push(sfpt);
1307 }
1308
1309 return true;
1310 }
1311
1312 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) {
1313 bool delay = _igvn->delay_transform();
1314 _igvn->set_delay_transform(true);
1315 _igvn->hash_delete(ophi);
1316
1475 return false;
1476 }
1477
1478 // Returns true if at least one of the arguments to the call is an object
1479 // that does not escape globally.
1480 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1481 if (call->method() != nullptr) {
1482 uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1483 for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1484 Node* p = call->in(idx);
1485 if (not_global_escape(p)) {
1486 return true;
1487 }
1488 }
1489 } else {
1490 const char* name = call->as_CallStaticJava()->_name;
1491 assert(name != nullptr, "no name");
1492 // no arg escapes through uncommon traps
1493 if (strcmp(name, "uncommon_trap") != 0) {
1494 // process_call_arguments() assumes that all arguments escape globally
1495 const TypeTuple* d = call->tf()->domain_sig();
1496 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1497 const Type* at = d->field_at(i);
1498 if (at->isa_oopptr() != nullptr) {
1499 return true;
1500 }
1501 }
1502 }
1503 }
1504 return false;
1505 }
1506
1507
1508
1509 // Utility function for nodes that load an object
1510 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1511 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1512 // ThreadLocal has RawPtr type.
1513 const Type* t = _igvn->type(n);
1514 if (t->make_ptr() != nullptr) {
1515 Node* adr = n->in(MemNode::Address);
1549 // first IGVN optimization when escape information is still available.
1550 record_for_optimizer(n);
1551 } else if (n->is_Allocate()) {
1552 add_call_node(n->as_Call());
1553 record_for_optimizer(n);
1554 } else {
1555 if (n->is_CallStaticJava()) {
1556 const char* name = n->as_CallStaticJava()->_name;
1557 if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1558 return; // Skip uncommon traps
1559 }
1560 }
1561 // Don't mark as processed since the call's arguments have to be processed.
1562 delayed_worklist->push(n);
1563 // Check if a call returns an object.
1564 if ((n->as_Call()->returns_pointer() &&
1565 n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1566 (n->is_CallStaticJava() &&
1567 n->as_CallStaticJava()->is_boxing_method())) {
1568 add_call_node(n->as_Call());
1569 } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
1570 bool returns_oop = false;
1571 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
1572 ProjNode* pn = n->fast_out(i)->as_Proj();
1573 if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
1574 returns_oop = true;
1575 }
1576 }
1577 if (returns_oop) {
1578 add_call_node(n->as_Call());
1579 }
1580 }
1581 }
1582 return;
1583 }
1584 // Put this check here to process call arguments since some call nodes
1585 // point to phantom_obj.
1586 if (n_ptn == phantom_obj || n_ptn == null_obj) {
1587 return; // Skip predefined nodes.
1588 }
1589 switch (opcode) {
1590 case Op_AddP: {
1591 Node* base = get_addp_base(n);
1592 PointsToNode* ptn_base = ptnode_adr(base->_idx);
1593 // Field nodes are created for all field types. They are used in
1594 // adjust_scalar_replaceable_state() and split_unique_types().
1595 // Note, non-oop fields will have only base edges in Connection
1596 // Graph because such fields are not used for oop loads and stores.
1597 int offset = address_offset(n, igvn);
1598 add_field(n, PointsToNode::NoEscape, offset);
1599 if (ptn_base == nullptr) {
1600 delayed_worklist->push(n); // Process it later.
1601 } else {
1602 n_ptn = ptnode_adr(n_idx);
1603 add_base(n_ptn->as_Field(), ptn_base);
1604 }
1605 break;
1606 }
1607 case Op_CastX2P:
1608 case Op_CastI2N: {
1609 map_ideal_node(n, phantom_obj);
1610 break;
1611 }
1612 case Op_InlineType:
1613 case Op_CastPP:
1614 case Op_CheckCastPP:
1615 case Op_EncodeP:
1616 case Op_DecodeN:
1617 case Op_EncodePKlass:
1618 case Op_DecodeNKlass: {
1619 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1620 break;
1621 }
1622 case Op_CMoveP: {
1623 add_local_var(n, PointsToNode::NoEscape);
1624 // Do not add edges during the first iteration because some nodes
1625 // may not be defined yet.
1626 delayed_worklist->push(n);
1627 break;
1628 }
1629 case Op_ConP:
1630 case Op_ConN:
1631 case Op_ConNKlass: {
1632 // assume all oop constants globally escape except for null
1664 case Op_PartialSubtypeCheck: {
1665 // Produces Null or notNull and is used only in CmpP so
1666 // phantom_obj could be used.
1667 map_ideal_node(n, phantom_obj); // Result is unknown
1668 break;
1669 }
1670 case Op_Phi: {
1671 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1672 // ThreadLocal has RawPtr type.
1673 const Type* t = n->as_Phi()->type();
1674 if (t->make_ptr() != nullptr) {
1675 add_local_var(n, PointsToNode::NoEscape);
1676 // Do not add edges during the first iteration because some nodes
1677 // may not be defined yet.
1678 delayed_worklist->push(n);
1679 }
1680 break;
1681 }
1682 case Op_Proj: {
1683 // we are only interested in the oop result projection from a call
1684 if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
1685 (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
1686 assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1687 n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1688 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1689 }
1690 break;
1691 }
1692 case Op_Rethrow: // Exception object escapes
1693 case Op_Return: {
1694 if (n->req() > TypeFunc::Parms &&
1695 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1696 // Treat Return value as LocalVar with GlobalEscape escape state.
1697 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1698 }
1699 break;
1700 }
1701 case Op_CompareAndExchangeP:
1702 case Op_CompareAndExchangeN:
1703 case Op_GetAndSetP:
1704 case Op_GetAndSetN: {
1705 add_objload_to_connection_graph(n, delayed_worklist);
1706 // fall-through
1707 }
1769 if (n->is_Call()) {
1770 process_call_arguments(n->as_Call());
1771 return;
1772 }
1773 assert(n->is_Store() || n->is_LoadStore() ||
1774 ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1775 "node should be registered already");
1776 int opcode = n->Opcode();
1777 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1778 if (gc_handled) {
1779 return; // Ignore node if already handled by GC.
1780 }
1781 switch (opcode) {
1782 case Op_AddP: {
1783 Node* base = get_addp_base(n);
1784 PointsToNode* ptn_base = ptnode_adr(base->_idx);
1785 assert(ptn_base != nullptr, "field's base should be registered");
1786 add_base(n_ptn->as_Field(), ptn_base);
1787 break;
1788 }
1789 case Op_InlineType:
1790 case Op_CastPP:
1791 case Op_CheckCastPP:
1792 case Op_EncodeP:
1793 case Op_DecodeN:
1794 case Op_EncodePKlass:
1795 case Op_DecodeNKlass: {
1796 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1797 break;
1798 }
1799 case Op_CMoveP: {
1800 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1801 Node* in = n->in(i);
1802 if (in == nullptr) {
1803 continue; // ignore null
1804 }
1805 Node* uncast_in = in->uncast();
1806 if (uncast_in->is_top() || uncast_in == n) {
1807 continue; // ignore top or inputs which go back to this node
1808 }
1809 PointsToNode* ptn = ptnode_adr(in->_idx);
1824 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1825 // ThreadLocal has RawPtr type.
1826 assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1827 for (uint i = 1; i < n->req(); i++) {
1828 Node* in = n->in(i);
1829 if (in == nullptr) {
1830 continue; // ignore null
1831 }
1832 Node* uncast_in = in->uncast();
1833 if (uncast_in->is_top() || uncast_in == n) {
1834 continue; // ignore top or inputs which go back to this node
1835 }
1836 PointsToNode* ptn = ptnode_adr(in->_idx);
1837 assert(ptn != nullptr, "node should be registered");
1838 add_edge(n_ptn, ptn);
1839 }
1840 break;
1841 }
1842 case Op_Proj: {
1843 // we are only interested in the oop result projection from a call
1844 assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1845 n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1846 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1847 break;
1848 }
1849 case Op_Rethrow: // Exception object escapes
1850 case Op_Return: {
1851 assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1852 "Unexpected node type");
1853 // Treat Return value as LocalVar with GlobalEscape escape state.
1854 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1855 break;
1856 }
1857 case Op_CompareAndExchangeP:
1858 case Op_CompareAndExchangeN:
1859 case Op_GetAndSetP:
1860 case Op_GetAndSetN: {
1861 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1862 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1863 // fall-through
1864 }
1865 case Op_CompareAndSwapP:
2001 PointsToNode* ptn = ptnode_adr(val->_idx);
2002 assert(ptn != nullptr, "node should be registered");
2003 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
2004 // Add edge to object for unsafe access with offset.
2005 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
2006 assert(adr_ptn != nullptr, "node should be registered");
2007 if (adr_ptn->is_Field()) {
2008 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
2009 add_edge(adr_ptn, ptn);
2010 }
2011 return true;
2012 }
2013 #ifdef ASSERT
2014 n->dump(1);
2015 assert(false, "not unsafe");
2016 #endif
2017 return false;
2018 }
2019
2020 void ConnectionGraph::add_call_node(CallNode* call) {
2021 assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for calls which return a pointer");
2022 uint call_idx = call->_idx;
2023 if (call->is_Allocate()) {
2024 Node* k = call->in(AllocateNode::KlassNode);
2025 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
2026 assert(kt != nullptr, "TypeKlassPtr required.");
2027 PointsToNode::EscapeState es = PointsToNode::NoEscape;
2028 bool scalar_replaceable = true;
2029 NOT_PRODUCT(const char* nsr_reason = "");
2030 if (call->is_AllocateArray()) {
2031 if (!kt->isa_aryklassptr()) { // StressReflectiveCode
2032 es = PointsToNode::GlobalEscape;
2033 } else {
2034 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2035 if (length < 0) {
2036 // Not scalar replaceable if the length is not constant.
2037 scalar_replaceable = false;
2038 NOT_PRODUCT(nsr_reason = "has a non-constant length");
2039 } else if (length > EliminateAllocationArraySizeLimit) {
2040 // Not scalar replaceable if the length is too big.
2041 scalar_replaceable = false;
2077 //
2078 // - all oop arguments are escaping globally;
2079 //
2080 // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2081 //
2082 // - the same as CallDynamicJavaNode if bytecode analysis can't be done;
2083 //
2084 // - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2085 // - mapped to NoEscape JavaObject node if non-escaping object allocated
2086 // during call is returned;
2087 // - mapped to ArgEscape LocalVar node pointing to object arguments
2088 // which are returned and do not escape during the call;
2089 //
2090 // - the escaping status of oop arguments is determined by bytecode analysis;
2091 //
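// For example, StringBuilder.append() returns its receiver argument; when
// bytecode analysis proves the receiver does not otherwise escape in the
// callee, the call is mapped to an ArgEscape LocalVar pointing to that argument.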
2092 // For a static call, we know exactly what method is being called.
2093 // Use bytecode estimator to record whether the call's return value escapes.
2094 ciMethod* meth = call->as_CallJava()->method();
2095 if (meth == nullptr) {
2096 const char* name = call->as_CallStaticJava()->_name;
2097 assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2098 strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0 ||
2099 strncmp(name, "store_inline_type_fields_to_buf", 31) == 0, "TODO: add failed case check");
2100 // Returns a newly allocated non-escaped object.
2101 add_java_object(call, PointsToNode::NoEscape);
2102 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multianewarray"));
2103 } else if (meth->is_boxing_method()) {
2104 // Returns boxing object
2105 PointsToNode::EscapeState es;
2106 vmIntrinsics::ID intr = meth->intrinsic_id();
2107 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2108 // It does not escape if object is always allocated.
2109 es = PointsToNode::NoEscape;
2110 } else {
2111 // It escapes globally if object could be loaded from cache.
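// (E.g. Integer.valueOf(i) may return an object from the Integer cache,
// while Float.valueOf(f) and Double.valueOf(d) always allocate a new box,
// which is why only the float/double intrinsics get NoEscape above.)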
2112 es = PointsToNode::GlobalEscape;
2113 }
2114 add_java_object(call, es);
2115 if (es == PointsToNode::GlobalEscape) {
2116 set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2117 }
2118 } else {
2119 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2120 call_analyzer->copy_dependencies(_compile->dependencies());
2121 if (call_analyzer->is_return_allocated()) {
2122 // Returns a newly allocated non-escaped object, simply
2123 // update dependency information.
2124 // Mark it as NoEscape so that objects referenced by
2125 // its fields will be marked as NoEscape at least.
2126 add_java_object(call, PointsToNode::NoEscape);
2127 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2128 } else {
2129 // Determine whether any arguments are returned.
2130 const TypeTuple* d = call->tf()->domain_cc();
2131 bool ret_arg = false;
2132 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2133 if (d->field_at(i)->isa_ptr() != nullptr &&
2134 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2135 ret_arg = true;
2136 break;
2137 }
2138 }
2139 if (ret_arg) {
2140 add_local_var(call, PointsToNode::ArgEscape);
2141 } else {
2142 // Returns unknown object.
2143 map_ideal_node(call, phantom_obj);
2144 }
2145 }
2146 }
2147 } else {
2148 // Another type of call; assume the worst case:
2149 // the returned value is unknown and globally escapes.
2150 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
2158 #ifdef ASSERT
2159 case Op_Allocate:
2160 case Op_AllocateArray:
2161 case Op_Lock:
2162 case Op_Unlock:
2163 assert(false, "should be done already");
2164 break;
2165 #endif
2166 case Op_ArrayCopy:
2167 case Op_CallLeafNoFP:
2168 // Most array copies are ArrayCopy nodes at this point but there
2169 // are still a few direct calls to the copy subroutines (See
2170 // PhaseStringOpts::copy_string())
2171 is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2172 call->as_CallLeaf()->is_call_to_arraycopystub();
2173 // fall through
2174 case Op_CallLeafVector:
2175 case Op_CallLeaf: {
2176 // Stub calls, objects do not escape but they are not scalar replaceable.
2177 // Adjust escape state for outgoing arguments.
2178 const TypeTuple * d = call->tf()->domain_sig();
2179 bool src_has_oops = false;
2180 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2181 const Type* at = d->field_at(i);
2182 Node *arg = call->in(i);
2183 if (arg == nullptr) {
2184 continue;
2185 }
2186 const Type *aat = _igvn->type(arg);
2187 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2188 continue;
2189 }
2190 if (arg->is_AddP()) {
2191 //
2192 // The inline_native_clone() case when the arraycopy stub is called
2193 // after the allocation before Initialize and CheckCastPP nodes.
2194 // Or normal arraycopy for object arrays case.
2195 //
2196 // Set AddP's base (Allocate) as not scalar replaceable since
2197 // pointer to the base (with offset) is passed as argument.
2198 //
2199 arg = get_addp_base(arg);
2200 }
2201 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2202 assert(arg_ptn != nullptr, "should be registered");
2203 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2204 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2205 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2206 aat->isa_ptr() != nullptr, "expecting a Ptr");
2207 bool arg_has_oops = aat->isa_oopptr() &&
2208 (aat->isa_instptr() ||
2209 (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
2210 (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
2211 aat->isa_aryptr()->is_flat() &&
2212 aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
2213 if (i == TypeFunc::Parms) {
2214 src_has_oops = arg_has_oops;
2215 }
2216 //
2217 // src or dst could be j.l.Object when the other is a basic type array:
2218 //
2219 // arraycopy(char[],0,Object*,0,size);
2220 // arraycopy(Object*,0,char[],0,size);
2221 //
2222 // Don't add edges in such cases.
2223 //
2224 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2225 arg_has_oops && (i > TypeFunc::Parms);
2226 #ifdef ASSERT
2227 if (!(is_arraycopy ||
2228 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2229 (call->as_CallLeaf()->_name != nullptr &&
2230 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2231 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2232 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
2256 strcmp(call->as_CallLeaf()->_name, "dilithiumMontMulByConstant") == 0 ||
2257 strcmp(call->as_CallLeaf()->_name, "dilithiumDecomposePoly") == 0 ||
2258 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2259 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2260 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2261 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2262 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2263 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2264 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2265 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2266 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2267 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2268 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2269 strcmp(call->as_CallLeaf()->_name, "double_keccak") == 0 ||
2270 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2271 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2272 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2273 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2274 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2275 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2276 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2277 strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
2278 strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
2279 strcmp(call->as_CallLeaf()->_name, "store_inline_type_fields_to_buf") == 0 ||
2280 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2281 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2283 strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2284 strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2285 strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2286 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2287 strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2288 ))) {
2289 call->dump();
2290 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2291 }
2292 #endif
2293 // Always process arraycopy's destination object since
2294 // we need to add all possible edges to references in
2295 // source object.
2296 if (arg_esc >= PointsToNode::ArgEscape &&
2297 !arg_is_arraycopy_dest) {
2298 continue;
2299 }
2322 }
2323 }
2324 }
2325 break;
2326 }
2327 case Op_CallStaticJava: {
2328 // For a static call, we know exactly what method is being called.
2329 // Use bytecode estimator to record the call's escape effects
2330 #ifdef ASSERT
2331 const char* name = call->as_CallStaticJava()->_name;
2332 assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2333 #endif
2334 ciMethod* meth = call->as_CallJava()->method();
2335 if ((meth != nullptr) && meth->is_boxing_method()) {
2336 break; // Boxing methods do not modify any oops.
2337 }
2338 BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2339 // fall-through if not a Java method or no analyzer information
2340 if (call_analyzer != nullptr) {
2341 PointsToNode* call_ptn = ptnode_adr(call->_idx);
2342 const TypeTuple* d = call->tf()->domain_cc();
2343 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2344 const Type* at = d->field_at(i);
2345 int k = i - TypeFunc::Parms;
2346 Node* arg = call->in(i);
2347 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2348 if (at->isa_ptr() != nullptr &&
2349 call_analyzer->is_arg_returned(k)) {
2350 // The call returns arguments.
2351 if (call_ptn != nullptr) { // Is call's result used?
2352 assert(call_ptn->is_LocalVar(), "node should be registered");
2353 assert(arg_ptn != nullptr, "node should be registered");
2354 add_edge(call_ptn, arg_ptn);
2355 }
2356 }
2357 if (at->isa_oopptr() != nullptr &&
2358 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2359 if (!call_analyzer->is_arg_stack(k)) {
2360 // The argument escapes globally
2361 set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2362 } else {
2366 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2367 }
2368 }
2369 }
2370 }
2371 if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2372 // The call returns arguments.
2373 assert(call_ptn->edge_count() > 0, "sanity");
2374 if (!call_analyzer->is_return_local()) {
2376 // The call also returns an unknown object.
2376 add_edge(call_ptn, phantom_obj);
2377 }
2378 }
2379 break;
2380 }
2381 }
2382 default: {
2383 // Fall-through here if it is not a Java method or there is no analyzer
2384 // information, or for some other type of call; assume the worst case: all
2385 // arguments globally escape.
2386 const TypeTuple* d = call->tf()->domain_cc();
2387 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2388 const Type* at = d->field_at(i);
2389 if (at->isa_oopptr() != nullptr) {
2390 Node* arg = call->in(i);
2391 if (arg->is_AddP()) {
2392 arg = get_addp_base(arg);
2393 }
2394 assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2395 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2396 }
2397 }
2398 }
2399 }
2400 }
2401
2402
2403 // Finish Graph construction.
2404 bool ConnectionGraph::complete_connection_graph(
2405 GrowableArray<PointsToNode*>& ptnodes_worklist,
2406 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2779 PointsToNode* base = i.get();
2780 if (base->is_JavaObject()) {
2781 // Skip Allocate's fields which will be processed later.
2782 if (base->ideal_node()->is_Allocate()) {
2783 return 0;
2784 }
2785 assert(base == null_obj, "only null ptr base expected here");
2786 }
2787 }
2788 if (add_edge(field, phantom_obj)) {
2789 // New edge was added
2790 new_edges++;
2791 add_field_uses_to_worklist(field);
2792 }
2793 return new_edges;
2794 }
2795
2796 // Find fields initializing values for allocations.
2797 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2798 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2799 PointsToNode* init_val = phantom_obj;
2800 Node* alloc = pta->ideal_node();
2801
2802 // Do nothing for Allocate nodes since their field values are
2803 // "known" unless they are initialized by arraycopy/clone.
2804 if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2805 if (alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) {
2806 // Null-free inline type arrays are initialized with an init value instead of null
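// (For example, an element load from a fresh null-free value class array
// must observe the default instance passed in InitValue, never null.)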
2807 init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::InitValue)->_idx);
2808 assert(init_val != nullptr, "init value should be registered");
2809 } else {
2810 return 0;
2811 }
2812 }
2813 // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
2814 assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
2815 #ifdef ASSERT
2816 if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
2817 const char* name = alloc->as_CallStaticJava()->_name;
2818 assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2819 strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0 ||
2820 strncmp(name, "store_inline_type_fields_to_buf", 31) == 0, "sanity");
2821 }
2822 #endif
2824 int new_edges = 0;
2825 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2826 PointsToNode* field = i.get();
2827 if (field->is_Field() && field->as_Field()->is_oop()) {
2828 if (add_edge(field, init_val)) {
2829 // New edge was added
2830 new_edges++;
2831 add_field_uses_to_worklist(field->as_Field());
2832 }
2833 }
2834 }
2835 return new_edges;
2836 }
2837
2838 // Find fields initializing values for allocations.
2839 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2840 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2841 Node* alloc = pta->ideal_node();
2842 // Do nothing for Call nodes since their field values are unknown.
2843 if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) {
2844 return 0;
2845 }
2846 InitializeNode* ini = alloc->as_Allocate()->initialization();
2847 bool visited_bottom_offset = false;
2848 GrowableArray<int> offsets_worklist;
2849 int new_edges = 0;
2850
2851 // Check if an oop field's initializing value is recorded and add
2852 // a corresponding null value if it is not recorded.
2853 // Connection Graph does not record a default initialization by null
2854 // captured by Initialize node.
2855 //
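// For example, a field that is never stored to reads as null; without the
// explicit (field -> null_obj) edge added below, optimize_ptr_compare()
// could wrongly treat a load of that field as never-null.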
2856 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2857 PointsToNode* field = i.get(); // Field (AddP)
2858 if (!field->is_Field() || !field->as_Field()->is_oop()) {
2859 continue; // Not oop field
2860 }
2861 int offset = field->as_Field()->offset();
2862 if (offset == Type::OffsetBot) {
2863 if (!visited_bottom_offset) {
2909 } else {
2910 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2911 tty->print_cr("----------init store has invalid value -----");
2912 store->dump();
2913 val->dump();
2914 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2915 }
2916 for (EdgeIterator j(val); j.has_next(); j.next()) {
2917 PointsToNode* obj = j.get();
2918 if (obj->is_JavaObject()) {
2919 if (!field->points_to(obj->as_JavaObject())) {
2920 missed_obj = obj;
2921 break;
2922 }
2923 }
2924 }
2925 }
2926 if (missed_obj != nullptr) {
2927 tty->print_cr("----------field---------------------------------");
2928 field->dump();
2929 tty->print_cr("----------missed reference to object------------");
2930 missed_obj->dump();
2931 tty->print_cr("----------object referenced by init store-------");
2932 store->dump();
2933 val->dump();
2934 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2935 }
2936 }
2937 #endif
2938 } else {
2939 // There could be initializing stores which follow allocation.
2940 // For example, a volatile field store is not collected
2941 // by Initialize node.
2942 //
2943 // Need to check for dependent loads to separate such stores from
2944 // stores which follow loads. For now, add initial value null so
2945 // that compare pointers optimization works correctly.
2946 }
2947 }
2948 if (value == nullptr) {
2949 // A field's initializing value was not recorded. Add null.
2950 if (add_edge(field, null_obj)) {
2951 // New edge was added
3267 assert(field->edge_count() > 0, "sanity");
3268 }
3269 }
3270 }
3271 }
3272 #endif
3273
3274 // Optimize ideal graph.
3275 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3276 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3277 Compile* C = _compile;
3278 PhaseIterGVN* igvn = _igvn;
3279 if (EliminateLocks) {
3280 // Mark locks before changing ideal graph.
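// E.g. for "synchronized (new Object()) { ... }" the monitor object never
// escapes, so marking the lock as non-escaping here lets macro expansion
// eliminate the matching Lock/Unlock pair.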
3281 int cnt = C->macro_count();
3282 for (int i = 0; i < cnt; i++) {
3283 Node *n = C->macro_node(i);
3284 if (n->is_AbstractLock()) { // Lock and Unlock nodes
3285 AbstractLockNode* alock = n->as_AbstractLock();
3286 if (!alock->is_non_esc_obj()) {
3287 const Type* obj_type = igvn->type(alock->obj_node());
3288 if (can_eliminate_lock(alock) && !obj_type->is_inlinetypeptr()) {
3289 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3290 // The lock could have been marked eliminated by the lock coarsening
3291 // code during the first IGVN before EA. Replace the coarsened marking
3292 // so that all associated locks/unlocks are eliminated.
3293 #ifdef ASSERT
3294 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3295 #endif
3296 alock->set_non_esc_obj();
3297 }
3298 }
3299 }
3300 }
3301 }
3302
3303 if (OptimizePtrCompare) {
3304 for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3305 Node *n = ptr_cmp_worklist.at(i);
3306 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3307 const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3308 if (tcmp->singleton()) {
3309 Node* cmp = igvn->makecon(tcmp);
3310 #ifndef PRODUCT
3311 if (PrintOptimizePtrCompare) {
3312 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3313 if (Verbose) {
3314 n->dump(1);
3315 }
3316 }
3317 #endif
3318 igvn->replace_node(n, cmp);
3319 }
3320 }
3321 }
3322
3323 // For MemBarStoreStore nodes added in library_call.cpp, check
3324 // escape status of associated AllocateNode and optimize out
3325 // MemBarStoreStore node if the allocated object never escapes.
3326 for (int i = 0; i < storestore_worklist.length(); i++) {
3327 Node* storestore = storestore_worklist.at(i);
3328 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3329 if (alloc->is_Allocate() && not_global_escape(alloc)) {
3330 if (alloc->in(AllocateNode::InlineType) != nullptr) {
3331 // Non-escaping inline type buffer allocations don't require a membar
3332 storestore->as_MemBar()->remove(_igvn);
3333 } else {
3334 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3335 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
3336 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3337 igvn->register_new_node_with_optimizer(mb);
3338 igvn->replace_node(storestore, mb);
3339 }
3340 }
3341 }
3342 }
3343
3344 // Optimize object pointer compares.
3345 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3346 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0, 1]
3347 if (!OptimizePtrCompare) {
3348 return UNKNOWN;
3349 }
3350 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3351 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
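// E.g. pointers to two distinct non-escaping allocations can never be equal,
// so such a CmpP/CmpN folds to NE; comparing a fresh non-escaping allocation
// against null folds the same way.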
3352
3353 PointsToNode* ptn1 = ptnode_adr(left->_idx);
3354 PointsToNode* ptn2 = ptnode_adr(right->_idx);
3355 JavaObjectNode* jobj1 = unique_java_object(left);
3356 JavaObjectNode* jobj2 = unique_java_object(right);
3357
3358 // The use of this method during allocation merge reduction may cause 'left'
3359 // or 'right' to be something (e.g., a Phi) that isn't in the connection graph or
3483 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3484 assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3485 PointsToNode* ptadr = _nodes.at(n->_idx);
3486 if (ptadr != nullptr) {
3487 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3488 return;
3489 }
3490 Compile* C = _compile;
3491 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3492 map_ideal_node(n, ptadr);
3493 // Add edge from arraycopy node to source object.
3494 (void)add_edge(ptadr, src);
3495 src->set_arraycopy_src();
3496 // Add edge from destination object to arraycopy node.
3497 (void)add_edge(dst, ptadr);
3498 dst->set_arraycopy_dst();
3499 }
3500
3501 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3502 const Type* adr_type = n->as_AddP()->bottom_type();
3503 int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
3504 BasicType bt = T_INT;
3505 if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
3506 // Check only oop fields.
3507 if (!adr_type->isa_aryptr() ||
3508 adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3509 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3510 // OffsetBot is used to reference an array's element. Ignore the first AddP.
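// (An array element address is typically a chain of two AddP nodes; the
// element access is attributed to the second AddP, found by
// find_second_addp(), so the first AddP is skipped here.)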
3511 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3512 bt = T_OBJECT;
3513 }
3514 }
3515 } else if (offset != oopDesc::klass_offset_in_bytes()) {
3516 if (adr_type->isa_instptr()) {
3517 ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
3518 if (field != nullptr) {
3519 bt = field->layout_type();
3520 } else {
3521 // Check for unsafe oop field access
3522 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3523 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3524 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3525 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3526 bt = T_OBJECT;
3527 (*unsafe) = true;
3528 }
3529 }
3530 } else if (adr_type->isa_aryptr()) {
3531 if (offset == arrayOopDesc::length_offset_in_bytes()) {
3532 // Ignore array length load.
3533 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3534 // Ignore first AddP.
3535 } else {
3536 const Type* elemtype = adr_type->is_aryptr()->elem();
3537 if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
3538 ciInlineKlass* vk = elemtype->inline_klass();
3539 field_offset += vk->payload_offset();
3540 ciField* field = vk->get_field_by_offset(field_offset, false);
3541 if (field != nullptr) {
3542 bt = field->layout_type();
3543 } else {
3544 assert(field_offset == vk->payload_offset() + vk->null_marker_offset_in_payload(), "no field or null marker of %s at offset %d", vk->name()->as_utf8(), field_offset);
3545 bt = T_BOOLEAN;
3546 }
3547 } else {
3548 bt = elemtype->array_element_basic_type();
3549 }
3550 }
3551 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3552 // Allocation initialization, ThreadLocal field access, unsafe access
3553 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3554 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3555 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3556 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3557 bt = T_OBJECT;
3558 }
3559 }
3560 }
3561 // Note: T_NARROWOOP is not classed as a real reference type
3562 return (is_reference_type(bt) || bt == T_NARROWOOP);
3563 }
3564
3565 // Returns the unique pointed-to Java object or null.
3566 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3567 // If the node was created after the escape computation we can't answer.
3568 uint idx = n->_idx;
3569 if (idx >= nodes_size()) {
3726 return true;
3727 }
3728 }
3729 }
3730 }
3731 }
3732 return false;
3733 }
3734
3735 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3736 const Type *adr_type = phase->type(adr);
3737 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
3738 // We are computing a raw address for a store captured by an Initialize;
3739 // compute an appropriate address type. AddP cases #3 and #5 (see below).
3740 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3741 assert(offs != Type::OffsetBot ||
3742 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3743 "offset must be a constant or it is initialization of array");
3744 return offs;
3745 }
3746 return adr_type->is_ptr()->flat_offset();
3747 }
3748
3749 Node* ConnectionGraph::get_addp_base(Node *addp) {
3750 assert(addp->is_AddP(), "must be AddP");
3751 //
3752 // AddP cases for Base and Address inputs:
3753 // case #1. Direct object's field reference:
3754 // Allocate
3755 // |
3756 // Proj #5 ( oop result )
3757 // |
3758 // CheckCastPP (cast to instance type)
3759 // | |
3760 // AddP ( base == address )
3761 //
3762 // case #2. Indirect object's field reference:
3763 // Phi
3764 // |
3765 // CastPP (cast to instance type)
3766 // | |
3880 }
3881 return nullptr;
3882 }
3883
3884 //
3885 // Adjust the type and inputs of an AddP which computes the
3886 // address of a field of an instance
3887 //
3888 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3889 PhaseGVN* igvn = _igvn;
3890 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3891 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3892 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3893 if (t == nullptr) {
3894 // We are computing a raw address for a store captured by an Initialize;
3895 // compute an appropriate address type (cases #3 and #5).
3896 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3897 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3898 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3899 assert(offs != Type::OffsetBot, "offset must be a constant");
3900 if (base_t->isa_aryptr() != nullptr) {
3901 // In the case of a flat inline type array, each field has its
3902 // own slice so we need to extract the field being accessed from
3903 // the address computation
3904 t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
3905 } else {
3906 t = base_t->add_offset(offs)->is_oopptr();
3907 }
3908 }
3909 int inst_id = base_t->instance_id();
3910 assert(!t->is_known_instance() || t->instance_id() == inst_id,
3911 "old type must be non-instance or match new type");
3912
3913 // The type 't' could be a subclass of 'base_t'.
3914 // As a result t->offset() could be larger than base_t's size and it will
3915 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3916 // constructor verifies correctness of the offset.
3917 //
3918 // It could happen on a subclass's branch (from the type profiling
3919 // inlining) which was not eliminated during parsing since the exactness
3920 // of the allocation type was not propagated to the subclass type check.
3921 //
3922 // Or the type 't' might not be related to 'base_t' at all.
3923 // It could happen when the CHA type is different from the MDO type on a dead
3924 // path (for example, from an instanceof check) which is not collapsed during parsing.
3925 //
3926 // Do nothing for such an AddP node and don't process its users since
3927 // this code branch will go away.
3928 //
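// E.g. profile-guided inlining may emit "if (obj instanceof Sub) use((Sub) obj)"
// even though 'obj' is known here to be an exact Base instance; the Sub-typed
// AddP on that dead branch is simply left untouched.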
3929 if (!t->is_known_instance() &&
3930 !base_t->maybe_java_subtype_of(t)) {
3931 return false; // bail out
3932 }
3933 const TypePtr* tinst = base_t->add_offset(t->offset());
3934 if (tinst->isa_aryptr() && t->isa_aryptr()) {
3935 // In the case of a flat inline type array, each field has its
3936 // own slice so we need to keep track of the field being accessed.
3937 tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
3938 // Keep array properties (not flat/null-free)
3939 tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
3940 if (tinst == nullptr) {
3941 return false; // Skip dead path with inconsistent properties
3942 }
3943 }
3944
3945 // Do NOT remove the next line: ensure a new alias index is allocated
3946 // for the instance type. Note: C++ will not remove it since the call
3947 // has a side effect.
3948 int alias_idx = _compile->get_alias_index(tinst);
3949 igvn->set_type(addp, tinst);
3950 // record the allocation in the node map
3951 set_map(addp, get_map(base->_idx));
3952 // Set addp's Base and Address to 'base'.
3953 Node *abase = addp->in(AddPNode::Base);
3954 Node *adr = addp->in(AddPNode::Address);
3955 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3956 adr->in(0)->_idx == (uint)inst_id) {
3957 // Skip AddP cases #3 and #5.
3958 } else {
3959 assert(!abase->is_top(), "sanity"); // AddP case #3
3960 if (abase != base) {
3961 igvn->hash_delete(addp);
3962 addp->set_req(AddPNode::Base, base);
3963 if (abase == adr) {
3964 addp->set_req(AddPNode::Address, base);
4630 ptnode_adr(n->_idx)->dump();
4631 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4632 #endif
4633 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4634 return;
4635 } else {
4636 Node *val = get_map(jobj->idx()); // CheckCastPP node
4637 TypeNode *tn = n->as_Type();
4638 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4639 assert(tinst != nullptr && tinst->is_known_instance() &&
4640 tinst->instance_id() == jobj->idx(), "instance type expected.");
4641
4642 const Type *tn_type = igvn->type(tn);
4643 const TypeOopPtr *tn_t;
4644 if (tn_type->isa_narrowoop()) {
4645 tn_t = tn_type->make_ptr()->isa_oopptr();
4646 } else {
4647 tn_t = tn_type->isa_oopptr();
4648 }
4649 if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
4650 if (tn_t->isa_aryptr()) {
4651 // Keep array properties (not flat/null-free)
4652 tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
4653 if (tinst == nullptr) {
4654 continue; // Skip dead path with inconsistent properties
4655 }
4656 }
4657 if (tn_type->isa_narrowoop()) {
4658 tn_type = tinst->make_narrowoop();
4659 } else {
4660 tn_type = tinst;
4661 }
4662 igvn->hash_delete(tn);
4663 igvn->set_type(tn, tn_type);
4664 tn->set_type(tn_type);
4665 igvn->hash_insert(tn);
4666 record_for_optimizer(n);
4667 } else {
4668 assert(tn_type == TypePtr::NULL_PTR ||
4669 (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4670 "unexpected type");
4671 continue; // Skip dead path with different type
4672 }
4673 }
4674 } else {
4675 DEBUG_ONLY(n->dump();)
4676 assert(false, "EA: unexpected node");
4677 continue;
4678 }
4679 // push allocation's users on appropriate worklist
4680 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4681 Node *use = n->fast_out(i);
4682 if (use->is_Mem() && use->in(MemNode::Address) == n) {
4683 // Load/store to instance's field
4684 memnode_worklist.append_if_missing(use);
4685 } else if (use->is_MemBar()) {
4686 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4687 memnode_worklist.append_if_missing(use);
4688 }
4689 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4690 Node* addp2 = find_second_addp(use, n);
4691 if (addp2 != nullptr) {
4692 alloc_worklist.append_if_missing(addp2);
4693 }
4694 alloc_worklist.append_if_missing(use);
4695 } else if (use->is_Phi() ||
4696 use->is_CheckCastPP() ||
4697 use->is_EncodeNarrowPtr() ||
4698 use->is_DecodeNarrowPtr() ||
4699 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4700 alloc_worklist.append_if_missing(use);
4701 #ifdef ASSERT
4702 } else if (use->is_Mem()) {
4703 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4704 } else if (use->is_MergeMem()) {
4705 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4706 } else if (use->is_SafePoint()) {
4707 // Look for MergeMem nodes for calls which reference unique allocation
4708 // (through CheckCastPP nodes) even for debug info.
4709 Node* m = use->in(TypeFunc::Memory);
4710 if (m->is_MergeMem()) {
4711 assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4712 }
4713 } else if (use->Opcode() == Op_EncodeISOArray) {
4714 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4715 // EncodeISOArray overwrites destination array
4716 memnode_worklist.append_if_missing(use);
4717 }
4718 } else if (use->Opcode() == Op_Return) {
4719 // Allocation is referenced by field of returned inline type
4720 assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
4721 } else {
4722 uint op = use->Opcode();
4723 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4724 (use->in(MemNode::Memory) == n)) {
4725 // They overwrite the memory edge corresponding to the destination array.
4726 memnode_worklist.append_if_missing(use);
4727 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4728 op == Op_CastP2X ||
4729 op == Op_FastLock || op == Op_AryEq ||
4730 op == Op_StrComp || op == Op_CountPositives ||
4731 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4732 op == Op_StrEquals || op == Op_VectorizedHashCode ||
4733 op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4734 op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
4735 op == Op_ReinterpretS2HF ||
4736 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4737 n->dump();
4738 use->dump();
4739 assert(false, "EA: missing allocation reference path");
4740 }
4741 #endif
4742 }
4743 }
4744
4745 }
4746
4747 #ifdef ASSERT
4748 if (VerifyReduceAllocationMerges) {
4749 for (uint i = 0; i < reducible_merges.size(); i++) {
4750 Node* phi = reducible_merges.at(i);
4751
4752 if (!reduced_merges.member(phi)) {
4753 phi->dump(2);
4754 phi->dump(-2);
4818 // we don't need to do anything, but the users must be pushed
4819 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4820 if (n == nullptr) {
4821 continue;
4822 }
4823 } else if (n->is_CallLeaf()) {
4824 // Runtime calls with narrow memory input (no MergeMem node)
4825 // get the memory projection
4826 n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4827 if (n == nullptr) {
4828 continue;
4829 }
4830 } else if (n->Opcode() == Op_StrInflatedCopy) {
4831 // Check direct uses of StrInflatedCopy.
4832 // It is a memory-type Node - no special SCMemProj node.
4833 } else if (n->Opcode() == Op_StrCompressedCopy ||
4834 n->Opcode() == Op_EncodeISOArray) {
4835 // get the memory projection
4836 n = n->find_out_with(Op_SCMemProj);
4837 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4838 } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
4839 strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4840 n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
4841 } else {
4842 #ifdef ASSERT
4843 if (!n->is_Mem()) {
4844 n->dump();
4845 }
4846 assert(n->is_Mem(), "memory node required.");
4847 #endif
4848 Node *addr = n->in(MemNode::Address);
4849 const Type *addr_t = igvn->type(addr);
4850 if (addr_t == Type::TOP) {
4851 continue;
4852 }
4853 assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4854 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4855 assert ((uint)alias_idx < new_index_end, "wrong alias index");
4856 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4857 if (_compile->failing()) {
4858 return;
4859 }
4860 if (mem != n->in(MemNode::Memory)) {
4865 if (n->is_Load()) {
4866 continue; // don't push users
4867 } else if (n->is_LoadStore()) {
4868 // get the memory projection
4869 n = n->find_out_with(Op_SCMemProj);
4870 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4871 }
4872 }
4873 // push user on appropriate worklist
4874 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4875 Node *use = n->fast_out(i);
4876 if (use->is_Phi() || use->is_ClearArray()) {
4877 memnode_worklist.append_if_missing(use);
4878 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4879 memnode_worklist.append_if_missing(use);
4880 } else if (use->is_MemBar() || use->is_CallLeaf()) {
4881 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4882 memnode_worklist.append_if_missing(use);
4883 }
4884 #ifdef ASSERT
4885 } else if (use->is_Mem()) {
4886 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4887 } else if (use->is_MergeMem()) {
4888 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4889 } else if (use->Opcode() == Op_EncodeISOArray) {
4890 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4891 // EncodeISOArray overwrites destination array
4892 memnode_worklist.append_if_missing(use);
4893 }
4894 } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
4895 strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4896 // store_unknown_inline overwrites destination array
4897 memnode_worklist.append_if_missing(use);
4898 } else {
4899 uint op = use->Opcode();
4900 if ((use->in(MemNode::Memory) == n) &&
4901 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4902 // They overwrite the memory edge corresponding to the destination array.
4903 memnode_worklist.append_if_missing(use);
4904 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4905 op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4906 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4907 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
4908 n->dump();
4909 use->dump();
4910 assert(false, "EA: missing memory path");
4911 }
4912 #endif
4913 }
4914 }
4915 }
4916
4917 // Phase 3: Process MergeMem nodes from mergemem_worklist.
4918 // Walk each memory slice moving the first node encountered of each
4919 // instance type to the input corresponding to its alias index.
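// Roughly: once split_unique_types has given an instance field its own alias
// index, the memory state produced for that slice must become the MergeMem
// input at that alias index so later loads of the field see it.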
4920 uint length = mergemem_worklist.length();
4921 for( uint next = 0; next < length; ++next ) {
4922 MergeMemNode* nmm = mergemem_worklist.at(next);
4923 assert(!visited.test_set(nmm->_idx), "should not be visited before");
4924 // Note: we don't want to use MergeMemStream here because we only want to
4925 // scan inputs which exist at the start, not ones we add during processing.
4926 // Note 2: MergeMem may already contain instance memory slices added
4927 // during find_inst_mem() call when memory nodes were processed above.
4988 if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
4989 if (_compile->do_reduce_allocation_merges()) {
4990 _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4991 } else if (_invocation > 0) {
4992 _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4993 } else {
4994 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4995 }
4996 return;
4997 }
4998
4999 igvn->hash_insert(nmm);
5000 record_for_optimizer(nmm);
5001 }
5002
5003 // Phase 4: Update the inputs of non-instance memory Phis and
5004 // the Memory input of memnodes.
5005 // First update the inputs of any non-instance Phi's from
5006 // which we split out an instance Phi. Note we don't have
5007 // to recursively process Phi's encountered on the input memory
5008 // chains as is done in split_memory_phi() since they will
5009 // also be processed here.
5010 for (int j = 0; j < orig_phis.length(); j++) {
5011 PhiNode *phi = orig_phis.at(j);
5012 int alias_idx = _compile->get_alias_index(phi->adr_type());
5013 igvn->hash_delete(phi);
5014 for (uint i = 1; i < phi->req(); i++) {
5015 Node *mem = phi->in(i);
5016 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
5017 if (_compile->failing()) {
5018 return;
5019 }
5020 if (mem != new_mem) {
5021 phi->set_req(i, new_mem);
5022 }
5023 }
5024 igvn->hash_insert(phi);
5025 record_for_optimizer(phi);
5026 }
5027
5028 // Update the memory inputs of MemNodes with the value we computed
|