
src/hotspot/share/opto/escape.cpp

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "opto/c2compiler.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/escape.hpp"
  39 #include "opto/macro.hpp"
  40 #include "opto/locknode.hpp"
  41 #include "opto/phaseX.hpp"
  42 #include "opto/movenode.hpp"
  43 #include "opto/narrowptrnode.hpp"
  44 #include "opto/castnode.hpp"
  45 #include "opto/rootnode.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  49   // If ReduceAllocationMerges is enabled we might call split_through_phi during
  50   // split_unique_types and that will create additional nodes that need to be
  51   // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  52   // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  53   // the array will be reallocated.
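      // For example, if C->unique() is 10000 when this constructor runs, the
      // initial capacity below is 11000 entries; exceeding it only costs a
      // reallocation of the backing array, not a correctness problem.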
  54   _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  55   _in_worklist(C->comp_arena()),
  56   _next_pidx(0),
  57   _collecting(true),
  58   _verify(false),

 147   GrowableArray<SafePointNode*>  sfn_worklist;
 148   GrowableArray<MergeMemNode*>   mergemem_worklist;
 149   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 150 
 151   { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
 152 
 153   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 154   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
 155   // Initialize worklist
 156   if (C->root() != nullptr) {
 157     ideal_nodes.push(C->root());
 158   }
 159   // Processed ideal nodes are unique on the ideal_nodes list,
 160   // but several ideal nodes are mapped to the phantom_obj.
 161   // To avoid duplicate entries on the following worklists,
 162   // add the phantom_obj to them only once.
 163   ptnodes_worklist.append(phantom_obj);
 164   java_objects_worklist.append(phantom_obj);
 165   for (uint next = 0; next < ideal_nodes.size(); ++next) {
 166     Node* n = ideal_nodes.at(next);
 167     // Create PointsTo nodes and add them to the Connection Graph. Called
 168     // only once per ideal node since ideal_nodes is a Unique_Node list.
 169     add_node_to_connection_graph(n, &delayed_worklist);
 170     PointsToNode* ptn = ptnode_adr(n->_idx);
 171     if (ptn != nullptr && ptn != phantom_obj) {
 172       ptnodes_worklist.append(ptn);
 173       if (ptn->is_JavaObject()) {
 174         java_objects_worklist.append(ptn->as_JavaObject());
 175         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 176             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 177           // Only the results of allocations and Java static calls are interesting.
 178           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 179         }
 180       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 181         oop_fields_worklist.append(ptn->as_Field());
 182       }
 183     }
 184     // Collect some interesting nodes for further use.
 185     switch (n->Opcode()) {
 186       case Op_MergeMem:

1229 
1230     // The next two inputs are:
1231     //  (1) A copy of the original pointer to NSR objects.
1232     //  (2) A selector, used to decide if we need to rematerialize an object
1233     //      or use the pointer to a NSR object.
1234     // See the declaration of SafePointScalarMergeNode for more details on these fields.
1235     sfpt->add_req(nsr_merge_pointer);
1236     sfpt->add_req(selector);
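         // Rough sketch of the result, assuming the layout described in the
         // SafePointScalarMergeNode declaration: 'sfpt' now carries the NSR merge
         // pointer and the selector in its debug info, and the loop below attaches
         // one SafePointScalarObjectNode per scalar-replaceable Phi input to 'smerge'.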
1237 
1238     for (uint i = 1; i < ophi->req(); i++) {
1239       Node* base = ophi->in(i);
1240       JavaObjectNode* ptn = unique_java_object(base);
1241 
1242       // If the base is not scalar replaceable we don't need to register information about
1243       // it at this time.
1244       if (ptn == nullptr || !ptn->scalar_replaceable()) {
1245         continue;
1246       }
1247 
1248       AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1249       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);
1250       if (sobj == nullptr) {
1251         return false;
1252       }
1253 
1254       // Now make a pass over the debug information replacing any references
1255       // to the allocated object with "sobj"
1256       Node* ccpp = alloc->result_cast();
1257       sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1258 
1259       // Register the scalarized object as a candidate for reallocation
1260       smerge->add_req(sobj);
1261     }
1262 
1263     // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
1264     sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1265 
1266     // The call to 'replace_edges_in_range' above might have removed the
1267     // reference to ophi that we need at _merge_pointer_idx. The line below makes
1268     // sure the reference is maintained.
1269     sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1270     _igvn->_worklist.push(sfpt);
1271   }
1272 
1273   return true;
1274 }
1275 
1276 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *>  &alloc_worklist, GrowableArray<Node *>  &memnode_worklist) {
1277   bool delay = _igvn->delay_transform();
1278   _igvn->set_delay_transform(true);
1279   _igvn->hash_delete(ophi);
1280 

1439   return false;
1440 }
1441 
1442 // Returns true if at least one of the arguments to the call is an object
1443 // that does not escape globally.
1444 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1445   if (call->method() != nullptr) {
1446     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1447     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1448       Node* p = call->in(idx);
1449       if (not_global_escape(p)) {
1450         return true;
1451       }
1452     }
1453   } else {
1454     const char* name = call->as_CallStaticJava()->_name;
1455     assert(name != nullptr, "no name");
1456     // no arg escapes through uncommon traps
1457     if (strcmp(name, "uncommon_trap") != 0) {
1458       // process_call_arguments() assumes that all arguments escape globally
1459       const TypeTuple* d = call->tf()->domain();
1460       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1461         const Type* at = d->field_at(i);
1462         if (at->isa_oopptr() != nullptr) {
1463           return true;
1464         }
1465       }
1466     }
1467   }
1468   return false;
1469 }
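     // Illustrative example: for a call foo(x) where 'foo' is a hypothetical
     // method and 'x' is an allocation that the connection graph classified as
     // NoEscape or ArgEscape, not_global_escape(x) holds and the function above
     // returns true.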
1470 
1471 
1472 
1473 // Utility function for nodes that load an object
1474 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1475   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1476   // ThreadLocal has RawPtr type.
1477   const Type* t = _igvn->type(n);
1478   if (t->make_ptr() != nullptr) {
1479     Node* adr = n->in(MemNode::Address);

1513       // first IGVN optimization when escape information is still available.
1514       record_for_optimizer(n);
1515     } else if (n->is_Allocate()) {
1516       add_call_node(n->as_Call());
1517       record_for_optimizer(n);
1518     } else {
1519       if (n->is_CallStaticJava()) {
1520         const char* name = n->as_CallStaticJava()->_name;
1521         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1522           return; // Skip uncommon traps
1523         }
1524       }
1525       // Don't mark as processed since call's arguments have to be processed.
1526       delayed_worklist->push(n);
1527       // Check if a call returns an object.
1528       if ((n->as_Call()->returns_pointer() &&
1529            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1530           (n->is_CallStaticJava() &&
1531            n->as_CallStaticJava()->is_boxing_method())) {
1532         add_call_node(n->as_Call());
1533       }
1534     }
1535     return;
1536   }
1537   // Put this check here to process call arguments since some call nodes
1538   // point to phantom_obj.
1539   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1540     return; // Skip predefined nodes.
1541   }
1542   switch (opcode) {
1543     case Op_AddP: {
1544       Node* base = get_addp_base(n);
1545       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1546       // Field nodes are created for all field types. They are used in
1547       // adjust_scalar_replaceable_state() and split_unique_types().
1548       // Note, non-oop fields will have only base edges in Connection
1549       // Graph because such fields are not used for oop loads and stores.
1550       int offset = address_offset(n, igvn);
1551       add_field(n, PointsToNode::NoEscape, offset);
1552       if (ptn_base == nullptr) {
1553         delayed_worklist->push(n); // Process it later.
1554       } else {
1555         n_ptn = ptnode_adr(n_idx);
1556         add_base(n_ptn->as_Field(), ptn_base);
1557       }
1558       break;
1559     }
1560     case Op_CastX2P: {
1561       map_ideal_node(n, phantom_obj);
1562       break;
1563     }
1564     case Op_CastPP:
1565     case Op_CheckCastPP:
1566     case Op_EncodeP:
1567     case Op_DecodeN:
1568     case Op_EncodePKlass:
1569     case Op_DecodeNKlass: {
1570       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1571       break;
1572     }
1573     case Op_CMoveP: {
1574       add_local_var(n, PointsToNode::NoEscape);
1575       // Do not add edges during the first iteration because some inputs
1576       // may not be defined yet.
1577       delayed_worklist->push(n);
1578       break;
1579     }
1580     case Op_ConP:
1581     case Op_ConN:
1582     case Op_ConNKlass: {
1583       // assume all oop constants globally escape except for null

1615     case Op_PartialSubtypeCheck: {
1616      // Produces Null or notNull and is used only in CmpP, so
1617       // phantom_obj could be used.
1618       map_ideal_node(n, phantom_obj); // Result is unknown
1619       break;
1620     }
1621     case Op_Phi: {
1622       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1623       // ThreadLocal has RawPtr type.
1624       const Type* t = n->as_Phi()->type();
1625       if (t->make_ptr() != nullptr) {
1626         add_local_var(n, PointsToNode::NoEscape);
1627         // Do not add edges during the first iteration because some inputs
1628         // may not be defined yet.
1629         delayed_worklist->push(n);
1630       }
1631       break;
1632     }
1633     case Op_Proj: {
1634       // we are only interested in the oop result projection from a call
1635       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1636           n->in(0)->as_Call()->returns_pointer()) {
1637         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1638       }
1639       break;
1640     }
1641     case Op_Rethrow: // Exception object escapes
1642     case Op_Return: {
1643       if (n->req() > TypeFunc::Parms &&
1644           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1645         // Treat Return value as LocalVar with GlobalEscape escape state.
1646         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1647       }
1648       break;
1649     }
1650     case Op_CompareAndExchangeP:
1651     case Op_CompareAndExchangeN:
1652     case Op_GetAndSetP:
1653     case Op_GetAndSetN: {
1654       add_objload_to_connection_graph(n, delayed_worklist);
1655       // fall-through
1656     }

1718   if (n->is_Call()) {
1719     process_call_arguments(n->as_Call());
1720     return;
1721   }
1722   assert(n->is_Store() || n->is_LoadStore() ||
1723          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1724          "node should be registered already");
1725   int opcode = n->Opcode();
1726   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1727   if (gc_handled) {
1728     return; // Ignore node if already handled by GC.
1729   }
1730   switch (opcode) {
1731     case Op_AddP: {
1732       Node* base = get_addp_base(n);
1733       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1734       assert(ptn_base != nullptr, "field's base should be registered");
1735       add_base(n_ptn->as_Field(), ptn_base);
1736       break;
1737     }
1738     case Op_CastPP:
1739     case Op_CheckCastPP:
1740     case Op_EncodeP:
1741     case Op_DecodeN:
1742     case Op_EncodePKlass:
1743     case Op_DecodeNKlass: {
1744       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1745       break;
1746     }
1747     case Op_CMoveP: {
1748       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1749         Node* in = n->in(i);
1750         if (in == nullptr) {
1751           continue;  // ignore null
1752         }
1753         Node* uncast_in = in->uncast();
1754         if (uncast_in->is_top() || uncast_in == n) {
1755           continue;  // ignore top or inputs which go back this node
1756         }
1757         PointsToNode* ptn = ptnode_adr(in->_idx);

1772       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1773       // ThreadLocal has RawPtr type.
1774       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1775       for (uint i = 1; i < n->req(); i++) {
1776         Node* in = n->in(i);
1777         if (in == nullptr) {
1778           continue;  // ignore null
1779         }
1780         Node* uncast_in = in->uncast();
1781         if (uncast_in->is_top() || uncast_in == n) {
1782           continue;  // ignore top or inputs which go back this node
1783         }
1784         PointsToNode* ptn = ptnode_adr(in->_idx);
1785         assert(ptn != nullptr, "node should be registered");
1786         add_edge(n_ptn, ptn);
1787       }
1788       break;
1789     }
1790     case Op_Proj: {
1791       // we are only interested in the oop result projection from a call
1792       assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1793              n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
1794       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1795       break;
1796     }
1797     case Op_Rethrow: // Exception object escapes
1798     case Op_Return: {
1799       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1800              "Unexpected node type");
1801       // Treat Return value as LocalVar with GlobalEscape escape state.
1802       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1803       break;
1804     }
1805     case Op_CompareAndExchangeP:
1806     case Op_CompareAndExchangeN:
1807     case Op_GetAndSetP:
1808     case Op_GetAndSetN:{
1809       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1810       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1811       // fall-through
1812     }
1813     case Op_CompareAndSwapP:

1949     PointsToNode* ptn = ptnode_adr(val->_idx);
1950     assert(ptn != nullptr, "node should be registered");
1951     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1952     // Add edge to object for unsafe access with offset.
1953     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1954     assert(adr_ptn != nullptr, "node should be registered");
1955     if (adr_ptn->is_Field()) {
1956       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
1957       add_edge(adr_ptn, ptn);
1958     }
1959     return true;
1960   }
1961 #ifdef ASSERT
1962   n->dump(1);
1963   assert(false, "not unsafe");
1964 #endif
1965   return false;
1966 }
1967 
1968 void ConnectionGraph::add_call_node(CallNode* call) {
1969   assert(call->returns_pointer(), "only for call which returns pointer");
1970   uint call_idx = call->_idx;
1971   if (call->is_Allocate()) {
1972     Node* k = call->in(AllocateNode::KlassNode);
1973     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
1974     assert(kt != nullptr, "TypeKlassPtr required.");
1975     PointsToNode::EscapeState es = PointsToNode::NoEscape;
1976     bool scalar_replaceable = true;
1977     NOT_PRODUCT(const char* nsr_reason = "");
1978     if (call->is_AllocateArray()) {
1979       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
1980         es = PointsToNode::GlobalEscape;
1981       } else {
1982         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
1983         if (length < 0) {
1984           // Not scalar replaceable if the length is not constant.
1985           scalar_replaceable = false;
1986           NOT_PRODUCT(nsr_reason = "has a non-constant length");
1987         } else if (length > EliminateAllocationArraySizeLimit) {
1988           // Not scalar replaceable if the length is too big.
1989           scalar_replaceable = false;

2025     //
2026     //    - all oop arguments are escaping globally;
2027     //
2028     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2029     //
2030     //    - the same as CallDynamicJavaNode if bytecode analysis can't be done;
2031     //
2032     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2033     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2034     //      during call is returned;
2035     //    - mapped to ArgEscape LocalVar node pointing to object arguments
2036     //      which are returned and do not escape during the call;
2037     //
2038     //    - oop arguments' escape status is defined by bytecode analysis;
2039     //
2040     // For a static call, we know exactly what method is being called.
2041     // Use bytecode estimator to record whether the call's return value escapes.
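     //
     // Illustrative (hypothetical) Java example of the mapping above:
     //   static Object id(Object o) { return o; }
     // returns one of its arguments, so its call site becomes an ArgEscape
     // LocalVar pointing to the argument; a method whose fresh allocation is
     // proven non-escaping by bytecode analysis becomes a NoEscape JavaObject;
     // otherwise the result is mapped to the GlobalEscape phantom object.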
2042     ciMethod* meth = call->as_CallJava()->method();
2043     if (meth == nullptr) {
2044       const char* name = call->as_CallStaticJava()->_name;
2045       assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "TODO: add failed case check");
2046       // Returns a newly allocated non-escaped object.
2047       add_java_object(call, PointsToNode::NoEscape);
2048       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
2049     } else if (meth->is_boxing_method()) {
2050       // Returns boxing object
2051       PointsToNode::EscapeState es;
2052       vmIntrinsics::ID intr = meth->intrinsic_id();
2053       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2054         // It does not escape if object is always allocated.
2055         es = PointsToNode::NoEscape;
2056       } else {
2057         // It escapes globally if object could be loaded from cache.
2058         es = PointsToNode::GlobalEscape;
2059       }
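       // This mirrors the library's boxing caches: Integer.valueOf(), for
       // example, may return a shared cached instance for small values, so its
       // result is reachable from the cache, while Float.valueOf() and
       // Double.valueOf() always allocate a fresh object.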
2060       add_java_object(call, es);
2061       if (es == PointsToNode::GlobalEscape) {
2062         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2063       }
2064     } else {
2065       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2066       call_analyzer->copy_dependencies(_compile->dependencies());
2067       if (call_analyzer->is_return_allocated()) {
2068         // Returns a newly allocated non-escaped object, simply
2069         // update dependency information.
2070         // Mark it as NoEscape so that objects referenced by
2071       // its fields will be marked as NoEscape at least.
2072         add_java_object(call, PointsToNode::NoEscape);
2073         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2074       } else {
2075         // Determine whether any arguments are returned.
2076         const TypeTuple* d = call->tf()->domain();
2077         bool ret_arg = false;
2078         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2079           if (d->field_at(i)->isa_ptr() != nullptr &&
2080               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2081             ret_arg = true;
2082             break;
2083           }
2084         }
2085         if (ret_arg) {
2086           add_local_var(call, PointsToNode::ArgEscape);
2087         } else {
2088           // Returns unknown object.
2089           map_ideal_node(call, phantom_obj);
2090         }
2091       }
2092     }
2093   } else {
2094     // Another type of call; assume the worst case:
2095     // the returned value is unknown and globally escapes.
2096     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");

2104 #ifdef ASSERT
2105     case Op_Allocate:
2106     case Op_AllocateArray:
2107     case Op_Lock:
2108     case Op_Unlock:
2109       assert(false, "should be done already");
2110       break;
2111 #endif
2112     case Op_ArrayCopy:
2113     case Op_CallLeafNoFP:
2114       // Most array copies are ArrayCopy nodes at this point but there
2115       // are still a few direct calls to the copy subroutines (See
2116       // PhaseStringOpts::copy_string())
2117       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2118         call->as_CallLeaf()->is_call_to_arraycopystub();
2119       // fall through
2120     case Op_CallLeafVector:
2121     case Op_CallLeaf: {
2122       // Stub calls: objects do not escape but they are not scalar replaceable.
2123       // Adjust escape state for outgoing arguments.
2124       const TypeTuple * d = call->tf()->domain();
2125       bool src_has_oops = false;
2126       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2127         const Type* at = d->field_at(i);
2128         Node *arg = call->in(i);
2129         if (arg == nullptr) {
2130           continue;
2131         }
2132         const Type *aat = _igvn->type(arg);
2133         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2134           continue;
2135         }
2136         if (arg->is_AddP()) {
2137           //
2138           // The inline_native_clone() case when the arraycopy stub is called
2139           // after the allocation before Initialize and CheckCastPP nodes.
2140           // Or normal arraycopy for object arrays case.
2141           //
2142           // Set AddP's base (Allocate) as not scalar replaceable since
2143           // pointer to the base (with offset) is passed as argument.
2144           //
2145           arg = get_addp_base(arg);
2146         }
2147         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2148         assert(arg_ptn != nullptr, "should be registered");
2149         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2150         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2151           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2152                  aat->isa_ptr() != nullptr, "expecting a Ptr");
2153           bool arg_has_oops = aat->isa_oopptr() &&
2154                               (aat->isa_instptr() ||
2155                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
2156           if (i == TypeFunc::Parms) {
2157             src_has_oops = arg_has_oops;
2158           }
2159           //
2160           // src or dst could be j.l.Object when other is basic type array:
2161           //
2162           //   arraycopy(char[],0,Object*,0,size);
2163           //   arraycopy(Object*,0,char[],0,size);
2164           //
2165           // Don't add edges in such cases.
2166           //
2167           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2168                                        arg_has_oops && (i > TypeFunc::Parms);
2169 #ifdef ASSERT
2170           if (!(is_arraycopy ||
2171                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2172                 (call->as_CallLeaf()->_name != nullptr &&
2173                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2174                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2175                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

2186                   strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2187                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2188                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2189                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2190                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2191                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2192                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2193                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2194                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2195                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2196                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2197                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2198                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2199                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2200                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2201                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2202                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2203                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2204                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2205                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2206                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2207                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2208                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2209                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2210                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2211                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2212                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2213                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2214                  ))) {
2215             call->dump();
2216             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2217           }
2218 #endif
2219           // Always process arraycopy's destination object since
2220           // we need to add all possible edges to references in
2221           // source object.
2222           if (arg_esc >= PointsToNode::ArgEscape &&
2223               !arg_is_arraycopy_dest) {
2224             continue;
2225           }

2252           }
2253         }
2254       }
2255       break;
2256     }
2257     case Op_CallStaticJava: {
2258       // For a static call, we know exactly what method is being called.
2259       // Use the bytecode estimator to record the call's escape effects.
2260 #ifdef ASSERT
2261       const char* name = call->as_CallStaticJava()->_name;
2262       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2263 #endif
2264       ciMethod* meth = call->as_CallJava()->method();
2265       if ((meth != nullptr) && meth->is_boxing_method()) {
2266         break; // Boxing methods do not modify any oops.
2267       }
2268       BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2269       // fall-through if not a Java method or no analyzer information
2270       if (call_analyzer != nullptr) {
2271         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2272         const TypeTuple* d = call->tf()->domain();
2273         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2274           const Type* at = d->field_at(i);
2275           int k = i - TypeFunc::Parms;
2276           Node* arg = call->in(i);
2277           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2278           if (at->isa_ptr() != nullptr &&
2279               call_analyzer->is_arg_returned(k)) {
2280             // The call returns arguments.
2281             if (call_ptn != nullptr) { // Is call's result used?
2282               assert(call_ptn->is_LocalVar(), "node should be registered");
2283               assert(arg_ptn != nullptr, "node should be registered");
2284               add_edge(call_ptn, arg_ptn);
2285             }
2286           }
2287           if (at->isa_oopptr() != nullptr &&
2288               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2289             if (!call_analyzer->is_arg_stack(k)) {
2290               // The argument global escapes
2291               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2292             } else {

2296                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2297               }
2298             }
2299           }
2300         }
2301         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2302           // The call returns arguments.
2303           assert(call_ptn->edge_count() > 0, "sanity");
2304           if (!call_analyzer->is_return_local()) {
2305             // It may also return an unknown object.
2306             add_edge(call_ptn, phantom_obj);
2307           }
2308         }
2309         break;
2310       }
2311     }
2312     default: {
2313       // Fall-through here if not a Java method or no analyzer information
2314       // or some other type of call, assume the worst case: all arguments
2315       // globally escape.
2316       const TypeTuple* d = call->tf()->domain();
2317       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2318         const Type* at = d->field_at(i);
2319         if (at->isa_oopptr() != nullptr) {
2320           Node* arg = call->in(i);
2321           if (arg->is_AddP()) {
2322             arg = get_addp_base(arg);
2323           }
2324           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2325           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2326         }
2327       }
2328     }
2329   }
2330 }
2331 
2332 
2333 // Finish Graph construction.
2334 bool ConnectionGraph::complete_connection_graph(
2335                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2336                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

2709     PointsToNode* base = i.get();
2710     if (base->is_JavaObject()) {
2711       // Skip Allocate's fields which will be processed later.
2712       if (base->ideal_node()->is_Allocate()) {
2713         return 0;
2714       }
2715       assert(base == null_obj, "only null ptr base expected here");
2716     }
2717   }
2718   if (add_edge(field, phantom_obj)) {
2719     // New edge was added
2720     new_edges++;
2721     add_field_uses_to_worklist(field);
2722   }
2723   return new_edges;
2724 }
2725 
2726 // Find fields initializing values for allocations.
2727 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2728   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2729   Node* alloc = pta->ideal_node();
2730 
2731   // Do nothing for Allocate nodes since their field values are
2732   // "known" unless they are initialized by arraycopy/clone.
2733   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2734     return 0;
2735   }
2736   assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
2737 #ifdef ASSERT
2738   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
2739     const char* name = alloc->as_CallStaticJava()->_name;
2740     assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "sanity");
2741   }
2742 #endif
2743   // Non-escaped allocations returned from Java or runtime calls have unknown values in their fields.
2744   int new_edges = 0;
2745   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2746     PointsToNode* field = i.get();
2747     if (field->is_Field() && field->as_Field()->is_oop()) {
2748       if (add_edge(field, phantom_obj)) {
2749         // New edge was added
2750         new_edges++;
2751         add_field_uses_to_worklist(field->as_Field());
2752       }
2753     }
2754   }
2755   return new_edges;
2756 }
2757 
2758 // Find fields initializing values for allocations.
2759 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2760   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2761   Node* alloc = pta->ideal_node();
2762   // Do nothing for Call nodes since their field values are unknown.
2763   if (!alloc->is_Allocate()) {
2764     return 0;
2765   }
2766   InitializeNode* ini = alloc->as_Allocate()->initialization();
2767   bool visited_bottom_offset = false;
2768   GrowableArray<int> offsets_worklist;
2769   int new_edges = 0;
2770 
2771   // Check if an oop field's initializing value is recorded and add
2772   // a corresponding null value if it is not recorded.
2773   // Connection Graph does not record a default initialization by null
2774   // captured by Initialize node.
2775   //
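     // For example (illustrative): after 'new A()' with no captured store to an
     // oop field 'f', the field is known to hold null; the null_obj edge added
     // below is what allows a later 'a.f == null' compare to be folded.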
2776   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2777     PointsToNode* field = i.get(); // Field (AddP)
2778     if (!field->is_Field() || !field->as_Field()->is_oop()) {
2779       continue; // Not oop field
2780     }
2781     int offset = field->as_Field()->offset();
2782     if (offset == Type::OffsetBot) {
2783       if (!visited_bottom_offset) {

2829               } else {
2830                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2831                   tty->print_cr("----------init store has invalid value -----");
2832                   store->dump();
2833                   val->dump();
2834                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2835                 }
2836                 for (EdgeIterator j(val); j.has_next(); j.next()) {
2837                   PointsToNode* obj = j.get();
2838                   if (obj->is_JavaObject()) {
2839                     if (!field->points_to(obj->as_JavaObject())) {
2840                       missed_obj = obj;
2841                       break;
2842                     }
2843                   }
2844                 }
2845               }
2846               if (missed_obj != nullptr) {
2847                 tty->print_cr("----------field---------------------------------");
2848                 field->dump();
2849                 tty->print_cr("----------missed reference to object-----------");
2850                 missed_obj->dump();
2851                 tty->print_cr("----------object referenced by init store -----");
2852                 store->dump();
2853                 val->dump();
2854                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2855               }
2856             }
2857 #endif
2858           } else {
2859             // There could be initializing stores which follow allocation.
2860             // For example, a volatile field store is not collected
2861             // by Initialize node.
2862             //
2863             // Need to check for dependent loads to separate such stores from
2864             // stores which follow loads. For now, add an initial null value so
2865             // that the pointer-compare optimization works correctly.
2866           }
2867         }
2868         if (value == nullptr) {
2869           // A field's initializing value was not recorded. Add null.
2870           if (add_edge(field, null_obj)) {
2871             // New edge was added

3187         assert(field->edge_count() > 0, "sanity");
3188       }
3189     }
3190   }
3191 }
3192 #endif
3193 
3194 // Optimize ideal graph.
3195 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3196                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3197   Compile* C = _compile;
3198   PhaseIterGVN* igvn = _igvn;
3199   if (EliminateLocks) {
3200     // Mark locks before changing ideal graph.
3201     int cnt = C->macro_count();
3202     for (int i = 0; i < cnt; i++) {
3203       Node *n = C->macro_node(i);
3204       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3205         AbstractLockNode* alock = n->as_AbstractLock();
3206         if (!alock->is_non_esc_obj()) {
3207           if (can_eliminate_lock(alock)) {
3208             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3209             // The lock could be marked eliminated by the lock coarsening
3210             // code during the first IGVN pass before EA. Replace the coarsened
3211             // flag to eliminate all associated locks/unlocks.
3212 #ifdef ASSERT
3213             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3214 #endif
3215             alock->set_non_esc_obj();
3216           }
3217         }
3218       }
3219     }
3220   }
3221 
3222   if (OptimizePtrCompare) {
3223     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3224       Node *n = ptr_cmp_worklist.at(i);
3225       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3226       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3227       if (tcmp->singleton()) {

3229 #ifndef PRODUCT
3230         if (PrintOptimizePtrCompare) {
3231           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3232           if (Verbose) {
3233             n->dump(1);
3234           }
3235         }
3236 #endif
3237         igvn->replace_node(n, cmp);
3238       }
3239     }
3240   }
3241 
3242   // For MemBarStoreStore nodes added in library_call.cpp, check
3243   // escape status of associated AllocateNode and optimize out
3244   // MemBarStoreStore node if the allocated object never escapes.
3245   for (int i = 0; i < storestore_worklist.length(); i++) {
3246     Node* storestore = storestore_worklist.at(i);
3247     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3248     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3249       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3250       mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3251       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3252       igvn->register_new_node_with_optimizer(mb);
3253       igvn->replace_node(storestore, mb);
3254     }
3255   }
3256 }
3257 
3258 // Optimize object compares.
3259 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3260   assert(OptimizePtrCompare, "sanity");
3261   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3262   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3263   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0,1]
3264 
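     // Hedged summary of the folding below: two distinct non-escaping
     // allocations can never be the same object, so such a CmpP/CmpN may fold
     // to NE; when neither side can be disambiguated, UNKNOWN is returned and
     // the compare is left in place.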
3265   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3266   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3267   JavaObjectNode* jobj1 = unique_java_object(left);
3268   JavaObjectNode* jobj2 = unique_java_object(right);
3269 
3270   // The use of this method during allocation merge reduction may cause 'left'
3271   // or 'right' to be something (e.g., a Phi) that isn't in the connection graph
3272   // or that doesn't reference a unique java object.
3273   if (ptn1 == nullptr || ptn2 == nullptr ||

3395   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3396   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3397   PointsToNode* ptadr = _nodes.at(n->_idx);
3398   if (ptadr != nullptr) {
3399     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3400     return;
3401   }
3402   Compile* C = _compile;
3403   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3404   map_ideal_node(n, ptadr);
3405   // Add edge from arraycopy node to source object.
3406   (void)add_edge(ptadr, src);
3407   src->set_arraycopy_src();
3408   // Add edge from destination object to arraycopy node.
3409   (void)add_edge(dst, ptadr);
3410   dst->set_arraycopy_dst();
3411 }
3412 
3413 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3414   const Type* adr_type = n->as_AddP()->bottom_type();
3415   BasicType bt = T_INT;
3416   if (offset == Type::OffsetBot) {
3417     // Check only oop fields.
3418     if (!adr_type->isa_aryptr() ||
3419         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3420         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3421       // OffsetBot is used to reference an array's element. Ignore the first AddP.
3422       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3423         bt = T_OBJECT;
3424       }
3425     }
3426   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3427     if (adr_type->isa_instptr()) {
3428       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
3429       if (field != nullptr) {
3430         bt = field->layout_type();
3431       } else {
3432         // Check for unsafe oop field access
3433         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3434             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3435             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3436             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3437           bt = T_OBJECT;
3438           (*unsafe) = true;
3439         }
3440       }
3441     } else if (adr_type->isa_aryptr()) {
3442       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3443         // Ignore array length load.
3444       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3445         // Ignore first AddP.
3446       } else {
3447         const Type* elemtype = adr_type->isa_aryptr()->elem();
3448         bt = elemtype->array_element_basic_type();
3449       }
3450     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3451       // Allocation initialization, ThreadLocal field access, unsafe access
3452       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3453           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3454           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3455           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3456         bt = T_OBJECT;
3457       }
3458     }
3459   }
3460   // Note: T_NARROWOOP is not classed as a real reference type
3461   return (is_reference_type(bt) || bt == T_NARROWOOP);
3462 }
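     // Illustrative examples of the classification above: an AddP addressing a
     // known instance int field yields bt == T_INT (not an oop field); one
     // addressing an element of an Object[] yields T_OBJECT; an unresolved
     // instance-field access feeding a StoreP/LoadP is treated as an unsafe
     // oop access and is reported through *unsafe.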
3463 
3464 // Returns unique pointed java object or null.
3465 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3466   // If the node was created after the escape computation we can't answer.
3467   uint idx = n->_idx;
3468   if (idx >= nodes_size()) {

3625             return true;
3626           }
3627         }
3628       }
3629     }
3630   }
3631   return false;
3632 }
3633 
3634 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3635   const Type *adr_type = phase->type(adr);
3636   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
3637     // We are computing a raw address for a store captured by an Initialize;
3638     // compute an appropriate address type. AddP cases #3 and #5 (see below).
3639     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3640     assert(offs != Type::OffsetBot ||
3641            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3642            "offset must be a constant or it is initialization of array");
3643     return offs;
3644   }
3645   const TypePtr *t_ptr = adr_type->isa_ptr();
3646   assert(t_ptr != nullptr, "must be a pointer type");
3647   return t_ptr->offset();
3648 }
3649 
3650 Node* ConnectionGraph::get_addp_base(Node *addp) {
3651   assert(addp->is_AddP(), "must be AddP");
3652   //
3653   // AddP cases for Base and Address inputs:
3654   // case #1. Direct object's field reference:
3655   //     Allocate
3656   //       |
3657   //     Proj #5 ( oop result )
3658   //       |
3659   //     CheckCastPP (cast to instance type)
3660   //      | |
3661   //     AddP  ( base == address )
3662   //
3663   // case #2. Indirect object's field reference:
3664   //      Phi
3665   //       |
3666   //     CastPP (cast to instance type)
3667   //      | |

3781   }
3782   return nullptr;
3783 }
3784 
3785 //
3786 // Adjust the type and inputs of an AddP which computes the
3787 // address of a field of an instance
3788 //
3789 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3790   PhaseGVN* igvn = _igvn;
3791   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3792   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3793   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3794   if (t == nullptr) {
3795     // We are computing a raw address for a store captured by an Initialize;
3796     // compute an appropriate address type (cases #3 and #5).
3797     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3798     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3799     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3800     assert(offs != Type::OffsetBot, "offset must be a constant");
3801     t = base_t->add_offset(offs)->is_oopptr();
3802   }
3803   int inst_id =  base_t->instance_id();
3804   assert(!t->is_known_instance() || t->instance_id() == inst_id,
3805                              "old type must be non-instance or match new type");
3806 
3807   // The type 't' could be a subclass of 'base_t'.
3808   // As a result t->offset() could be larger than base_t's size and it will
3809   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3810   // constructor verifies the correctness of the offset.
3811   //
3812   // It could happen on a subclass's branch (from type-profiling
3813   // inlining) which was not eliminated during parsing since the exactness
3814   // of the allocation type was not propagated to the subclass type check.
3815   //
3816   // Or the type 't' might not be related to 'base_t' at all.
3817   // It could happen when the CHA type is different from the MDO type on a dead
3818   // path (for example, from an instanceof check) which is not collapsed during parsing.
3819   //
3820   // Do nothing for such AddP node and don't process its users since
3821   // this code branch will go away.
3822   //
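     // Hypothetical example: type-profile inlining emitted a branch for a
     // subclass B of the allocated exact class A; the dead branch still holds
     // an AddP typed for a B-only field whose offset lies beyond A's size, so
     // A's instance type must not be pushed onto that AddP.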
3823   if (!t->is_known_instance() &&
3824       !base_t->maybe_java_subtype_of(t)) {
3825      return false; // bail out
3826   }
3827   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
3828   // Do NOT remove the next line: ensure a new alias index is allocated
3829   // for the instance type. Note: C++ will not remove it since the call
3830   // has a side effect.
3831   int alias_idx = _compile->get_alias_index(tinst);
3832   igvn->set_type(addp, tinst);
3833   // record the allocation in the node map
3834   set_map(addp, get_map(base->_idx));
3835   // Set addp's Base and Address to 'base'.
3836   Node *abase = addp->in(AddPNode::Base);
3837   Node *adr   = addp->in(AddPNode::Address);
3838   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3839       adr->in(0)->_idx == (uint)inst_id) {
3840     // Skip AddP cases #3 and #5.
3841   } else {
3842     assert(!abase->is_top(), "sanity"); // AddP case #3
3843     if (abase != base) {
3844       igvn->hash_delete(addp);
3845       addp->set_req(AddPNode::Base, base);
3846       if (abase == adr) {
3847         addp->set_req(AddPNode::Address, base);

4513         ptnode_adr(n->_idx)->dump();
4514         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4515 #endif
4516         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4517         return;
4518       } else {
4519         Node *val = get_map(jobj->idx());   // CheckCastPP node
4520         TypeNode *tn = n->as_Type();
4521         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4522         assert(tinst != nullptr && tinst->is_known_instance() &&
4523                tinst->instance_id() == jobj->idx(), "instance type expected.");
4524 
4525         const Type *tn_type = igvn->type(tn);
4526         const TypeOopPtr *tn_t;
4527         if (tn_type->isa_narrowoop()) {
4528           tn_t = tn_type->make_ptr()->isa_oopptr();
4529         } else {
4530           tn_t = tn_type->isa_oopptr();
4531         }
4532         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
4533           if (tn_type->isa_narrowoop()) {
4534             tn_type = tinst->make_narrowoop();
4535           } else {
4536             tn_type = tinst;
4537           }
4538           igvn->hash_delete(tn);
4539           igvn->set_type(tn, tn_type);
4540           tn->set_type(tn_type);
4541           igvn->hash_insert(tn);
4542           record_for_optimizer(n);
4543         } else {
4544           assert(tn_type == TypePtr::NULL_PTR ||
4545                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4546                  "unexpected type");
4547           continue; // Skip dead path with different type
4548         }
4549       }
4550     } else {
4551       debug_only(n->dump();)
4552       assert(false, "EA: unexpected node");
4553       continue;
4554     }
4555     // push allocation's users on appropriate worklist
4556     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4557       Node *use = n->fast_out(i);
4558       if (use->is_Mem() && use->in(MemNode::Address) == n) {
4559         // Load/store to instance's field
4560         memnode_worklist.append_if_missing(use);
4561       } else if (use->is_MemBar()) {
4562         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4563           memnode_worklist.append_if_missing(use);
4564         }
4565       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4566         Node* addp2 = find_second_addp(use, n);
4567         if (addp2 != nullptr) {
4568           alloc_worklist.append_if_missing(addp2);
4569         }
4570         alloc_worklist.append_if_missing(use);
4571       } else if (use->is_Phi() ||
4572                  use->is_CheckCastPP() ||
4573                  use->is_EncodeNarrowPtr() ||
4574                  use->is_DecodeNarrowPtr() ||
4575                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4576         alloc_worklist.append_if_missing(use);
4577 #ifdef ASSERT
4578       } else if (use->is_Mem()) {
4579         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4580       } else if (use->is_MergeMem()) {
4581         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4582       } else if (use->is_SafePoint()) {
4583         // Look for MergeMem nodes for calls which reference unique allocation
4584         // (through CheckCastPP nodes) even for debug info.
4585         Node* m = use->in(TypeFunc::Memory);
4586         if (m->is_MergeMem()) {
4587           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4588         }
4589       } else if (use->Opcode() == Op_EncodeISOArray) {
4590         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4591           // EncodeISOArray overwrites destination array
4592           memnode_worklist.append_if_missing(use);
4593         }
4594       } else {
4595         uint op = use->Opcode();
4596         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4597             (use->in(MemNode::Memory) == n)) {
4598           // They overwrite the memory edge corresponding to the destination array.
4599           memnode_worklist.append_if_missing(use);
4600         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4601               op == Op_CastP2X ||
4602               op == Op_FastLock || op == Op_AryEq ||
4603               op == Op_StrComp || op == Op_CountPositives ||
4604               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4605               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4606               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4607               op == Op_SubTypeCheck ||
4608               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4609           n->dump();
4610           use->dump();
4611           assert(false, "EA: missing allocation reference path");
4612         }
4613 #endif
4614       }
4615     }
4616 
4617   }
4618 
4619 #ifdef ASSERT
4620   if (VerifyReduceAllocationMerges) {
4621     for (uint i = 0; i < reducible_merges.size(); i++) {
4622       Node* phi = reducible_merges.at(i);
4623 
4624       if (!reduced_merges.member(phi)) {
4625         phi->dump(2);
4626         phi->dump(-2);
4627         assert(false, "This reducible merge wasn't reduced.");

4687     if (n->is_Phi() || n->is_ClearArray()) {
4688       // we don't need to do anything, but the users must be pushed
4689     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4690       // we don't need to do anything, but the users must be pushed
4691       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4692       if (n == nullptr) {
4693         continue;
4694       }
4695     } else if (n->is_CallLeaf()) {
4696       // Runtime calls with narrow memory input (no MergeMem node)
4697       // get the memory projection
4698       n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4699       if (n == nullptr) {
4700         continue;
4701       }
4702     } else if (n->Opcode() == Op_StrCompressedCopy ||
4703                n->Opcode() == Op_EncodeISOArray) {
4704       // get the memory projection
4705       n = n->find_out_with(Op_SCMemProj);
4706       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4707     } else {
4708       assert(n->is_Mem(), "memory node required.");
4709       Node *addr = n->in(MemNode::Address);
4710       const Type *addr_t = igvn->type(addr);
4711       if (addr_t == Type::TOP) {
4712         continue;
4713       }
4714       assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4715       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4716       assert ((uint)alias_idx < new_index_end, "wrong alias index");
4717       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4718       if (_compile->failing()) {
4719         return;
4720       }
4721       if (mem != n->in(MemNode::Memory)) {
4722         // We delay the memory edge update since we need old one in
4723         // MergeMem code below when instances memory slices are separated.
4724         set_map(n, mem);
4725       }
4726       if (n->is_Load()) {
4727         continue;  // don't push users
4728       } else if (n->is_LoadStore()) {
4729         // get the memory projection
4730         n = n->find_out_with(Op_SCMemProj);
4731         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4732       }
4733     }
4734     // push user on appropriate worklist
4735     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4736       Node *use = n->fast_out(i);
4737       if (use->is_Phi() || use->is_ClearArray()) {
4738         memnode_worklist.append_if_missing(use);
4739       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4740         memnode_worklist.append_if_missing(use);
4741       } else if (use->is_MemBar() || use->is_CallLeaf()) {
4742         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4743           memnode_worklist.append_if_missing(use);
4744         }
4745 #ifdef ASSERT
4746       } else if(use->is_Mem()) {
4747         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4748       } else if (use->is_MergeMem()) {
4749         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4750       } else if (use->Opcode() == Op_EncodeISOArray) {
4751         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4752           // EncodeISOArray overwrites destination array
4753           memnode_worklist.append_if_missing(use);
4754         }
4755       } else {
4756         uint op = use->Opcode();
4757         if ((use->in(MemNode::Memory) == n) &&
4758             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4759           // They overwrite the memory edge corresponding to the destination array.
4760           memnode_worklist.append_if_missing(use);
4761         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4762               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4763               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4764               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4765           n->dump();
4766           use->dump();
4767           assert(false, "EA: missing memory path");
4768         }
4769 #endif
4770       }
4771     }
4772   }
4773 
4774   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4775   //            Walk each memory slice moving the first node encountered of each
4776   //            instance type to the input corresponding to its alias index.
4777   uint length = mergemem_worklist.length();
4778   for( uint next = 0; next < length; ++next ) {
4779     MergeMemNode* nmm = mergemem_worklist.at(next);
4780     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4781     // Note: we don't want to use MergeMemStream here because we only want to
4782     // scan inputs which exist at the start, not ones we add during processing.
4783     // Note 2: MergeMem may already contain instance memory slices added
4784     // during the find_inst_mem() calls when memory nodes were processed above.

4845     if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
4846       if (_compile->do_reduce_allocation_merges()) {
4847         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4848       } else if (_invocation > 0) {
4849         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4850       } else {
4851         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4852       }
4853       return;
4854     }
4855 
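          // Re-insert the rewritten MergeMem into the GVN hash table and queue
          // it for the optimizer so the new slice inputs are cleaned up by IGVN.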
4856     igvn->hash_insert(nmm);
4857     record_for_optimizer(nmm);
4858   }
4859 
4860   //  Phase 4:  Update the inputs of non-instance memory Phis and
4861   //            the Memory input of memnodes
4862   // First update the inputs of any non-instance Phi's from
4863   // which we split out an instance Phi.  Note we don't have
4864   // to recursively process Phi's encountered on the input memory
4865   // chains as is done in split_memory_phi() since they will
4866   // also be processed here.
4867   for (int j = 0; j < orig_phis.length(); j++) {
4868     PhiNode *phi = orig_phis.at(j);
4869     int alias_idx = _compile->get_alias_index(phi->adr_type());
4870     igvn->hash_delete(phi);
4871     for (uint i = 1; i < phi->req(); i++) {
4872       Node *mem = phi->in(i);
4873       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4874       if (_compile->failing()) {
4875         return;
4876       }
4877       if (mem != new_mem) {
4878         phi->set_req(i, new_mem);
4879       }
4880     }
4881     igvn->hash_insert(phi);
4882     record_for_optimizer(phi);
4883   }
4884 
4885   // Update the memory inputs of MemNodes with the value we computed

  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/metaspace.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "opto/c2compiler.hpp"
  35 #include "opto/arraycopynode.hpp"
  36 #include "opto/callnode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/compile.hpp"
  39 #include "opto/escape.hpp"
  40 #include "opto/inlinetypenode.hpp"
  41 #include "opto/macro.hpp"
  42 #include "opto/locknode.hpp"
  43 #include "opto/phaseX.hpp"
  44 #include "opto/movenode.hpp"
  45 #include "opto/narrowptrnode.hpp"
  46 #include "opto/castnode.hpp"
  47 #include "opto/rootnode.hpp"
  48 #include "utilities/macros.hpp"
  49 
  50 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  51   // If ReduceAllocationMerges is enabled we might call split_through_phi during
  52   // split_unique_types and that will create additional nodes that need to be
  53   // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  54   // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  55   // the array will be reallocated.
  56   _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  57   _in_worklist(C->comp_arena()),
  58   _next_pidx(0),
  59   _collecting(true),
  60   _verify(false),

 149   GrowableArray<SafePointNode*>  sfn_worklist;
 150   GrowableArray<MergeMemNode*>   mergemem_worklist;
 151   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 152 
 153   { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
 154 
 155   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 156   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
 157   // Initialize worklist
 158   if (C->root() != nullptr) {
 159     ideal_nodes.push(C->root());
 160   }
 161   // Processed ideal nodes are unique on ideal_nodes list
 162   // but several ideal nodes are mapped to the phantom_obj.
 163   // To avoid duplicated entries on the following worklists
 164   // add the phantom_obj only once to them.
 165   ptnodes_worklist.append(phantom_obj);
 166   java_objects_worklist.append(phantom_obj);
 167   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 168     Node* n = ideal_nodes.at(next);
 169     if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
 170         !n->in(MemNode::Address)->is_AddP() &&
 171         _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
 172       // A Load/Store at the mark word address is at offset 0, so it has no AddP, which confuses EA.
 173       Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
 174       _igvn->register_new_node_with_optimizer(addp);
 175       _igvn->replace_input_of(n, MemNode::Address, addp);
 176       ideal_nodes.push(addp);
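           // Grow the connection graph's _nodes table to cover the new AddP so
           // that ptnode_adr(), which indexes it by node idx, stays in bounds.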
 177       _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
 178     }
 179     // Create PointsTo nodes and add them to Connection Graph. Called
 180     // only once per ideal node since ideal_nodes is Unique_Node list.
 181     add_node_to_connection_graph(n, &delayed_worklist);
 182     PointsToNode* ptn = ptnode_adr(n->_idx);
 183     if (ptn != nullptr && ptn != phantom_obj) {
 184       ptnodes_worklist.append(ptn);
 185       if (ptn->is_JavaObject()) {
 186         java_objects_worklist.append(ptn->as_JavaObject());
 187         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 188             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 189           // Only the results of allocations and Java static calls are interesting.
 190           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 191         }
 192       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 193         oop_fields_worklist.append(ptn->as_Field());
 194       }
 195     }
 196     // Collect some interesting nodes for further use.
 197     switch (n->Opcode()) {
 198       case Op_MergeMem:

1241 
1242     // The next two inputs are:
1243     //  (1) A copy of the original pointer to NSR objects.
1244     //  (2) A selector, used to decide if we need to rematerialize an object
1245     //      or use the pointer to a NSR object.
1246     // See the declaration of SafePointScalarMergeNode for more details about these fields.
1247     sfpt->add_req(nsr_merge_pointer);
1248     sfpt->add_req(selector);
1249 
1250     for (uint i = 1; i < ophi->req(); i++) {
1251       Node* base = ophi->in(i);
1252       JavaObjectNode* ptn = unique_java_object(base);
1253 
1254       // If the base is not scalar replaceable we don't need to register information about
1255       // it at this time.
1256       if (ptn == nullptr || !ptn->scalar_replaceable()) {
1257         continue;
1258       }
1259 
1260       AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
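           // Gathers inline type values materialized by
           // create_scalarized_object_description(); they are scalarized in the
           // safepoint at the end of this iteration.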
1261       Unique_Node_List value_worklist;
1262 #ifdef ASSERT
1263       const Type* res_type = alloc->result_cast()->bottom_type();
1264       if (res_type->is_inlinetypeptr() && !Compile::current()->has_circular_inline_type()) {
1265         PhiNode* phi = ophi->as_Phi();
1266         assert(!phi->can_push_inline_types_down(_igvn), "missed earlier scalarization opportunity");
1267       }
1268 #endif
1269       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
1270       if (sobj == nullptr) {
1271         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1272         return false;
1273       }
1274 
1275       // Now make a pass over the debug information replacing any references
1276       // to the allocated object with "sobj"
1277       Node* ccpp = alloc->result_cast();
1278       sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1279 
1280       // Register the scalarized object as a candidate for reallocation
1281       smerge->add_req(sobj);
1282 
1283       // Scalarize inline types that were added to the safepoint.
1284       // Don't allow linking a constant oop (if available) for flat array elements
1285       // because Deoptimization::reassign_flat_array_elements needs field values.
1286       const bool allow_oop = !merge_t->is_flat();
1287       for (uint j = 0; j < value_worklist.size(); ++j) {
1288         InlineTypeNode* vt = value_worklist.at(j)->as_InlineType();
1289         vt->make_scalar_in_safepoints(_igvn, allow_oop);
1290       }
1291     }
1292 
1293     // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
1294     sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1295 
1296     // The call to 'replace_edges_in_range' above might have removed the
1297     // reference to ophi that we need at _merge_pointer_idx. The line below makes
1298     // sure the reference is maintained.
1299     sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1300     _igvn->_worklist.push(sfpt);
1301   }
1302 
1303   return true;
1304 }
1305 
1306 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *>  &alloc_worklist, GrowableArray<Node *>  &memnode_worklist) {
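       // Delay IGVN transforms while the merge Phi is rewritten; 'delay'
       // captures the previous setting (to be restored when reduction completes).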
1307   bool delay = _igvn->delay_transform();
1308   _igvn->set_delay_transform(true);
1309   _igvn->hash_delete(ophi);
1310 

1469   return false;
1470 }
1471 
1472 // Returns true if at least one of the arguments to the call is an object
1473 // that does not escape globally.
1474 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1475   if (call->method() != nullptr) {
1476     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1477     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1478       Node* p = call->in(idx);
1479       if (not_global_escape(p)) {
1480         return true;
1481       }
1482     }
1483   } else {
1484     const char* name = call->as_CallStaticJava()->_name;
1485     assert(name != nullptr, "no name");
1486     // no arg escapes through uncommon traps
1487     if (strcmp(name, "uncommon_trap") != 0) {
1488       // process_call_arguments() assumes that all arguments escape globally
1489       const TypeTuple* d = call->tf()->domain_sig();
1490       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1491         const Type* at = d->field_at(i);
1492         if (at->isa_oopptr() != nullptr) {
1493           return true;
1494         }
1495       }
1496     }
1497   }
1498   return false;
1499 }
1500 
1501 
1502 
1503 // Utility function for nodes that load an object
1504 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1505   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1506   // ThreadLocal has RawPtr type.
1507   const Type* t = _igvn->type(n);
1508   if (t->make_ptr() != nullptr) {
1509     Node* adr = n->in(MemNode::Address);

1543       // first IGVN optimization when escape information is still available.
1544       record_for_optimizer(n);
1545     } else if (n->is_Allocate()) {
1546       add_call_node(n->as_Call());
1547       record_for_optimizer(n);
1548     } else {
1549       if (n->is_CallStaticJava()) {
1550         const char* name = n->as_CallStaticJava()->_name;
1551         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1552           return; // Skip uncommon traps
1553         }
1554       }
1555       // Don't mark as processed since call's arguments have to be processed.
1556       delayed_worklist->push(n);
1557       // Check if a call returns an object.
1558       if ((n->as_Call()->returns_pointer() &&
1559            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1560           (n->is_CallStaticJava() &&
1561            n->as_CallStaticJava()->is_boxing_method())) {
1562         add_call_node(n->as_Call());
1563       } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
1564         bool returns_oop = false;
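             // An inline type returned as multiple fields has one projection per
             // returned field; the call produces an oop only if some projection
             // past TypeFunc::Parms has a pointer type.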
1565         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
1566           ProjNode* pn = n->fast_out(i)->as_Proj();
1567           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
1568             returns_oop = true;
1569           }
1570         }
1571         if (returns_oop) {
1572           add_call_node(n->as_Call());
1573         }
1574       }
1575     }
1576     return;
1577   }
1578   // Put this check here to process call arguments since some call nodes
1579   // point to phantom_obj.
1580   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1581     return; // Skip predefined nodes.
1582   }
1583   switch (opcode) {
1584     case Op_AddP: {
1585       Node* base = get_addp_base(n);
1586       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1587       // Field nodes are created for all field types. They are used in
1588       // adjust_scalar_replaceable_state() and split_unique_types().
1589       // Note, non-oop fields will have only base edges in Connection
1590       // Graph because such fields are not used for oop loads and stores.
1591       int offset = address_offset(n, igvn);
1592       add_field(n, PointsToNode::NoEscape, offset);
1593       if (ptn_base == nullptr) {
1594         delayed_worklist->push(n); // Process it later.
1595       } else {
1596         n_ptn = ptnode_adr(n_idx);
1597         add_base(n_ptn->as_Field(), ptn_base);
1598       }
1599       break;
1600     }
1601     case Op_CastX2P: {
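           // CastX2P makes a pointer out of arbitrary integer bits, so nothing
           // is known about what it references; map it to the phantom object.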
1602       map_ideal_node(n, phantom_obj);
1603       break;
1604     }
1605     case Op_InlineType:
1606     case Op_CastPP:
1607     case Op_CheckCastPP:
1608     case Op_EncodeP:
1609     case Op_DecodeN:
1610     case Op_EncodePKlass:
1611     case Op_DecodeNKlass: {
1612       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1613       break;
1614     }
1615     case Op_CMoveP: {
1616       add_local_var(n, PointsToNode::NoEscape);
1617       // Do not add edges during the first iteration because some nodes
1618       // may not be defined yet.
1619       delayed_worklist->push(n);
1620       break;
1621     }
1622     case Op_ConP:
1623     case Op_ConN:
1624     case Op_ConNKlass: {
1625       // assume all oop constants globally escape except for null

1657     case Op_PartialSubtypeCheck: {
1658     // Produces Null or notNull and is used only in CmpP, so
1659       // phantom_obj could be used.
1660       map_ideal_node(n, phantom_obj); // Result is unknown
1661       break;
1662     }
1663     case Op_Phi: {
1664       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1665       // ThreadLocal has RawPtr type.
1666       const Type* t = n->as_Phi()->type();
1667       if (t->make_ptr() != nullptr) {
1668         add_local_var(n, PointsToNode::NoEscape);
1669         // Do not add edges during the first iteration because some nodes
1670         // may not be defined yet.
1671         delayed_worklist->push(n);
1672       }
1673       break;
1674     }
1675     case Op_Proj: {
1676       // we are only interested in the oop result projection from a call
1677       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
1678           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
1679         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1680                n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1681         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1682       }
1683       break;
1684     }
1685     case Op_Rethrow: // Exception object escapes
1686     case Op_Return: {
1687       if (n->req() > TypeFunc::Parms &&
1688           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1689         // Treat Return value as LocalVar with GlobalEscape escape state.
1690         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1691       }
1692       break;
1693     }
1694     case Op_CompareAndExchangeP:
1695     case Op_CompareAndExchangeN:
1696     case Op_GetAndSetP:
1697     case Op_GetAndSetN: {
1698       add_objload_to_connection_graph(n, delayed_worklist);
1699       // fall-through
1700     }

1762   if (n->is_Call()) {
1763     process_call_arguments(n->as_Call());
1764     return;
1765   }
1766   assert(n->is_Store() || n->is_LoadStore() ||
1767          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1768          "node should be registered already");
1769   int opcode = n->Opcode();
1770   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1771   if (gc_handled) {
1772     return; // Ignore node if already handled by GC.
1773   }
1774   switch (opcode) {
1775     case Op_AddP: {
1776       Node* base = get_addp_base(n);
1777       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1778       assert(ptn_base != nullptr, "field's base should be registered");
1779       add_base(n_ptn->as_Field(), ptn_base);
1780       break;
1781     }
1782     case Op_InlineType:
1783     case Op_CastPP:
1784     case Op_CheckCastPP:
1785     case Op_EncodeP:
1786     case Op_DecodeN:
1787     case Op_EncodePKlass:
1788     case Op_DecodeNKlass: {
1789       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1790       break;
1791     }
1792     case Op_CMoveP: {
1793       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1794         Node* in = n->in(i);
1795         if (in == nullptr) {
1796           continue;  // ignore null
1797         }
1798         Node* uncast_in = in->uncast();
1799         if (uncast_in->is_top() || uncast_in == n) {
1800           continue;  // ignore top or inputs which go back to this node
1801         }
1802         PointsToNode* ptn = ptnode_adr(in->_idx);

1817       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1818       // ThreadLocal has RawPtr type.
1819       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1820       for (uint i = 1; i < n->req(); i++) {
1821         Node* in = n->in(i);
1822         if (in == nullptr) {
1823           continue;  // ignore null
1824         }
1825         Node* uncast_in = in->uncast();
1826         if (uncast_in->is_top() || uncast_in == n) {
1827           continue;  // ignore top or inputs which go back to this node
1828         }
1829         PointsToNode* ptn = ptnode_adr(in->_idx);
1830         assert(ptn != nullptr, "node should be registered");
1831         add_edge(n_ptn, ptn);
1832       }
1833       break;
1834     }
1835     case Op_Proj: {
1836       // we are only interested in the oop result projection from a call
1837       assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1838              n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1839       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1840       break;
1841     }
1842     case Op_Rethrow: // Exception object escapes
1843     case Op_Return: {
1844       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1845              "Unexpected node type");
1846       // Treat Return value as LocalVar with GlobalEscape escape state.
1847       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1848       break;
1849     }
1850     case Op_CompareAndExchangeP:
1851     case Op_CompareAndExchangeN:
1852     case Op_GetAndSetP:
1853     case Op_GetAndSetN: {
1854       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1855       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1856       // fall-through
1857     }
1858     case Op_CompareAndSwapP:

1994     PointsToNode* ptn = ptnode_adr(val->_idx);
1995     assert(ptn != nullptr, "node should be registered");
1996     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1997     // Add edge to object for unsafe access with offset.
1998     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1999     assert(adr_ptn != nullptr, "node should be registered");
2000     if (adr_ptn->is_Field()) {
2001       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
2002       add_edge(adr_ptn, ptn);
2003     }
2004     return true;
2005   }
2006 #ifdef ASSERT
2007   n->dump(1);
2008   assert(false, "not unsafe");
2009 #endif
2010   return false;
2011 }
2012 
2013 void ConnectionGraph::add_call_node(CallNode* call) {
2014   assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
2015   uint call_idx = call->_idx;
2016   if (call->is_Allocate()) {
2017     Node* k = call->in(AllocateNode::KlassNode);
2018     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
2019     assert(kt != nullptr, "TypeKlassPtr required.");
2020     PointsToNode::EscapeState es = PointsToNode::NoEscape;
2021     bool scalar_replaceable = true;
2022     NOT_PRODUCT(const char* nsr_reason = "");
2023     if (call->is_AllocateArray()) {
2024       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
2025         es = PointsToNode::GlobalEscape;
2026       } else {
2027         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2028         if (length < 0) {
2029           // Not scalar replaceable if the length is not constant.
2030           scalar_replaceable = false;
2031           NOT_PRODUCT(nsr_reason = "has a non-constant length");
2032         } else if (length > EliminateAllocationArraySizeLimit) {
2033           // Not scalar replaceable if the length is too big.
2034           scalar_replaceable = false;

2070     //
2071     //    - all oop arguments escape globally;
2072     //
2073     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2074     //
2075     //    - the same as CallDynamicJavaNode if bytecode analysis can't be done;
2076     //
2077     //    - mapped to GlobalEscape JavaObject node if an unknown oop is returned;
2078     //    - mapped to NoEscape JavaObject node if a non-escaping object allocated
2079     //      during the call is returned;
2080     //    - mapped to ArgEscape LocalVar node pointing to object arguments
2081     //      which are returned and do not escape during the call;
2082     //
2083     //    - oop arguments' escaping status is defined by bytecode analysis;
2084     //
2085     // For a static call, we know exactly what method is being called.
2086     // Use bytecode estimator to record whether the call's return value escapes.
2087     ciMethod* meth = call->as_CallJava()->method();
2088     if (meth == nullptr) {
2089       const char* name = call->as_CallStaticJava()->_name;
2090       assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2091              strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0, "TODO: add failed case check");
2092       // Returns a newly allocated non-escaped object.
2093       add_java_object(call, PointsToNode::NoEscape);
2094       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
2095     } else if (meth->is_boxing_method()) {
2096       // Returns boxing object
2097       PointsToNode::EscapeState es;
2098       vmIntrinsics::ID intr = meth->intrinsic_id();
2099       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2100         // It does not escape if object is always allocated.
2101         es = PointsToNode::NoEscape;
2102       } else {
2103         // It escapes globally if object could be loaded from cache.
2104         es = PointsToNode::GlobalEscape;
2105       }
2106       add_java_object(call, es);
2107       if (es == PointsToNode::GlobalEscape) {
2108         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2109       }
2110     } else {
2111       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2112       call_analyzer->copy_dependencies(_compile->dependencies());
2113       if (call_analyzer->is_return_allocated()) {
2114         // Returns a newly allocated non-escaped object, simply
2115         // update dependency information.
2116         // Mark it as NoEscape so that objects referenced by
2117         // its fields will be marked as NoEscape at least.
2118         add_java_object(call, PointsToNode::NoEscape);
2119         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2120       } else {
2121         // Determine whether any arguments are returned.
2122         const TypeTuple* d = call->tf()->domain_cc();
2123         bool ret_arg = false;
2124         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2125           if (d->field_at(i)->isa_ptr() != nullptr &&
2126               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2127             ret_arg = true;
2128             break;
2129           }
2130         }
2131         if (ret_arg) {
2132           add_local_var(call, PointsToNode::ArgEscape);
2133         } else {
2134           // Returns unknown object.
2135           map_ideal_node(call, phantom_obj);
2136         }
2137       }
2138     }
2139   } else {
2140     // Another type of call; assume the worst case:
2141     // returned value is unknown and globally escapes.
2142     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");

2150 #ifdef ASSERT
2151     case Op_Allocate:
2152     case Op_AllocateArray:
2153     case Op_Lock:
2154     case Op_Unlock:
2155       assert(false, "should be done already");
2156       break;
2157 #endif
2158     case Op_ArrayCopy:
2159     case Op_CallLeafNoFP:
2160       // Most array copies are ArrayCopy nodes at this point but there
2161       // are still a few direct calls to the copy subroutines (See
2162       // PhaseStringOpts::copy_string())
2163       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2164         call->as_CallLeaf()->is_call_to_arraycopystub();
2165       // fall through
2166     case Op_CallLeafVector:
2167     case Op_CallLeaf: {
2168       // Stub calls: objects do not escape, but they are not scalar replaceable.
2169       // Adjust escape state for outgoing arguments.
2170       const TypeTuple * d = call->tf()->domain_sig();
2171       bool src_has_oops = false;
2172       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2173         const Type* at = d->field_at(i);
2174         Node *arg = call->in(i);
2175         if (arg == nullptr) {
2176           continue;
2177         }
2178         const Type *aat = _igvn->type(arg);
2179         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2180           continue;
2181         }
2182         if (arg->is_AddP()) {
2183           //
2184           // The inline_native_clone() case when the arraycopy stub is called
2185           // after the allocation before Initialize and CheckCastPP nodes.
2186           // Or the normal arraycopy case for object arrays.
2187           //
2188           // Set AddP's base (Allocate) as not scalar replaceable since
2189           // pointer to the base (with offset) is passed as argument.
2190           //
2191           arg = get_addp_base(arg);
2192         }
2193         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2194         assert(arg_ptn != nullptr, "should be registered");
2195         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2196         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2197           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2198                  aat->isa_ptr() != nullptr, "expecting a Ptr");
2199           bool arg_has_oops = aat->isa_oopptr() &&
2200                               (aat->isa_instptr() ||
2201                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
2202                                (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
2203                                                                aat->isa_aryptr()->is_flat() &&
2204                                                                aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
2205           if (i == TypeFunc::Parms) {
2206             src_has_oops = arg_has_oops;
2207           }
2208           //
2209           //   src or dst could be j.l.Object when the other is a basic type array:
2210           //
2211           //   arraycopy(char[],0,Object*,0,size);
2212           //   arraycopy(Object*,0,char[],0,size);
2213           //
2214           // Don't add edges in such cases.
2215           //
2216           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2217                                        arg_has_oops && (i > TypeFunc::Parms);
2218 #ifdef ASSERT
2219           if (!(is_arraycopy ||
2220                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2221                 (call->as_CallLeaf()->_name != nullptr &&
2222                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2223                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2224                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

2235                   strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2236                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2237                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2238                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2239                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2240                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2241                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2242                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2243                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2244                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2245                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2246                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2247                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2248                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2249                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2250                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2251                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2252                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2253                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2254                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2255                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2256                   strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
2257                   strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
2258                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2259                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2261                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2262                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2263                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2264                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2265                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2266                  ))) {
2267             call->dump();
2268             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2269           }
2270 #endif
2271           // Always process arraycopy's destination object since
2272           // we need to add all possible edges to references in
2273           // the source object.
2274           if (arg_esc >= PointsToNode::ArgEscape &&
2275               !arg_is_arraycopy_dest) {
2276             continue;
2277           }

2304           }
2305         }
2306       }
2307       break;
2308     }
2309     case Op_CallStaticJava: {
2310       // For a static call, we know exactly what method is being called.
2311       // Use bytecode estimator to record the call's escape effects
2312 #ifdef ASSERT
2313       const char* name = call->as_CallStaticJava()->_name;
2314       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2315 #endif
2316       ciMethod* meth = call->as_CallJava()->method();
2317       if ((meth != nullptr) && meth->is_boxing_method()) {
2318         break; // Boxing methods do not modify any oops.
2319       }
2320       BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2321       // fall-through if not a Java method or no analyzer information
2322       if (call_analyzer != nullptr) {
2323         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2324         const TypeTuple* d = call->tf()->domain_cc();
2325         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2326           const Type* at = d->field_at(i);
2327           int k = i - TypeFunc::Parms;
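               // 'k' is the zero-based argument index used by the
               // BCEscapeAnalyzer queries below.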
2328           Node* arg = call->in(i);
2329           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2330           if (at->isa_ptr() != nullptr &&
2331               call_analyzer->is_arg_returned(k)) {
2332             // The call returns arguments.
2333             if (call_ptn != nullptr) { // Is call's result used?
2334               assert(call_ptn->is_LocalVar(), "node should be registered");
2335               assert(arg_ptn != nullptr, "node should be registered");
2336               add_edge(call_ptn, arg_ptn);
2337             }
2338           }
2339           if (at->isa_oopptr() != nullptr &&
2340               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2341             if (!call_analyzer->is_arg_stack(k)) {
2342               // The argument globally escapes
2343               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2344             } else {

2348                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2349               }
2350             }
2351           }
2352         }
2353         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2354           // The call returns arguments.
2355           assert(call_ptn->edge_count() > 0, "sanity");
2356           if (!call_analyzer->is_return_local()) {
2357             // The call may also return an unknown object.
2358             add_edge(call_ptn, phantom_obj);
2359           }
2360         }
2361         break;
2362       }
2363     }
2364     default: {
2365       // We fall through to here if this is not a Java method, there is no
2366       // analyzer information, or it is some other type of call. Assume the
2367       // worst case: all arguments globally escape.
2368       const TypeTuple* d = call->tf()->domain_cc();
2369       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2370         const Type* at = d->field_at(i);
2371         if (at->isa_oopptr() != nullptr) {
2372           Node* arg = call->in(i);
2373           if (arg->is_AddP()) {
2374             arg = get_addp_base(arg);
2375           }
2376           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2377           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2378         }
2379       }
2380     }
2381   }
2382 }
2383 
2384 
2385 // Finish Graph construction.
2386 bool ConnectionGraph::complete_connection_graph(
2387                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2388                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

2761     PointsToNode* base = i.get();
2762     if (base->is_JavaObject()) {
2763       // Skip Allocate's fields which will be processed later.
2764       if (base->ideal_node()->is_Allocate()) {
2765         return 0;
2766       }
2767       assert(base == null_obj, "only null ptr base expected here");
2768     }
2769   }
2770   if (add_edge(field, phantom_obj)) {
2771     // New edge was added
2772     new_edges++;
2773     add_field_uses_to_worklist(field);
2774   }
2775   return new_edges;
2776 }
2777 
2778 // Find fields' initializing values for allocations.
2779 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2780   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2781   PointsToNode* init_val = phantom_obj;
2782   Node* alloc = pta->ideal_node();
2783 
2784   // Do nothing for Allocate nodes since their field values are
2785   // "known" unless they are initialized by arraycopy/clone.
2786   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2787     if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2788       // Non-flat inline type arrays are initialized with
2789       // the default value instead of null. Handle them here.
2790       init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
2791       assert(init_val != nullptr, "default value should be registered");
2792     } else {
2793       return 0;
2794     }
2795   }
2796   // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
2797   assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
2798 #ifdef ASSERT
2799   if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
2800     const char* name = alloc->as_CallStaticJava()->_name;
2801     assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2802            strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0, "sanity");
2803   }
2804 #endif
2806   int new_edges = 0;
2807   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2808     PointsToNode* field = i.get();
2809     if (field->is_Field() && field->as_Field()->is_oop()) {
2810       if (add_edge(field, init_val)) {
2811         // New edge was added
2812         new_edges++;
2813         add_field_uses_to_worklist(field->as_Field());
2814       }
2815     }
2816   }
2817   return new_edges;
2818 }
2819 
2820 // Find fields' initializing values for allocations.
2821 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2822   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2823   Node* alloc = pta->ideal_node();
2824   // Do nothing for Call nodes since their field values are unknown.
2825   if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2826     return 0;
2827   }
2828   InitializeNode* ini = alloc->as_Allocate()->initialization();
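       // 'ini' holds the initializing stores captured by the Initialize node;
       // fields with no captured store get an explicit null edge below.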
2829   bool visited_bottom_offset = false;
2830   GrowableArray<int> offsets_worklist;
2831   int new_edges = 0;
2832 
2833   // Check if an oop field's initializing value is recorded and add
2834   // a corresponding null value if it is not recorded. The Connection
2835   // Graph does not record a default initialization by null
2836   // captured by an Initialize node.
2837   //
2838   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2839     PointsToNode* field = i.get(); // Field (AddP)
2840     if (!field->is_Field() || !field->as_Field()->is_oop()) {
2841       continue; // Not oop field
2842     }
2843     int offset = field->as_Field()->offset();
2844     if (offset == Type::OffsetBot) {
2845       if (!visited_bottom_offset) {

2891               } else {
2892                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2893                   tty->print_cr("----------init store has invalid value -----");
2894                   store->dump();
2895                   val->dump();
2896                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2897                 }
2898                 for (EdgeIterator j(val); j.has_next(); j.next()) {
2899                   PointsToNode* obj = j.get();
2900                   if (obj->is_JavaObject()) {
2901                     if (!field->points_to(obj->as_JavaObject())) {
2902                       missed_obj = obj;
2903                       break;
2904                     }
2905                   }
2906                 }
2907               }
2908               if (missed_obj != nullptr) {
2909                 tty->print_cr("----------field---------------------------------");
2910                 field->dump();
2911                 tty->print_cr("----------missed reference to object------------");
2912                 missed_obj->dump();
2913                 tty->print_cr("----------object referenced by init store-------");
2914                 store->dump();
2915                 val->dump();
2916                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2917               }
2918             }
2919 #endif
2920           } else {
2921             // There could be initializing stores which follow allocation.
2922             // For example, a volatile field store is not collected
2923             // by the Initialize node.
2924             //
2925             // Need to check for dependent loads to separate such stores from
2926             // stores which follow loads. For now, add the initial value null so
2927             // that the pointer-compare optimization works correctly.
2928           }
2929         }
2930         if (value == nullptr) {
2931           // A field's initializing value was not recorded. Add null.
2932           if (add_edge(field, null_obj)) {
2933             // New edge was added

3249         assert(field->edge_count() > 0, "sanity");
3250       }
3251     }
3252   }
3253 }
3254 #endif
3255 
3256 // Optimize ideal graph.
3257 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3258                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3259   Compile* C = _compile;
3260   PhaseIterGVN* igvn = _igvn;
3261   if (EliminateLocks) {
3262     // Mark locks before changing ideal graph.
3263     int cnt = C->macro_count();
3264     for (int i = 0; i < cnt; i++) {
3265       Node *n = C->macro_node(i);
3266       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3267         AbstractLockNode* alock = n->as_AbstractLock();
3268         if (!alock->is_non_esc_obj()) {
3269           const Type* obj_type = igvn->type(alock->obj_node());
3270           if (can_eliminate_lock(alock) && !obj_type->is_inlinetypeptr()) {
3271             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3272             // The lock could be marked eliminated by lock coarsening
3273             // code during the first IGVN pass before EA. Replace the coarsened
3274             // flag to eliminate all associated locks/unlocks.
3275 #ifdef ASSERT
3276             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3277 #endif
3278             alock->set_non_esc_obj();
3279           }
3280         }
3281       }
3282     }
3283   }
3284 
3285   if (OptimizePtrCompare) {
3286     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3287       Node *n = ptr_cmp_worklist.at(i);
3288       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3289       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3290       if (tcmp->singleton()) {

3292 #ifndef PRODUCT
3293         if (PrintOptimizePtrCompare) {
3294           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3295           if (Verbose) {
3296             n->dump(1);
3297           }
3298         }
3299 #endif
3300         igvn->replace_node(n, cmp);
3301       }
3302     }
3303   }
3304 
3305   // For MemBarStoreStore nodes added in library_call.cpp, check the
3306   // escape status of the associated AllocateNode and optimize out the
3307   // MemBarStoreStore node if the allocated object never escapes.
3308   for (int i = 0; i < storestore_worklist.length(); i++) {
3309     Node* storestore = storestore_worklist.at(i);
3310     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3311     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3312       if (alloc->in(AllocateNode::InlineType) != nullptr) {
3313         // Non-escaping inline type buffer allocations don't require a membar
3314         storestore->as_MemBar()->remove(_igvn);
3315       } else {
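             // The object never escapes, so ordering its publication with a
             // StoreStore barrier is unnecessary; a CPUOrder membar keeps the
             // memory graph shape in its place.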
3316         MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3317         mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3318         mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3319         igvn->register_new_node_with_optimizer(mb);
3320         igvn->replace_node(storestore, mb);
3321       }
3322     }
3323   }
3324 }
3325 
3326 // Optimize object comparisons.
3327 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3328   assert(OptimizePtrCompare, "sanity");
3329   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3330   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3331   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0, 1]
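       // For example, pointers to two distinct non-escaping allocations can
       // never be equal, so such a compare folds to NE.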
3332 
3333   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3334   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3335   JavaObjectNode* jobj1 = unique_java_object(left);
3336   JavaObjectNode* jobj2 = unique_java_object(right);
3337 
3338   // The use of this method during allocation merge reduction may cause 'left'
3339   // or 'right' to be something (e.g., a Phi) that isn't in the connection graph or
3340   // that doesn't reference a unique Java object.
3341   if (ptn1 == nullptr || ptn2 == nullptr ||

3463   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3464   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3465   PointsToNode* ptadr = _nodes.at(n->_idx);
3466   if (ptadr != nullptr) {
3467     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3468     return;
3469   }
3470   Compile* C = _compile;
3471   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3472   map_ideal_node(n, ptadr);
3473   // Add edge from arraycopy node to source object.
3474   (void)add_edge(ptadr, src);
3475   src->set_arraycopy_src();
3476   // Add edge from destination object to arraycopy node.
3477   (void)add_edge(dst, ptadr);
3478   dst->set_arraycopy_dst();
3479 }
3480 
3481 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3482   const Type* adr_type = n->as_AddP()->bottom_type();
3483   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
3484   BasicType bt = T_INT;
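       // Assume a non-reference type; 'bt' is switched to an oop type below
       // only when the address provably refers to an oop field.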
3485   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
3486     // Check only oop fields.
3487     if (!adr_type->isa_aryptr() ||
3488         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3489         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3490       // OffsetBot is used to reference an array's element. Ignore the first AddP.
3491       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3492         bt = T_OBJECT;
3493       }
3494     }
3495   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3496     if (adr_type->isa_instptr()) {
3497       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
3498       if (field != nullptr) {
3499         bt = field->layout_type();
3500       } else {
3501         // Check for unsafe oop field access
3502         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3503             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3504             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3505             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3506           bt = T_OBJECT;
3507           (*unsafe) = true;
3508         }
3509       }
3510     } else if (adr_type->isa_aryptr()) {
3511       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3512         // Ignore array length load.
3513       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3514         // Ignore the first AddP.
3515       } else {
3516         const Type* elemtype = adr_type->is_aryptr()->elem();
3517         if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
3518           ciInlineKlass* vk = elemtype->inline_klass();
3519           field_offset += vk->first_field_offset();
3520           bt = vk->get_field_by_offset(field_offset, false)->layout_type();
3521         } else {
3522           bt = elemtype->array_element_basic_type();
3523         }
3524       }
3525     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3526       // Allocation initialization, ThreadLocal field access, unsafe access
3527       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3528           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3529           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3530           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3531         bt = T_OBJECT;
3532       }
3533     }
3534   }
3535   // Note: T_NARROWOOP is not classed as a real reference type
3536   return (is_reference_type(bt) || bt == T_NARROWOOP);
3537 }
3538 
3539 // Returns the unique pointed-to Java object, or null.
3540 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3541   // If the node was created after the escape computation we can't answer.
3542   uint idx = n->_idx;
3543   if (idx >= nodes_size()) {

3700             return true;
3701           }
3702         }
3703       }
3704     }
3705   }
3706   return false;
3707 }
3708 
3709 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3710   const Type *adr_type = phase->type(adr);
3711   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
3712     // We are computing a raw address for a store captured by an Initialize node;
3713     // compute an appropriate address type. AddP cases #3 and #5 (see below).
3714     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3715     assert(offs != Type::OffsetBot ||
3716            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3717            "offset must be a constant or it is initialization of array");
3718     return offs;
3719   }
3720   return adr_type->is_ptr()->flat_offset();
3721 }
3722 
3723 Node* ConnectionGraph::get_addp_base(Node *addp) {
3724   assert(addp->is_AddP(), "must be AddP");
3725   //
3726   // AddP cases for Base and Address inputs:
3727   // case #1. Direct object's field reference:
3728   //     Allocate
3729   //       |
3730   //     Proj #5 ( oop result )
3731   //       |
3732   //     CheckCastPP (cast to instance type)
3733   //      | |
3734   //     AddP  ( base == address )
3735   //
3736   // case #2. Indirect object's field reference:
3737   //      Phi
3738   //       |
3739   //     CastPP (cast to instance type)
3740   //      | |

3854   }
3855   return nullptr;
3856 }
3857 
3858 //
3859 // Adjust the type and inputs of an AddP which computes the
3860 // address of a field of an instance
3861 //
3862 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3863   PhaseGVN* igvn = _igvn;
3864   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3865   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3866   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3867   if (t == nullptr) {
3868     // We are computing a raw address for a store captured by an Initialize node;
3869     // compute an appropriate address type (cases #3 and #5).
3870     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3871     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3872     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3873     assert(offs != Type::OffsetBot, "offset must be a constant");
3874     if (base_t->isa_aryptr() != nullptr) {
3875       // In the case of a flat inline type array, each field has its
3876       // own slice so we need to extract the field being accessed from
3877       // the address computation
3878       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
3879     } else {
3880       t = base_t->add_offset(offs)->is_oopptr();
3881     }
3882   }
3883   int inst_id = base_t->instance_id();
3884   assert(!t->is_known_instance() || t->instance_id() == inst_id,
3885                              "old type must be non-instance or match new type");
3886 
3887   // The type 't' could be a subclass of 'base_t'.
3888   // As a result, t->offset() could be larger than base_t's size, which will
3889   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3890   // constructor verifies correctness of the offset.
3891   //
3892   // It could happen on a subclass branch (from type profiling
3893   // inlining) which was not eliminated during parsing since the exactness
3894   // of the allocation type was not propagated to the subclass type check.
3895   //
3896   // Or the type 't' could be unrelated to 'base_t' altogether.
3897   // This can happen when the CHA type differs from the MDO type on a dead path
3898   // (for example, from an instanceof check) which is not collapsed during parsing.
3899   //
3900   // Do nothing for such an AddP node and don't process its users since
3901   // this code branch will go away.
3902   //
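       // Illustrative (hypothetical) Java shape of such a dead branch:
       //   A a = new A();          // allocated here with exact type A
       //   if (a instanceof B) {   // B unrelated to A; the test is never true
       //     ((B) a).f = ...;      // AddP typed as B+offset, unrelated to base_t
       //   }
       //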
3903   if (!t->is_known_instance() &&
3904       !base_t->maybe_java_subtype_of(t)) {
3905      return false; // bail out
3906   }
3907   const TypePtr* tinst = base_t->add_offset(t->offset());
3908   if (tinst->isa_aryptr() && t->isa_aryptr()) {
3909     // In the case of a flat inline type array, each field has its
3910     // own slice so we need to keep track of the field being accessed.
3911     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
3912     // Keep array properties (not flat/null-free)
3913     tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
3914     if (tinst == nullptr) {
3915       return false; // Skip dead path with inconsistent properties
3916     }
3917   }
3918 
3919   // Do NOT remove the next line: it ensures that a new alias index is
3920   // allocated for the instance type. Note: C++ will not remove the call
3921   // since it has a side effect.
3922   int alias_idx = _compile->get_alias_index(tinst);
3923   igvn->set_type(addp, tinst);
3924   // record the allocation in the node map
3925   set_map(addp, get_map(base->_idx));
3926   // Set addp's Base and Address to 'base'.
3927   Node *abase = addp->in(AddPNode::Base);
3928   Node *adr   = addp->in(AddPNode::Address);
3929   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3930       adr->in(0)->_idx == (uint)inst_id) {
3931     // Skip AddP cases #3 and #5.
3932   } else {
3933     assert(!abase->is_top(), "sanity"); // AddP case #3
3934     if (abase != base) {
3935       igvn->hash_delete(addp);
3936       addp->set_req(AddPNode::Base, base);
3937       if (abase == adr) {
3938         addp->set_req(AddPNode::Address, base);

4604         ptnode_adr(n->_idx)->dump();
4605         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4606 #endif
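             // Bail out and retry the whole compilation: on a repeated EA pass
             // (_invocation > 0) the retry disables only iterative escape
             // analysis, otherwise it disables escape analysis entirely.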
4607         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4608         return;
4609       } else {
4610         Node *val = get_map(jobj->idx());   // CheckCastPP node
4611         TypeNode *tn = n->as_Type();
4612         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4613         assert(tinst != nullptr && tinst->is_known_instance() &&
4614                tinst->instance_id() == jobj->idx(), "instance type expected.");
4615 
4616         const Type *tn_type = igvn->type(tn);
4617         const TypeOopPtr *tn_t;
4618         if (tn_type->isa_narrowoop()) {
4619           tn_t = tn_type->make_ptr()->isa_oopptr();
4620         } else {
4621           tn_t = tn_type->isa_oopptr();
4622         }
4623         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
4624           if (tn_t->isa_aryptr()) {
4625             // Keep array properties (not flat/null-free)
4626             tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
4627             if (tinst == nullptr) {
4628               continue; // Skip dead path with inconsistent properties
4629             }
4630           }
4631           if (tn_type->isa_narrowoop()) {
4632             tn_type = tinst->make_narrowoop();
4633           } else {
4634             tn_type = tinst;
4635           }
4636           igvn->hash_delete(tn);
4637           igvn->set_type(tn, tn_type);
4638           tn->set_type(tn_type);
4639           igvn->hash_insert(tn);
4640           record_for_optimizer(n);
4641         } else {
4642           assert(tn_type == TypePtr::NULL_PTR ||
4643                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4644                  "unexpected type");
4645           continue; // Skip dead path with different type
4646         }
4647       }
4648     } else {
4649       debug_only(n->dump();)
4650       assert(false, "EA: unexpected node");
4651       continue;
4652     }
4653     // push allocation's users on appropriate worklist
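         // In short: memory users (loads/stores, MemBars, array-overwriting
         // intrinsics) go to memnode_worklist, while pointer users (Phi,
         // CheckCastPP, CastPP, EncodeP/DecodeN and derived AddPs) go back on
         // alloc_worklist so the new instance type keeps propagating.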
4654     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4655       Node *use = n->fast_out(i);
4656       if (use->is_Mem() && use->in(MemNode::Address) == n) {
4657         // Load/store to instance's field
4658         memnode_worklist.append_if_missing(use);
4659       } else if (use->is_MemBar()) {
4660         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4661           memnode_worklist.append_if_missing(use);
4662         }
4663       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4664         Node* addp2 = find_second_addp(use, n);
4665         if (addp2 != nullptr) {
4666           alloc_worklist.append_if_missing(addp2);
4667         }
4668         alloc_worklist.append_if_missing(use);
4669       } else if (use->is_Phi() ||
4670                  use->is_CheckCastPP() ||
4671                  use->is_EncodeNarrowPtr() ||
4672                  use->is_DecodeNarrowPtr() ||
4673                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4674         alloc_worklist.append_if_missing(use);
4675 #ifdef ASSERT
4676       } else if (use->is_Mem()) {
4677         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4678       } else if (use->is_MergeMem()) {
4679         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4680       } else if (use->is_SafePoint()) {
4681         // Look for MergeMem nodes for calls which reference unique allocation
4682         // (through CheckCastPP nodes) even for debug info.
4683         Node* m = use->in(TypeFunc::Memory);
4684         if (m->is_MergeMem()) {
4685           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4686         }
4687       } else if (use->Opcode() == Op_EncodeISOArray) {
4688         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4689           // EncodeISOArray overwrites destination array
4690           memnode_worklist.append_if_missing(use);
4691         }
4692       } else if (use->Opcode() == Op_Return) {
4693         // Allocation is referenced by field of returned inline type
4694         assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
4695       } else {
4696         uint op = use->Opcode();
4697         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4698             (use->in(MemNode::Memory) == n)) {
4699           // They overwrite the memory edge corresponding to the destination array.
4700           memnode_worklist.append_if_missing(use);
4701         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4702               op == Op_CastP2X ||
4703               op == Op_FastLock || op == Op_AryEq ||
4704               op == Op_StrComp || op == Op_CountPositives ||
4705               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4706               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4707               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4708               op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
4709               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4710           n->dump();
4711           use->dump();
4712           assert(false, "EA: missing allocation reference path");
4713         }
4714 #endif
4715       }
4716     }
4717 
4718   }
4719 
4720 #ifdef ASSERT
4721   if (VerifyReduceAllocationMerges) {
4722     for (uint i = 0; i < reducible_merges.size(); i++) {
4723       Node* phi = reducible_merges.at(i);
4724 
4725       if (!reduced_merges.member(phi)) {
4726         phi->dump(2);
4727         phi->dump(-2);
4728         assert(false, "This reducible merge wasn't reduced.");

4788     if (n->is_Phi() || n->is_ClearArray()) {
4789       // we don't need to do anything, but the users must be pushed
4790     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4791       // we don't need to do anything, but the users must be pushed
4792       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4793       if (n == nullptr) {
4794         continue;
4795       }
4796     } else if (n->is_CallLeaf()) {
4797       // Runtime calls with a narrow memory input (no MergeMem node):
4798       // get the memory projection.
4799       n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4800       if (n == nullptr) {
4801         continue;
4802       }
4803     } else if (n->Opcode() == Op_StrCompressedCopy ||
4804                n->Opcode() == Op_EncodeISOArray) {
4805       // get the memory projection
4806       n = n->find_out_with(Op_SCMemProj);
4807       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4808     } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
4809                strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
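           // Note: this branch appears to be unreachable, since the generic
           // is_CallLeaf() case above already catches these calls.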
4810       n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
4811     } else {
4812       assert(n->is_Mem(), "memory node required.");
4813       Node *addr = n->in(MemNode::Address);
4814       const Type *addr_t = igvn->type(addr);
4815       if (addr_t == Type::TOP) {
4816         continue;
4817       }
4818       assert(addr_t->isa_ptr() != nullptr, "pointer type required.");
4819       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4820       assert((uint)alias_idx < new_index_end, "wrong alias index");
4821       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4822       if (_compile->failing()) {
4823         return;
4824       }
4825       if (mem != n->in(MemNode::Memory)) {
4826         // We delay the memory edge update since we need the old one in the
4827         // MergeMem code below when instance memory slices are separated.
4828         set_map(n, mem);
4829       }
4830       if (n->is_Load()) {
4831         continue;  // don't push users
4832       } else if (n->is_LoadStore()) {
4833         // get the memory projection
4834         n = n->find_out_with(Op_SCMemProj);
4835         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4836       }
4837     }
4838     // push user on appropriate worklist
4839     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4840       Node *use = n->fast_out(i);
4841       if (use->is_Phi() || use->is_ClearArray()) {
4842         memnode_worklist.append_if_missing(use);
4843       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4844         memnode_worklist.append_if_missing(use);
4845       } else if (use->is_MemBar() || use->is_CallLeaf()) {
4846         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4847           memnode_worklist.append_if_missing(use);
4848         }
4849 #ifdef ASSERT
4850       } else if (use->is_Mem()) {
4851         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4852       } else if (use->is_MergeMem()) {
4853         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4854       } else if (use->Opcode() == Op_EncodeISOArray) {
4855         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4856           // EncodeISOArray overwrites destination array
4857           memnode_worklist.append_if_missing(use);
4858         }
4859       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
4860                  strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4861         // store_unknown_inline overwrites destination array
4862         memnode_worklist.append_if_missing(use);
4863       } else {
4864         uint op = use->Opcode();
4865         if ((use->in(MemNode::Memory) == n) &&
4866             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4867           // They overwrite the memory edge corresponding to the destination array.
4868           memnode_worklist.append_if_missing(use);
4869         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4870               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4871               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4872               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
4873           n->dump();
4874           use->dump();
4875           assert(false, "EA: missing memory path");
4876         }
4877 #endif
4878       }
4879     }
4880   }
4881 
4882   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4883   //            Walk each memory slice moving the first node encountered of each
4884   //            instance type to the input corresponding to its alias index.
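       //            Sketch (illustrative): a store to field 'Point #24 +12'
       //            found on a MergeMem's wide slice becomes the input at the
       //            alias index newly allocated for that instance type, so
       //            later accesses of the slice bypass unrelated memory.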
4885   uint length = mergemem_worklist.length();
4886   for( uint next = 0; next < length; ++next ) {
4887     MergeMemNode* nmm = mergemem_worklist.at(next);
4888     assert(!visited.test_set(nmm->_idx), "should not have been visited before");
4889     // Note: we don't want to use MergeMemStream here because we only want to
4890     // scan inputs which exist at the start, not ones we add during processing.
4891   // Note 2: MergeMem may already contain instance memory slices added
4892   // during the find_inst_mem() call when memory nodes were processed above.

4953     if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
4954       if (_compile->do_reduce_allocation_merges()) {
4955         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4956       } else if (_invocation > 0) {
4957         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4958       } else {
4959         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4960       }
4961       return;
4962     }
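         // With the default MaxNodeLimit of 80000 (assumed here; the limit is
         // a configurable C2 flag) the 0.75 factor above gives up splitting
         // once roughly 60000 nodes are live.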
4963 
4964     igvn->hash_insert(nmm);
4965     record_for_optimizer(nmm);
4966   }
4967 
4968   //  Phase 4:  Update the inputs of non-instance memory Phis and
4969   //            the Memory input of memnodes
4970   // First update the inputs of any non-instance Phis from
4971   // which we split out an instance Phi.  Note we don't have
4972   // to recursively process Phis encountered on the input memory
4973   // chains, as is done in split_memory_phi(), since they will
4974   // also be processed here.
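       // Sketch (illustrative): each memory input of such a Phi is rewalked
       // with find_inst_mem() for the Phi's own alias index, redirecting
       // inputs that used to flow through the now split-out instance memory
       // to the matching non-instance memory state.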
4975   for (int j = 0; j < orig_phis.length(); j++) {
4976     PhiNode *phi = orig_phis.at(j);
4977     int alias_idx = _compile->get_alias_index(phi->adr_type());
4978     igvn->hash_delete(phi);
4979     for (uint i = 1; i < phi->req(); i++) {
4980       Node *mem = phi->in(i);
4981       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4982       if (_compile->failing()) {
4983         return;
4984       }
4985       if (mem != new_mem) {
4986         phi->set_req(i, new_mem);
4987       }
4988     }
4989     igvn->hash_insert(phi);
4990     record_for_optimizer(phi);
4991   }
4992 
4993   // Update the memory inputs of MemNodes with the value we computed
< prev index next >