src/hotspot/share/opto/escape.cpp

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.hpp"

  32 #include "memory/resourceArea.hpp"
  33 #include "opto/c2compiler.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/escape.hpp"

  39 #include "opto/macro.hpp"
  40 #include "opto/locknode.hpp"
  41 #include "opto/phaseX.hpp"
  42 #include "opto/movenode.hpp"
  43 #include "opto/narrowptrnode.hpp"
  44 #include "opto/castnode.hpp"
  45 #include "opto/rootnode.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  49   // If ReduceAllocationMerges is enabled we might call split_through_phi during
  50   // split_unique_types and that will create additional nodes that need to be
  51   // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  52   // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  53   // the array will be reallocated.
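  // For example, a compilation with C->unique() == 1000 would start with
  // room for 1100 PointsTo nodes when merge reduction is enabled.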
  54   _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  55   _in_worklist(C->comp_arena()),
  56   _next_pidx(0),
  57   _collecting(true),
  58   _verify(false),

 147   GrowableArray<SafePointNode*>  sfn_worklist;
 148   GrowableArray<MergeMemNode*>   mergemem_worklist;
 149   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 150 
 151   { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
 152 
 153   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 154   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
 155   // Initialize worklist
 156   if (C->root() != nullptr) {
 157     ideal_nodes.push(C->root());
 158   }
 159   // Processed ideal nodes are unique on ideal_nodes list
 160   // but several ideal nodes are mapped to the phantom_obj.
 161   // To avoid duplicated entries on the following worklists
 162   // add the phantom_obj only once to them.
 163   ptnodes_worklist.append(phantom_obj);
 164   java_objects_worklist.append(phantom_obj);
 165   for (uint next = 0; next < ideal_nodes.size(); ++next) {
 166     Node* n = ideal_nodes.at(next);
 167     // Create PointsTo nodes and add them to Connection Graph. Called
 168     // only once per ideal node since ideal_nodes is Unique_Node list.
 169     add_node_to_connection_graph(n, &delayed_worklist);
 170     PointsToNode* ptn = ptnode_adr(n->_idx);
 171     if (ptn != nullptr && ptn != phantom_obj) {
 172       ptnodes_worklist.append(ptn);
 173       if (ptn->is_JavaObject()) {
 174         java_objects_worklist.append(ptn->as_JavaObject());
 175         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 176             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 177           // Only results of allocations and java static calls are interesting.
 178           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 179         }
 180       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 181         oop_fields_worklist.append(ptn->as_Field());
 182       }
 183     }
 184     // Collect some interesting nodes for further use.
 185     switch (n->Opcode()) {
 186       case Op_MergeMem:

1230 
1231     // The next two inputs are:
1232     //  (1) A copy of the original pointer to NSR objects.
1233     //  (2) A selector, used to decide if we need to rematerialize an object
1234     //      or use the pointer to a NSR object.
1235     // See more details of these fields in the declaration of SafePointScalarMergeNode
1236     sfpt->add_req(nsr_merge_pointer);
1237     sfpt->add_req(selector);
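    // (Sketch of the selector contract, assuming the semantics documented on
    //  SafePointScalarMergeNode: at deoptimization, a selector value of -1
    //  means "use the NSR merge pointer as-is", while a value i >= 0 means
    //  "rematerialize the i-th scalar replaced candidate object".)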
1238 
1239     for (uint i = 1; i < ophi->req(); i++) {
1240       Node* base = ophi->in(i);
1241       JavaObjectNode* ptn = unique_java_object(base);
1242 
1243       // If the base is not scalar replaceable we don't need to register information about
1244       // it at this time.
1245       if (ptn == nullptr || !ptn->scalar_replaceable()) {
1246         continue;
1247       }
1248 
1249       AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1250       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);
1251       if (sobj == nullptr) {
1252         return false;
1253       }
1254 
1255       // Now make a pass over the debug information replacing any references
1256       // to the allocated object with "sobj"
1257       Node* ccpp = alloc->result_cast();
1258       sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1259 
1260       // Register the scalarized object as a candidate for reallocation
1261       smerge->add_req(sobj);
1262     }
1263 
1264     // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
1265     sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1266 
1267     // The call to 'replace_edges_in_range' above might have removed the
 1268     // reference to ophi that we need at _merge_pointer_idx. The line below makes
1269     // sure the reference is maintained.
1270     sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1271     _igvn->_worklist.push(sfpt);
1272   }
1273 
1274   return true;
1275 }
1276 
1277 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *>  &alloc_worklist, GrowableArray<Node *>  &memnode_worklist) {
1278   bool delay = _igvn->delay_transform();
1279   _igvn->set_delay_transform(true);
1280   _igvn->hash_delete(ophi);
1281 

1440   return false;
1441 }
1442 
1443 // Returns true if at least one of the arguments to the call is an object
1444 // that does not escape globally.
1445 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1446   if (call->method() != nullptr) {
1447     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1448     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1449       Node* p = call->in(idx);
1450       if (not_global_escape(p)) {
1451         return true;
1452       }
1453     }
1454   } else {
1455     const char* name = call->as_CallStaticJava()->_name;
1456     assert(name != nullptr, "no name");
1457     // no arg escapes through uncommon traps
1458     if (strcmp(name, "uncommon_trap") != 0) {
1459       // process_call_arguments() assumes that all arguments escape globally
1460       const TypeTuple* d = call->tf()->domain();
1461       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1462         const Type* at = d->field_at(i);
1463         if (at->isa_oopptr() != nullptr) {
1464           return true;
1465         }
1466       }
1467     }
1468   }
1469   return false;
1470 }
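// (Illustrative example with a hypothetical callee: for a call site
//    log(obj);   // obj points to a non-globally-escaping allocation
//  at least one argument satisfies not_global_escape(), so
//  has_arg_escape() returns true.)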
1471 
1472 
1473 
1474 // Utility function for nodes that load an object
1475 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1476   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1477   // ThreadLocal has RawPtr type.
1478   const Type* t = _igvn->type(n);
1479   if (t->make_ptr() != nullptr) {
1480     Node* adr = n->in(MemNode::Address);

1514       // first IGVN optimization when escape information is still available.
1515       record_for_optimizer(n);
1516     } else if (n->is_Allocate()) {
1517       add_call_node(n->as_Call());
1518       record_for_optimizer(n);
1519     } else {
1520       if (n->is_CallStaticJava()) {
1521         const char* name = n->as_CallStaticJava()->_name;
1522         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1523           return; // Skip uncommon traps
1524         }
1525       }
1526       // Don't mark as processed since call's arguments have to be processed.
1527       delayed_worklist->push(n);
1528       // Check if a call returns an object.
1529       if ((n->as_Call()->returns_pointer() &&
1530            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1531           (n->is_CallStaticJava() &&
1532            n->as_CallStaticJava()->is_boxing_method())) {
1533         add_call_node(n->as_Call());
1534       }
1535     }
1536     return;
1537   }
1538   // Put this check here to process call arguments since some call nodes
1539   // point to phantom_obj.
1540   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1541     return; // Skip predefined nodes.
1542   }
1543   switch (opcode) {
1544     case Op_AddP: {
1545       Node* base = get_addp_base(n);
1546       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1547       // Field nodes are created for all field types. They are used in
1548       // adjust_scalar_replaceable_state() and split_unique_types().
1549       // Note, non-oop fields will have only base edges in Connection
1550       // Graph because such fields are not used for oop loads and stores.
1551       int offset = address_offset(n, igvn);
1552       add_field(n, PointsToNode::NoEscape, offset);
1553       if (ptn_base == nullptr) {
1554         delayed_worklist->push(n); // Process it later.
1555       } else {
1556         n_ptn = ptnode_adr(n_idx);
1557         add_base(n_ptn->as_Field(), ptn_base);
1558       }
1559       break;
1560     }
1561     case Op_CastX2P: {
1562       map_ideal_node(n, phantom_obj);
1563       break;
1564     }

1565     case Op_CastPP:
1566     case Op_CheckCastPP:
1567     case Op_EncodeP:
1568     case Op_DecodeN:
1569     case Op_EncodePKlass:
1570     case Op_DecodeNKlass: {
1571       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1572       break;
1573     }
1574     case Op_CMoveP: {
1575       add_local_var(n, PointsToNode::NoEscape);
 1576       // Do not add edges during the first iteration because some inputs
 1577       // may not be defined yet.
1578       delayed_worklist->push(n);
1579       break;
1580     }
1581     case Op_ConP:
1582     case Op_ConN:
1583     case Op_ConNKlass: {
1584       // assume all oop constants globally escape except for null

1616     case Op_PartialSubtypeCheck: {
 1617       // Produces Null or notNull and is used only in CmpP, so
1618       // phantom_obj could be used.
1619       map_ideal_node(n, phantom_obj); // Result is unknown
1620       break;
1621     }
1622     case Op_Phi: {
1623       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1624       // ThreadLocal has RawPtr type.
1625       const Type* t = n->as_Phi()->type();
1626       if (t->make_ptr() != nullptr) {
1627         add_local_var(n, PointsToNode::NoEscape);
 1628         // Do not add edges during the first iteration because some inputs
 1629         // may not be defined yet.
1630         delayed_worklist->push(n);
1631       }
1632       break;
1633     }
1634     case Op_Proj: {
1635       // we are only interested in the oop result projection from a call
1636       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1637           n->in(0)->as_Call()->returns_pointer()) {
1638         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1639       }
1640       break;
1641     }
1642     case Op_Rethrow: // Exception object escapes
1643     case Op_Return: {
1644       if (n->req() > TypeFunc::Parms &&
1645           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1646         // Treat Return value as LocalVar with GlobalEscape escape state.
1647         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1648       }
1649       break;
1650     }
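    // (Example: in
    //    Object id() { return new Object(); }
    //  the allocation flows into the Return, so it is recorded with a
    //  GlobalEscape state even though it never leaves the method otherwise.)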
1651     case Op_CompareAndExchangeP:
1652     case Op_CompareAndExchangeN:
1653     case Op_GetAndSetP:
1654     case Op_GetAndSetN: {
1655       add_objload_to_connection_graph(n, delayed_worklist);
1656       // fall-through
1657     }

1719   if (n->is_Call()) {
1720     process_call_arguments(n->as_Call());
1721     return;
1722   }
1723   assert(n->is_Store() || n->is_LoadStore() ||
1724          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1725          "node should be registered already");
1726   int opcode = n->Opcode();
1727   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1728   if (gc_handled) {
1729     return; // Ignore node if already handled by GC.
1730   }
1731   switch (opcode) {
1732     case Op_AddP: {
1733       Node* base = get_addp_base(n);
1734       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1735       assert(ptn_base != nullptr, "field's base should be registered");
1736       add_base(n_ptn->as_Field(), ptn_base);
1737       break;
1738     }

1739     case Op_CastPP:
1740     case Op_CheckCastPP:
1741     case Op_EncodeP:
1742     case Op_DecodeN:
1743     case Op_EncodePKlass:
1744     case Op_DecodeNKlass: {
1745       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1746       break;
1747     }
1748     case Op_CMoveP: {
1749       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1750         Node* in = n->in(i);
1751         if (in == nullptr) {
1752           continue;  // ignore null
1753         }
1754         Node* uncast_in = in->uncast();
1755         if (uncast_in->is_top() || uncast_in == n) {
1756           continue;  // ignore top or inputs which go back this node
1757         }
1758         PointsToNode* ptn = ptnode_adr(in->_idx);

1773       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1774       // ThreadLocal has RawPtr type.
1775       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1776       for (uint i = 1; i < n->req(); i++) {
1777         Node* in = n->in(i);
1778         if (in == nullptr) {
1779           continue;  // ignore null
1780         }
1781         Node* uncast_in = in->uncast();
1782         if (uncast_in->is_top() || uncast_in == n) {
1783           continue;  // ignore top or inputs which go back this node
1784         }
1785         PointsToNode* ptn = ptnode_adr(in->_idx);
1786         assert(ptn != nullptr, "node should be registered");
1787         add_edge(n_ptn, ptn);
1788       }
1789       break;
1790     }
1791     case Op_Proj: {
1792       // we are only interested in the oop result projection from a call
1793       assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1794              n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
1795       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1796       break;
1797     }
1798     case Op_Rethrow: // Exception object escapes
1799     case Op_Return: {
1800       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1801              "Unexpected node type");
1802       // Treat Return value as LocalVar with GlobalEscape escape state.
1803       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1804       break;
1805     }
1806     case Op_CompareAndExchangeP:
1807     case Op_CompareAndExchangeN:
1808     case Op_GetAndSetP:
1809     case Op_GetAndSetN:{
1810       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1811       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1812       // fall-through
1813     }
1814     case Op_CompareAndSwapP:

1950     PointsToNode* ptn = ptnode_adr(val->_idx);
1951     assert(ptn != nullptr, "node should be registered");
1952     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1953     // Add edge to object for unsafe access with offset.
1954     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1955     assert(adr_ptn != nullptr, "node should be registered");
1956     if (adr_ptn->is_Field()) {
1957       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
1958       add_edge(adr_ptn, ptn);
1959     }
1960     return true;
1961   }
1962 #ifdef ASSERT
1963   n->dump(1);
1964   assert(false, "not unsafe");
1965 #endif
1966   return false;
1967 }
1968 
1969 void ConnectionGraph::add_call_node(CallNode* call) {
1970   assert(call->returns_pointer(), "only for call which returns pointer");
1971   uint call_idx = call->_idx;
1972   if (call->is_Allocate()) {
1973     Node* k = call->in(AllocateNode::KlassNode);
1974     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 1975     assert(kt != nullptr, "TypeKlassPtr required.");
1976     PointsToNode::EscapeState es = PointsToNode::NoEscape;
1977     bool scalar_replaceable = true;
1978     NOT_PRODUCT(const char* nsr_reason = "");
1979     if (call->is_AllocateArray()) {
1980       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
1981         es = PointsToNode::GlobalEscape;
1982       } else {
1983         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
1984         if (length < 0) {
1985           // Not scalar replaceable if the length is not constant.
1986           scalar_replaceable = false;
1987           NOT_PRODUCT(nsr_reason = "has a non-constant length");
1988         } else if (length > EliminateAllocationArraySizeLimit) {
1989           // Not scalar replaceable if the length is too big.
1990           scalar_replaceable = false;

2026     //
2027     //    - all oop arguments are escaping globally;
2028     //
2029     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2030     //
2031     //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
2032     //
2033     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2034     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2035     //      during call is returned;
 2036     //    - mapped to ArgEscape LocalVar node pointing to object arguments
 2037     //      which are returned and do not escape during the call;
2038     //
 2039     //    - oop arguments' escape status is determined by bytecode analysis;
2040     //
2041     // For a static call, we know exactly what method is being called.
2042     // Use bytecode estimator to record whether the call's return value escapes.
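    // (Hypothetical example: for
    //    static Object self(Object o) { return o; }
    //  the bytecode analyzer reports the argument as returned, so the call
    //  below is mapped to an ArgEscape LocalVar instead of a new JavaObject.)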
2043     ciMethod* meth = call->as_CallJava()->method();
2044     if (meth == nullptr) {
2045       const char* name = call->as_CallStaticJava()->_name;
2046       assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "TODO: add failed case check");

2047       // Returns a newly allocated non-escaped object.
2048       add_java_object(call, PointsToNode::NoEscape);
2049       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
2050     } else if (meth->is_boxing_method()) {
2051       // Returns boxing object
2052       PointsToNode::EscapeState es;
2053       vmIntrinsics::ID intr = meth->intrinsic_id();
2054       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2055         // It does not escape if object is always allocated.
2056         es = PointsToNode::NoEscape;
2057       } else {
2058         // It escapes globally if object could be loaded from cache.
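        // (e.g. Integer.valueOf(n) may return an object from the shared
        //  Integer cache rather than a fresh allocation.)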
2059         es = PointsToNode::GlobalEscape;
2060       }
2061       add_java_object(call, es);
2062       if (es == PointsToNode::GlobalEscape) {
2063         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2064       }
2065     } else {
2066       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2067       call_analyzer->copy_dependencies(_compile->dependencies());
2068       if (call_analyzer->is_return_allocated()) {
2069         // Returns a newly allocated non-escaped object, simply
2070         // update dependency information.
2071         // Mark it as NoEscape so that objects referenced by
 2072       // its fields will be marked as NoEscape at least.
2073         add_java_object(call, PointsToNode::NoEscape);
2074         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2075       } else {
2076         // Determine whether any arguments are returned.
2077         const TypeTuple* d = call->tf()->domain();
2078         bool ret_arg = false;
2079         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2080           if (d->field_at(i)->isa_ptr() != nullptr &&
2081               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2082             ret_arg = true;
2083             break;
2084           }
2085         }
2086         if (ret_arg) {
2087           add_local_var(call, PointsToNode::ArgEscape);
2088         } else {
2089           // Returns unknown object.
2090           map_ideal_node(call, phantom_obj);
2091         }
2092       }
2093     }
2094   } else {
 2095     // Another type of call; assume the worst case:
2096     // returned value is unknown and globally escapes.
2097     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");

2105 #ifdef ASSERT
2106     case Op_Allocate:
2107     case Op_AllocateArray:
2108     case Op_Lock:
2109     case Op_Unlock:
2110       assert(false, "should be done already");
2111       break;
2112 #endif
2113     case Op_ArrayCopy:
2114     case Op_CallLeafNoFP:
2115       // Most array copies are ArrayCopy nodes at this point but there
2116       // are still a few direct calls to the copy subroutines (See
2117       // PhaseStringOpts::copy_string())
2118       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2119         call->as_CallLeaf()->is_call_to_arraycopystub();
2120       // fall through
2121     case Op_CallLeafVector:
2122     case Op_CallLeaf: {
 2123       // Stub calls: objects do not escape but they are not scalar replaceable.
2124       // Adjust escape state for outgoing arguments.
2125       const TypeTuple * d = call->tf()->domain();
2126       bool src_has_oops = false;
2127       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2128         const Type* at = d->field_at(i);
2129         Node *arg = call->in(i);
2130         if (arg == nullptr) {
2131           continue;
2132         }
2133         const Type *aat = _igvn->type(arg);
2134         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2135           continue;
2136         }
2137         if (arg->is_AddP()) {
2138           //
2139           // The inline_native_clone() case when the arraycopy stub is called
2140           // after the allocation before Initialize and CheckCastPP nodes.
2141           // Or normal arraycopy for object arrays case.
2142           //
2143           // Set AddP's base (Allocate) as not scalar replaceable since
2144           // pointer to the base (with offset) is passed as argument.
2145           //
2146           arg = get_addp_base(arg);
2147         }
2148         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2149         assert(arg_ptn != nullptr, "should be registered");
2150         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2151         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2152           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
 2153                  aat->isa_ptr() != nullptr, "expecting a Ptr");
2154           bool arg_has_oops = aat->isa_oopptr() &&
2155                               (aat->isa_instptr() ||
2156                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
2157           if (i == TypeFunc::Parms) {
2158             src_has_oops = arg_has_oops;
2159           }
2160           //
2161           // src or dst could be j.l.Object when other is basic type array:
2162           //
2163           //   arraycopy(char[],0,Object*,0,size);
2164           //   arraycopy(Object*,0,char[],0,size);
2165           //
2166           // Don't add edges in such cases.
2167           //
2168           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2169                                        arg_has_oops && (i > TypeFunc::Parms);
2170 #ifdef ASSERT
2171           if (!(is_arraycopy ||
2172                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2173                 (call->as_CallLeaf()->_name != nullptr &&
2174                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2175                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2176                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

2187                   strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2188                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2189                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2190                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2191                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2192                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2193                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2194                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2195                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2196                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2197                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2198                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2199                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2200                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2201                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2202                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2203                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2204                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2205                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2206                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2207                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2208                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2209                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2210                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2211                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2212                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2213                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2214                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2215                  ))) {
2216             call->dump();
2217             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2218           }
2219 #endif
2220           // Always process arraycopy's destination object since
2221           // we need to add all possible edges to references in
2222           // source object.
2223           if (arg_esc >= PointsToNode::ArgEscape &&
2224               !arg_is_arraycopy_dest) {
2225             continue;
2226           }

2253           }
2254         }
2255       }
2256       break;
2257     }
2258     case Op_CallStaticJava: {
2259       // For a static call, we know exactly what method is being called.
 2260       // Use bytecode estimator to record the call's escape effects
2261 #ifdef ASSERT
2262       const char* name = call->as_CallStaticJava()->_name;
2263       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2264 #endif
2265       ciMethod* meth = call->as_CallJava()->method();
2266       if ((meth != nullptr) && meth->is_boxing_method()) {
2267         break; // Boxing methods do not modify any oops.
2268       }
 2269       BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2270       // fall-through if not a Java method or no analyzer information
2271       if (call_analyzer != nullptr) {
2272         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2273         const TypeTuple* d = call->tf()->domain();
2274         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2275           const Type* at = d->field_at(i);
2276           int k = i - TypeFunc::Parms;
2277           Node* arg = call->in(i);
2278           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2279           if (at->isa_ptr() != nullptr &&
2280               call_analyzer->is_arg_returned(k)) {
2281             // The call returns arguments.
2282             if (call_ptn != nullptr) { // Is call's result used?
2283               assert(call_ptn->is_LocalVar(), "node should be registered");
2284               assert(arg_ptn != nullptr, "node should be registered");
2285               add_edge(call_ptn, arg_ptn);
2286             }
2287           }
2288           if (at->isa_oopptr() != nullptr &&
2289               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2290             if (!call_analyzer->is_arg_stack(k)) {
2291               // The argument global escapes
2292               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2293             } else {

2297                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2298               }
2299             }
2300           }
2301         }
2302         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2303           // The call returns arguments.
2304           assert(call_ptn->edge_count() > 0, "sanity");
2305           if (!call_analyzer->is_return_local()) {
2306             // Returns also unknown object.
2307             add_edge(call_ptn, phantom_obj);
2308           }
2309         }
2310         break;
2311       }
2312     }
2313     default: {
 2314       // Fall through here if this is not a Java method, there is no
 2315       // analyzer information, or it is some other type of call; assume
 2316       // the worst case: all arguments globally escape.
2317       const TypeTuple* d = call->tf()->domain();
2318       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2319         const Type* at = d->field_at(i);
2320         if (at->isa_oopptr() != nullptr) {
2321           Node* arg = call->in(i);
2322           if (arg->is_AddP()) {
2323             arg = get_addp_base(arg);
2324           }
2325           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2326           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2327         }
2328       }
2329     }
2330   }
2331 }
2332 
2333 
2334 // Finish Graph construction.
2335 bool ConnectionGraph::complete_connection_graph(
2336                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2337                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

2710     PointsToNode* base = i.get();
2711     if (base->is_JavaObject()) {
2712       // Skip Allocate's fields which will be processed later.
2713       if (base->ideal_node()->is_Allocate()) {
2714         return 0;
2715       }
2716       assert(base == null_obj, "only null ptr base expected here");
2717     }
2718   }
2719   if (add_edge(field, phantom_obj)) {
2720     // New edge was added
2721     new_edges++;
2722     add_field_uses_to_worklist(field);
2723   }
2724   return new_edges;
2725 }
2726 
2727 // Find fields initializing values for allocations.
2728 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2729   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2730   Node* alloc = pta->ideal_node();
2731 
 2732   // Do nothing for Allocate nodes since their field values are
2733   // "known" unless they are initialized by arraycopy/clone.
2734   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2735     return 0;
2736   }
2737   assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");

2738 #ifdef ASSERT
2739   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
2740     const char* name = alloc->as_CallStaticJava()->_name;
2741     assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "sanity");

2742   }
2743 #endif
 2744   // Non-escaped allocations returned from Java or runtime calls have unknown field values.
2745   int new_edges = 0;
2746   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2747     PointsToNode* field = i.get();
2748     if (field->is_Field() && field->as_Field()->is_oop()) {
2749       if (add_edge(field, phantom_obj)) {
2750         // New edge was added
2751         new_edges++;
2752         add_field_uses_to_worklist(field->as_Field());
2753       }
2754     }
2755   }
2756   return new_edges;
2757 }
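// (Illustrative case for the loop above, assuming a multianewarray runtime
//  call: the returned outer array does not escape, but its oop fields (the
//  inner array elements) were written by the runtime stub, so each such
//  field gets an edge to phantom_obj to model the unknown stored values.)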
2758 
2759 // Find fields initializing values for allocations.
2760 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2761   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2762   Node* alloc = pta->ideal_node();
 2763   // Do nothing for Call nodes since their field values are unknown.
2764   if (!alloc->is_Allocate()) {
2765     return 0;
2766   }
2767   InitializeNode* ini = alloc->as_Allocate()->initialization();
2768   bool visited_bottom_offset = false;
2769   GrowableArray<int> offsets_worklist;
2770   int new_edges = 0;
2771 
2772   // Check if an oop field's initializing value is recorded and add
 2773   // a corresponding null as the field's value if it is not recorded.
2774   // Connection Graph does not record a default initialization by null
2775   // captured by Initialize node.
2776   //
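  // (Example: if a class has an oop field "ref" and the constructor never
  //  stores to it, no initializing store is captured by the Initialize node;
  //  the null edge added below lets a later "x.ref == null" compare fold.)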
2777   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2778     PointsToNode* field = i.get(); // Field (AddP)
2779     if (!field->is_Field() || !field->as_Field()->is_oop()) {
2780       continue; // Not oop field
2781     }
2782     int offset = field->as_Field()->offset();
2783     if (offset == Type::OffsetBot) {
2784       if (!visited_bottom_offset) {

2830               } else {
2831                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2832                   tty->print_cr("----------init store has invalid value -----");
2833                   store->dump();
2834                   val->dump();
2835                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2836                 }
2837                 for (EdgeIterator j(val); j.has_next(); j.next()) {
2838                   PointsToNode* obj = j.get();
2839                   if (obj->is_JavaObject()) {
2840                     if (!field->points_to(obj->as_JavaObject())) {
2841                       missed_obj = obj;
2842                       break;
2843                     }
2844                   }
2845                 }
2846               }
2847               if (missed_obj != nullptr) {
2848                 tty->print_cr("----------field---------------------------------");
2849                 field->dump();
 2850                 tty->print_cr("----------missed reference to object-----------");
 2851                 missed_obj->dump();
 2852                 tty->print_cr("----------object referenced by init store -----");
2853                 store->dump();
2854                 val->dump();
2855                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2856               }
2857             }
2858 #endif
2859           } else {
2860             // There could be initializing stores which follow allocation.
2861             // For example, a volatile field store is not collected
2862             // by Initialize node.
2863             //
2864             // Need to check for dependent loads to separate such stores from
2865             // stores which follow loads. For now, add initial value null so
2866             // that compare pointers optimization works correctly.
2867           }
2868         }
2869         if (value == nullptr) {
2870           // A field's initializing value was not recorded. Add null.
2871           if (add_edge(field, null_obj)) {
2872             // New edge was added

3149         assert(field->edge_count() > 0, "sanity");
3150       }
3151     }
3152   }
3153 }
3154 #endif
3155 
3156 // Optimize ideal graph.
3157 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3158                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3159   Compile* C = _compile;
3160   PhaseIterGVN* igvn = _igvn;
3161   if (EliminateLocks) {
3162     // Mark locks before changing ideal graph.
3163     int cnt = C->macro_count();
3164     for (int i = 0; i < cnt; i++) {
3165       Node *n = C->macro_node(i);
3166       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3167         AbstractLockNode* alock = n->as_AbstractLock();
3168         if (!alock->is_non_esc_obj()) {
3169           if (can_eliminate_lock(alock)) {

3170             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3171             // The lock could be marked eliminated by lock coarsening
 3172             // code during the first IGVN before EA. Replace the coarsened flag
 3173             // so that all associated locks/unlocks are eliminated.
3174 #ifdef ASSERT
3175             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3176 #endif
3177             alock->set_non_esc_obj();
3178           }
3179         }
3180       }
3181     }
3182   }
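  // (Illustrative example: in
  //    void m() { synchronized (new Object()) { /* ... */ } }
  //  the monitor object never escapes, so the Lock/Unlock pair is marked
  //  non_esc_obj above and can be eliminated during macro expansion.)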
3183 
3184   if (OptimizePtrCompare) {
3185     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3186       Node *n = ptr_cmp_worklist.at(i);
3187       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3188       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3189       if (tcmp->singleton()) {

3191 #ifndef PRODUCT
3192         if (PrintOptimizePtrCompare) {
3193           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3194           if (Verbose) {
3195             n->dump(1);
3196           }
3197         }
3198 #endif
3199         igvn->replace_node(n, cmp);
3200       }
3201     }
3202   }
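  // (Example of a foldable compare: "new A() == new A()" involves two
  //  distinct non-escaping allocations, so optimize_ptr_compare() returns
  //  the singleton NE type and the CmpP above is replaced by a constant.)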
3203 
3204   // For MemBarStoreStore nodes added in library_call.cpp, check
3205   // escape status of associated AllocateNode and optimize out
3206   // MemBarStoreStore node if the allocated object never escapes.
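  // (Illustrative case, assuming the barriers emitted by library_call.cpp:
  //  such a MemBarStoreStore publishes e.g. a freshly cloned object, which
  //  only matters if the object can become visible to another thread; when
  //  it never escapes, the cheaper MemBarCPUOrder ordering is sufficient.)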
3207   for (int i = 0; i < storestore_worklist.length(); i++) {
3208     Node* storestore = storestore_worklist.at(i);
3209     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3210     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3211       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3212       mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3213       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3214       igvn->register_new_node_with_optimizer(mb);
3215       igvn->replace_node(storestore, mb);
3216     }
3217   }
3218 }
3219 
3220 // Optimize objects compare.
3221 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3222   assert(OptimizePtrCompare, "sanity");
3223   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3224   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3225   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0,1]
3226 
3227   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3228   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3229   JavaObjectNode* jobj1 = unique_java_object(left);
3230   JavaObjectNode* jobj2 = unique_java_object(right);
3231 
3232   // The use of this method during allocation merge reduction may cause 'left'
 3233   // or 'right' to be something (e.g., a Phi) that isn't in the connection graph or
 3234   // that doesn't reference a unique java object.
3235   if (ptn1 == nullptr || ptn2 == nullptr ||

3357   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3358   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3359   PointsToNode* ptadr = _nodes.at(n->_idx);
3360   if (ptadr != nullptr) {
3361     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3362     return;
3363   }
3364   Compile* C = _compile;
3365   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3366   map_ideal_node(n, ptadr);
3367   // Add edge from arraycopy node to source object.
3368   (void)add_edge(ptadr, src);
3369   src->set_arraycopy_src();
3370   // Add edge from destination object to arraycopy node.
3371   (void)add_edge(dst, ptadr);
3372   dst->set_arraycopy_dst();
3373 }
3374 
3375 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3376   const Type* adr_type = n->as_AddP()->bottom_type();

3377   BasicType bt = T_INT;
3378   if (offset == Type::OffsetBot) {
3379     // Check only oop fields.
3380     if (!adr_type->isa_aryptr() ||
3381         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3382         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3383       // OffsetBot is used to reference array's element. Ignore first AddP.
3384       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3385         bt = T_OBJECT;
3386       }
3387     }
3388   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3389     if (adr_type->isa_instptr()) {
3390       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
3391       if (field != nullptr) {
3392         bt = field->layout_type();
3393       } else {
3394         // Check for unsafe oop field access
3395         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3396             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3397             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3398             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3399           bt = T_OBJECT;
3400           (*unsafe) = true;
3401         }
3402       }
3403     } else if (adr_type->isa_aryptr()) {
3404       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3405         // Ignore array length load.
3406       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3407         // Ignore first AddP.
3408       } else {
3409         const Type* elemtype = adr_type->isa_aryptr()->elem();
3410         bt = elemtype->array_element_basic_type();

3411       }
3412     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3413       // Allocation initialization, ThreadLocal field access, unsafe access
3414       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3415           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3416           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3417           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3418         bt = T_OBJECT;
3419       }
3420     }
3421   }
3422   // Note: T_NARROWOOP is not classed as a real reference type
3423   return (is_reference_type(bt) || bt == T_NARROWOOP);
3424 }
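// (Hypothetical examples of the classification above: an AddP whose only
//  use is a StoreP to an instance field of a reference type is an oop
//  field; an AddP used only to load the array length keeps bt == T_INT
//  and is not.)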
3425 
 3426 // Returns the unique java object pointed to, or null.
3427 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3428   // If the node was created after the escape computation we can't answer.
3429   uint idx = n->_idx;
3430   if (idx >= nodes_size()) {

3587             return true;
3588           }
3589         }
3590       }
3591     }
3592   }
3593   return false;
3594 }
3595 
3596 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3597   const Type *adr_type = phase->type(adr);
3598   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
 3599     // We are computing a raw address for a store captured by an Initialize;
 3600     // compute an appropriate address type. AddP cases #3 and #5 (see below).
3601     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3602     assert(offs != Type::OffsetBot ||
3603            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3604            "offset must be a constant or it is initialization of array");
3605     return offs;
3606   }
3607   const TypePtr *t_ptr = adr_type->isa_ptr();
3608   assert(t_ptr != nullptr, "must be a pointer type");
3609   return t_ptr->offset();
3610 }
3611 
3612 Node* ConnectionGraph::get_addp_base(Node *addp) {
3613   assert(addp->is_AddP(), "must be AddP");
3614   //
3615   // AddP cases for Base and Address inputs:
3616   // case #1. Direct object's field reference:
3617   //     Allocate
3618   //       |
3619   //     Proj #5 ( oop result )
3620   //       |
3621   //     CheckCastPP (cast to instance type)
3622   //      | |
3623   //     AddP  ( base == address )
3624   //
3625   // case #2. Indirect object's field reference:
3626   //      Phi
3627   //       |
3628   //     CastPP (cast to instance type)
3629   //      | |

3743   }
3744   return nullptr;
3745 }
3746 
3747 //
3748 // Adjust the type and inputs of an AddP which computes the
3749 // address of a field of an instance
3750 //
3751 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3752   PhaseGVN* igvn = _igvn;
3753   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3754   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3755   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3756   if (t == nullptr) {
 3757     // We are computing a raw address for a store captured by an Initialize;
3758     // compute an appropriate address type (cases #3 and #5).
3759     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3760     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3761     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3762     assert(offs != Type::OffsetBot, "offset must be a constant");
3763     t = base_t->add_offset(offs)->is_oopptr();
3764   }
3765   int inst_id =  base_t->instance_id();
3766   assert(!t->is_known_instance() || t->instance_id() == inst_id,
3767                              "old type must be non-instance or match new type");
3768 
3769   // The type 't' could be subclass of 'base_t'.
 3770   // As a result t->offset() could be larger than base_t's size, and it will
3771   // cause the failure in add_offset() with narrow oops since TypeOopPtr()
3772   // constructor verifies correctness of the offset.
3773   //
 3774   // It could happen on a subclass's branch (from the type profiling
3775   // inlining) which was not eliminated during parsing since the exactness
3776   // of the allocation type was not propagated to the subclass type check.
3777   //
3778   // Or the type 't' could be not related to 'base_t' at all.
 3779   // It could happen when the CHA type is different from the MDO type on a dead path
3780   // (for example, from instanceof check) which is not collapsed during parsing.
3781   //
3782   // Do nothing for such AddP node and don't process its users since
3783   // this code branch will go away.
3784   //
3785   if (!t->is_known_instance() &&
3786       !base_t->maybe_java_subtype_of(t)) {
3787      return false; // bail out
3788   }
3789   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
3790   // Do NOT remove the next line: ensure a new alias index is allocated
3791   // for the instance type. Note: C++ will not remove it since the call
 3792   // has a side effect.
3793   int alias_idx = _compile->get_alias_index(tinst);
3794   igvn->set_type(addp, tinst);
3795   // record the allocation in the node map
3796   set_map(addp, get_map(base->_idx));
3797   // Set addp's Base and Address to 'base'.
3798   Node *abase = addp->in(AddPNode::Base);
3799   Node *adr   = addp->in(AddPNode::Address);
3800   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3801       adr->in(0)->_idx == (uint)inst_id) {
3802     // Skip AddP cases #3 and #5.
3803   } else {
3804     assert(!abase->is_top(), "sanity"); // AddP case #3
3805     if (abase != base) {
3806       igvn->hash_delete(addp);
3807       addp->set_req(AddPNode::Base, base);
3808       if (abase == adr) {
3809         addp->set_req(AddPNode::Address, base);

4475         ptnode_adr(n->_idx)->dump();
4476         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4477 #endif
4478         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4479         return;
4480       } else {
4481         Node *val = get_map(jobj->idx());   // CheckCastPP node
4482         TypeNode *tn = n->as_Type();
4483         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4484         assert(tinst != nullptr && tinst->is_known_instance() &&
 4485                tinst->instance_id() == jobj->idx(), "instance type expected.");
4486 
4487         const Type *tn_type = igvn->type(tn);
4488         const TypeOopPtr *tn_t;
4489         if (tn_type->isa_narrowoop()) {
4490           tn_t = tn_type->make_ptr()->isa_oopptr();
4491         } else {
4492           tn_t = tn_type->isa_oopptr();
4493         }
4494         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
4495           if (tn_type->isa_narrowoop()) {
4496             tn_type = tinst->make_narrowoop();
4497           } else {
4498             tn_type = tinst;
4499           }
4500           igvn->hash_delete(tn);
4501           igvn->set_type(tn, tn_type);
4502           tn->set_type(tn_type);
4503           igvn->hash_insert(tn);
4504           record_for_optimizer(n);
4505         } else {
4506           assert(tn_type == TypePtr::NULL_PTR ||
4507                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4508                  "unexpected type");
4509           continue; // Skip dead path with different type
4510         }
4511       }
4512     } else {
4513       debug_only(n->dump();)
4514       assert(false, "EA: unexpected node");
4515       continue;
4516     }
4517     // push allocation's users on appropriate worklist
4518     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4519       Node *use = n->fast_out(i);
 4520       if (use->is_Mem() && use->in(MemNode::Address) == n) {
4521         // Load/store to instance's field
4522         memnode_worklist.append_if_missing(use);
4523       } else if (use->is_MemBar()) {
4524         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4525           memnode_worklist.append_if_missing(use);
4526         }
4527       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4528         Node* addp2 = find_second_addp(use, n);
4529         if (addp2 != nullptr) {
4530           alloc_worklist.append_if_missing(addp2);
4531         }
4532         alloc_worklist.append_if_missing(use);
4533       } else if (use->is_Phi() ||
4534                  use->is_CheckCastPP() ||
4535                  use->is_EncodeNarrowPtr() ||
4536                  use->is_DecodeNarrowPtr() ||
4537                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4538         alloc_worklist.append_if_missing(use);
4539 #ifdef ASSERT
4540       } else if (use->is_Mem()) {
4541         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4542       } else if (use->is_MergeMem()) {
4543         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4544       } else if (use->is_SafePoint()) {
4545         // Look for MergeMem nodes for calls which reference unique allocation
4546         // (through CheckCastPP nodes) even for debug info.
4547         Node* m = use->in(TypeFunc::Memory);
4548         if (m->is_MergeMem()) {
4549           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4550         }
4551       } else if (use->Opcode() == Op_EncodeISOArray) {
4552         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4553           // EncodeISOArray overwrites destination array
4554           memnode_worklist.append_if_missing(use);
4555         }
4556       } else {
4557         uint op = use->Opcode();
4558         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4559             (use->in(MemNode::Memory) == n)) {
 4560           // They overwrite the memory edge corresponding to the destination array.
4561           memnode_worklist.append_if_missing(use);
4562         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4563               op == Op_CastP2X ||
4564               op == Op_FastLock || op == Op_AryEq ||
4565               op == Op_StrComp || op == Op_CountPositives ||
4566               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4567               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4568               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4569               op == Op_SubTypeCheck ||
4570               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4571           n->dump();
4572           use->dump();
4573           assert(false, "EA: missing allocation reference path");
4574         }
4575 #endif
4576       }
4577     }
4578 
4579   }
4580 
4581 #ifdef ASSERT
4582   if (VerifyReduceAllocationMerges) {
4583     for (uint i = 0; i < reducible_merges.size(); i++) {
4584       Node* phi = reducible_merges.at(i);
4585 
4586       if (!reduced_merges.member(phi)) {
4587         phi->dump(2);
4588         phi->dump(-2);
4589         assert(false, "This reducible merge wasn't reduced.");

4649     if (n->is_Phi() || n->is_ClearArray()) {
4650       // we don't need to do anything, but the users must be pushed
4651     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4652       // we don't need to do anything, but the users must be pushed
4653       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4654       if (n == nullptr) {
4655         continue;
4656       }
4657     } else if (n->is_CallLeaf()) {
4658       // Runtime calls with narrow memory input (no MergeMem node)
4659       // get the memory projection
4660       n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4661       if (n == nullptr) {
4662         continue;
4663       }
4664     } else if (n->Opcode() == Op_StrCompressedCopy ||
4665                n->Opcode() == Op_EncodeISOArray) {
4666       // get the memory projection
4667       n = n->find_out_with(Op_SCMemProj);
4668       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4669     } else {
4670       assert(n->is_Mem(), "memory node required.");
4671       Node *addr = n->in(MemNode::Address);
4672       const Type *addr_t = igvn->type(addr);
4673       if (addr_t == Type::TOP) {
4674         continue;
4675       }
4676       assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4677       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4678       assert ((uint)alias_idx < new_index_end, "wrong alias index");
4679       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4680       if (_compile->failing()) {
4681         return;
4682       }
4683       if (mem != n->in(MemNode::Memory)) {
4684         // We delay the memory edge update since we need old one in
4685         // MergeMem code below when instances memory slices are separated.
4686         set_map(n, mem);
4687       }
4688       if (n->is_Load()) {
4689         continue;  // don't push users
4690       } else if (n->is_LoadStore()) {
4691         // get the memory projection
4692         n = n->find_out_with(Op_SCMemProj);
4693         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4694       }
4695     }
4696     // push user on appropriate worklist
4697     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4698       Node *use = n->fast_out(i);
4699       if (use->is_Phi() || use->is_ClearArray()) {
4700         memnode_worklist.append_if_missing(use);
4701       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4702         memnode_worklist.append_if_missing(use);
4703       } else if (use->is_MemBar() || use->is_CallLeaf()) {
4704         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4705           memnode_worklist.append_if_missing(use);
4706         }
4707 #ifdef ASSERT
4708       } else if (use->is_Mem()) {
4709         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4710       } else if (use->is_MergeMem()) {
4711         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4712       } else if (use->Opcode() == Op_EncodeISOArray) {
4713         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4714           // EncodeISOArray overwrites destination array
4715           memnode_worklist.append_if_missing(use);
4716         }
4717       } else {
4718         uint op = use->Opcode();
4719         if ((use->in(MemNode::Memory) == n) &&
4720             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4721           // They overwrite the memory edge corresponding to the destination array.
4722           memnode_worklist.append_if_missing(use);
4723         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4724               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4725               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4726               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4727           n->dump();
4728           use->dump();
4729           assert(false, "EA: missing memory path");
4730         }
4731 #endif
4732       }
4733     }
4734   }
4735 
4736   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4737   //            Walk each memory slice moving the first node encountered of each
4738   //            instance type to the input corresponding to its alias index.
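  //  (A MergeMem is conceptually a table of memory states indexed by alias
  //   index; the walk below fills the slot for each new instance slice with
  //   the first memory node it encounters on that slice.)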
4739   uint length = mergemem_worklist.length();
4740   for( uint next = 0; next < length; ++next ) {
4741     MergeMemNode* nmm = mergemem_worklist.at(next);
4742     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4743     // Note: we don't want to use MergeMemStream here because we only want to
4744     // scan inputs which exist at the start, not ones we add during processing.
4745     // Note 2: MergeMem may already contain instance memory slices added
4746     // during the find_inst_mem() call when memory nodes were processed above.

4807     if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
4808       if (_compile->do_reduce_allocation_merges()) {
4809         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4810       } else if (_invocation > 0) {
4811         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4812       } else {
4813         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4814       }
4815       return;
4816     }
4817 
4818     igvn->hash_insert(nmm);
4819     record_for_optimizer(nmm);
4820   }
4821 
4822   //  Phase 4:  Update the inputs of non-instance memory Phis and
4823   //            the Memory input of memnodes
4824   // First update the inputs of any non-instance Phi's from
4825   // which we split out an instance Phi.  Note we don't have
4826   // to recursively process Phi's encountered on the input memory
4827 // chains as is done in split_memory_phi() since they will
4828   // also be processed here.
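// (That is, each memory input of every Phi recorded in orig_phis is rerouted
// through find_inst_mem() so the Phi no longer merges the instance memory
// slices that were split out of it.)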
4829   for (int j = 0; j < orig_phis.length(); j++) {
4830     PhiNode *phi = orig_phis.at(j);
4831     int alias_idx = _compile->get_alias_index(phi->adr_type());
4832     igvn->hash_delete(phi);
4833     for (uint i = 1; i < phi->req(); i++) {
4834       Node *mem = phi->in(i);
4835       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4836       if (_compile->failing()) {
4837         return;
4838       }
4839       if (mem != new_mem) {
4840         phi->set_req(i, new_mem);
4841       }
4842     }
4843     igvn->hash_insert(phi);
4844     record_for_optimizer(phi);
4845   }
4846 
4847   // Update the memory inputs of MemNodes with the value we computed

  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/metaspace.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "opto/c2compiler.hpp"
  35 #include "opto/arraycopynode.hpp"
  36 #include "opto/callnode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/compile.hpp"
  39 #include "opto/escape.hpp"
  40 #include "opto/inlinetypenode.hpp"
  41 #include "opto/macro.hpp"
  42 #include "opto/locknode.hpp"
  43 #include "opto/phaseX.hpp"
  44 #include "opto/movenode.hpp"
  45 #include "opto/narrowptrnode.hpp"
  46 #include "opto/castnode.hpp"
  47 #include "opto/rootnode.hpp"
  48 #include "utilities/macros.hpp"
  49 
  50 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  51   // If ReduceAllocationMerges is enabled we might call split_through_phi during
  52   // split_unique_types and that will create additional nodes that need to be
  53   // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  54   // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  55   // the array will be reallocated.
  56   _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  57   _in_worklist(C->comp_arena()),
  58   _next_pidx(0),
  59   _collecting(true),
  60   _verify(false),

 149   GrowableArray<SafePointNode*>  sfn_worklist;
 150   GrowableArray<MergeMemNode*>   mergemem_worklist;
 151   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 152 
 153   { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
 154 
 155   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 156   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
 157   // Initialize worklist
 158   if (C->root() != nullptr) {
 159     ideal_nodes.push(C->root());
 160   }
 161   // Processed ideal nodes are unique on ideal_nodes list
 162   // but several ideal nodes are mapped to the phantom_obj.
 163   // To avoid duplicated entries on the following worklists
 164   // add the phantom_obj only once to them.
 165   ptnodes_worklist.append(phantom_obj);
 166   java_objects_worklist.append(phantom_obj);
 167   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 168     Node* n = ideal_nodes.at(next);
 169     if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
 170         !n->in(MemNode::Address)->is_AddP() &&
 171         _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
 172       // Load/Store at mark word address is at offset 0 so it has no AddP, which confuses EA
 173       Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
 174       _igvn->register_new_node_with_optimizer(addp);
 175       _igvn->replace_input_of(n, MemNode::Address, addp);
 176       ideal_nodes.push(addp);
 177       _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
 178     }
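    // (For illustration: a mark word access such as LoadX(ctl, mem, oop) uses
    //  the oop itself as the address. Rewriting the address to
    //  AddP(oop, oop, #0) gives EA an ordinary field access at offset 0.)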
 179     // Create PointsTo nodes and add them to Connection Graph. Called
 180     // only once per ideal node since ideal_nodes is Unique_Node list.
 181     add_node_to_connection_graph(n, &delayed_worklist);
 182     PointsToNode* ptn = ptnode_adr(n->_idx);
 183     if (ptn != nullptr && ptn != phantom_obj) {
 184       ptnodes_worklist.append(ptn);
 185       if (ptn->is_JavaObject()) {
 186         java_objects_worklist.append(ptn->as_JavaObject());
 187         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 188             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 189           // Only the results of allocations and Java static calls are interesting.
 190           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 191         }
 192       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 193         oop_fields_worklist.append(ptn->as_Field());
 194       }
 195     }
 196     // Collect some interesting nodes for further use.
 197     switch (n->Opcode()) {
 198       case Op_MergeMem:

1242 
1243     // The next two inputs are:
1244     //  (1) A copy of the original pointer to NSR objects.
1245     //  (2) A selector, used to decide if we need to rematerialize an object
1246     //      or use the pointer to a NSR object.
1247     //     See more details of these fields in the declaration of SafePointScalarMergeNode.
1248     sfpt->add_req(nsr_merge_pointer);
1249     sfpt->add_req(selector);
1250 
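    // (For example: if ophi merges three inputs and only input 2 is scalar
    //  replaceable, a deoptimizing frame that came through input 2 uses the
    //  selector to pick the scalarized description to rematerialize, while
    //  the other paths fall back to the NSR merge pointer added above.)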
1251     for (uint i = 1; i < ophi->req(); i++) {
1252       Node* base = ophi->in(i);
1253       JavaObjectNode* ptn = unique_java_object(base);
1254 
1255       // If the base is not scalar replaceable we don't need to register information about
1256       // it at this time.
1257       if (ptn == nullptr || !ptn->scalar_replaceable()) {
1258         continue;
1259       }
1260 
1261       AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1262       Unique_Node_List value_worklist;
1263 #ifdef ASSERT
1264       const Type* res_type = alloc->result_cast()->bottom_type();
1265       if (res_type->is_inlinetypeptr() && !Compile::current()->has_circular_inline_type()) {
1266         PhiNode* phi = ophi->as_Phi();
1267         assert(!phi->can_push_inline_types_down(_igvn), "missed earlier scalarization opportunity");
1268       }
1269 #endif
1270       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
1271       if (sobj == nullptr) {
1272         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1273         return false;
1274       }
1275 
1276       // Now make a pass over the debug information replacing any references
1277       // to the allocated object with "sobj"
1278       Node* ccpp = alloc->result_cast();
1279       sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1280 
1281       // Register the scalarized object as a candidate for reallocation
1282       smerge->add_req(sobj);
1283 
1284       // Scalarize inline types that were added to the safepoint.
1285       // Don't allow linking a constant oop (if available) for flat array elements
1286       // because Deoptimization::reassign_flat_array_elements needs field values.
1287       const bool allow_oop = !merge_t->is_flat();
1288       for (uint j = 0; j < value_worklist.size(); ++j) {
1289         InlineTypeNode* vt = value_worklist.at(j)->as_InlineType();
1290         vt->make_scalar_in_safepoints(_igvn, allow_oop);
1291       }
1292     }
1293 
1294     // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
1295     sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1296 
1297     // The call to 'replace_edges_in_range' above might have removed the
1298     // reference to ophi that we need at _merge_pointer_idx. The line below
1299     // makes sure the reference is maintained.
1300     sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1301     _igvn->_worklist.push(sfpt);
1302   }
1303 
1304   return true;
1305 }
1306 
1307 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *>  &alloc_worklist, GrowableArray<Node *>  &memnode_worklist) {
1308   bool delay = _igvn->delay_transform();
1309   _igvn->set_delay_transform(true);
1310   _igvn->hash_delete(ophi);
1311 

1470   return false;
1471 }
1472 
1473 // Returns true if at least one of the arguments to the call is an object
1474 // that does not escape globally.
1475 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1476   if (call->method() != nullptr) {
1477     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1478     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1479       Node* p = call->in(idx);
1480       if (not_global_escape(p)) {
1481         return true;
1482       }
1483     }
1484   } else {
1485     const char* name = call->as_CallStaticJava()->_name;
1486     assert(name != nullptr, "no name");
1487     // no arg escapes through uncommon traps
1488     if (strcmp(name, "uncommon_trap") != 0) {
1489       // process_call_arguments() assumes that all arguments escape globally
1490       const TypeTuple* d = call->tf()->domain_sig();
1491       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1492         const Type* at = d->field_at(i);
1493         if (at->isa_oopptr() != nullptr) {
1494           return true;
1495         }
1496       }
1497     }
1498   }
1499   return false;
1500 }
1501 
1502 
1503 
1504 // Utility function for nodes that load an object
1505 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1506   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1507   // ThreadLocal has RawPtr type.
1508   const Type* t = _igvn->type(n);
1509   if (t->make_ptr() != nullptr) {
1510     Node* adr = n->in(MemNode::Address);

1544       // first IGVN optimization when escape information is still available.
1545       record_for_optimizer(n);
1546     } else if (n->is_Allocate()) {
1547       add_call_node(n->as_Call());
1548       record_for_optimizer(n);
1549     } else {
1550       if (n->is_CallStaticJava()) {
1551         const char* name = n->as_CallStaticJava()->_name;
1552         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1553           return; // Skip uncommon traps
1554         }
1555       }
1556       // Don't mark as processed since call's arguments have to be processed.
1557       delayed_worklist->push(n);
1558       // Check if a call returns an object.
1559       if ((n->as_Call()->returns_pointer() &&
1560            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1561           (n->is_CallStaticJava() &&
1562            n->as_CallStaticJava()->is_boxing_method())) {
1563         add_call_node(n->as_Call());
1564       } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
1565         bool returns_oop = false;
1566         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
1567           ProjNode* pn = n->fast_out(i)->as_Proj();
1568           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
1569             returns_oop = true;
1570           }
1571         }
1572         if (returns_oop) {
1573           add_call_node(n->as_Call());
1574         }
1575       }
1576     }
1577     return;
1578   }
1579   // Put this check here to process call arguments since some call nodes
1580   // point to phantom_obj.
1581   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1582     return; // Skip predefined nodes.
1583   }
1584   switch (opcode) {
1585     case Op_AddP: {
1586       Node* base = get_addp_base(n);
1587       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1588       // Field nodes are created for all field types. They are used in
1589       // adjust_scalar_replaceable_state() and split_unique_types().
1590       // Note, non-oop fields will have only base edges in Connection
1591       // Graph because such fields are not used for oop loads and stores.
1592       int offset = address_offset(n, igvn);
1593       add_field(n, PointsToNode::NoEscape, offset);
1594       if (ptn_base == nullptr) {
1595         delayed_worklist->push(n); // Process it later.
1596       } else {
1597         n_ptn = ptnode_adr(n_idx);
1598         add_base(n_ptn->as_Field(), ptn_base);
1599       }
1600       break;
1601     }
1602     case Op_CastX2P: {
1603       map_ideal_node(n, phantom_obj);
1604       break;
1605     }
1606     case Op_InlineType:
1607     case Op_CastPP:
1608     case Op_CheckCastPP:
1609     case Op_EncodeP:
1610     case Op_DecodeN:
1611     case Op_EncodePKlass:
1612     case Op_DecodeNKlass: {
1613       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1614       break;
1615     }
1616     case Op_CMoveP: {
1617       add_local_var(n, PointsToNode::NoEscape);
1618       // Do not add edges during the first iteration because some inputs
1619       // may not be defined yet.
1620       delayed_worklist->push(n);
1621       break;
1622     }
1623     case Op_ConP:
1624     case Op_ConN:
1625     case Op_ConNKlass: {
1626       // assume all oop constants globally escape except for null

1658     case Op_PartialSubtypeCheck: {
1659     // Produces Null or notNull and is used only in CmpP so
1660       // phantom_obj could be used.
1661       map_ideal_node(n, phantom_obj); // Result is unknown
1662       break;
1663     }
1664     case Op_Phi: {
1665       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1666       // ThreadLocal has RawPtr type.
1667       const Type* t = n->as_Phi()->type();
1668       if (t->make_ptr() != nullptr) {
1669         add_local_var(n, PointsToNode::NoEscape);
1670         // Do not add edges during the first iteration because some inputs
1671         // may not be defined yet.
1672         delayed_worklist->push(n);
1673       }
1674       break;
1675     }
1676     case Op_Proj: {
1677       // we are only interested in the oop result projection from a call
1678       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
1679           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
1680         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1681                n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1682         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1683       }
1684       break;
1685     }
1686     case Op_Rethrow: // Exception object escapes
1687     case Op_Return: {
1688       if (n->req() > TypeFunc::Parms &&
1689           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1690         // Treat Return value as LocalVar with GlobalEscape escape state.
1691         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1692       }
1693       break;
1694     }
1695     case Op_CompareAndExchangeP:
1696     case Op_CompareAndExchangeN:
1697     case Op_GetAndSetP:
1698     case Op_GetAndSetN: {
1699       add_objload_to_connection_graph(n, delayed_worklist);
1700       // fall-through
1701     }

1763   if (n->is_Call()) {
1764     process_call_arguments(n->as_Call());
1765     return;
1766   }
1767   assert(n->is_Store() || n->is_LoadStore() ||
1768          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1769          "node should be registered already");
1770   int opcode = n->Opcode();
1771   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1772   if (gc_handled) {
1773     return; // Ignore node if already handled by GC.
1774   }
1775   switch (opcode) {
1776     case Op_AddP: {
1777       Node* base = get_addp_base(n);
1778       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1779       assert(ptn_base != nullptr, "field's base should be registered");
1780       add_base(n_ptn->as_Field(), ptn_base);
1781       break;
1782     }
1783     case Op_InlineType:
1784     case Op_CastPP:
1785     case Op_CheckCastPP:
1786     case Op_EncodeP:
1787     case Op_DecodeN:
1788     case Op_EncodePKlass:
1789     case Op_DecodeNKlass: {
1790       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1791       break;
1792     }
1793     case Op_CMoveP: {
1794       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1795         Node* in = n->in(i);
1796         if (in == nullptr) {
1797           continue;  // ignore null
1798         }
1799         Node* uncast_in = in->uncast();
1800         if (uncast_in->is_top() || uncast_in == n) {
1801           continue;  // ignore top or inputs which go back to this node
1802         }
1803         PointsToNode* ptn = ptnode_adr(in->_idx);

1818       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1819       // ThreadLocal has RawPtr type.
1820       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1821       for (uint i = 1; i < n->req(); i++) {
1822         Node* in = n->in(i);
1823         if (in == nullptr) {
1824           continue;  // ignore null
1825         }
1826         Node* uncast_in = in->uncast();
1827         if (uncast_in->is_top() || uncast_in == n) {
1828           continue;  // ignore top or inputs which go back to this node
1829         }
1830         PointsToNode* ptn = ptnode_adr(in->_idx);
1831         assert(ptn != nullptr, "node should be registered");
1832         add_edge(n_ptn, ptn);
1833       }
1834       break;
1835     }
1836     case Op_Proj: {
1837       // we are only interested in the oop result projection from a call
1838       assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1839              n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1840       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1841       break;
1842     }
1843     case Op_Rethrow: // Exception object escapes
1844     case Op_Return: {
1845       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1846              "Unexpected node type");
1847       // Treat Return value as LocalVar with GlobalEscape escape state.
1848       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1849       break;
1850     }
1851     case Op_CompareAndExchangeP:
1852     case Op_CompareAndExchangeN:
1853     case Op_GetAndSetP:
1854     case Op_GetAndSetN: {
1855       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1856       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1857       // fall-through
1858     }
1859     case Op_CompareAndSwapP:

1995     PointsToNode* ptn = ptnode_adr(val->_idx);
1996     assert(ptn != nullptr, "node should be registered");
1997     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1998     // Add edge to object for unsafe access with offset.
1999     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
2000     assert(adr_ptn != nullptr, "node should be registered");
2001     if (adr_ptn->is_Field()) {
2002       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
2003       add_edge(adr_ptn, ptn);
2004     }
2005     return true;
2006   }
2007 #ifdef ASSERT
2008   n->dump(1);
2009   assert(false, "not unsafe");
2010 #endif
2011   return false;
2012 }
2013 
2014 void ConnectionGraph::add_call_node(CallNode* call) {
2015   assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
2016   uint call_idx = call->_idx;
2017   if (call->is_Allocate()) {
2018     Node* k = call->in(AllocateNode::KlassNode);
2019     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
2020     assert(kt != nullptr, "TypeKlassPtr required.");
2021     PointsToNode::EscapeState es = PointsToNode::NoEscape;
2022     bool scalar_replaceable = true;
2023     NOT_PRODUCT(const char* nsr_reason = "");
2024     if (call->is_AllocateArray()) {
2025       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
2026         es = PointsToNode::GlobalEscape;
2027       } else {
2028         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2029         if (length < 0) {
2030           // Not scalar replaceable if the length is not constant.
2031           scalar_replaceable = false;
2032           NOT_PRODUCT(nsr_reason = "has a non-constant length");
2033         } else if (length > EliminateAllocationArraySizeLimit) {
2034           // Not scalar replaceable if the length is too big.
2035           scalar_replaceable = false;

2071     //
2072     //    - all oop arguments are escaping globally;
2073     //
2074     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2075     //
2076     //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
2077     //
2078     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2079     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2080     //      during call is returned;
2081     //    - mapped to ArgEscape LocalVar node pointing to object arguments
2082     //      which are returned and do not escape during the call;
2083     //
2084     //    - oop arguments escaping status is defined by bytecode analysis;
2085     //
2086     // For a static call, we know exactly what method is being called.
2087     // Use bytecode estimator to record whether the call's return value escapes.
2088     ciMethod* meth = call->as_CallJava()->method();
2089     if (meth == nullptr) {
2090       const char* name = call->as_CallStaticJava()->_name;
2091       assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2092              strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0, "TODO: add failed case check");
2093       // Returns a newly allocated non-escaped object.
2094       add_java_object(call, PointsToNode::NoEscape);
2095       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
2096     } else if (meth->is_boxing_method()) {
2097       // Returns boxing object
2098       PointsToNode::EscapeState es;
2099       vmIntrinsics::ID intr = meth->intrinsic_id();
2100       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2101         // It does not escape if object is always allocated.
2102         es = PointsToNode::NoEscape;
2103       } else {
2104         // It escapes globally if object could be loaded from cache.
2105         es = PointsToNode::GlobalEscape;
2106       }
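      // (Boxing caches, e.g. the small-value Integer cache, can hand out
      //  shared objects, which must be treated as GlobalEscape; boxing
      //  methods that always allocate a fresh object can stay NoEscape.)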
2107       add_java_object(call, es);
2108       if (es == PointsToNode::GlobalEscape) {
2109         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2110       }
2111     } else {
2112       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2113       call_analyzer->copy_dependencies(_compile->dependencies());
2114       if (call_analyzer->is_return_allocated()) {
2115         // Returns a newly allocated non-escaped object, simply
2116         // update dependency information.
2117         // Mark it as NoEscape so that objects referenced by
2118       // its fields will be marked as NoEscape at least.
2119         add_java_object(call, PointsToNode::NoEscape);
2120         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2121       } else {
2122         // Determine whether any arguments are returned.
2123         const TypeTuple* d = call->tf()->domain_cc();
2124         bool ret_arg = false;
2125         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2126           if (d->field_at(i)->isa_ptr() != nullptr &&
2127               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2128             ret_arg = true;
2129             break;
2130           }
2131         }
2132         if (ret_arg) {
2133           add_local_var(call, PointsToNode::ArgEscape);
2134         } else {
2135           // Returns unknown object.
2136           map_ideal_node(call, phantom_obj);
2137         }
2138       }
2139     }
2140   } else {
2141     // Another type of call; assume the worst case:
2142     // returned value is unknown and globally escapes.
2143     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");

2151 #ifdef ASSERT
2152     case Op_Allocate:
2153     case Op_AllocateArray:
2154     case Op_Lock:
2155     case Op_Unlock:
2156       assert(false, "should be done already");
2157       break;
2158 #endif
2159     case Op_ArrayCopy:
2160     case Op_CallLeafNoFP:
2161       // Most array copies are ArrayCopy nodes at this point but there
2162       // are still a few direct calls to the copy subroutines (See
2163       // PhaseStringOpts::copy_string())
2164       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2165         call->as_CallLeaf()->is_call_to_arraycopystub();
2166       // fall through
2167     case Op_CallLeafVector:
2168     case Op_CallLeaf: {
2169       // Stub calls: objects do not escape but they are not scalar replaceable.
2170       // Adjust escape state for outgoing arguments.
2171       const TypeTuple * d = call->tf()->domain_sig();
2172       bool src_has_oops = false;
2173       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2174         const Type* at = d->field_at(i);
2175         Node *arg = call->in(i);
2176         if (arg == nullptr) {
2177           continue;
2178         }
2179         const Type *aat = _igvn->type(arg);
2180         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2181           continue;
2182         }
2183         if (arg->is_AddP()) {
2184           //
2185           // The inline_native_clone() case when the arraycopy stub is called
2186           // after the allocation before Initialize and CheckCastPP nodes.
2187           // Or normal arraycopy for object arrays case.
2188           //
2189           // Set AddP's base (Allocate) as not scalar replaceable since
2190           // pointer to the base (with offset) is passed as argument.
2191           //
2192           arg = get_addp_base(arg);
2193         }
2194         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2195         assert(arg_ptn != nullptr, "should be registered");
2196         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2197         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2198           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2199                  aat->isa_ptr() != nullptr, "expecting a Ptr");
2200           bool arg_has_oops = aat->isa_oopptr() &&
2201                               (aat->isa_instptr() ||
2202                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
2203                                (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
2204                                                                aat->isa_aryptr()->is_flat() &&
2205                                                                aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
2206           if (i == TypeFunc::Parms) {
2207             src_has_oops = arg_has_oops;
2208           }
2209           //
2210           // src or dst could be j.l.Object when other is basic type array:
2211           //
2212           //   arraycopy(char[],0,Object*,0,size);
2213           //   arraycopy(Object*,0,char[],0,size);
2214           //
2215           // Don't add edges in such cases.
2216           //
2217           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2218                                        arg_has_oops && (i > TypeFunc::Parms);
2219 #ifdef ASSERT
2220           if (!(is_arraycopy ||
2221                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2222                 (call->as_CallLeaf()->_name != nullptr &&
2223                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2224                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2225                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

2236                   strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2237                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2238                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2239                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2240                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2241                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2242                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2243                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2244                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2245                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2246                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2247                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2248                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2249                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2250                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2251                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2252                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2253                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2254                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2255                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2256                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2257                   strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
2258                   strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
2259                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2260                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2262                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2263                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2264                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2265                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2266                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2267                  ))) {
2268             call->dump();
2269             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2270           }
2271 #endif
2272           // Always process arraycopy's destination object since
2273           // we need to add all possible edges to references in
2274           // source object.
2275           if (arg_esc >= PointsToNode::ArgEscape &&
2276               !arg_is_arraycopy_dest) {
2277             continue;
2278           }

2305           }
2306         }
2307       }
2308       break;
2309     }
2310     case Op_CallStaticJava: {
2311       // For a static call, we know exactly what method is being called.
2312       // Use bytecode estimator to record the call's escape effects
2313 #ifdef ASSERT
2314       const char* name = call->as_CallStaticJava()->_name;
2315       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2316 #endif
2317       ciMethod* meth = call->as_CallJava()->method();
2318       if ((meth != nullptr) && meth->is_boxing_method()) {
2319         break; // Boxing methods do not modify any oops.
2320       }
2321       BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2322       // fall-through if not a Java method or no analyzer information
2323       if (call_analyzer != nullptr) {
2324         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2325         const TypeTuple* d = call->tf()->domain_cc();
2326         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2327           const Type* at = d->field_at(i);
2328           int k = i - TypeFunc::Parms;
2329           Node* arg = call->in(i);
2330           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2331           if (at->isa_ptr() != nullptr &&
2332               call_analyzer->is_arg_returned(k)) {
2333             // The call returns arguments.
2334             if (call_ptn != nullptr) { // Is call's result used?
2335               assert(call_ptn->is_LocalVar(), "node should be registered");
2336               assert(arg_ptn != nullptr, "node should be registered");
2337               add_edge(call_ptn, arg_ptn);
2338             }
2339           }
2340           if (at->isa_oopptr() != nullptr &&
2341               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2342             if (!call_analyzer->is_arg_stack(k)) {
2343               // The argument globally escapes
2344               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2345             } else {

2349                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2350               }
2351             }
2352           }
2353         }
2354         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2355           // The call returns arguments.
2356           assert(call_ptn->edge_count() > 0, "sanity");
2357           if (!call_analyzer->is_return_local()) {
2358             // Also returns an unknown object.
2359             add_edge(call_ptn, phantom_obj);
2360           }
2361         }
2362         break;
2363       }
2364     }
2365     default: {
2366     // Fall through here if this is not a Java method or there is no
2367     // analyzer information, or for some other type of call; assume the
2368     // worst case: all arguments globally escape.
2369       const TypeTuple* d = call->tf()->domain_cc();
2370       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2371         const Type* at = d->field_at(i);
2372         if (at->isa_oopptr() != nullptr) {
2373           Node* arg = call->in(i);
2374           if (arg->is_AddP()) {
2375             arg = get_addp_base(arg);
2376           }
2377           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2378           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2379         }
2380       }
2381     }
2382   }
2383 }
2384 
2385 
2386 // Finish Graph construction.
2387 bool ConnectionGraph::complete_connection_graph(
2388                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2389                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

2762     PointsToNode* base = i.get();
2763     if (base->is_JavaObject()) {
2764       // Skip Allocate's fields which will be processed later.
2765       if (base->ideal_node()->is_Allocate()) {
2766         return 0;
2767       }
2768       assert(base == null_obj, "only null ptr base expected here");
2769     }
2770   }
2771   if (add_edge(field, phantom_obj)) {
2772     // New edge was added
2773     new_edges++;
2774     add_field_uses_to_worklist(field);
2775   }
2776   return new_edges;
2777 }
2778 
2779 // Find fields initializing values for allocations.
2780 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2781   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2782   PointsToNode* init_val = phantom_obj;
2783   Node* alloc = pta->ideal_node();
2784 
2785   // Do nothing for Allocate nodes since their field values are
2786   // "known" unless they are initialized by arraycopy/clone.
2787   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2788     if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2789       // Non-flat inline type arrays are initialized with
2790       // the default value instead of null. Handle them here.
2791       init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
2792       assert(init_val != nullptr, "default value should be registered");
2793     } else {
2794       return 0;
2795     }
2796   }
2797   // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
2798   assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
2799 #ifdef ASSERT
2800   if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
2801     const char* name = alloc->as_CallStaticJava()->_name;
2802     assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2803            strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0, "sanity");
2804   }
2805 #endif
2807   int new_edges = 0;
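  // (Effect of the loop below: every oop field of this allocation gets an
  //  edge to init_val, that is, to phantom_obj or to the array's default
  //  value, since the analysis cannot know the fields' real contents.)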
2808   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2809     PointsToNode* field = i.get();
2810     if (field->is_Field() && field->as_Field()->is_oop()) {
2811       if (add_edge(field, init_val)) {
2812         // New edge was added
2813         new_edges++;
2814         add_field_uses_to_worklist(field->as_Field());
2815       }
2816     }
2817   }
2818   return new_edges;
2819 }
2820 
2821 // Find fields initializing values for allocations.
2822 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2823   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2824   Node* alloc = pta->ideal_node();
2825   // Do nothing for Call nodes since their field values are unknown.
2826   if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2827     return 0;
2828   }
2829   InitializeNode* ini = alloc->as_Allocate()->initialization();
2830   bool visited_bottom_offset = false;
2831   GrowableArray<int> offsets_worklist;
2832   int new_edges = 0;
2833 
2834   // Check if an oop field's initializing value is recorded and add
2835   // a corresponding null if the field's value is not recorded.
2836   // Connection Graph does not record a default initialization by null
2837   // captured by Initialize node.
2838   //
2839   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2840     PointsToNode* field = i.get(); // Field (AddP)
2841     if (!field->is_Field() || !field->as_Field()->is_oop()) {
2842       continue; // Not oop field
2843     }
2844     int offset = field->as_Field()->offset();
2845     if (offset == Type::OffsetBot) {
2846       if (!visited_bottom_offset) {

2892               } else {
2893                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2894                   tty->print_cr("----------init store has invalid value -----");
2895                   store->dump();
2896                   val->dump();
2897                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2898                 }
2899                 for (EdgeIterator j(val); j.has_next(); j.next()) {
2900                   PointsToNode* obj = j.get();
2901                   if (obj->is_JavaObject()) {
2902                     if (!field->points_to(obj->as_JavaObject())) {
2903                       missed_obj = obj;
2904                       break;
2905                     }
2906                   }
2907                 }
2908               }
2909               if (missed_obj != nullptr) {
2910                 tty->print_cr("----------field---------------------------------");
2911                 field->dump();
2912                 tty->print_cr("----------missed reference to object------------");
2913                 missed_obj->dump();
2914                 tty->print_cr("----------object referenced by init store-------");
2915                 store->dump();
2916                 val->dump();
2917                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2918               }
2919             }
2920 #endif
2921           } else {
2922             // There could be initializing stores which follow allocation.
2923             // For example, a volatile field store is not collected
2924             // by Initialize node.
2925             //
2926             // Need to check for dependent loads to separate such stores from
2927             // stores which follow loads. For now, add initial value null so
2928             // that compare pointers optimization works correctly.
2929           }
2930         }
2931         if (value == nullptr) {
2932           // A field's initializing value was not recorded. Add null.
2933           if (add_edge(field, null_obj)) {
2934             // New edge was added

3211         assert(field->edge_count() > 0, "sanity");
3212       }
3213     }
3214   }
3215 }
3216 #endif
3217 
3218 // Optimize ideal graph.
3219 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3220                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3221   Compile* C = _compile;
3222   PhaseIterGVN* igvn = _igvn;
3223   if (EliminateLocks) {
3224     // Mark locks before changing ideal graph.
3225     int cnt = C->macro_count();
3226     for (int i = 0; i < cnt; i++) {
3227       Node *n = C->macro_node(i);
3228       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3229         AbstractLockNode* alock = n->as_AbstractLock();
3230         if (!alock->is_non_esc_obj()) {
3231           const Type* obj_type = igvn->type(alock->obj_node());
3232           if (can_eliminate_lock(alock) && !obj_type->is_inlinetypeptr()) {
3233             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3234             // The lock could be marked eliminated by lock coarsening
3235             // code during first IGVN before EA. Replace coarsened flag
3236             // to eliminate all associated locks/unlocks.
3237 #ifdef ASSERT
3238             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3239 #endif
3240             alock->set_non_esc_obj();
3241           }
3242         }
3243       }
3244     }
3245   }
3246 
3247   if (OptimizePtrCompare) {
3248     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3249       Node *n = ptr_cmp_worklist.at(i);
3250       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3251       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3252       if (tcmp->singleton()) {

3254 #ifndef PRODUCT
3255         if (PrintOptimizePtrCompare) {
3256           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3257           if (Verbose) {
3258             n->dump(1);
3259           }
3260         }
3261 #endif
3262         igvn->replace_node(n, cmp);
3263       }
3264     }
3265   }
3266 
3267   // For MemBarStoreStore nodes added in library_call.cpp, check
3268   // escape status of associated AllocateNode and optimize out
3269   // MemBarStoreStore node if the allocated object never escapes.
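  // (Rationale: the StoreStore barrier only orders an object's initializing
  //  stores against publishing the object to another thread; if the object
  //  never escapes, no other thread can observe it, so the barrier can be
  //  weakened or removed.)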
3270   for (int i = 0; i < storestore_worklist.length(); i++) {
3271     Node* storestore = storestore_worklist.at(i);
3272     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3273     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3274       if (alloc->in(AllocateNode::InlineType) != nullptr) {
3275         // Non-escaping inline type buffer allocations don't require a membar
3276         storestore->as_MemBar()->remove(_igvn);
3277       } else {
3278         MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3279         mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3280         mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3281         igvn->register_new_node_with_optimizer(mb);
3282         igvn->replace_node(storestore, mb);
3283       }
3284     }
3285   }
3286 }
3287 
3288 // Optimize object compares.
3289 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3290   assert(OptimizePtrCompare, "sanity");
3291   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3292   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3293   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0, 1]
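  // (Simplified outcome: EQ if both sides must be the same object; NE if
  //  they can never be the same, e.g. two distinct non-escaping allocations;
  //  otherwise UNKNOWN.)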
3294 
3295   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3296   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3297   JavaObjectNode* jobj1 = unique_java_object(left);
3298   JavaObjectNode* jobj2 = unique_java_object(right);
3299 
3300   // The use of this method during allocation merge reduction may cause 'left'
3301   // or 'right' to be something (e.g., a Phi) that isn't in the connection graph
3302   // or that doesn't reference a unique java object.
3303   if (ptn1 == nullptr || ptn2 == nullptr ||

3425   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3426   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3427   PointsToNode* ptadr = _nodes.at(n->_idx);
3428   if (ptadr != nullptr) {
3429     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3430     return;
3431   }
3432   Compile* C = _compile;
3433   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3434   map_ideal_node(n, ptadr);
3435   // Add edge from arraycopy node to source object.
3436   (void)add_edge(ptadr, src);
3437   src->set_arraycopy_src();
3438   // Add edge from destination object to arraycopy node.
3439   (void)add_edge(dst, ptadr);
3440   dst->set_arraycopy_dst();
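  // (Resulting shape, roughly: dst -> ArrayCopy -> src, so the destination's
  //  fields may come to point at anything the source's fields point to.)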
3441 }
3442 
3443 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3444   const Type* adr_type = n->as_AddP()->bottom_type();
3445   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
3446   BasicType bt = T_INT;
3447   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
3448     // Check only oop fields.
3449     if (!adr_type->isa_aryptr() ||
3450         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3451         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3452       // OffsetBot is used to reference array's element. Ignore first AddP.
3453       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3454         bt = T_OBJECT;
3455       }
3456     }
3457   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3458     if (adr_type->isa_instptr()) {
3459       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
3460       if (field != nullptr) {
3461         bt = field->layout_type();
3462       } else {
3463         // Check for unsafe oop field access
3464         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3465             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3466             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3467             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3468           bt = T_OBJECT;
3469           (*unsafe) = true;
3470         }
3471       }
3472     } else if (adr_type->isa_aryptr()) {
3473       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3474         // Ignore array length load.
3475       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3476         // Ignore first AddP.
3477       } else {
3478         const Type* elemtype = adr_type->is_aryptr()->elem();
3479         if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
3480           ciInlineKlass* vk = elemtype->inline_klass();
3481           field_offset += vk->first_field_offset();
3482           bt = vk->get_field_by_offset(field_offset, false)->layout_type();
3483         } else {
3484           bt = elemtype->array_element_basic_type();
3485         }
3486       }
3487     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3488       // Allocation initialization, ThreadLocal field access, unsafe access
3489       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3490           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3491           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3492           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3493         bt = T_OBJECT;
3494       }
3495     }
3496   }
3497   // Note: T_NARROWOOP is not classed as a real reference type
3498   return (is_reference_type(bt) || bt == T_NARROWOOP);
3499 }
3500 
3501 // Returns unique pointed java object or null.
3502 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3503   // If the node was created after the escape computation we can't answer.
3504   uint idx = n->_idx;
3505   if (idx >= nodes_size()) {

3662             return true;
3663           }
3664         }
3665       }
3666     }
3667   }
3668   return false;
3669 }
3670 
3671 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3672   const Type *adr_type = phase->type(adr);
3673   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
3674     // We are computing a raw address for a store captured by an Initialize;
3675     // compute an appropriate address type. AddP cases #3 and #5 (see below).
3676     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3677     assert(offs != Type::OffsetBot ||
3678            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3679            "offset must be a constant or it is initialization of array");
3680     return offs;
3681   }
3682   return adr_type->is_ptr()->flat_offset();
3683 }
3684 
3685 Node* ConnectionGraph::get_addp_base(Node *addp) {
3686   assert(addp->is_AddP(), "must be AddP");
3687   //
3688   // AddP cases for Base and Address inputs:
3689   // case #1. Direct object's field reference:
3690   //     Allocate
3691   //       |
3692   //     Proj #5 ( oop result )
3693   //       |
3694   //     CheckCastPP (cast to instance type)
3695   //      | |
3696   //     AddP  ( base == address )
3697   //
3698   // case #2. Indirect object's field reference:
3699   //      Phi
3700   //       |
3701   //     CastPP (cast to instance type)
3702   //      | |

3816   }
3817   return nullptr;
3818 }
3819 
3820 //
3821 // Adjust the type and inputs of an AddP which computes the
3822 // address of a field of an instance
3823 //
3824 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3825   PhaseGVN* igvn = _igvn;
3826   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3827   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3828   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3829   if (t == nullptr) {
3830     // We are computing a raw address for a store captured by an Initialize;
3831     // compute an appropriate address type (cases #3 and #5).
3832     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3833     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3834     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3835     assert(offs != Type::OffsetBot, "offset must be a constant");
3836     if (base_t->isa_aryptr() != nullptr) {
3837       // In the case of a flat inline type array, each field has its
3838       // own slice so we need to extract the field being accessed from
3839       // the address computation
3840       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
3841     } else {
3842       t = base_t->add_offset(offs)->is_oopptr();
3843     }
3844   }
3845   int inst_id = base_t->instance_id();
3846   assert(!t->is_known_instance() || t->instance_id() == inst_id,
3847                              "old type must be non-instance or match new type");
3848 
3849   // The type 't' could be a subclass of 'base_t'.
3850   // As a result t->offset() could be larger than base_t's size, which would
3851   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3852   // constructor verifies the correctness of the offset.
3853   //
3854   // This can happen on a subclass's branch (from type profiling based
3855   // inlining) that was not eliminated during parsing because the exactness
3856   // of the allocation type was not propagated to the subclass type check.
3857   //
3858   // Or the type 't' might not be related to 'base_t' at all.
3859   // This can happen when the CHA type differs from the MDO type on a dead path
3860   // (for example, from an instanceof check) that is not collapsed during parsing.
3861   //
3862   // Do nothing for such an AddP node and don't process its users since
3863   // this code branch will go away.
3864   //
3865   if (!t->is_known_instance() &&
3866       !base_t->maybe_java_subtype_of(t)) {
3867      return false; // bail out
3868   }
3869   const TypePtr* tinst = base_t->add_offset(t->offset());
3870   if (tinst->isa_aryptr() && t->isa_aryptr()) {
3871     // In the case of a flat inline type array, each field has its
3872     // own slice so we need to keep track of the field being accessed.
3873     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
3874     // Keep array properties (not flat/null-free)
3875     tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
3876     if (tinst == nullptr) {
3877       return false; // Skip dead path with inconsistent properties
3878     }
3879   }
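  // For example (illustrative sketch): in a flat inline type array, accesses
  // to fields 'x' and 'y' of the same element produce aryptr types with
  // different field offsets and therefore different alias indices, which is
  // why the field offset is preserved above.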
3880 
3881   // Do NOT remove the next line: ensure a new alias index is allocated
3882   // for the instance type. Note: C++ will not remove it since the call
3883   // has side effects.
3884   int alias_idx = _compile->get_alias_index(tinst);
3885   igvn->set_type(addp, tinst);
3886   // record the allocation in the node map
3887   set_map(addp, get_map(base->_idx));
3888   // Set addp's Base and Address to 'base'.
3889   Node *abase = addp->in(AddPNode::Base);
3890   Node *adr   = addp->in(AddPNode::Address);
3891   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3892       adr->in(0)->_idx == (uint)inst_id) {
3893     // Skip AddP cases #3 and #5.
3894   } else {
3895     assert(!abase->is_top(), "sanity"); // AddP case #3
3896     if (abase != base) {
3897       igvn->hash_delete(addp);
3898       addp->set_req(AddPNode::Base, base);
3899       if (abase == adr) {
3900         addp->set_req(AddPNode::Address, base);

4566         ptnode_adr(n->_idx)->dump();
4567         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4568 #endif
4569         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4570         return;
4571       } else {
4572         Node *val = get_map(jobj->idx());   // CheckCastPP node
4573         TypeNode *tn = n->as_Type();
4574         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4575         assert(tinst != nullptr && tinst->is_known_instance() &&
4576                tinst->instance_id() == jobj->idx() , "instance type expected.");
4577 
4578         const Type *tn_type = igvn->type(tn);
4579         const TypeOopPtr *tn_t;
4580         if (tn_type->isa_narrowoop()) {
4581           tn_t = tn_type->make_ptr()->isa_oopptr();
4582         } else {
4583           tn_t = tn_type->isa_oopptr();
4584         }
4585         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
4586           if (tn_t->isa_aryptr()) {
4587             // Keep array properties (not flat/null-free)
4588             tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
4589             if (tinst == nullptr) {
4590               continue; // Skip dead path with inconsistent properties
4591             }
4592           }
4593           if (tn_type->isa_narrowoop()) {
4594             tn_type = tinst->make_narrowoop();
4595           } else {
4596             tn_type = tinst;
4597           }
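          // Retype the node under IGVN: unhash it first, install the new type
          // both in the IGVN type table and on the node itself, then rehash it
          // so it is keyed by the new type.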
4598           igvn->hash_delete(tn);
4599           igvn->set_type(tn, tn_type);
4600           tn->set_type(tn_type);
4601           igvn->hash_insert(tn);
4602           record_for_optimizer(n);
4603         } else {
4604           assert(tn_type == TypePtr::NULL_PTR ||
4605                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4606                  "unexpected type");
4607           continue; // Skip dead path with different type
4608         }
4609       }
4610     } else {
4611       debug_only(n->dump();)
4612       assert(false, "EA: unexpected node");
4613       continue;
4614     }
4615     // push allocation's users on appropriate worklist
4616     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4617       Node *use = n->fast_out(i);
4618       if (use->is_Mem() && use->in(MemNode::Address) == n) {
4619         // Load/store to instance's field
4620         memnode_worklist.append_if_missing(use);
4621       } else if (use->is_MemBar()) {
4622         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4623           memnode_worklist.append_if_missing(use);
4624         }
4625       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4626         Node* addp2 = find_second_addp(use, n);
4627         if (addp2 != nullptr) {
4628           alloc_worklist.append_if_missing(addp2);
4629         }
4630         alloc_worklist.append_if_missing(use);
4631       } else if (use->is_Phi() ||
4632                  use->is_CheckCastPP() ||
4633                  use->is_EncodeNarrowPtr() ||
4634                  use->is_DecodeNarrowPtr() ||
4635                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4636         alloc_worklist.append_if_missing(use);
4637 #ifdef ASSERT
4638       } else if (use->is_Mem()) {
4639         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4640       } else if (use->is_MergeMem()) {
4641         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4642       } else if (use->is_SafePoint()) {
4643         // Look for MergeMem nodes for calls which reference a unique allocation
4644         // (through CheckCastPP nodes) even for debug info.
4645         Node* m = use->in(TypeFunc::Memory);
4646         if (m->is_MergeMem()) {
4647           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4648         }
4649       } else if (use->Opcode() == Op_EncodeISOArray) {
4650         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4651           // EncodeISOArray overwrites destination array
4652           memnode_worklist.append_if_missing(use);
4653         }
4654       } else if (use->Opcode() == Op_Return) {
4655         // Allocation is referenced by a field of the returned inline type
4656         assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
4657       } else {
4658         uint op = use->Opcode();
4659         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4660             (use->in(MemNode::Memory) == n)) {
4661           // These overwrite the memory edge corresponding to the destination array.
4662           memnode_worklist.append_if_missing(use);
4663         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4664               op == Op_CastP2X ||
4665               op == Op_FastLock || op == Op_AryEq ||
4666               op == Op_StrComp || op == Op_CountPositives ||
4667               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4668               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4669               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4670               op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
4671               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4672           n->dump();
4673           use->dump();
4674           assert(false, "EA: missing allocation reference path");
4675         }
4676 #endif
4677       }
4678     }
4679 
4680   }
4681 
4682 #ifdef ASSERT
4683   if (VerifyReduceAllocationMerges) {
4684     for (uint i = 0; i < reducible_merges.size(); i++) {
4685       Node* phi = reducible_merges.at(i);
4686 
4687       if (!reduced_merges.member(phi)) {
4688         phi->dump(2);
4689         phi->dump(-2);
4690         assert(false, "This reducible merge wasn't reduced.");

4750     if (n->is_Phi() || n->is_ClearArray()) {
4751       // we don't need to do anything, but the users must be pushed
4752     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4753       // we don't need to do anything, but the users must be pushed
4754       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4755       if (n == nullptr) {
4756         continue;
4757       }
4758     } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
4759                strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4760       n = n->as_CallLeaf()->proj_out(TypeFunc::Memory); // must precede the generic CallLeaf case
4761     } else if (n->is_CallLeaf()) {
4762       // Runtime calls with a narrow memory input (no MergeMem node):
4763       // get the memory projection
4764       n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4765       if (n == nullptr) {
4766         continue;
4767       }
4768     } else if (n->Opcode() == Op_StrCompressedCopy ||
4769                n->Opcode() == Op_EncodeISOArray) {
4770       // get the memory projection
4771       n = n->find_out_with(Op_SCMemProj);
4772       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4773     } else {
4774       assert(n->is_Mem(), "memory node required.");
4775       Node *addr = n->in(MemNode::Address);
4776       const Type *addr_t = igvn->type(addr);
4777       if (addr_t == Type::TOP) {
4778         continue;
4779       }
4780       assert(addr_t->isa_ptr() != nullptr, "pointer type required.");
4781       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4782       assert((uint)alias_idx < new_index_end, "wrong alias index");
4783       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4784       if (_compile->failing()) {
4785         return;
4786       }
4787       if (mem != n->in(MemNode::Memory)) {
4788         // We delay the memory edge update since we need the old one in
4789         // the MergeMem code below when the instances' memory slices are separated.
4790         set_map(n, mem);
4791       }
4792       if (n->is_Load()) {
4793         continue;  // don't push users
4794       } else if (n->is_LoadStore()) {
4795         // get the memory projection
4796         n = n->find_out_with(Op_SCMemProj);
4797         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4798       }
4799     }
4800     // push user on appropriate worklist
4801     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4802       Node *use = n->fast_out(i);
4803       if (use->is_Phi() || use->is_ClearArray()) {
4804         memnode_worklist.append_if_missing(use);
4805       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4806         memnode_worklist.append_if_missing(use);
4807       } else if (use->is_MemBar() || use->is_CallLeaf()) {
4808         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4809           memnode_worklist.append_if_missing(use);
4810         }
4811 #ifdef ASSERT
4812       } else if (use->is_Mem()) {
4813         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4814       } else if (use->is_MergeMem()) {
4815         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4816       } else if (use->Opcode() == Op_EncodeISOArray) {
4817         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4818           // EncodeISOArray overwrites destination array
4819           memnode_worklist.append_if_missing(use);
4820         }
4821       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
4822                  strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4823         // store_unknown_inline overwrites the destination array
4824         memnode_worklist.append_if_missing(use);
4825       } else {
4826         uint op = use->Opcode();
4827         if ((use->in(MemNode::Memory) == n) &&
4828             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4829           // These overwrite the memory edge corresponding to the destination array.
4830           memnode_worklist.append_if_missing(use);
4831         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4832               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4833               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4834               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
4835           n->dump();
4836           use->dump();
4837           assert(false, "EA: missing memory path");
4838         }
4839 #endif
4840       }
4841     }
4842   }
4843 
4844   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4845   //            Walk each memory slice moving the first node encountered of each
4846   //            instance type to the input corresponding to its alias index.
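  //            For example (illustrative sketch): a store to a known instance
  //            found on the wide bottom-memory slice is moved to the MergeMem
  //            input at that instance field's alias index, giving the instance
  //            its own memory slice.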
4847   uint length = mergemem_worklist.length();
4848   for( uint next = 0; next < length; ++next ) {
4849     MergeMemNode* nmm = mergemem_worklist.at(next);
4850     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4851     // Note: we don't want to use MergeMemStream here because we only want to
4852     // scan inputs which exist at the start, not ones we add during processing.
4853     // Note 2: MergeMem may already contain instance memory slices added
4854     // during the find_inst_mem() call when memory nodes were processed above.

4915     if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
4916       if (_compile->do_reduce_allocation_merges()) {
4917         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4918       } else if (_invocation > 0) {
4919         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4920       } else {
4921         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4922       }
4923       return;
4924     }
4925 
4926     igvn->hash_insert(nmm);
4927     record_for_optimizer(nmm);
4928   }
4929 
4930   //  Phase 4:  Update the inputs of non-instance memory Phis and
4931   //            the Memory input of memnodes
4932   // First update the inputs of any non-instance Phi's from
4933   // which we split out an instance Phi.  Note we don't have
4934   // to recursively process Phi's encountered on the input memory
4935   // chains as is done in split_memory_phi() since they will
4936   // also be processed here.
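  // For example (illustrative sketch): if an instance Phi was split out of a
  // wide memory Phi, each input of the original Phi is re-resolved with
  // find_inst_mem() at the Phi's own (non-instance) alias index, so the wide
  // Phi no longer merges the split-out instance's memory state.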
4937   for (int j = 0; j < orig_phis.length(); j++) {
4938     PhiNode *phi = orig_phis.at(j);
4939     int alias_idx = _compile->get_alias_index(phi->adr_type());
4940     igvn->hash_delete(phi);
4941     for (uint i = 1; i < phi->req(); i++) {
4942       Node *mem = phi->in(i);
4943       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4944       if (_compile->failing()) {
4945         return;
4946       }
4947       if (mem != new_mem) {
4948         phi->set_req(i, new_mem);
4949       }
4950     }
4951     igvn->hash_insert(phi);
4952     record_for_optimizer(phi);
4953   }
4954 
4955   // Update the memory inputs of MemNodes with the value we computed
< prev index next >