
src/hotspot/share/opto/escape.cpp


  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/bcEscapeAnalyzer.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "gc/shared/barrierSet.hpp"
  28 #include "gc/shared/c2/barrierSetC2.hpp"
  29 #include "libadt/vectset.hpp"
  30 #include "memory/allocation.hpp"

  31 #include "memory/resourceArea.hpp"
  32 #include "opto/arraycopynode.hpp"
  33 #include "opto/c2compiler.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/escape.hpp"

  39 #include "opto/locknode.hpp"
  40 #include "opto/macro.hpp"
  41 #include "opto/movenode.hpp"
  42 #include "opto/narrowptrnode.hpp"
  43 #include "opto/phaseX.hpp"
  44 #include "opto/rootnode.hpp"
  45 #include "utilities/macros.hpp"
  46 
  47 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  48   // If ReduceAllocationMerges is enabled we might call split_through_phi during
  49   // split_unique_types and that will create additional nodes that need to be
  50   // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  51   // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  52   // the array will be reallocated.
  53   _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
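  // Worked example (editor's illustration, not in the original source): with
  // C->unique() == 10000 and ReduceAllocationMerges enabled, the initial
  // capacity is 10000 * 1.10 == 11000 slots, i.e. ~10% headroom for the nodes
  // that split_through_phi creates later.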
  54   _in_worklist(C->comp_arena()),
  55   _next_pidx(0),
  56   _collecting(true),
  57   _verify(false),
  58   _compile(C),

 150   GrowableArray<SafePointNode*>  sfn_worklist;
 151   GrowableArray<MergeMemNode*>   mergemem_worklist;
 152   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 153 
 154   { Compile::TracePhase tp(Phase::_t_connectionGraph);
 155 
 156   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 157   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
 158   // Initialize worklist
 159   if (C->root() != nullptr) {
 160     ideal_nodes.push(C->root());
 161   }
 162   // Processed ideal nodes are unique on ideal_nodes list
 163   // but several ideal nodes are mapped to the phantom_obj.
 164   // To avoid duplicated entries on the following worklists
 165   // add the phantom_obj only once to them.
 166   ptnodes_worklist.append(phantom_obj);
 167   java_objects_worklist.append(phantom_obj);
 168   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 169     Node* n = ideal_nodes.at(next);
 170     // Create PointsTo nodes and add them to Connection Graph. Called
 171     // only once per ideal node since ideal_nodes is Unique_Node list.
 172     add_node_to_connection_graph(n, &delayed_worklist);
 173     PointsToNode* ptn = ptnode_adr(n->_idx);
 174     if (ptn != nullptr && ptn != phantom_obj) {
 175       ptnodes_worklist.append(ptn);
 176       if (ptn->is_JavaObject()) {
 177         java_objects_worklist.append(ptn->as_JavaObject());
 178         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 179             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 180           // Only the results of allocations and Java static calls are interesting.
 181           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 182         }
 183       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 184         oop_fields_worklist.append(ptn->as_Field());
 185       }
 186     }
 187     // Collect some interesting nodes for further use.
 188     switch (n->Opcode()) {
 189       case Op_MergeMem:

 407     // scalar replaceable objects.
 408     split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
 409     if (C->failing()) {
 410       NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
 411       return false;
 412     }
 413 
 414 #ifdef ASSERT
 415   } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
 416     tty->print("=== No allocations eliminated for ");
 417     C->method()->print_short_name();
 418     if (!EliminateAllocations) {
 419       tty->print(" since EliminateAllocations is off ===");
 420     } else if (!has_scalar_replaceable_candidates) {
 421       tty->print(" since there are no scalar replaceable candidates ===");
 422     }
 423     tty->cr();
 424 #endif
 425   }
 426 
 427   _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES, 4);
 428 
 429   // 6. Reduce allocation merges used as debug information. This is done after
 430   // split_unique_types because the methods used to create SafePointScalarObject
 431   // need to traverse the memory graph to find values for object fields. We also
 432   // set to null the scalarized inputs of reducible Phis so that the Allocates
 433   // that they point to can later be scalar replaced.
 434   bool delay = _igvn->delay_transform();
 435   _igvn->set_delay_transform(true);
 436   for (uint i = 0; i < reducible_merges.size(); i++) {
 437     Node* n = reducible_merges.at(i);
 438     if (n->outcnt() > 0) {
 439       if (!reduce_phi_on_safepoints(n->as_Phi())) {
 440         NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
 441         C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
 442         return false;
 443       }
 444 
 445       // Now we set the scalar replaceable inputs of ophi to null, removing
 446       // the last piece that would prevent it from being scalar replaced.
 447       reset_scalar_replaceable_entries(n->as_Phi());
 448     }
 449   }
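// Illustrative sketch (editor's addition, not in the original source) of the
// allocation-merge shape handled by the loop above:
//
//     Allocate#1   Allocate#2
//        |            |
//    CheckCastPP  CheckCastPP
//         \          /
//           Phi (n)            <- reducible merge, with remaining debug uses
//             |
//         SafePoint
//
// reduce_phi_on_safepoints() describes each scalar-replaceable input as debug
// info on the safepoints; reset_scalar_replaceable_entries() then nulls those
// Phi inputs so the Allocates themselves can be scalar replaced.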

1250 
1251     // The next two inputs are:
1252     //  (1) A copy of the original pointer to NSR objects.
1253     //  (2) A selector, used to decide if we need to rematerialize an object
1254     //      or use the pointer to a NSR object.
1255     // See the declaration of SafePointScalarMergeNode for more details of these fields.
1256     sfpt->add_req(nsr_merge_pointer);
1257     sfpt->add_req(selector);
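    // Editor's illustration of the resulting debug-info layout (a sketch, not
    // normative):
    //
    //   sfpt:   ... debug info ... | nsr_merge_pointer | selector |
    //   smerge: SafePointScalarMerge( sobj#1, sobj#2, ... )   // filled below
    //
    // At deoptimization the selector value decides whether to rematerialize
    // one of the sobj candidates or to reuse the not-scalar-replaced pointer.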
1258 
1259     for (uint i = 1; i < ophi->req(); i++) {
1260       Node* base = ophi->in(i);
1261       JavaObjectNode* ptn = unique_java_object(base);
1262 
1263       // If the base is not scalar replaceable we don't need to register information about
1264       // it at this time.
1265       if (ptn == nullptr || !ptn->scalar_replaceable()) {
1266         continue;
1267       }
1268 
1269       AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1270       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);
1271       if (sobj == nullptr) {

1272         return false;
1273       }
1274 
1275       // Now make a pass over the debug information replacing any references
1276       // to the allocated object with "sobj"
1277       Node* ccpp = alloc->result_cast();
1278       sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1279 
1280       // Register the scalarized object as a candidate for reallocation
1281       smerge->add_req(sobj);
1282     }
1283 
1284     // Replace debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge".
1285     sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1286 
1287     // The call to 'replace_edges_in_range' above might have removed the
1288     // reference to ophi that we need at _merge_pointer_idx. The line below makes
1289     // sure the reference is maintained.
1290     sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1291     _igvn->_worklist.push(sfpt);
1292   }
1293 
1294   return true;
1295 }
1296 
1297 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_worklist) {
1298   bool delay = _igvn->delay_transform();
1299   _igvn->set_delay_transform(true);
1300   _igvn->hash_delete(ophi);
1301 

1464   return false;
1465 }
1466 
1467 // Returns true if at least one of the arguments to the call is an object
1468 // that does not escape globally.
1469 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1470   if (call->method() != nullptr) {
1471     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1472     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1473       Node* p = call->in(idx);
1474       if (not_global_escape(p)) {
1475         return true;
1476       }
1477     }
1478   } else {
1479     const char* name = call->as_CallStaticJava()->_name;
1480     assert(name != nullptr, "no name");
1481     // no arg escapes through uncommon traps
1482     if (strcmp(name, "uncommon_trap") != 0) {
1483       // process_call_arguments() assumes that all arguments escape globally
1484       const TypeTuple* d = call->tf()->domain();
1485       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1486         const Type* at = d->field_at(i);
1487         if (at->isa_oopptr() != nullptr) {
1488           return true;
1489         }
1490       }
1491     }
1492   }
1493   return false;
1494 }
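// Usage sketch (editor's addition; assumes CallJavaNode::set_arg_escape as in
// mainline HotSpot, where the safepoint post-processing in compute_escape
// annotates Java calls that pass non-globally-escaping arguments):
//
//   if (sfn->is_CallJava()) {
//     CallJavaNode* call = sfn->as_CallJava();
//     call->set_arg_escape(has_arg_escape(call));
//   }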
1495 
1496 
1497 
1498 // Utility function for nodes that load an object
1499 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1500   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1501   // ThreadLocal has RawPtr type.
1502   const Type* t = _igvn->type(n);
1503   if (t->make_ptr() != nullptr) {
1504     Node* adr = n->in(MemNode::Address);

1538       // first IGVN optimization when escape information is still available.
1539       record_for_optimizer(n);
1540     } else if (n->is_Allocate()) {
1541       add_call_node(n->as_Call());
1542       record_for_optimizer(n);
1543     } else {
1544       if (n->is_CallStaticJava()) {
1545         const char* name = n->as_CallStaticJava()->_name;
1546         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1547           return; // Skip uncommon traps
1548         }
1549       }
1550       // Don't mark as processed since call's arguments have to be processed.
1551       delayed_worklist->push(n);
1552       // Check if a call returns an object.
1553       if ((n->as_Call()->returns_pointer() &&
1554            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1555           (n->is_CallStaticJava() &&
1556            n->as_CallStaticJava()->is_boxing_method())) {
1557         add_call_node(n->as_Call());
1558       }
1559     }
1560     return;
1561   }
1562   // Put this check here to process call arguments since some call nodes
1563   // point to phantom_obj.
1564   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1565     return; // Skip predefined nodes.
1566   }
1567   switch (opcode) {
1568     case Op_AddP: {
1569       Node* base = get_addp_base(n);
1570       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1571       // Field nodes are created for all field types. They are used in
1572       // adjust_scalar_replaceable_state() and split_unique_types().
1573       // Note, non-oop fields will have only base edges in Connection
1574       // Graph because such fields are not used for oop loads and stores.
1575       int offset = address_offset(n, igvn);
1576       add_field(n, PointsToNode::NoEscape, offset);
1577       if (ptn_base == nullptr) {
1578         delayed_worklist->push(n); // Process it later.
1579       } else {
1580         n_ptn = ptnode_adr(n_idx);
1581         add_base(n_ptn->as_Field(), ptn_base);
1582       }
1583       break;
1584     }
1585     case Op_CastX2P: {

1586       map_ideal_node(n, phantom_obj);
1587       break;
1588     }

1589     case Op_CastPP:
1590     case Op_CheckCastPP:
1591     case Op_EncodeP:
1592     case Op_DecodeN:
1593     case Op_EncodePKlass:
1594     case Op_DecodeNKlass: {
1595       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1596       break;
1597     }
1598     case Op_CMoveP: {
1599       add_local_var(n, PointsToNode::NoEscape);
1600       // Do not add edges during the first iteration because some could
1601       // not be defined yet.
1602       delayed_worklist->push(n);
1603       break;
1604     }
1605     case Op_ConP:
1606     case Op_ConN:
1607     case Op_ConNKlass: {
1608       // assume all oop constants globally escape except for null

1638       break;
1639     }
1640     case Op_PartialSubtypeCheck: {
1641       // Produces Null or notNull and is used only in CmpP so
1642       // phantom_obj could be used.
1643       map_ideal_node(n, phantom_obj); // Result is unknown
1644       break;
1645     }
1646     case Op_Phi: {
1647       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1648       // ThreadLocal has RawPtr type.
1649       const Type* t = n->as_Phi()->type();
1650       if (t->make_ptr() != nullptr) {
1651         add_local_var(n, PointsToNode::NoEscape);
1652         // Do not add edges during the first iteration because some could
1653         // not be defined yet.
1654         delayed_worklist->push(n);
1655       }
1656       break;
1657     }

1658     case Op_Proj: {
1659       // we are only interested in the oop result projection from a call
1660       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1661           n->in(0)->as_Call()->returns_pointer()) {
1662         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1663       }
1664       break;
1665     }
1666     case Op_Rethrow: // Exception object escapes
1667     case Op_Return: {
1668       if (n->req() > TypeFunc::Parms &&
1669           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1670         // Treat Return value as LocalVar with GlobalEscape escape state.
1671         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1672       }
1673       break;
1674     }
1675     case Op_CompareAndExchangeP:
1676     case Op_CompareAndExchangeN:
1677     case Op_GetAndSetP:
1678     case Op_GetAndSetN: {
1679       add_objload_to_connection_graph(n, delayed_worklist);
1680       // fall-through
1681     }

1727       break;
1728     }
1729     default:
1730       ; // Do nothing for nodes not related to EA.
1731   }
1732   return;
1733 }
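// Editor's note (illustration): graph construction is two-phase. The switch
// above only registers PointsTo nodes and defers edges whose inputs may not be
// mapped yet (via delayed_worklist); add_final_edges() below runs after every
// node is registered, so it can add those edges unconditionally.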
1734 
1735 // Add final simple edges to graph.
1736 void ConnectionGraph::add_final_edges(Node *n) {
1737   PointsToNode* n_ptn = ptnode_adr(n->_idx);
1738 #ifdef ASSERT
1739   if (_verify && n_ptn->is_JavaObject())
1740     return; // This method does not change graph for JavaObject.
1741 #endif
1742 
1743   if (n->is_Call()) {
1744     process_call_arguments(n->as_Call());
1745     return;
1746   }
1747   assert(n->is_Store() || n->is_LoadStore() ||
1748          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1749          "node should be registered already");
1750   int opcode = n->Opcode();
1751   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1752   if (gc_handled) {
1753     return; // Ignore node if already handled by GC.
1754   }
1755   switch (opcode) {
1756     case Op_AddP: {
1757       Node* base = get_addp_base(n);
1758       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1759       assert(ptn_base != nullptr, "field's base should be registered");
1760       add_base(n_ptn->as_Field(), ptn_base);
1761       break;
1762     }

1763     case Op_CastPP:
1764     case Op_CheckCastPP:
1765     case Op_EncodeP:
1766     case Op_DecodeN:
1767     case Op_EncodePKlass:
1768     case Op_DecodeNKlass: {
1769       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1770       break;
1771     }
1772     case Op_CMoveP: {
1773       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1774         Node* in = n->in(i);
1775         if (in == nullptr) {
1776           continue;  // ignore null
1777         }
1778         Node* uncast_in = in->uncast();
1779         if (uncast_in->is_top() || uncast_in == n) {
1780           continue;  // ignore top or inputs which go back to this node
1781         }
1782         PointsToNode* ptn = ptnode_adr(in->_idx);

1795     }
1796     case Op_Phi: {
1797       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1798       // ThreadLocal has RawPtr type.
1799       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1800       for (uint i = 1; i < n->req(); i++) {
1801         Node* in = n->in(i);
1802         if (in == nullptr) {
1803           continue;  // ignore null
1804         }
1805         Node* uncast_in = in->uncast();
1806         if (uncast_in->is_top() || uncast_in == n) {
1807           continue;  // ignore top or inputs which go back to this node
1808         }
1809         PointsToNode* ptn = ptnode_adr(in->_idx);
1810         assert(ptn != nullptr, "node should be registered");
1811         add_edge(n_ptn, ptn);
1812       }
1813       break;
1814     }

1815     case Op_Proj: {
1816       // we are only interested in the oop result projection from a call
1817       assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1818              n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
1819       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1820       break;
1821     }
1822     case Op_Rethrow: // Exception object escapes
1823     case Op_Return: {
1824       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1825              "Unexpected node type");
1826       // Treat Return value as LocalVar with GlobalEscape escape state.
1827       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1828       break;
1829     }
1830     case Op_CompareAndExchangeP:
1831     case Op_CompareAndExchangeN:
1832     case Op_GetAndSetP:
1833     case Op_GetAndSetN: {
1834       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1835       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1836       // fall-through
1837     }
1838     case Op_CompareAndSwapP:
1839     case Op_CompareAndSwapN:

1974     PointsToNode* ptn = ptnode_adr(val->_idx);
1975     assert(ptn != nullptr, "node should be registered");
1976     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1977     // Add edge to object for unsafe access with offset.
1978     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1979     assert(adr_ptn != nullptr, "node should be registered");
1980     if (adr_ptn->is_Field()) {
1981       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
1982       add_edge(adr_ptn, ptn);
1983     }
1984     return true;
1985   }
1986 #ifdef ASSERT
1987   n->dump(1);
1988   assert(false, "not unsafe");
1989 #endif
1990   return false;
1991 }
1992 
1993 void ConnectionGraph::add_call_node(CallNode* call) {
1994   assert(call->returns_pointer(), "only for call which returns pointer");
1995   uint call_idx = call->_idx;
1996   if (call->is_Allocate()) {
1997     Node* k = call->in(AllocateNode::KlassNode);
1998     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
1999     assert(kt != nullptr, "TypeKlassPtr required.");
2000     PointsToNode::EscapeState es = PointsToNode::NoEscape;
2001     bool scalar_replaceable = true;
2002     NOT_PRODUCT(const char* nsr_reason = "");
2003     if (call->is_AllocateArray()) {
2004       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
2005         es = PointsToNode::GlobalEscape;
2006       } else {
2007         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2008         if (length < 0) {
2009           // Not scalar replaceable if the length is not constant.
2010           scalar_replaceable = false;
2011           NOT_PRODUCT(nsr_reason = "has a non-constant length");
2012         } else if (length > EliminateAllocationArraySizeLimit) {
2013           // Not scalar replaceable if the length is too big.
2014           scalar_replaceable = false;

2050     //
2051     //    - all oop arguments are escaping globally;
2052     //
2053     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2054     //
2055     //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
2056     //
2057     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2058     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2059     //      during call is returned;
2060     //    - mapped to ArgEscape LocalVar node pointing to object arguments
2061     //      which are returned and do not escape during the call;
2062     //
2063     //    - oop arguments escaping status is defined by bytecode analysis;
2064     //
2065     // For a static call, we know exactly what method is being called.
2066     // Use bytecode estimator to record whether the call's return value escapes.
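    // Editor's illustration: for a callee like
    //
    //   static Foo make() { return new Foo(); }
    //
    // BCEscapeAnalyzer reports is_return_allocated(), so the branch below maps
    // the call site to a NoEscape JavaObject, though not a scalar-replaceable
    // one, because the allocation itself happens inside the callee.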
2067     ciMethod* meth = call->as_CallJava()->method();
2068     if (meth == nullptr) {
2069       const char* name = call->as_CallStaticJava()->_name;
2070       assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "TODO: add failed case check");
2071       // Returns a newly allocated non-escaped object.
2072       add_java_object(call, PointsToNode::NoEscape);
2073       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
2074     } else if (meth->is_boxing_method()) {
2075       // Returns boxing object
2076       PointsToNode::EscapeState es;
2077       vmIntrinsics::ID intr = meth->intrinsic_id();
2078       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2079         // It does not escape if object is always allocated.
2080         es = PointsToNode::NoEscape;
2081       } else {
2082         // It escapes globally if object could be loaded from cache.
2083         es = PointsToNode::GlobalEscape;
2084       }
2085       add_java_object(call, es);
2086       if (es == PointsToNode::GlobalEscape) {
2087         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2088       }
2089     } else {
2090       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2091       call_analyzer->copy_dependencies(_compile->dependencies());
2092       if (call_analyzer->is_return_allocated()) {
2093         // Returns a newly allocated non-escaped object, simply
2094         // update dependency information.
2095         // Mark it as NoEscape so that objects referenced by
2096     // its fields will be marked as NoEscape at least.
2097         add_java_object(call, PointsToNode::NoEscape);
2098         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2099       } else {
2100         // Determine whether any arguments are returned.
2101         const TypeTuple* d = call->tf()->domain();
2102         bool ret_arg = false;
2103         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2104           if (d->field_at(i)->isa_ptr() != nullptr &&
2105               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2106             ret_arg = true;
2107             break;
2108           }
2109         }
2110         if (ret_arg) {
2111           add_local_var(call, PointsToNode::ArgEscape);
2112         } else {
2113           // Returns unknown object.
2114           map_ideal_node(call, phantom_obj);
2115         }
2116       }
2117     }
2118   } else {
2119     // Another type of call; assume the worst case:
2120     // returned value is unknown and globally escapes.
2121     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");

2129 #ifdef ASSERT
2130     case Op_Allocate:
2131     case Op_AllocateArray:
2132     case Op_Lock:
2133     case Op_Unlock:
2134       assert(false, "should be done already");
2135       break;
2136 #endif
2137     case Op_ArrayCopy:
2138     case Op_CallLeafNoFP:
2139       // Most array copies are ArrayCopy nodes at this point but there
2140       // are still a few direct calls to the copy subroutines (See
2141       // PhaseStringOpts::copy_string())
2142       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2143         call->as_CallLeaf()->is_call_to_arraycopystub();
2144       // fall through
2145     case Op_CallLeafVector:
2146     case Op_CallLeaf: {
2147       // Stub calls: objects do not escape but they are not scalar replaceable.
2148       // Adjust escape state for outgoing arguments.
2149       const TypeTuple * d = call->tf()->domain();
2150       bool src_has_oops = false;
2151       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2152         const Type* at = d->field_at(i);
2153         Node *arg = call->in(i);
2154         if (arg == nullptr) {
2155           continue;
2156         }
2157         const Type *aat = _igvn->type(arg);
2158         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2159           continue;
2160         }
2161         if (arg->is_AddP()) {
2162           //
2163           // The inline_native_clone() case when the arraycopy stub is called
2164           // after the allocation before Initialize and CheckCastPP nodes.
2165           // Or normal arraycopy for object arrays case.
2166           //
2167           // Set AddP's base (Allocate) as not scalar replaceable since
2168           // pointer to the base (with offset) is passed as argument.
2169           //
2170           arg = get_addp_base(arg);
2171         }
2172         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2173         assert(arg_ptn != nullptr, "should be registered");
2174         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2175         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2176           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2177                  aat->isa_ptr() != nullptr, "expecting a Ptr");
2178           bool arg_has_oops = aat->isa_oopptr() &&
2179                               (aat->isa_instptr() ||
2180                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
2181           if (i == TypeFunc::Parms) {
2182             src_has_oops = arg_has_oops;
2183           }
2184           //
2185           // src or dst could be j.l.Object when other is basic type array:
2186           //
2187           //   arraycopy(char[],0,Object*,0,size);
2188           //   arraycopy(Object*,0,char[],0,size);
2189           //
2190           // Don't add edges in such cases.
2191           //
2192           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2193                                        arg_has_oops && (i > TypeFunc::Parms);
2194 #ifdef ASSERT
2195           if (!(is_arraycopy ||
2196                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2197                 (call->as_CallLeaf()->_name != nullptr &&
2198                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2199                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2200                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

2224                   strcmp(call->as_CallLeaf()->_name, "dilithiumMontMulByConstant") == 0 ||
2225                   strcmp(call->as_CallLeaf()->_name, "dilithiumDecomposePoly") == 0 ||
2226                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2227                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2228                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2229                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2230                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2231                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2232                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2233                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2234                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2235                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2236                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2237                   strcmp(call->as_CallLeaf()->_name, "double_keccak") == 0 ||
2238                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2239                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2240                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2241                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2242                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2243                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2244                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2245                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2246                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2247                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2248                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2249                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2250                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2251                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2252                  ))) {
2253             call->dump();
2254             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2255           }
2256 #endif
2257           // Always process arraycopy's destination object since
2258           // we need to add all possible edges to references in
2259           // source object.
2260           if (arg_esc >= PointsToNode::ArgEscape &&
2261               !arg_is_arraycopy_dest) {
2262             continue;
2263           }

2286           }
2287         }
2288       }
2289       break;
2290     }
2291     case Op_CallStaticJava: {
2292       // For a static call, we know exactly what method is being called.
2293       // Use the bytecode estimator to record the call's escape effects.
2294 #ifdef ASSERT
2295       const char* name = call->as_CallStaticJava()->_name;
2296       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2297 #endif
2298       ciMethod* meth = call->as_CallJava()->method();
2299       if ((meth != nullptr) && meth->is_boxing_method()) {
2300         break; // Boxing methods do not modify any oops.
2301       }
2302       BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2303       // fall-through if not a Java method or no analyzer information
2304       if (call_analyzer != nullptr) {
2305         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2306         const TypeTuple* d = call->tf()->domain();
2307         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2308           const Type* at = d->field_at(i);
2309           int k = i - TypeFunc::Parms;
2310           Node* arg = call->in(i);
2311           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2312           if (at->isa_ptr() != nullptr &&
2313               call_analyzer->is_arg_returned(k)) {
2314             // The call returns arguments.
2315             if (call_ptn != nullptr) { // Is call's result used?
2316               assert(call_ptn->is_LocalVar(), "node should be registered");
2317               assert(arg_ptn != nullptr, "node should be registered");
2318               add_edge(call_ptn, arg_ptn);
2319             }
2320           }
2321           if (at->isa_oopptr() != nullptr &&
2322               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2323             if (!call_analyzer->is_arg_stack(k)) {
2324               // The argument global escapes
2325               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2326             } else {

2330                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2331               }
2332             }
2333           }
2334         }
2335         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2336           // The call returns arguments.
2337           assert(call_ptn->edge_count() > 0, "sanity");
2338           if (!call_analyzer->is_return_local()) {
2339             // Returns also unknown object.
2340             add_edge(call_ptn, phantom_obj);
2341           }
2342         }
2343         break;
2344       }
2345     }
2346     default: {
2347       // Fall-through here if not a Java method or no analyzer information
2348       // or some other type of call, assume the worst case: all arguments
2349       // globally escape.
2350       const TypeTuple* d = call->tf()->domain();
2351       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2352         const Type* at = d->field_at(i);
2353         if (at->isa_oopptr() != nullptr) {
2354           Node* arg = call->in(i);
2355           if (arg->is_AddP()) {
2356             arg = get_addp_base(arg);
2357           }
2358           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2359           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2360         }
2361       }
2362     }
2363   }
2364 }
2365 
2366 
2367 // Finish Graph construction.
2368 bool ConnectionGraph::complete_connection_graph(
2369                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2370                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

2748     PointsToNode* base = i.get();
2749     if (base->is_JavaObject()) {
2750       // Skip Allocate's fields which will be processed later.
2751       if (base->ideal_node()->is_Allocate()) {
2752         return 0;
2753       }
2754       assert(base == null_obj, "only null ptr base expected here");
2755     }
2756   }
2757   if (add_edge(field, phantom_obj)) {
2758     // New edge was added
2759     new_edges++;
2760     add_field_uses_to_worklist(field);
2761   }
2762   return new_edges;
2763 }
2764 
2765 // Find fields initializing values for allocations.
2766 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2767   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");

2768   Node* alloc = pta->ideal_node();
2769 
2770   // Do nothing for Allocate nodes since their field values are
2771   // "known" unless they are initialized by arraycopy/clone.
2772   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2773     return 0;
2774   }
2775   assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");

2776 #ifdef ASSERT
2777   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
2778     const char* name = alloc->as_CallStaticJava()->_name;
2779     assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "sanity");
2780   }
2781 #endif
2782   // Non-escaped allocations returned from Java or runtime calls have unknown values in their fields.
2783   int new_edges = 0;
2784   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2785     PointsToNode* field = i.get();
2786     if (field->is_Field() && field->as_Field()->is_oop()) {
2787       if (add_edge(field, phantom_obj)) {
2788         // New edge was added
2789         new_edges++;
2790         add_field_uses_to_worklist(field->as_Field());
2791       }
2792     }
2793   }
2794   return new_edges;
2795 }
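// Editor's illustration: for a clone such as
//
//   Foo f = (Foo) src.clone();    // f's PointsTo node has arraycopy_dst() set
//
// the loop above adds an edge from each oop field of f to phantom_obj, since
// the copy may write arbitrary (unknown) references into those fields.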
2796 
2797 // Find fields initializing values for allocations.
2798 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2799   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2800   Node* alloc = pta->ideal_node();
2801   // Do nothing for Call nodes since their field values are unknown.
2802   if (!alloc->is_Allocate()) {
2803     return 0;
2804   }
2805   InitializeNode* ini = alloc->as_Allocate()->initialization();
2806   bool visited_bottom_offset = false;
2807   GrowableArray<int> offsets_worklist;
2808   int new_edges = 0;
2809 
2810   // Check if an oop field's initializing value is recorded and add
2811   // a corresponding null value if the field's value is not recorded.
2812   // Connection Graph does not record a default initialization by null
2813   // captured by Initialize node.
2814   //
2815   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2816     PointsToNode* field = i.get(); // Field (AddP)
2817     if (!field->is_Field() || !field->as_Field()->is_oop()) {
2818       continue; // Not oop field
2819     }
2820     int offset = field->as_Field()->offset();
2821     if (offset == Type::OffsetBot) {
2822       if (!visited_bottom_offset) {

2868               } else {
2869                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2870                   tty->print_cr("----------init store has invalid value -----");
2871                   store->dump();
2872                   val->dump();
2873                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2874                 }
2875                 for (EdgeIterator j(val); j.has_next(); j.next()) {
2876                   PointsToNode* obj = j.get();
2877                   if (obj->is_JavaObject()) {
2878                     if (!field->points_to(obj->as_JavaObject())) {
2879                       missed_obj = obj;
2880                       break;
2881                     }
2882                   }
2883                 }
2884               }
2885               if (missed_obj != nullptr) {
2886                 tty->print_cr("----------field---------------------------------");
2887                 field->dump();
2888                 tty->print_cr("----------missed reference to object-----------");
2889                 missed_obj->dump();
2890                 tty->print_cr("----------object referenced by init store -----");
2891                 store->dump();
2892                 val->dump();
2893                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2894               }
2895             }
2896 #endif
2897           } else {
2898             // There could be initializing stores which follow allocation.
2899             // For example, a volatile field store is not collected
2900             // by Initialize node.
2901             //
2902             // Need to check for dependent loads to separate such stores from
2903             // stores which follow loads. For now, add initial value null so
2904             // that compare pointers optimization works correctly.
2905           }
2906         }
2907         if (value == nullptr) {
2908           // A field's initializing value was not recorded. Add null.
2909           if (add_edge(field, null_obj)) {
2910             // New edge was added

3235         assert(field->edge_count() > 0, "sanity");
3236       }
3237     }
3238   }
3239 }
3240 #endif
3241 
3242 // Optimize ideal graph.
3243 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3244                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3245   Compile* C = _compile;
3246   PhaseIterGVN* igvn = _igvn;
3247   if (EliminateLocks) {
3248     // Mark locks before changing ideal graph.
3249     int cnt = C->macro_count();
3250     for (int i = 0; i < cnt; i++) {
3251       Node *n = C->macro_node(i);
3252       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3253         AbstractLockNode* alock = n->as_AbstractLock();
3254         if (!alock->is_non_esc_obj()) {
3255           if (can_eliminate_lock(alock)) {

3256             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3257             // The lock could be marked eliminated by lock coarsening
3258             // code during the first IGVN before EA. Replace the coarsened flag
3259             // so that all associated locks/unlocks are eliminated.
3260 #ifdef ASSERT
3261             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3262 #endif
3263             alock->set_non_esc_obj();
3264           }
3265         }
3266       }
3267     }
3268   }
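  // Editor's illustration: the marking above lets macro expansion remove, e.g.,
  //
  //   Object lock = new Object();    // NoEscape in the connection graph
  //   synchronized (lock) { ... }    // Lock/Unlock now flagged non_esc_obj
  //
  // since no other thread can ever observe the monitor of a non-escaping object.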
3269 
3270   if (OptimizePtrCompare) {
3271     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3272       Node *n = ptr_cmp_worklist.at(i);
3273       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3274       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3275       if (tcmp->singleton()) {
3276         Node* cmp = igvn->makecon(tcmp);
3277 #ifndef PRODUCT
3278         if (PrintOptimizePtrCompare) {
3279           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3280           if (Verbose) {
3281             n->dump(1);
3282           }
3283         }
3284 #endif
3285         igvn->replace_node(n, cmp);
3286       }
3287     }
3288   }
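  // Editor's illustration: a compare such as
  //
  //   Object a = new A(); Object b = new B();
  //   if (a == b) { ... }
  //
  // involves two distinct non-escaping allocations, so optimize_ptr_compare()
  // can return the NE singleton; the CmpP above is then replaced by a constant
  // and IGVN removes the dominated branch.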
3289 
3290   // For MemBarStoreStore nodes added in library_call.cpp, check
3291   // escape status of associated AllocateNode and optimize out
3292   // MemBarStoreStore node if the allocated object never escapes.
3293   for (int i = 0; i < storestore_worklist.length(); i++) {
3294     Node* storestore = storestore_worklist.at(i);
3295     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3296     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3297       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3298       mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3299       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3300       igvn->register_new_node_with_optimizer(mb);
3301       igvn->replace_node(storestore, mb);
3302     }
3303   }
3304 }
3305 
3306 // Optimize objects compare.
3307 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3308   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0,1]
3309   if (!OptimizePtrCompare) {
3310     return UNKNOWN;
3311   }
3312   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3313   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3314 
3315   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3316   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3317   JavaObjectNode* jobj1 = unique_java_object(left);
3318   JavaObjectNode* jobj2 = unique_java_object(right);
3319 
3320   // The use of this method during allocation merge reduction may cause 'left'
3321   // or 'right' to be something (e.g., a Phi) that isn't in the connection graph or
3322   // that doesn't reference a unique java object.
3323   if (ptn1 == nullptr || ptn2 == nullptr ||
3324       jobj1 == nullptr || jobj2 == nullptr) {
3325     return UNKNOWN;

3445   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3446   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3447   PointsToNode* ptadr = _nodes.at(n->_idx);
3448   if (ptadr != nullptr) {
3449     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3450     return;
3451   }
3452   Compile* C = _compile;
3453   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3454   map_ideal_node(n, ptadr);
3455   // Add edge from arraycopy node to source object.
3456   (void)add_edge(ptadr, src);
3457   src->set_arraycopy_src();
3458   // Add edge from destination object to arraycopy node.
3459   (void)add_edge(dst, ptadr);
3460   dst->set_arraycopy_dst();
3461 }
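// Editor's sketch of the edges just added:
//
//     dst  -->  ArraycopyNode  -->  src
//
// The destination points to the arraycopy and the arraycopy points to the
// source, so during propagation the objects reachable from src flow into the
// fields of dst.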
3462 
3463 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3464   const Type* adr_type = n->as_AddP()->bottom_type();

3465   BasicType bt = T_INT;
3466   if (offset == Type::OffsetBot) {
3467     // Check only oop fields.
3468     if (!adr_type->isa_aryptr() ||
3469         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3470         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3471       // OffsetBot is used to reference an array's element. Ignore the first AddP.
3472       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3473         bt = T_OBJECT;
3474       }
3475     }
3476   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3477     if (adr_type->isa_instptr()) {
3478       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
3479       if (field != nullptr) {
3480         bt = field->layout_type();
3481       } else {
3482         // Check for unsafe oop field access
3483         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3484             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3485             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3486             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3487           bt = T_OBJECT;
3488           (*unsafe) = true;
3489         }
3490       }
3491     } else if (adr_type->isa_aryptr()) {
3492       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3493         // Ignore array length load.
3494       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3495         // Ignore first AddP.
3496       } else {
3497         const Type* elemtype = adr_type->isa_aryptr()->elem();
3498         bt = elemtype->array_element_basic_type();
3499       }
3500     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3501       // Allocation initialization, ThreadLocal field access, unsafe access
3502       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3503           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3504           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3505           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3506         bt = T_OBJECT;
3507       }
3508     }
3509   }
3510   // Note: T_NARROWOOP is not classed as a real reference type
3511   return (is_reference_type(bt) || bt == T_NARROWOOP);
3512 }
3513 
3514 // Returns unique pointed java object or null.
3515 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3516   // If the node was created after the escape computation we can't answer.
3517   uint idx = n->_idx;
3518   if (idx >= nodes_size()) {

3675             return true;
3676           }
3677         }
3678       }
3679     }
3680   }
3681   return false;
3682 }
3683 
3684 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3685   const Type *adr_type = phase->type(adr);
3686   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
3687     // We are computing a raw address for a store captured by an Initialize;
3688     // compute an appropriate address type. AddP cases #3 and #5 (see below).
3689     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3690     assert(offs != Type::OffsetBot ||
3691            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3692            "offset must be a constant or it is initialization of array");
3693     return offs;
3694   }
3695   const TypePtr *t_ptr = adr_type->isa_ptr();
3696   assert(t_ptr != nullptr, "must be a pointer type");
3697   return t_ptr->offset();
3698 }
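// Editor's worked example: for a normal field access the offset comes from the
// pointer type, e.g. an AddP typed instptr:Foo+12 yields 12; for a raw address
// of a store captured by an Initialize (AddP cases #3/#5), the constant is read
// directly from the AddPNode::Offset input instead.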
3699 
3700 Node* ConnectionGraph::get_addp_base(Node *addp) {
3701   assert(addp->is_AddP(), "must be AddP");
3702   //
3703   // AddP cases for Base and Address inputs:
3704   // case #1. Direct object's field reference:
3705   //     Allocate
3706   //       |
3707   //     Proj #5 ( oop result )
3708   //       |
3709   //     CheckCastPP (cast to instance type)
3710   //      | |
3711   //     AddP  ( base == address )
3712   //
3713   // case #2. Indirect object's field reference:
3714   //      Phi
3715   //       |
3716   //     CastPP (cast to instance type)
3717   //      | |

3831   }
3832   return nullptr;
3833 }
3834 
3835 //
3836 // Adjust the type and inputs of an AddP which computes the
3837 // address of a field of an instance
3838 //
3839 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3840   PhaseGVN* igvn = _igvn;
3841   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3842   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3843   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3844   if (t == nullptr) {
3845     // We are computing a raw address for a store captured by an Initialize;
3846     // compute an appropriate address type (cases #3 and #5).
3847     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3848     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3849     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3850     assert(offs != Type::OffsetBot, "offset must be a constant");
3851     t = base_t->add_offset(offs)->is_oopptr();
3852   }
3853   int inst_id =  base_t->instance_id();
3854   assert(!t->is_known_instance() || t->instance_id() == inst_id,
3855                              "old type must be non-instance or match new type");
3856 
3857   // The type 't' could be subclass of 'base_t'.
3858   // As a result t->offset() could be larger than base_t's size and it will
3859   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3860   // constructor verifies correctness of the offset.
3861   //
3862   // It could happen on a subclass's branch (from the type profiling
3863   // inlining) which was not eliminated during parsing since the exactness
3864   // of the allocation type was not propagated to the subclass type check.
3865   //
3866   // Or the type 't' could be not related to 'base_t' at all.
3867   // It could happen when the CHA type is different from the MDO type on a dead path
3868   // (for example, from instanceof check) which is not collapsed during parsing.
3869   //
3870   // Do nothing for such AddP node and don't process its users since
3871   // this code branch will go away.
3872   //
3873   if (!t->is_known_instance() &&
3874       !base_t->maybe_java_subtype_of(t)) {
3875      return false; // bail out
3876   }
3877   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
3878   // Do NOT remove the next line: ensure a new alias index is allocated
3879   // for the instance type. Note: C++ will not remove it since the call
3880   // has side effects.
3881   int alias_idx = _compile->get_alias_index(tinst);
3882   igvn->set_type(addp, tinst);
3883   // record the allocation in the node map
3884   set_map(addp, get_map(base->_idx));
3885   // Set addp's Base and Address to 'base'.
3886   Node *abase = addp->in(AddPNode::Base);
3887   Node *adr   = addp->in(AddPNode::Address);
3888   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3889       adr->in(0)->_idx == (uint)inst_id) {
3890     // Skip AddP cases #3 and #5.
3891   } else {
3892     assert(!abase->is_top(), "sanity"); // AddP case #3
3893     if (abase != base) {
3894       igvn->hash_delete(addp);
3895       addp->set_req(AddPNode::Base, base);
3896       if (abase == adr) {
3897         addp->set_req(AddPNode::Address, base);

4161     if (!is_instance) {
4162       continue;  // don't search further for non-instance types
4163     }
4164     // skip over a call which does not affect this memory slice
4165     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
4166       Node *proj_in = result->in(0);
4167       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
4168         break;  // hit one of our sentinels
4169       } else if (proj_in->is_Call()) {
4170         // ArrayCopy node processed here as well
4171         CallNode *call = proj_in->as_Call();
4172         if (!call->may_modify(toop, igvn)) {
4173           result = call->in(TypeFunc::Memory);
4174         }
4175       } else if (proj_in->is_Initialize()) {
4176         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
4177         // Stop if this is the initialization for the object instance
4178         // which contains this memory slice, otherwise skip over it.
4179         if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
4180           result = proj_in->in(TypeFunc::Memory);

4181         } else if (C->get_alias_index(result->adr_type()) != alias_idx) {
4182           assert(C->get_general_index(alias_idx) == C->get_alias_index(result->adr_type()), "should be projection for the same field/array element");
4183           result = get_map(result->_idx);
4184           assert(result != nullptr, "new projection should have been allocated");
4185           break;
4186         }
4187       } else if (proj_in->is_MemBar()) {
4188         // Check if there is an array copy for a clone
4189         // Step over GC barrier when ReduceInitialCardMarks is disabled
4190         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4191         Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
4192 
4193         if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
4194           // Stop if it is a clone
4195           ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
4196           if (ac->may_modify(toop, igvn)) {
4197             break;
4198           }
4199         }
4200         result = proj_in->in(TypeFunc::Memory);
4201       }
4202     } else if (result->is_MergeMem()) {
4203       MergeMemNode *mmem = result->as_MergeMem();
4204       result = step_through_mergemem(mmem, alias_idx, toop);
4205       if (result == mmem->base_memory()) {
4206         // Didn't find instance memory, search through general slice recursively.

4467       //   - not determined to be ineligible by escape analysis
4468       set_map(alloc, n);
4469       set_map(n, alloc);
4470       const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
4471       igvn->hash_delete(n);
4472       igvn->set_type(n,  tinst);
4473       n->raise_bottom_type(tinst);
4474       igvn->hash_insert(n);
4475       record_for_optimizer(n);
4476       // Allocate an alias index for the header fields. Accesses to
4477       // the header emitted during macro expansion wouldn't have
4478       // correct memory state otherwise.
4479       _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
4480       _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
4481       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
4482         // Add a new NarrowMem projection for each existing NarrowMem projection with new adr type
4483         InitializeNode* init = alloc->as_Allocate()->initialization();
4484         assert(init != nullptr, "can't find Initialization node for this Allocate node");
4485         auto process_narrow_proj = [&](NarrowMemProjNode* proj) {
4486           const TypePtr* adr_type = proj->adr_type();
4487           const TypePtr* new_adr_type = tinst->add_offset(adr_type->offset());
4488           if (adr_type != new_adr_type && !init->already_has_narrow_mem_proj_with_adr_type(new_adr_type)) {
4489             DEBUG_ONLY( uint alias_idx = _compile->get_alias_index(new_adr_type); )
4490             assert(_compile->get_general_index(alias_idx) == _compile->get_alias_index(adr_type), "new adr type should be narrowed down from existing adr type");
4491             NarrowMemProjNode* new_proj = new NarrowMemProjNode(init, new_adr_type);
4492             igvn->set_type(new_proj, new_proj->bottom_type());
4493             record_for_optimizer(new_proj);
4494             set_map(proj, new_proj); // record it so ConnectionGraph::find_inst_mem() can find it
4495           }
4496         };
4497         init->for_each_narrow_mem_proj_with_new_uses(process_narrow_proj);
4498 
4499         // First, put on the worklist all Field edges from the Connection Graph,
4500         // which is more accurate than pushing the immediate users from the Ideal Graph.
4501         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
4502           PointsToNode* tgt = e.get();
4503           if (tgt->is_Arraycopy()) {
4504             continue;
4505           }
4506           Node* use = tgt->ideal_node();
4507           assert(tgt->is_Field() && use->is_AddP(),

4584         ptnode_adr(n->_idx)->dump();
4585         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4586 #endif
4587         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4588         return;
4589       } else {
4590         Node *val = get_map(jobj->idx());   // CheckCastPP node
4591         TypeNode *tn = n->as_Type();
4592         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4593         assert(tinst != nullptr && tinst->is_known_instance() &&
4594                tinst->instance_id() == jobj->idx(), "instance type expected.");
4595 
4596         const Type *tn_type = igvn->type(tn);
4597         const TypeOopPtr *tn_t;
4598         if (tn_type->isa_narrowoop()) {
4599           tn_t = tn_type->make_ptr()->isa_oopptr();
4600         } else {
4601           tn_t = tn_type->isa_oopptr();
4602         }
4603         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
4604           if (tn_type->isa_narrowoop()) {
4605             tn_type = tinst->make_narrowoop();
4606           } else {
4607             tn_type = tinst;
4608           }
4609           igvn->hash_delete(tn);
4610           igvn->set_type(tn, tn_type);
4611           tn->set_type(tn_type);
4612           igvn->hash_insert(tn);
4613           record_for_optimizer(n);
4614         } else {
4615           assert(tn_type == TypePtr::NULL_PTR ||
4616                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4617                  "unexpected type");
4618           continue; // Skip dead path with different type
4619         }
4620       }
4621     } else {
4622       DEBUG_ONLY(n->dump();)
4623       assert(false, "EA: unexpected node");
4624       continue;
4625     }
4626     // push allocation's users on appropriate worklist
4627     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4628       Node *use = n->fast_out(i);
4629       if (use->is_Mem() && use->in(MemNode::Address) == n) {
4630         // Load/store to instance's field
4631         memnode_worklist.append_if_missing(use);
4632       } else if (use->is_MemBar()) {
4633         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4634           memnode_worklist.append_if_missing(use);
4635         }
4636       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4637         Node* addp2 = find_second_addp(use, n);
4638         if (addp2 != nullptr) {
4639           alloc_worklist.append_if_missing(addp2);
4640         }
4641         alloc_worklist.append_if_missing(use);
4642       } else if (use->is_Phi() ||
4643                  use->is_CheckCastPP() ||
4644                  use->is_EncodeNarrowPtr() ||
4645                  use->is_DecodeNarrowPtr() ||
4646                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4647         alloc_worklist.append_if_missing(use);
4648 #ifdef ASSERT
4649       } else if (use->is_Mem()) {
4650         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4651       } else if (use->is_MergeMem()) {
4652         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4653       } else if (use->is_SafePoint()) {
4654         // Look for MergeMem nodes for calls which reference unique allocation
4655         // (through CheckCastPP nodes) even for debug info.
4656         Node* m = use->in(TypeFunc::Memory);
4657         if (m->is_MergeMem()) {
4658           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4659         }
4660       } else if (use->Opcode() == Op_EncodeISOArray) {
4661         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4662           // EncodeISOArray overwrites destination array
4663           memnode_worklist.append_if_missing(use);
4664         }
4665       } else {
4666         uint op = use->Opcode();
4667         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4668             (use->in(MemNode::Memory) == n)) {
4669           // These nodes overwrite the memory edge corresponding to the destination array.
4670           memnode_worklist.append_if_missing(use);
4671         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4672               op == Op_CastP2X ||
4673               op == Op_FastLock || op == Op_AryEq ||
4674               op == Op_StrComp || op == Op_CountPositives ||
4675               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4676               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4677               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4678               op == Op_SubTypeCheck ||
4679               op == Op_ReinterpretS2HF ||
4680               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4681           n->dump();
4682           use->dump();
4683           assert(false, "EA: missing allocation reference path");
4684         }
4685 #endif
4686       }
4687     }
4688 
4689   }
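
// A small self-contained sketch (illustrative node model, not HotSpot code) of
// the routing idiom used by the loop above: each user of the allocation goes to
// the worklist matching its kind, and append_if_missing keeps the worklists
// duplicate-free.

#include <algorithm>
#include <vector>

enum class Kind { Mem, MemBar, AddP, Phi, Other };
struct UseNode { Kind kind; };

static void append_if_missing(std::vector<UseNode*>& wl, UseNode* n) {
  if (std::find(wl.begin(), wl.end(), n) == wl.end()) {
    wl.push_back(n);
  }
}

static void route_users(const std::vector<UseNode*>& users,
                        std::vector<UseNode*>& memnode_wl,
                        std::vector<UseNode*>& alloc_wl) {
  for (UseNode* u : users) {
    switch (u->kind) {
      case Kind::Mem:
      case Kind::MemBar: append_if_missing(memnode_wl, u); break;  // memory users
      case Kind::AddP:
      case Kind::Phi:    append_if_missing(alloc_wl, u);   break;  // pointer users
      default:           break;  // CmpP, barriers, etc. need no further processing
    }
  }
}
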
4690 
4691 #ifdef ASSERT
4692   if (VerifyReduceAllocationMerges) {
4693     for (uint i = 0; i < reducible_merges.size(); i++) {
4694       Node* phi = reducible_merges.at(i);
4695 
4696       if (!reduced_merges.member(phi)) {
4697         phi->dump(2);
4698         phi->dump(-2);

4766         n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4767         if (n == nullptr) {
4768           continue;
4769         }
4770       }
4771     } else if (n->is_CallLeaf()) {
4772       // Runtime calls with narrow memory input (no MergeMem node)
4773       // get the memory projection
4774       n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4775       if (n == nullptr) {
4776         continue;
4777       }
4778     } else if (n->Opcode() == Op_StrInflatedCopy) {
4779       // Check direct uses of StrInflatedCopy.
4780       // It is a memory type node; there is no special SCMemProj node.
4781     } else if (n->Opcode() == Op_StrCompressedCopy ||
4782                n->Opcode() == Op_EncodeISOArray) {
4783       // get the memory projection
4784       n = n->find_out_with(Op_SCMemProj);
4785       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4786     } else if (n->is_Proj()) {
4787       assert(n->in(0)->is_Initialize(), "we only push memory projections for Initialize");
4788     } else {
4789 #ifdef ASSERT
4790       if (!n->is_Mem()) {
4791         n->dump();
4792       }
4793       assert(n->is_Mem(), "memory node required.");
4794 #endif
4795       Node *addr = n->in(MemNode::Address);
4796       const Type *addr_t = igvn->type(addr);
4797       if (addr_t == Type::TOP) {
4798         continue;
4799       }
4800       assert(addr_t->isa_ptr() != nullptr, "pointer type required.");
4801       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4802       assert((uint)alias_idx < new_index_end, "wrong alias index");
4803       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4804       if (_compile->failing()) {
4805         return;

4817         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4818       }
4819     }
4820     // push user on appropriate worklist
4821     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4822       Node *use = n->fast_out(i);
4823       if (use->is_Phi() || use->is_ClearArray()) {
4824         memnode_worklist.append_if_missing(use);
4825       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4826         memnode_worklist.append_if_missing(use);
4827       } else if (use->is_MemBar() || use->is_CallLeaf()) {
4828         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4829           memnode_worklist.append_if_missing(use);
4830         }
4831       } else if (use->is_Proj()) {
4832         assert(n->is_Initialize(), "We only push projections of Initialize");
4833         if (use->as_Proj()->_con == TypeFunc::Memory) { // Ignore precedent edge
4834           memnode_worklist.append_if_missing(use);
4835         }
4836 #ifdef ASSERT
4837       } else if (use->is_Mem()) {
4838         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4839       } else if (use->is_MergeMem()) {
4840         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4841       } else if (use->Opcode() == Op_EncodeISOArray) {
4842         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4843           // EncodeISOArray overwrites destination array
4844           memnode_worklist.append_if_missing(use);
4845         }
4846       } else {
4847         uint op = use->Opcode();
4848         if ((use->in(MemNode::Memory) == n) &&
4849             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4850           // These nodes overwrite the memory edge corresponding to the destination array.
4851           memnode_worklist.append_if_missing(use);
4852         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4853               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4854               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4855               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4856           n->dump();
4857           use->dump();
4858           assert(false, "EA: missing memory path");
4859         }
4860 #endif
4861       }
4862     }
4863   }
4864 
4865   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4866   //            Walk each memory slice moving the first node encountered of each
4867   //            instance type to the input corresponding to its alias index.
4868   uint length = mergemem_worklist.length();
4869   for( uint next = 0; next < length; ++next ) {
4870     MergeMemNode* nmm = mergemem_worklist.at(next);
4871     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4872     // Note: we don't want to use MergeMemStream here because we only want to
4873     // scan inputs which exist at the start, not ones we add during processing.
4874     // Note 2: MergeMem may already contain instance memory slices added
4875     // during find_inst_mem() call when memory nodes were processed above.
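
// Sketch of the Phase 3 normalization described above (simplified, self-contained
// model): the general slice is scanned in chain order and only the *first* node
// of each instance type is moved to the input reserved for its alias index;
// later nodes stay reachable through that first one.

#include <map>
#include <vector>

struct SliceNode { int alias_idx; int node_id; };

struct MergeNormalizer {
  std::vector<SliceNode> general_chain;       // scan order == memory chain order
  std::map<int, SliceNode> instance_inputs;   // alias_idx -> normalized input

  void normalize() {
    for (const SliceNode& s : general_chain) {
      instance_inputs.emplace(s.alias_idx, s);  // emplace keeps the first occurrence
    }
  }
};
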

4938         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4939       } else if (_invocation > 0) {
4940         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4941       } else {
4942         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4943       }
4944       return;
4945     }
4946 
4947     igvn->hash_insert(nmm);
4948     record_for_optimizer(nmm);
4949   }
4950 
4951   _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_3, 5);
4952 
4953   //  Phase 4:  Update the inputs of non-instance memory Phis and
4954   //            the Memory input of memnodes
4955   // First update the inputs of any non-instance Phi's from
4956   // which we split out an instance Phi.  Note we don't have
4957   // to recursively process Phi's encountered on the input memory
4958 // chains as is done in split_memory_phi() since they will
4959   // also be processed here.
4960   for (int j = 0; j < orig_phis.length(); j++) {
4961     PhiNode *phi = orig_phis.at(j);
4962     int alias_idx = _compile->get_alias_index(phi->adr_type());
4963     igvn->hash_delete(phi);
4964     for (uint i = 1; i < phi->req(); i++) {
4965       Node *mem = phi->in(i);
4966       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4967       if (_compile->failing()) {
4968         return;
4969       }
4970       if (mem != new_mem) {
4971         phi->set_req(i, new_mem);
4972       }
4973     }
4974     igvn->hash_insert(phi);
4975     record_for_optimizer(phi);
4976   }
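
// Sketch (hypothetical find_inst_mem stand-in, simplified Phi model) of the
// Phase 4 rewiring just performed: every memory input of a split-from Phi is
// replaced with the instance memory computed for the Phi's alias index, and
// inputs are left untouched when nothing changed.

#include <functional>
#include <vector>

struct PhiModel {
  std::vector<int> in;   // in[0] is control in the real graph, so start at 1

  void update_inputs(int alias_idx, const std::function<int(int, int)>& find_inst_mem) {
    for (size_t i = 1; i < in.size(); i++) {
      int new_mem = find_inst_mem(in[i], alias_idx);
      if (new_mem != in[i]) {
        in[i] = new_mem;   // analogue of phi->set_req(i, new_mem)
      }
    }
  }
};
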
4977 
4978   // Update the memory inputs of MemNodes with the value we computed

  25 #include "ci/bcEscapeAnalyzer.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "gc/shared/barrierSet.hpp"
  28 #include "gc/shared/c2/barrierSetC2.hpp"
  29 #include "libadt/vectset.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/metaspace.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "opto/arraycopynode.hpp"
  34 #include "opto/c2compiler.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/castnode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/compile.hpp"
  39 #include "opto/escape.hpp"
  40 #include "opto/inlinetypenode.hpp"
  41 #include "opto/locknode.hpp"
  42 #include "opto/macro.hpp"
  43 #include "opto/movenode.hpp"
  44 #include "opto/narrowptrnode.hpp"
  45 #include "opto/phaseX.hpp"
  46 #include "opto/rootnode.hpp"
  47 #include "utilities/macros.hpp"
  48 
  49 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  50   // If ReduceAllocationMerges is enabled we might call split_through_phi during
  51   // split_unique_types and that will create additional nodes that need to be
  52   // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  53   // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  54   // the array will be reallocated.
  55   _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  56   _in_worklist(C->comp_arena()),
  57   _next_pidx(0),
  58   _collecting(true),
  59   _verify(false),
  60   _compile(C),

 152   GrowableArray<SafePointNode*>  sfn_worklist;
 153   GrowableArray<MergeMemNode*>   mergemem_worklist;
 154   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 155 
 156   { Compile::TracePhase tp(Phase::_t_connectionGraph);
 157 
 158   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 159   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
 160   // Initialize worklist
 161   if (C->root() != nullptr) {
 162     ideal_nodes.push(C->root());
 163   }
 164   // Processed ideal nodes are unique on ideal_nodes list
 165   // but several ideal nodes are mapped to the phantom_obj.
 166   // To avoid duplicated entries on the following worklists
 167   // add the phantom_obj only once to them.
 168   ptnodes_worklist.append(phantom_obj);
 169   java_objects_worklist.append(phantom_obj);
 170   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 171     Node* n = ideal_nodes.at(next);
 172     if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
 173         !n->in(MemNode::Address)->is_AddP() &&
 174         _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
 175       // A Load/Store at the mark word address is at offset 0, so it has no AddP, which confuses EA
 176       Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
 177       _igvn->register_new_node_with_optimizer(addp);
 178       _igvn->replace_input_of(n, MemNode::Address, addp);
 179       ideal_nodes.push(addp);
 180       _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
 181     }
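
// Sketch of the shape this normalization gives the graph (conceptual only):
//
//   before:  LoadX(ctrl, mem, oop)                  // address is the oop itself
//   after:   LoadX(ctrl, mem, AddP(oop, oop, #0))   // explicit (base, offset) address
//
// A tiny self-contained model of the invariant EA gains from it: every access is
// describable as a base plus a byte offset, with the mark word at offset 0.

#include <cstdint>

struct Address { std::uintptr_t base; std::intptr_t offset; };

static Address mark_word_address(std::uintptr_t oop) {
  return { oop, 0 };   // (base, 0) instead of a bare pointer
}
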
 182     // Create PointsTo nodes and add them to Connection Graph. Called
 183     // only once per ideal node since ideal_nodes is Unique_Node list.
 184     add_node_to_connection_graph(n, &delayed_worklist);
 185     PointsToNode* ptn = ptnode_adr(n->_idx);
 186     if (ptn != nullptr && ptn != phantom_obj) {
 187       ptnodes_worklist.append(ptn);
 188       if (ptn->is_JavaObject()) {
 189         java_objects_worklist.append(ptn->as_JavaObject());
 190         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 191             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 192           // Only the results of allocations and Java static calls are interesting.
 193           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 194         }
 195       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 196         oop_fields_worklist.append(ptn->as_Field());
 197       }
 198     }
 199     // Collect some interesting nodes for further use.
 200     switch (n->Opcode()) {
 201       case Op_MergeMem:

 419     // scalar replaceable objects.
 420     split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
 421     if (C->failing()) {
 422       NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
 423       return false;
 424     }
 425 
 426 #ifdef ASSERT
 427   } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
 428     tty->print("=== No allocations eliminated for ");
 429     C->method()->print_short_name();
 430     if (!EliminateAllocations) {
 431       tty->print(" since EliminateAllocations is off ===");
 432     } else if (!has_scalar_replaceable_candidates) {
 433       tty->print(" since there are no scalar replaceable candidates ===");
 434     }
 435     tty->cr();
 436 #endif
 437   }
 438 
 439   // 6. Expand flat accesses if the object does not escape. This adds nodes to
 440   // the graph, so it has to run after split_unique_types. It expands atomic
 441   // mismatched accesses (albeit encapsulated in LoadFlats and StoreFlats) into
 442   // non-mismatched accesses, so it is better to run it before reducing allocation merges.
 443   if (has_non_escaping_obj) {
 444     optimize_flat_accesses(sfn_worklist);
 445   }
 446 
 447   _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES, 4);
 448 
 449   // 7. Reduce allocation merges used as debug information. This is done after
 450   // split_unique_types because the methods used to create SafePointScalarObject
 451   // need to traverse the memory graph to find values for object fields. We also
 452   // set to null the scalarized inputs of reducible Phis so that the Allocates
 453   // they point to can later be scalar replaced.
 454   bool delay = _igvn->delay_transform();
 455   _igvn->set_delay_transform(true);
 456   for (uint i = 0; i < reducible_merges.size(); i++) {
 457     Node* n = reducible_merges.at(i);
 458     if (n->outcnt() > 0) {
 459       if (!reduce_phi_on_safepoints(n->as_Phi())) {
 460         NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
 461         C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
 462         return false;
 463       }
 464 
 465       // Now we set the scalar replaceable inputs of ophi to null, removing
 466       // the last piece that would prevent it from being scalar replaceable.
 467       reset_scalar_replaceable_entries(n->as_Phi());
 468     }
 469   }

1270 
1271     // The next two inputs are:
1272     //  (1) A copy of the original pointer to NSR objects.
1273     //  (2) A selector, used to decide if we need to rematerialize an object
1274     //      or use the pointer to a NSR object.
1275     // See more details of these fields in the declaration of SafePointScalarMergeNode
1276     sfpt->add_req(nsr_merge_pointer);
1277     sfpt->add_req(selector);
1278 
1279     for (uint i = 1; i < ophi->req(); i++) {
1280       Node* base = ophi->in(i);
1281       JavaObjectNode* ptn = unique_java_object(base);
1282 
1283       // If the base is not scalar replaceable we don't need to register information about
1284       // it at this time.
1285       if (ptn == nullptr || !ptn->scalar_replaceable()) {
1286         continue;
1287       }
1288 
1289       AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1290       Unique_Node_List value_worklist;
1291 #ifdef ASSERT
1292       const Type* res_type = alloc->result_cast()->bottom_type();
1293       if (res_type->is_inlinetypeptr() && !Compile::current()->has_circular_inline_type()) {
1294         PhiNode* phi = ophi->as_Phi();
 1295         assert(!phi->can_push_inline_types_down(_igvn), "missed earlier scalarization opportunity");
1296       }
1297 #endif
1298       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
1299       if (sobj == nullptr) {
1300         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1301         return false;
1302       }
1303 
1304       // Now make a pass over the debug information replacing any references
1305       // to the allocated object with "sobj"
1306       Node* ccpp = alloc->result_cast();
1307       sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1308 
1309       // Register the scalarized object as a candidate for reallocation
1310       smerge->add_req(sobj);
1311 
1312       // Scalarize inline types that were added to the safepoint.
1313       // Don't allow linking a constant oop (if available) for flat array elements
1314       // because Deoptimization::reassign_flat_array_elements needs field values.
1315       const bool allow_oop = !merge_t->is_flat();
1316       for (uint j = 0; j < value_worklist.size(); ++j) {
1317         InlineTypeNode* vt = value_worklist.at(j)->as_InlineType();
1318         vt->make_scalar_in_safepoints(_igvn, allow_oop);
1319       }
1320     }
1321 
1322     // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
1323     sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1324 
1325     // The call to 'replace_edges_in_range' above might have removed the
 1326     // reference to ophi that we need at _merge_pointer_idx. The line below makes
1327     // sure the reference is maintained.
1328     sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1329     _igvn->_worklist.push(sfpt);
1330   }
1331 
1332   return true;
1333 }
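
// Sketch (simplified deoptimization-side model; names are illustrative, not the
// real runtime API) of how the two inputs added above are consumed: a selector
// of -1 means "use the NSR merge pointer as-is", while a non-negative selector
// picks which scalarized object description must be rematerialized.

#include <cstdint>
#include <vector>

struct ScalarizedObject { /* field values captured at the safepoint */ };

struct MergeDebugInfo {
  std::uintptr_t nsr_merge_pointer;          // (1) pointer usable for NSR inputs
  int selector;                              // (2) which merge input was taken
  std::vector<ScalarizedObject> candidates;  // one entry per scalar-replaced input

  bool needs_rematerialization() const { return selector >= 0; }
  const ScalarizedObject& chosen() const { return candidates.at(selector); }
};
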
1334 
1335 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_worklist) {
1336   bool delay = _igvn->delay_transform();
1337   _igvn->set_delay_transform(true);
1338   _igvn->hash_delete(ophi);
1339 

1502   return false;
1503 }
1504 
1505 // Returns true if at least one of the arguments to the call is an object
1506 // that does not escape globally.
1507 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1508   if (call->method() != nullptr) {
1509     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1510     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1511       Node* p = call->in(idx);
1512       if (not_global_escape(p)) {
1513         return true;
1514       }
1515     }
1516   } else {
1517     const char* name = call->as_CallStaticJava()->_name;
1518     assert(name != nullptr, "no name");
1519     // no arg escapes through uncommon traps
1520     if (strcmp(name, "uncommon_trap") != 0) {
1521       // process_call_arguments() assumes that all arguments escape globally
1522       const TypeTuple* d = call->tf()->domain_sig();
1523       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1524         const Type* at = d->field_at(i);
1525         if (at->isa_oopptr() != nullptr) {
1526           return true;
1527         }
1528       }
1529     }
1530   }
1531   return false;
1532 }
1533 
1534 
1535 
1536 // Utility function for nodes that load an object
1537 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1538   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1539   // ThreadLocal has RawPtr type.
1540   const Type* t = _igvn->type(n);
1541   if (t->make_ptr() != nullptr) {
1542     Node* adr = n->in(MemNode::Address);

1576       // first IGVN optimization when escape information is still available.
1577       record_for_optimizer(n);
1578     } else if (n->is_Allocate()) {
1579       add_call_node(n->as_Call());
1580       record_for_optimizer(n);
1581     } else {
1582       if (n->is_CallStaticJava()) {
1583         const char* name = n->as_CallStaticJava()->_name;
1584         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1585           return; // Skip uncommon traps
1586         }
1587       }
1588       // Don't mark as processed since call's arguments have to be processed.
1589       delayed_worklist->push(n);
1590       // Check if a call returns an object.
1591       if ((n->as_Call()->returns_pointer() &&
1592            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1593           (n->is_CallStaticJava() &&
1594            n->as_CallStaticJava()->is_boxing_method())) {
1595         add_call_node(n->as_Call());
1596       } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
1597         bool returns_oop = false;
1598         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
1599           ProjNode* pn = n->fast_out(i)->as_Proj();
1600           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
1601             returns_oop = true;
1602           }
1603         }
1604         if (returns_oop) {
1605           add_call_node(n->as_Call());
1606         }
1607       }
1608     }
1609     return;
1610   }
1611   // Put this check here to process call arguments since some call nodes
1612   // point to phantom_obj.
1613   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1614     return; // Skip predefined nodes.
1615   }
1616   switch (opcode) {
1617     case Op_AddP: {
1618       Node* base = get_addp_base(n);
1619       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1620       // Field nodes are created for all field types. They are used in
1621       // adjust_scalar_replaceable_state() and split_unique_types().
1622       // Note, non-oop fields will have only base edges in Connection
1623       // Graph because such fields are not used for oop loads and stores.
1624       int offset = address_offset(n, igvn);
1625       add_field(n, PointsToNode::NoEscape, offset);
1626       if (ptn_base == nullptr) {
1627         delayed_worklist->push(n); // Process it later.
1628       } else {
1629         n_ptn = ptnode_adr(n_idx);
1630         add_base(n_ptn->as_Field(), ptn_base);
1631       }
1632       break;
1633     }
1634     case Op_CastX2P:
1635     case Op_CastI2N: {
1636       map_ideal_node(n, phantom_obj);
1637       break;
1638     }
1639     case Op_InlineType:
1640     case Op_CastPP:
1641     case Op_CheckCastPP:
1642     case Op_EncodeP:
1643     case Op_DecodeN:
1644     case Op_EncodePKlass:
1645     case Op_DecodeNKlass: {
1646       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1647       break;
1648     }
1649     case Op_CMoveP: {
1650       add_local_var(n, PointsToNode::NoEscape);
 1651       // Do not add edges during the first iteration because some inputs
 1652       // may not be defined yet.
1653       delayed_worklist->push(n);
1654       break;
1655     }
1656     case Op_ConP:
1657     case Op_ConN:
1658     case Op_ConNKlass: {
1659       // assume all oop constants globally escape except for null

1689       break;
1690     }
1691     case Op_PartialSubtypeCheck: {
 1692       // Produces null or notNull and is used only in CmpP, so
 1693       // phantom_obj can be used.
1694       map_ideal_node(n, phantom_obj); // Result is unknown
1695       break;
1696     }
1697     case Op_Phi: {
1698       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1699       // ThreadLocal has RawPtr type.
1700       const Type* t = n->as_Phi()->type();
1701       if (t->make_ptr() != nullptr) {
1702         add_local_var(n, PointsToNode::NoEscape);
 1703         // Do not add edges during the first iteration because some inputs
 1704         // may not be defined yet.
1705         delayed_worklist->push(n);
1706       }
1707       break;
1708     }
1709     case Op_LoadFlat:
 1710       // Treat LoadFlat like an unknown call that receives nothing and produces its results
1711       map_ideal_node(n, phantom_obj);
1712       break;
1713     case Op_StoreFlat:
 1714       // Treat StoreFlat like a call through which the stored flattened fields escape
1715       delayed_worklist->push(n);
1716       break;
1717     case Op_Proj: {
1718       // we are only interested in the oop result projection from a call
1719       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
1720           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
1721         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1722                n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1723         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1724       } else if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_LoadFlat() && igvn->type(n)->isa_ptr()) {
 1725         // Treat LoadFlat outputs like a call return value
1726         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1727       }
1728       break;
1729     }
1730     case Op_Rethrow: // Exception object escapes
1731     case Op_Return: {
1732       if (n->req() > TypeFunc::Parms &&
1733           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1734         // Treat Return value as LocalVar with GlobalEscape escape state.
1735         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1736       }
1737       break;
1738     }
1739     case Op_CompareAndExchangeP:
1740     case Op_CompareAndExchangeN:
1741     case Op_GetAndSetP:
1742     case Op_GetAndSetN: {
1743       add_objload_to_connection_graph(n, delayed_worklist);
1744       // fall-through
1745     }

1791       break;
1792     }
1793     default:
1794       ; // Do nothing for nodes not related to EA.
1795   }
1796   return;
1797 }
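
// A compact sketch (self-contained, simplified) of the classification the
// switch above implements: interesting ideal nodes map to connection-graph
// nodes that are either JavaObjects (allocations, constants), LocalVars
// (pointer copies such as CastPP/Phi/Proj), or Fields (AddP with an offset).

enum class PTKind { JavaObject, LocalVar, Field };
enum class EscapeState { NoEscape, ArgEscape, GlobalEscape };

struct PTNodeModel {
  PTKind kind;
  EscapeState es;
  int offset;   // meaningful only for Field nodes
};

static PTNodeModel make_java_object(EscapeState es) { return { PTKind::JavaObject, es, -1 }; }
static PTNodeModel make_local_var(EscapeState es)   { return { PTKind::LocalVar,   es, -1 }; }
static PTNodeModel make_field(int offset)           { return { PTKind::Field, EscapeState::NoEscape, offset }; }
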
1798 
1799 // Add final simple edges to graph.
1800 void ConnectionGraph::add_final_edges(Node *n) {
1801   PointsToNode* n_ptn = ptnode_adr(n->_idx);
1802 #ifdef ASSERT
1803   if (_verify && n_ptn->is_JavaObject())
1804     return; // This method does not change graph for JavaObject.
1805 #endif
1806 
1807   if (n->is_Call()) {
1808     process_call_arguments(n->as_Call());
1809     return;
1810   }
1811   assert(n->is_Store() || n->is_LoadStore() || n->is_StoreFlat() ||
1812          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1813          "node should be registered already");
1814   int opcode = n->Opcode();
1815   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1816   if (gc_handled) {
1817     return; // Ignore node if already handled by GC.
1818   }
1819   switch (opcode) {
1820     case Op_AddP: {
1821       Node* base = get_addp_base(n);
1822       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1823       assert(ptn_base != nullptr, "field's base should be registered");
1824       add_base(n_ptn->as_Field(), ptn_base);
1825       break;
1826     }
1827     case Op_InlineType:
1828     case Op_CastPP:
1829     case Op_CheckCastPP:
1830     case Op_EncodeP:
1831     case Op_DecodeN:
1832     case Op_EncodePKlass:
1833     case Op_DecodeNKlass: {
1834       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1835       break;
1836     }
1837     case Op_CMoveP: {
1838       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1839         Node* in = n->in(i);
1840         if (in == nullptr) {
1841           continue;  // ignore null
1842         }
1843         Node* uncast_in = in->uncast();
1844         if (uncast_in->is_top() || uncast_in == n) {
 1845           continue;  // ignore top or inputs which go back to this node
1846         }
1847         PointsToNode* ptn = ptnode_adr(in->_idx);

1860     }
1861     case Op_Phi: {
1862       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1863       // ThreadLocal has RawPtr type.
1864       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1865       for (uint i = 1; i < n->req(); i++) {
1866         Node* in = n->in(i);
1867         if (in == nullptr) {
1868           continue;  // ignore null
1869         }
1870         Node* uncast_in = in->uncast();
1871         if (uncast_in->is_top() || uncast_in == n) {
 1872           continue;  // ignore top or inputs which go back to this node
1873         }
1874         PointsToNode* ptn = ptnode_adr(in->_idx);
1875         assert(ptn != nullptr, "node should be registered");
1876         add_edge(n_ptn, ptn);
1877       }
1878       break;
1879     }
1880     case Op_StoreFlat: {
 1881       // The flattened fields stored by a StoreFlat escape globally
1882       InlineTypeNode* value = n->as_StoreFlat()->value();
1883       ciInlineKlass* vk = _igvn->type(value)->inline_klass();
1884       for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
1885         ciField* field = vk->nonstatic_field_at(i);
1886         if (field->type()->is_primitive_type()) {
1887           continue;
1888         }
1889 
1890         Node* field_value = value->field_value_by_offset(field->offset_in_bytes(), true);
1891         PointsToNode* field_value_ptn = ptnode_adr(field_value->_idx);
1892         set_escape_state(field_value_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "store into a flat field"));
1893       }
1894       break;
1895     }
1896     case Op_Proj: {
1897       if (n->in(0)->is_Call()) {
1898         // we are only interested in the oop result projection from a call
1899         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1900               n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1901         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1902       } else if (n->in(0)->is_LoadFlat()) {
 1903         // Treat LoadFlat outputs like a call return value
1904         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1905       }
1906       break;
1907     }
1908     case Op_Rethrow: // Exception object escapes
1909     case Op_Return: {
1910       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1911              "Unexpected node type");
1912       // Treat Return value as LocalVar with GlobalEscape escape state.
1913       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1914       break;
1915     }
1916     case Op_CompareAndExchangeP:
1917     case Op_CompareAndExchangeN:
1918     case Op_GetAndSetP:
1919     case Op_GetAndSetN:{
1920       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1921       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1922       // fall-through
1923     }
1924     case Op_CompareAndSwapP:
1925     case Op_CompareAndSwapN:

2060     PointsToNode* ptn = ptnode_adr(val->_idx);
2061     assert(ptn != nullptr, "node should be registered");
2062     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
2063     // Add edge to object for unsafe access with offset.
2064     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
2065     assert(adr_ptn != nullptr, "node should be registered");
2066     if (adr_ptn->is_Field()) {
2067       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
2068       add_edge(adr_ptn, ptn);
2069     }
2070     return true;
2071   }
2072 #ifdef ASSERT
2073   n->dump(1);
2074   assert(false, "not unsafe");
2075 #endif
2076   return false;
2077 }
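
// Sketch of the escape-state lattice behind set_escape_state (simplified: the
// real graph also tracks a separate fields_escape_state): states only move
// upward, NoEscape -> ArgEscape -> GlobalEscape, so an update is a max over the
// ordered enum and never loses information.

#include <algorithm>

enum Escape { NoEscape = 0, ArgEscape = 1, GlobalEscape = 2 };

struct EscapeStateModel {
  Escape es = NoEscape;
  void set_escape_state(Escape new_es) {
    es = std::max(es, new_es);   // monotone: an escape state is never lowered
  }
};
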
2078 
2079 void ConnectionGraph::add_call_node(CallNode* call) {
2080   assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
2081   uint call_idx = call->_idx;
2082   if (call->is_Allocate()) {
2083     Node* k = call->in(AllocateNode::KlassNode);
2084     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 2085     assert(kt != nullptr, "TypeKlassPtr required.");
2086     PointsToNode::EscapeState es = PointsToNode::NoEscape;
2087     bool scalar_replaceable = true;
2088     NOT_PRODUCT(const char* nsr_reason = "");
2089     if (call->is_AllocateArray()) {
2090       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
2091         es = PointsToNode::GlobalEscape;
2092       } else {
2093         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2094         if (length < 0) {
2095           // Not scalar replaceable if the length is not constant.
2096           scalar_replaceable = false;
2097           NOT_PRODUCT(nsr_reason = "has a non-constant length");
2098         } else if (length > EliminateAllocationArraySizeLimit) {
2099           // Not scalar replaceable if the length is too big.
2100           scalar_replaceable = false;

2136     //
2137     //    - all oop arguments are escaping globally;
2138     //
2139     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2140     //
2141     //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
2142     //
2143     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2144     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2145     //      during call is returned;
 2146     //    - mapped to an ArgEscape LocalVar node pointing to object arguments
 2147     //      which are returned and do not escape during the call;
2148     //
2149     //    - oop arguments escaping status is defined by bytecode analysis;
2150     //
2151     // For a static call, we know exactly what method is being called.
2152     // Use bytecode estimator to record whether the call's return value escapes.
2153     ciMethod* meth = call->as_CallJava()->method();
2154     if (meth == nullptr) {
2155       const char* name = call->as_CallStaticJava()->_name;
2156       assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2157              strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0 ||
2158              strncmp(name, "store_inline_type_fields_to_buf", 31) == 0, "TODO: add failed case check");
2159       // Returns a newly allocated non-escaped object.
2160       add_java_object(call, PointsToNode::NoEscape);
 2161       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multianewarray"));
2162     } else if (meth->is_boxing_method()) {
2163       // Returns boxing object
2164       PointsToNode::EscapeState es;
2165       vmIntrinsics::ID intr = meth->intrinsic_id();
2166       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2167         // It does not escape if object is always allocated.
2168         es = PointsToNode::NoEscape;
2169       } else {
2170         // It escapes globally if object could be loaded from cache.
2171         es = PointsToNode::GlobalEscape;
2172       }
2173       add_java_object(call, es);
2174       if (es == PointsToNode::GlobalEscape) {
2175         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2176       }
2177     } else {
2178       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2179       call_analyzer->copy_dependencies(_compile->dependencies());
2180       if (call_analyzer->is_return_allocated()) {
 2181         // Returns a newly allocated non-escaped object; simply
 2182         // update dependency information.
 2183         // Mark it as NoEscape so that objects referenced by
 2184         // its fields will be marked as NoEscape at least.
2185         add_java_object(call, PointsToNode::NoEscape);
2186         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2187       } else {
2188         // Determine whether any arguments are returned.
2189         const TypeTuple* d = call->tf()->domain_cc();
2190         bool ret_arg = false;
2191         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2192           if (d->field_at(i)->isa_ptr() != nullptr &&
2193               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2194             ret_arg = true;
2195             break;
2196           }
2197         }
2198         if (ret_arg) {
2199           add_local_var(call, PointsToNode::ArgEscape);
2200         } else {
2201           // Returns unknown object.
2202           map_ideal_node(call, phantom_obj);
2203         }
2204       }
2205     }
2206   } else {
 2207     // Another type of call; assume the worst case:
2208     // returned value is unknown and globally escapes.
2209     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");

2217 #ifdef ASSERT
2218     case Op_Allocate:
2219     case Op_AllocateArray:
2220     case Op_Lock:
2221     case Op_Unlock:
2222       assert(false, "should be done already");
2223       break;
2224 #endif
2225     case Op_ArrayCopy:
2226     case Op_CallLeafNoFP:
2227       // Most array copies are ArrayCopy nodes at this point but there
 2228       // are still a few direct calls to the copy subroutines (see
 2229       // PhaseStringOpts::copy_string()).
2230       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2231         call->as_CallLeaf()->is_call_to_arraycopystub();
2232       // fall through
2233     case Op_CallLeafVector:
2234     case Op_CallLeaf: {
 2235       // Stub calls: objects do not escape but they are not scalar replaceable.
2236       // Adjust escape state for outgoing arguments.
2237       const TypeTuple * d = call->tf()->domain_sig();
2238       bool src_has_oops = false;
2239       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2240         const Type* at = d->field_at(i);
2241         Node *arg = call->in(i);
2242         if (arg == nullptr) {
2243           continue;
2244         }
2245         const Type *aat = _igvn->type(arg);
2246         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2247           continue;
2248         }
2249         if (arg->is_AddP()) {
2250           //
2251           // The inline_native_clone() case when the arraycopy stub is called
2252           // after the allocation before Initialize and CheckCastPP nodes.
2253           // Or normal arraycopy for object arrays case.
2254           //
2255           // Set AddP's base (Allocate) as not scalar replaceable since
2256           // pointer to the base (with offset) is passed as argument.
2257           //
2258           arg = get_addp_base(arg);
2259         }
2260         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2261         assert(arg_ptn != nullptr, "should be registered");
2262         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2263         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2264           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
 2265                  aat->isa_ptr() != nullptr, "expecting a Ptr");
2266           bool arg_has_oops = aat->isa_oopptr() &&
2267                               (aat->isa_instptr() ||
2268                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
2269                                (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
2270                                                                aat->isa_aryptr()->is_flat() &&
2271                                                                aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
2272           if (i == TypeFunc::Parms) {
2273             src_has_oops = arg_has_oops;
2274           }
2275           //
 2276           // src or dst could be j.l.Object when the other is a basic type array:
2277           //
2278           //   arraycopy(char[],0,Object*,0,size);
2279           //   arraycopy(Object*,0,char[],0,size);
2280           //
2281           // Don't add edges in such cases.
2282           //
2283           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2284                                        arg_has_oops && (i > TypeFunc::Parms);
2285 #ifdef ASSERT
2286           if (!(is_arraycopy ||
2287                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2288                 (call->as_CallLeaf()->_name != nullptr &&
2289                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2290                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2291                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

2315                   strcmp(call->as_CallLeaf()->_name, "dilithiumMontMulByConstant") == 0 ||
2316                   strcmp(call->as_CallLeaf()->_name, "dilithiumDecomposePoly") == 0 ||
2317                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2318                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2319                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2320                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2321                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2322                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2323                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2324                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2325                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2326                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2327                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2328                   strcmp(call->as_CallLeaf()->_name, "double_keccak") == 0 ||
2329                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2330                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2331                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2332                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2333                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2334                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2335                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2336                   strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
2337                   strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
2338                   strcmp(call->as_CallLeaf()->_name, "store_inline_type_fields_to_buf") == 0 ||
2339                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2340                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2342                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2343                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2344                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2345                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2346                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2347                  ))) {
2348             call->dump();
2349             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2350           }
2351 #endif
2352           // Always process arraycopy's destination object since
 2353           // we need to add all possible edges to references in
 2354           // the source object.
2355           if (arg_esc >= PointsToNode::ArgEscape &&
2356               !arg_is_arraycopy_dest) {
2357             continue;
2358           }

2381           }
2382         }
2383       }
2384       break;
2385     }
2386     case Op_CallStaticJava: {
2387       // For a static call, we know exactly what method is being called.
 2388       // Use bytecode estimator to record the call's escape effects
2389 #ifdef ASSERT
2390       const char* name = call->as_CallStaticJava()->_name;
2391       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2392 #endif
2393       ciMethod* meth = call->as_CallJava()->method();
2394       if ((meth != nullptr) && meth->is_boxing_method()) {
2395         break; // Boxing methods do not modify any oops.
2396       }
 2397       BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2398       // fall-through if not a Java method or no analyzer information
2399       if (call_analyzer != nullptr) {
2400         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2401         const TypeTuple* d = call->tf()->domain_cc();
2402         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2403           const Type* at = d->field_at(i);
2404           int k = i - TypeFunc::Parms;
2405           Node* arg = call->in(i);
2406           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2407           if (at->isa_ptr() != nullptr &&
2408               call_analyzer->is_arg_returned(k)) {
2409             // The call returns arguments.
2410             if (call_ptn != nullptr) { // Is call's result used?
2411               assert(call_ptn->is_LocalVar(), "node should be registered");
2412               assert(arg_ptn != nullptr, "node should be registered");
2413               add_edge(call_ptn, arg_ptn);
2414             }
2415           }
2416           if (at->isa_oopptr() != nullptr &&
2417               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2418             if (!call_analyzer->is_arg_stack(k)) {
 2419               // The argument globally escapes
2420               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2421             } else {

2425                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2426               }
2427             }
2428           }
2429         }
2430         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2431           // The call returns arguments.
2432           assert(call_ptn->edge_count() > 0, "sanity");
2433           if (!call_analyzer->is_return_local()) {
 2434             // It also returns an unknown object.
2435             add_edge(call_ptn, phantom_obj);
2436           }
2437         }
2438         break;
2439       }
2440     }
2441     default: {
 2442       // We fall through to here if this is not a Java method, there is no
 2443       // analyzer information, or it is some other type of call; assume the
 2444       // worst case: all arguments globally escape.
2445       const TypeTuple* d = call->tf()->domain_cc();
2446       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2447         const Type* at = d->field_at(i);
2448         if (at->isa_oopptr() != nullptr) {
2449           Node* arg = call->in(i);
2450           if (arg->is_AddP()) {
2451             arg = get_addp_base(arg);
2452           }
2453           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2454           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2455         }
2456       }
2457     }
2458   }
2459 }
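
// Sketch of the default case above (illustrative argument model): with no
// bytecode analysis available, every oop argument of the call is conservatively
// marked as escaping globally.

#include <vector>

enum Escape { NoEscape, ArgEscape, GlobalEscape };
struct ArgModel { bool is_oop; Escape es; };

static void assume_worst_case(std::vector<ArgModel>& args) {
  for (ArgModel& a : args) {
    if (a.is_oop) {
      a.es = GlobalEscape;   // an unknown callee may store the oop anywhere
    }
  }
}
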
2460 
2461 
2462 // Finish Graph construction.
2463 bool ConnectionGraph::complete_connection_graph(
2464                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2465                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

2843     PointsToNode* base = i.get();
2844     if (base->is_JavaObject()) {
2845       // Skip Allocate's fields which will be processed later.
2846       if (base->ideal_node()->is_Allocate()) {
2847         return 0;
2848       }
2849       assert(base == null_obj, "only null ptr base expected here");
2850     }
2851   }
2852   if (add_edge(field, phantom_obj)) {
2853     // New edge was added
2854     new_edges++;
2855     add_field_uses_to_worklist(field);
2856   }
2857   return new_edges;
2858 }
2859 
2860 // Find fields initializing values for allocations.
2861 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2862   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2863   PointsToNode* init_val = phantom_obj;
2864   Node* alloc = pta->ideal_node();
2865 
 2866   // Do nothing for Allocate nodes since their field values are
2867   // "known" unless they are initialized by arraycopy/clone.
2868   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2869     if (alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) {
2870       // Null-free inline type arrays are initialized with an init value instead of null
2871       init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::InitValue)->_idx);
2872       assert(init_val != nullptr, "init value should be registered");
2873     } else {
2874       return 0;
2875     }
2876   }
2877   // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
2878   assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
2879 #ifdef ASSERT
2880   if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
2881     const char* name = alloc->as_CallStaticJava()->_name;
2882     assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2883            strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0 ||
2884            strncmp(name, "store_inline_type_fields_to_buf", 31) == 0, "sanity");
2885   }
2886 #endif
2888   int new_edges = 0;
2889   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2890     PointsToNode* field = i.get();
2891     if (field->is_Field() && field->as_Field()->is_oop()) {
2892       if (add_edge(field, init_val)) {
2893         // New edge was added
2894         new_edges++;
2895         add_field_uses_to_worklist(field->as_Field());
2896       }
2897     }
2898   }
2899   return new_edges;
2900 }
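
// Sketch of find_init_values_phantom's effect (simplified points-to model):
// every oop field of the allocation gets an edge to the unknown value, or to
// the InitValue for null-free flat arrays, because the field contents cannot
// be proven more precise.

#include <vector>

struct FieldModel {
  bool is_oop;
  std::vector<int> points_to;   // ids of objects this field may reference
};

static int add_init_value_edges(std::vector<FieldModel>& fields, int init_val_id) {
  int new_edges = 0;
  for (FieldModel& f : fields) {
    if (f.is_oop) {
      f.points_to.push_back(init_val_id);   // phantom_obj or the InitValue object
      new_edges++;                          // (the real add_edge also de-duplicates)
    }
  }
  return new_edges;
}
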
2901 
2902 // Find fields initializing values for allocations.
2903 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2904   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2905   Node* alloc = pta->ideal_node();
 2906   // Do nothing for Call nodes since their field values are unknown.
2907   if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) {
2908     return 0;
2909   }
2910   InitializeNode* ini = alloc->as_Allocate()->initialization();
2911   bool visited_bottom_offset = false;
2912   GrowableArray<int> offsets_worklist;
2913   int new_edges = 0;
2914 
2915   // Check if an oop field's initializing value is recorded and add
 2916   // a corresponding null if the field's value is not recorded.
 2917   // The Connection Graph does not record a default initialization by null
 2918   // captured by the Initialize node.
2919   //
2920   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2921     PointsToNode* field = i.get(); // Field (AddP)
2922     if (!field->is_Field() || !field->as_Field()->is_oop()) {
2923       continue; // Not oop field
2924     }
2925     int offset = field->as_Field()->offset();
2926     if (offset == Type::OffsetBot) {
2927       if (!visited_bottom_offset) {

2973               } else {
2974                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2975                   tty->print_cr("----------init store has invalid value -----");
2976                   store->dump();
2977                   val->dump();
2978                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2979                 }
2980                 for (EdgeIterator j(val); j.has_next(); j.next()) {
2981                   PointsToNode* obj = j.get();
2982                   if (obj->is_JavaObject()) {
2983                     if (!field->points_to(obj->as_JavaObject())) {
2984                       missed_obj = obj;
2985                       break;
2986                     }
2987                   }
2988                 }
2989               }
2990               if (missed_obj != nullptr) {
2991                 tty->print_cr("----------field---------------------------------");
2992                 field->dump();
2993                 tty->print_cr("----------missed reference to object------------");
2994                 missed_obj->dump();
2995                 tty->print_cr("----------object referenced by init store-------");
2996                 store->dump();
2997                 val->dump();
2998                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2999               }
3000             }
3001 #endif
3002           } else {
3003             // There could be initializing stores which follow the allocation.
3004             // For example, a volatile field store is not collected
3005             // by the Initialize node.
3006             //
3007             // Need to check for dependent loads to separate such stores from
3008             // stores which follow loads. For now, add the initial value null so
3009             // that the pointer-compare optimization works correctly.
3010           }
3011         }
3012         if (value == nullptr) {
3013           // A field's initializing value was not recorded. Add null.
3014           if (add_edge(field, null_obj)) {
3015             // New edge was added

3340         assert(field->edge_count() > 0, "sanity");
3341       }
3342     }
3343   }
3344 }
3345 #endif
3346 
3347 // Optimize ideal graph.
3348 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3349                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3350   Compile* C = _compile;
3351   PhaseIterGVN* igvn = _igvn;
3352   if (EliminateLocks) {
3353     // Mark locks before changing ideal graph.
3354     int cnt = C->macro_count();
3355     for (int i = 0; i < cnt; i++) {
3356       Node *n = C->macro_node(i);
3357       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3358         AbstractLockNode* alock = n->as_AbstractLock();
3359         if (!alock->is_non_esc_obj()) {
3360           const Type* obj_type = igvn->type(alock->obj_node());
3361           if (can_eliminate_lock(alock) && !obj_type->is_inlinetypeptr()) {
3362             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3363             // The lock could have been marked eliminated by the lock coarsening
3364             // code during the first IGVN pass before EA. Replace the coarsened flag
3365             // to eliminate all associated locks/unlocks.
3366 #ifdef ASSERT
3367             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3368 #endif
3369             alock->set_non_esc_obj();
3370           }
3371         }
3372       }
3373     }
3374   }
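
  // For example (sketch): a lock on a provably non-escaping object is marked
  // non_esc_obj above, and the associated Lock/Unlock nodes are then
  // eliminated during macro expansion:
  //
  //   Object lock = new Object();   // NoEscape allocation
  //   synchronized (lock) { ... }   // lock operations can be removed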
3375 
3376   if (OptimizePtrCompare) {
3377     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3378       Node *n = ptr_cmp_worklist.at(i);
3379       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3380       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3381       if (tcmp->singleton()) {
3382         Node* cmp = igvn->makecon(tcmp);
3383 #ifndef PRODUCT
3384         if (PrintOptimizePtrCompare) {
3385           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3386           if (Verbose) {
3387             n->dump(1);
3388           }
3389         }
3390 #endif
3391         igvn->replace_node(n, cmp);
3392       }
3393     }
3394   }
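
  // For example (sketch, with a hypothetical class A): pointers to two
  // distinct non-escaping allocations can never be equal, so the compare
  // constant-folds:
  //
  //   if (new A() == new A()) { ... }   // CmpP is replaced by the NE constant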
3395 
3396   // For MemBarStoreStore nodes added in library_call.cpp, check the
3397   // escape status of the associated AllocateNode and optimize out the
3398   // MemBarStoreStore node if the allocated object never escapes.
3399   for (int i = 0; i < storestore_worklist.length(); i++) {
3400     Node* storestore = storestore_worklist.at(i);
3401     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3402     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3403       if (alloc->in(AllocateNode::InlineType) != nullptr) {
3404         // Non-escaping inline type buffer allocations don't require a membar
3405         storestore->as_MemBar()->remove(_igvn);
3406       } else {
3407         MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3408         mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3409         mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3410         igvn->register_new_node_with_optimizer(mb);
3411         igvn->replace_node(storestore, mb);
3412       }
3413     }
3414   }
3415 }
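
// For example (sketch): the clone intrinsic in library_call.cpp emits a
// MemBarStoreStore to safely publish the copy; if escape analysis proves the
// clone never escapes its thread, there is no publication to guard:
//
//   Point p = (Point) q.clone();   // Point is a placeholder class
//   return p.x;                    // 'p' is NoEscape: barrier removed/relaxed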
3416 
3417 // Atomic flat accesses on non-escaping objects can be optimized to non-atomic accesses
3418 void ConnectionGraph::optimize_flat_accesses(GrowableArray<SafePointNode*>& sfn_worklist) {
3419   PhaseIterGVN& igvn = *_igvn;
3420   bool delay = igvn.delay_transform();
3421   igvn.set_delay_transform(true);
3422   igvn.C->for_each_flat_access([&](Node* n) {
3423     Node* base = n->is_LoadFlat() ? n->as_LoadFlat()->base() : n->as_StoreFlat()->base();
3424     if (!not_global_escape(base)) {
3425       return;
3426     }
3427 
3428     bool expanded;
3429     if (n->is_LoadFlat()) {
3430       expanded = n->as_LoadFlat()->expand_non_atomic(igvn);
3431     } else {
3432       expanded = n->as_StoreFlat()->expand_non_atomic(igvn);
3433     }
3434     if (expanded) {
3435       sfn_worklist.remove(n->as_SafePoint());
3436       igvn.C->remove_flat_access(n);
3437     }
3438   });
3439   igvn.set_delay_transform(delay);
3440 }
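
// For example (sketch, with a hypothetical value class V): atomic flat accesses
// exist so other threads cannot observe a torn value; once the holder is known
// not to escape its thread, the access can be expanded field by field:
//
//   V[] a = ...;   // flat array with atomic element accesses, never escapes
//   a[0] = v;      // StoreFlat expands into plain per-field stores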
3441 
3442 // Optimize object compares.
3443 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3444   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0, 1]
3445   if (!OptimizePtrCompare) {
3446     return UNKNOWN;
3447   }
3448   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3449   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
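  // TypeInt::CC is the three-valued condition-code lattice {-1, 0, 1}; the
  // caller folds the compare only when the result narrows to EQ or NE.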
3450 
3451   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3452   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3453   JavaObjectNode* jobj1 = unique_java_object(left);
3454   JavaObjectNode* jobj2 = unique_java_object(right);
3455 
3456   // The use of this method during allocation merge reduction may cause 'left'
3457   // or 'right' to be something (e.g., a Phi) that isn't in the connection graph
3458   // or that doesn't reference a unique java object.
3459   if (ptn1 == nullptr || ptn2 == nullptr ||
3460       jobj1 == nullptr || jobj2 == nullptr) {
3461     return UNKNOWN;

3581   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3582   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3583   PointsToNode* ptadr = _nodes.at(n->_idx);
3584   if (ptadr != nullptr) {
3585     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3586     return;
3587   }
3588   Compile* C = _compile;
3589   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3590   map_ideal_node(n, ptadr);
3591   // Add edge from arraycopy node to source object.
3592   (void)add_edge(ptadr, src);
3593   src->set_arraycopy_src();
3594   // Add edge from destination object to arraycopy node.
3595   (void)add_edge(dst, ptadr);
3596   dst->set_arraycopy_dst();
3597 }
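
// The resulting connection graph shape is (sketch):
//
//   dst JavaObject --> Arraycopy --> src JavaObject
//
// i.e. the destination's fields may come to point at whatever the source's
// fields point at.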
3598 
3599 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3600   const Type* adr_type = n->as_AddP()->bottom_type();
3601   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
3602   BasicType bt = T_INT;
3603   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
3604     // Check only oop fields.
3605     if (!adr_type->isa_aryptr() ||
3606         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3607         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3608       // OffsetBot is used to reference an array's element. Ignore the first AddP.
3609       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3610         bt = T_OBJECT;
3611       }
3612     }
3613   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3614     if (adr_type->isa_instptr()) {
3615       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
3616       if (field != nullptr) {
3617         bt = field->layout_type();
3618       } else {
3619         // Check for unsafe oop field access
3620         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3621             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3622             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3623             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3624           bt = T_OBJECT;
3625           (*unsafe) = true;
3626         }
3627       }
3628     } else if (adr_type->isa_aryptr()) {
3629       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3630         // Ignore array length load.
3631       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3632         // Ignore the first AddP.
3633       } else {
3634         const Type* elemtype = adr_type->is_aryptr()->elem();
3635         if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
3636           ciInlineKlass* vk = elemtype->inline_klass();
3637           field_offset += vk->payload_offset();
3638           ciField* field = vk->get_field_by_offset(field_offset, false);
3639           if (field != nullptr) {
3640             bt = field->layout_type();
3641           } else {
3642             assert(field_offset == vk->payload_offset() + vk->null_marker_offset_in_payload(), "no field or null marker of %s at offset %d", vk->name()->as_utf8(), field_offset);
3643             bt = T_BOOLEAN;
3644           }
3645         } else {
3646           bt = elemtype->array_element_basic_type();
3647         }
3648       }
3649     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3650       // Allocation initialization, ThreadLocal field access, unsafe access
3651       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3652           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3653           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3654           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3655         bt = T_OBJECT;
3656       }
3657     }
3658   }
3659   // Note: T_NARROWOOP is not classified as a real reference type
3660   return (is_reference_type(bt) || bt == T_NARROWOOP);
3661 }
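
// For example (sketch): an unsafe access through a raw offset has no ciField,
// so whether the slot holds an oop is inferred from the AddP's memory users:
//
//   UNSAFE.putReference(o, off, x);   // jdk.internal.misc.Unsafe; the AddP has
//                                     // a StoreP/StoreN user => oop, *unsafe = true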
3662 
3663 // Returns the unique pointed-to java object or null.
3664 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3665   // If the node was created after the escape computation we can't answer.
3666   uint idx = n->_idx;
3667   if (idx >= nodes_size()) {

3824             return true;
3825           }
3826         }
3827       }
3828     }
3829   }
3830   return false;
3831 }
3832 
3833 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3834   const Type *adr_type = phase->type(adr);
3835   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
3836     // We are computing a raw address for a store captured by an Initialize
3837     // node; compute an appropriate address type. AddP cases #3 and #5 (see below).
3838     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3839     assert(offs != Type::OffsetBot ||
3840            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3841            "offset must be a constant or it is initialization of array");
3842     return offs;
3843   }
3844   return adr_type->is_ptr()->flat_offset();
3845 }
3846 
3847 Node* ConnectionGraph::get_addp_base(Node *addp) {
3848   assert(addp->is_AddP(), "must be AddP");
3849   //
3850   // AddP cases for Base and Address inputs:
3851   // case #1. Direct object's field reference:
3852   //     Allocate
3853   //       |
3854   //     Proj #5 ( oop result )
3855   //       |
3856   //     CheckCastPP (cast to instance type)
3857   //      | |
3858   //     AddP  ( base == address )
3859   //
3860   // case #2. Indirect object's field reference:
3861   //      Phi
3862   //       |
3863   //     CastPP (cast to instance type)
3864   //      | |

3978   }
3979   return nullptr;
3980 }
3981 
3982 //
3983 // Adjust the type and inputs of an AddP which computes the
3984 // address of a field of an instance
3985 //
3986 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3987   PhaseGVN* igvn = _igvn;
3988   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3989   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3990   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3991   if (t == nullptr) {
3992     // We are computing a raw address for a store captured by an Initialize
3993     // node; compute an appropriate address type (cases #3 and #5).
3994     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3995     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3996     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3997     assert(offs != Type::OffsetBot, "offset must be a constant");
3998     if (base_t->isa_aryptr() != nullptr) {
3999       // In the case of a flat inline type array, each field has its
4000       // own slice so we need to extract the field being accessed from
4001       // the address computation
4002       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
4003     } else {
4004       t = base_t->add_offset(offs)->is_oopptr();
4005     }
4006   }
4007   int inst_id = base_t->instance_id();
4008   assert(!t->is_known_instance() || t->instance_id() == inst_id,
4009                              "old type must be non-instance or match new type");
4010 
4011   // The type 't' could be a subclass of 'base_t'.
4012   // As a result t->offset() could be larger than base_t's size, which would
4013   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
4014   // constructor verifies correctness of the offset.
4015   //
4016   // It could happen on a subclass's branch (from the type profiling
4017   // inlining) which was not eliminated during parsing since the exactness
4018   // of the allocation type was not propagated to the subclass type check.
4019   //
4020   // Or the type 't' might not be related to 'base_t' at all.
4021   // It could happen when the CHA type is different from the MDO type on a dead
4022   // path (for example, from an instanceof check) which is not collapsed during parsing.
4023   //
4024   // Do nothing for such an AddP node and don't process its users since
4025   // this code branch will go away.
4026   //
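  // For example (sketch, with hypothetical classes Base and Sub):
  //
  //   Base b = new Base();         // exact allocation type is Base
  //   if (b instanceof Sub) {      // always false, but not folded at parse time
  //     use(((Sub) b).subField);   // AddP typed for Sub, unrelated to Base
  //   }
  //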
4027   if (!t->is_known_instance() &&
4028       !base_t->maybe_java_subtype_of(t)) {
4029      return false; // bail out
4030   }
4031   const TypePtr* tinst = base_t->add_offset(t->offset());
4032   if (tinst->isa_aryptr() && t->isa_aryptr()) {
4033     // In the case of a flat inline type array, each field has its
4034     // own slice so we need to keep track of the field being accessed.
4035     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
4036     // Keep array properties (not flat/null-free)
4037     tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
4038     if (tinst == nullptr) {
4039       return false; // Skip dead path with inconsistent properties
4040     }
4041   }
4042 
4043   // Do NOT remove the next line: ensure a new alias index is allocated
4044   // for the instance type. Note: C++ will not remove it since the call
4045   // has a side effect.
4046   int alias_idx = _compile->get_alias_index(tinst);
4047   igvn->set_type(addp, tinst);
4048   // record the allocation in the node map
4049   set_map(addp, get_map(base->_idx));
4050   // Set addp's Base and Address to 'base'.
4051   Node *abase = addp->in(AddPNode::Base);
4052   Node *adr   = addp->in(AddPNode::Address);
4053   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
4054       adr->in(0)->_idx == (uint)inst_id) {
4055     // Skip AddP cases #3 and #5.
4056   } else {
4057     assert(!abase->is_top(), "sanity"); // AddP case #3
4058     if (abase != base) {
4059       igvn->hash_delete(addp);
4060       addp->set_req(AddPNode::Base, base);
4061       if (abase == adr) {
4062         addp->set_req(AddPNode::Address, base);

4326     if (!is_instance) {
4327       continue;  // don't search further for non-instance types
4328     }
4329     // skip over a call which does not affect this memory slice
4330     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
4331       Node *proj_in = result->in(0);
4332       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
4333         break;  // hit one of our sentinels
4334       } else if (proj_in->is_Call()) {
4335         // ArrayCopy node processed here as well
4336         CallNode *call = proj_in->as_Call();
4337         if (!call->may_modify(toop, igvn)) {
4338           result = call->in(TypeFunc::Memory);
4339         }
4340       } else if (proj_in->is_Initialize()) {
4341         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
4342         // Stop if this is the initialization for the object instance
4343         // which contains this memory slice, otherwise skip over it.
4344         if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
4345           result = proj_in->in(TypeFunc::Memory);
4346 #if 0  // TODO: Fix 8372259
4347         } else if (C->get_alias_index(result->adr_type()) != alias_idx) {
4348           assert(C->get_general_index(alias_idx) == C->get_alias_index(result->adr_type()), "should be projection for the same field/array element");
4349           result = get_map(result->_idx);
4350           assert(result != nullptr, "new projection should have been allocated");
4351           break;
4352         }
4353 #else
4354         }
4355 #endif
4356       } else if (proj_in->is_MemBar()) {
4357         // Check if there is an array copy for a clone
4358         // Step over GC barrier when ReduceInitialCardMarks is disabled
4359         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4360         Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
4361 
4362         if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
4363           // Stop if it is a clone
4364           ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
4365           if (ac->may_modify(toop, igvn)) {
4366             break;
4367           }
4368         }
4369         result = proj_in->in(TypeFunc::Memory);
4370       }
4371     } else if (result->is_MergeMem()) {
4372       MergeMemNode *mmem = result->as_MergeMem();
4373       result = step_through_mergemem(mmem, alias_idx, toop);
4374       if (result == mmem->base_memory()) {
4375         // Didn't find instance memory, search through general slice recursively.

4636       //   - not determined to be ineligible by escape analysis
4637       set_map(alloc, n);
4638       set_map(n, alloc);
4639       const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
4640       igvn->hash_delete(n);
4641       igvn->set_type(n,  tinst);
4642       n->raise_bottom_type(tinst);
4643       igvn->hash_insert(n);
4644       record_for_optimizer(n);
4645       // Allocate an alias index for the header fields. Accesses to
4646       // the header emitted during macro expansion wouldn't have
4647       // correct memory state otherwise.
4648       _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
4649       _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
4650       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
4651         // Add a new NarrowMem projection for each existing NarrowMem projection with new adr type
4652         InitializeNode* init = alloc->as_Allocate()->initialization();
4653         assert(init != nullptr, "can't find Initialization node for this Allocate node");
4654         auto process_narrow_proj = [&](NarrowMemProjNode* proj) {
4655           const TypePtr* adr_type = proj->adr_type();
4656           const TypePtr* new_adr_type = tinst->with_offset(adr_type->offset());
4657           if (adr_type->isa_aryptr()) {
4658             // In the case of a flat inline type array, each field has its own slice so we need a
4659             // NarrowMemProj for each field of the flat array elements
4660             new_adr_type = new_adr_type->is_aryptr()->with_field_offset(adr_type->is_aryptr()->field_offset().get());
4661           }
4662           if (adr_type != new_adr_type && !init->already_has_narrow_mem_proj_with_adr_type(new_adr_type)) {
4663             DEBUG_ONLY( uint alias_idx = _compile->get_alias_index(new_adr_type); )
4664             assert(_compile->get_general_index(alias_idx) == _compile->get_alias_index(adr_type), "new adr type should be narrowed down from existing adr type");
4665             NarrowMemProjNode* new_proj = new NarrowMemProjNode(init, new_adr_type);
4666             igvn->set_type(new_proj, new_proj->bottom_type());
4667             record_for_optimizer(new_proj);
4668             set_map(proj, new_proj); // record it so ConnectionGraph::find_inst_mem() can find it
4669           }
4670         };
4671         init->for_each_narrow_mem_proj_with_new_uses(process_narrow_proj);
4672 
4673         // First, put on the worklist all Field edges from the Connection Graph,
4674         // which is more accurate than putting immediate users from the Ideal Graph.
4675         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
4676           PointsToNode* tgt = e.get();
4677           if (tgt->is_Arraycopy()) {
4678             continue;
4679           }
4680           Node* use = tgt->ideal_node();
4681           assert(tgt->is_Field() && use->is_AddP(),

4758         ptnode_adr(n->_idx)->dump();
4759         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4760 #endif
4761         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4762         return;
4763       } else {
4764         Node *val = get_map(jobj->idx());   // CheckCastPP node
4765         TypeNode *tn = n->as_Type();
4766         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4767         assert(tinst != nullptr && tinst->is_known_instance() &&
4768                tinst->instance_id() == jobj->idx(), "instance type expected.");
4769 
4770         const Type *tn_type = igvn->type(tn);
4771         const TypeOopPtr *tn_t;
4772         if (tn_type->isa_narrowoop()) {
4773           tn_t = tn_type->make_ptr()->isa_oopptr();
4774         } else {
4775           tn_t = tn_type->isa_oopptr();
4776         }
4777         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
4778           if (tn_t->isa_aryptr()) {
4779             // Keep array properties (not flat/null-free)
4780             tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
4781             if (tinst == nullptr) {
4782               continue; // Skip dead path with inconsistent properties
4783             }
4784           }
4785           if (tn_type->isa_narrowoop()) {
4786             tn_type = tinst->make_narrowoop();
4787           } else {
4788             tn_type = tinst;
4789           }
4790           igvn->hash_delete(tn);
4791           igvn->set_type(tn, tn_type);
4792           tn->set_type(tn_type);
4793           igvn->hash_insert(tn);
4794           record_for_optimizer(n);
4795         } else {
4796           assert(tn_type == TypePtr::NULL_PTR ||
4797                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4798                  "unexpected type");
4799           continue; // Skip dead path with different type
4800         }
4801       }
4802     } else {
4803       DEBUG_ONLY(n->dump();)
4804       assert(false, "EA: unexpected node");
4805       continue;
4806     }
4807     // push allocation's users on appropriate worklist
4808     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4809       Node *use = n->fast_out(i);
4810       if (use->is_Mem() && use->in(MemNode::Address) == n) {
4811         // Load/store to instance's field
4812         memnode_worklist.append_if_missing(use);
4813       } else if (use->is_MemBar()) {
4814         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4815           memnode_worklist.append_if_missing(use);
4816         }
4817       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4818         Node* addp2 = find_second_addp(use, n);
4819         if (addp2 != nullptr) {
4820           alloc_worklist.append_if_missing(addp2);
4821         }
4822         alloc_worklist.append_if_missing(use);
4823       } else if (use->is_Phi() ||
4824                  use->is_CheckCastPP() ||
4825                  use->is_EncodeNarrowPtr() ||
4826                  use->is_DecodeNarrowPtr() ||
4827                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4828         alloc_worklist.append_if_missing(use);
4829 #ifdef ASSERT
4830       } else if (use->is_Mem()) {
4831         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4832       } else if (use->is_MergeMem()) {
4833         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4834       } else if (use->is_SafePoint()) {
4835         // Look for MergeMem nodes for calls which reference unique allocation
4836         // (through CheckCastPP nodes) even for debug info.
4837         Node* m = use->in(TypeFunc::Memory);
4838         if (m->is_MergeMem()) {
4839           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4840         }
4841       } else if (use->Opcode() == Op_EncodeISOArray) {
4842         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4843           // EncodeISOArray overwrites destination array
4844           memnode_worklist.append_if_missing(use);
4845         }
4846       } else if (use->Opcode() == Op_Return) {
4847         // Allocation is referenced by field of returned inline type
4848         assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
4849       } else {
4850         uint op = use->Opcode();
4851         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4852             (use->in(MemNode::Memory) == n)) {
4853           // They overwrite the memory edge corresponding to the destination array.
4854           memnode_worklist.append_if_missing(use);
4855         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4856               op == Op_CastP2X ||
4857               op == Op_FastLock || op == Op_AryEq ||
4858               op == Op_StrComp || op == Op_CountPositives ||
4859               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4860               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4861               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4862               op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
4863               op == Op_ReinterpretS2HF ||
4864               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4865           n->dump();
4866           use->dump();
4867           assert(false, "EA: missing allocation reference path");
4868         }
4869 #endif
4870       }
4871     }
4872 
4873   }
4874 
4875 #ifdef ASSERT
4876   if (VerifyReduceAllocationMerges) {
4877     for (uint i = 0; i < reducible_merges.size(); i++) {
4878       Node* phi = reducible_merges.at(i);
4879 
4880       if (!reduced_merges.member(phi)) {
4881         phi->dump(2);
4882         phi->dump(-2);

4950         n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4951         if (n == nullptr) {
4952           continue;
4953         }
4954       }
4955     } else if (n->is_CallLeaf()) {
4956       // Runtime calls with narrow memory input (no MergeMem node):
4957       // get the memory projection.
4958       n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4959       if (n == nullptr) {
4960         continue;
4961       }
4962     } else if (n->Opcode() == Op_StrInflatedCopy) {
4963       // Check direct uses of StrInflatedCopy.
4964       // It is a memory-type Node - no special SCMemProj node.
4965     } else if (n->Opcode() == Op_StrCompressedCopy ||
4966                n->Opcode() == Op_EncodeISOArray) {
4967       // get the memory projection
4968       n = n->find_out_with(Op_SCMemProj);
4969       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4970     } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
4971                strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4972       n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
4973     } else if (n->is_Proj()) {
4974       assert(n->in(0)->is_Initialize(), "we only push memory projections for Initialize");
4975     } else {
4976 #ifdef ASSERT
4977       if (!n->is_Mem()) {
4978         n->dump();
4979       }
4980       assert(n->is_Mem(), "memory node required.");
4981 #endif
4982       Node *addr = n->in(MemNode::Address);
4983       const Type *addr_t = igvn->type(addr);
4984       if (addr_t == Type::TOP) {
4985         continue;
4986       }
4987       assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4988       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4989       assert ((uint)alias_idx < new_index_end, "wrong alias index");
4990       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4991       if (_compile->failing()) {
4992         return;

5004         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
5005       }
5006     }
5007     // push user on appropriate worklist
5008     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5009       Node *use = n->fast_out(i);
5010       if (use->is_Phi() || use->is_ClearArray()) {
5011         memnode_worklist.append_if_missing(use);
5012       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
5013         memnode_worklist.append_if_missing(use);
5014       } else if (use->is_MemBar() || use->is_CallLeaf()) {
5015         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
5016           memnode_worklist.append_if_missing(use);
5017         }
5018       } else if (use->is_Proj()) {
5019         assert(n->is_Initialize(), "We only push projections of Initialize");
5020         if (use->as_Proj()->_con == TypeFunc::Memory) { // Ignore precedent edge
5021           memnode_worklist.append_if_missing(use);
5022         }
5023 #ifdef ASSERT
5024       } else if (use->is_Mem()) {
5025         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
5026       } else if (use->is_MergeMem()) {
5027         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
5028       } else if (use->Opcode() == Op_EncodeISOArray) {
5029         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
5030           // EncodeISOArray overwrites destination array
5031           memnode_worklist.append_if_missing(use);
5032         }
5033       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
5034                  strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
5035         // store_unknown_inline overwrites destination array
5036         memnode_worklist.append_if_missing(use);
5037       } else {
5038         uint op = use->Opcode();
5039         if ((use->in(MemNode::Memory) == n) &&
5040             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
5041           // They overwrite the memory edge corresponding to the destination array.
5042           memnode_worklist.append_if_missing(use);
5043         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
5044               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
5045               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
5046               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
5047           n->dump();
5048           use->dump();
5049           assert(false, "EA: missing memory path");
5050         }
5051 #endif
5052       }
5053     }
5054   }
5055 
5056   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
5057   //            Walk each memory slice moving the first node encountered of each
5058   //            instance type to the input corresponding to its alias index.
5059   uint length = mergemem_worklist.length();
5060   for( uint next = 0; next < length; ++next ) {
5061     MergeMemNode* nmm = mergemem_worklist.at(next);
5062     assert(!visited.test_set(nmm->_idx), "should not be visited before");
5063     // Note: we don't want to use MergeMemStream here because we only want to
5064     // scan inputs which exist at the start, not ones we add during processing.
5065     // Note 2: MergeMem may already contain instance memory slices added
5066     // during the find_inst_mem() call when memory nodes were processed above.

5129         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
5130       } else if (_invocation > 0) {
5131         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
5132       } else {
5133         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
5134       }
5135       return;
5136     }
5137 
5138     igvn->hash_insert(nmm);
5139     record_for_optimizer(nmm);
5140   }
5141 
5142   _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_3, 5);
5143 
5144   //  Phase 4:  Update the inputs of non-instance memory Phis and
5145   //            the Memory input of memnodes
5146   // First update the inputs of any non-instance Phi's from
5147   // which we split out an instance Phi.  Note we don't have
5148   // to recursively process Phi's encountered on the input memory
5149   // chains as is done in split_memory_phi() since they will
5150   // also be processed here.
5151   for (int j = 0; j < orig_phis.length(); j++) {
5152     PhiNode *phi = orig_phis.at(j);
5153     int alias_idx = _compile->get_alias_index(phi->adr_type());
5154     igvn->hash_delete(phi);
5155     for (uint i = 1; i < phi->req(); i++) {
5156       Node *mem = phi->in(i);
5157       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
5158       if (_compile->failing()) {
5159         return;
5160       }
5161       if (mem != new_mem) {
5162         phi->set_req(i, new_mem);
5163       }
5164     }
5165     igvn->hash_insert(phi);
5166     record_for_optimizer(phi);
5167   }
5168 
5169   // Update the memory inputs of MemNodes with the value we computed
< prev index next >