src/hotspot/share/opto/escape.cpp

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/bcEscapeAnalyzer.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "gc/shared/barrierSet.hpp"
  28 #include "gc/shared/c2/barrierSetC2.hpp"
  29 #include "libadt/vectset.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "opto/arraycopynode.hpp"
  33 #include "opto/c2compiler.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/escape.hpp"
  39 #include "opto/locknode.hpp"
  40 #include "opto/macro.hpp"
  41 #include "opto/movenode.hpp"
  42 #include "opto/narrowptrnode.hpp"
  43 #include "opto/phaseX.hpp"
  44 #include "opto/rootnode.hpp"
  45 #include "utilities/macros.hpp"
  46 
  47 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  48   // If ReduceAllocationMerges is enabled we might call split_through_phi during
  49   // split_unique_types and that will create additional nodes that need to be
  50   // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  51   // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  52   // the array will be reallocated.
  53   _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  54   _in_worklist(C->comp_arena()),
  55   _next_pidx(0),
  56   _collecting(true),
  57   _verify(false),
  58   _compile(C),

 150   GrowableArray<SafePointNode*>  sfn_worklist;
 151   GrowableArray<MergeMemNode*>   mergemem_worklist;
 152   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 153 
 154   { Compile::TracePhase tp(Phase::_t_connectionGraph);
 155 
 156   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 157   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
 158   // Initialize worklist
 159   if (C->root() != nullptr) {
 160     ideal_nodes.push(C->root());
 161   }
  162   // Processed ideal nodes are unique on the ideal_nodes list,
  163   // but several ideal nodes may be mapped to the same phantom_obj.
  164   // To avoid duplicate entries on the following worklists,
  165   // add the phantom_obj to them only once.
 166   ptnodes_worklist.append(phantom_obj);
 167   java_objects_worklist.append(phantom_obj);
  168   for (uint next = 0; next < ideal_nodes.size(); ++next) {
 169     Node* n = ideal_nodes.at(next);
 170     // Create PointsTo nodes and add them to the Connection Graph. Called
 171     // only once per ideal node since ideal_nodes is a Unique_Node list.
 172     add_node_to_connection_graph(n, &delayed_worklist);
 173     PointsToNode* ptn = ptnode_adr(n->_idx);
 174     if (ptn != nullptr && ptn != phantom_obj) {
 175       ptnodes_worklist.append(ptn);
 176       if (ptn->is_JavaObject()) {
 177         java_objects_worklist.append(ptn->as_JavaObject());
 178         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 179             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 180           // Only results of allocations and Java static calls are interesting.
 181           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 182         }
 183       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 184         oop_fields_worklist.append(ptn->as_Field());
 185       }
 186     }
 187     // Collect some interesting nodes for further use.
 188     switch (n->Opcode()) {
 189       case Op_MergeMem:

 407     // scalar replaceable objects.
 408     split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
 409     if (C->failing()) {
 410       NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
 411       return false;
 412     }
 413 
 414 #ifdef ASSERT
 415   } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
 416     tty->print("=== No allocations eliminated for ");
 417     C->method()->print_short_name();
 418     if (!EliminateAllocations) {
 419       tty->print(" since EliminateAllocations is off ===");
 420     } else if(!has_scalar_replaceable_candidates) {
 421       tty->print(" since there are no scalar replaceable candidates ===");
 422     }
 423     tty->cr();
 424 #endif
 425   }
 426 
 427   _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES, 4);
 428 
 429   // 6. Reduce allocation merges used as debug information. This is done after
 430   // split_unique_types because the methods used to create SafePointScalarObject
 431   // need to traverse the memory graph to find values for object fields. We also
 432   // set to null the scalarized inputs of reducible Phis so that the Allocate
  433   // that they point to can later be scalar replaced.
 434   bool delay = _igvn->delay_transform();
 435   _igvn->set_delay_transform(true);
 436   for (uint i = 0; i < reducible_merges.size(); i++) {
 437     Node* n = reducible_merges.at(i);
 438     if (n->outcnt() > 0) {
 439       if (!reduce_phi_on_safepoints(n->as_Phi())) {
 440         NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
 441         C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
 442         return false;
 443       }
 444 
 445       // Now we set the scalar replaceable inputs of ophi to null, which is
 446       // the last piece that would prevent it from being scalar replaceable.
 447       reset_scalar_replaceable_entries(n->as_Phi());
 448     }
 449   }
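  // Illustrative example of a reducible merge handled by the loop above:
  // a Phi that only merges pointers to allocations, e.g. for Java source like
  //
  //   Point p = cond ? new Point(1, 2) : new Point(3, 4);  // Phi(alloc1, alloc2)
  //
  // Once the merge has been described on safepoints and its scalar replaceable
  // inputs have been reset to null, each Allocate can be scalar replaced
  // independently.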

1295     //  (2) A selector, used to decide if we need to rematerialize an object
1296     //      or use the pointer to a NSR object.
1297     // See more details of these fields in the declaration of SafePointScalarMergeNode.
1298     // It is safe to include them in the debug info straight away since create_scalarized_object_description()
1299     // will include all newly added inputs in the debug info anyway.
1300     sfpt->add_req(nsr_merge_pointer);
1301     sfpt->add_req(selector);
1302     sfpt->jvms()->set_endoff(sfpt->req());
1303 
1304     for (uint i = 1; i < ophi->req(); i++) {
1305       Node* base = ophi->in(i);
1306       JavaObjectNode* ptn = unique_java_object(base);
1307 
1308       // If the base is not scalar replaceable we don't need to register information about
1309       // it at this time.
1310       if (ptn == nullptr || !ptn->scalar_replaceable()) {
1311         continue;
1312       }
1313 
1314       AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1315       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);
1316       if (sobj == nullptr) {
1317         sfpt->restore_non_debug_edges(non_debug_edges_worklist);
1318         return false; // non-recoverable failure; recompile
1319       }
1320 
1321       // Now make a pass over the debug information replacing any references
1322       // to the allocated object with "sobj"
1323       Node* ccpp = alloc->result_cast();
1324       sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1325       non_debug_edges_worklist.remove_edge_if_present(ccpp); // drop scalarized input from non-debug info
1326 
1327       // Register the scalarized object as a candidate for reallocation
1328       smerge->add_req(sobj);
1329     }
1330 
1331     // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
1332     sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1333     non_debug_edges_worklist.remove_edge_if_present(original_sfpt_parent); // drop scalarized input from non-debug info
1334 
1335     // The call to 'replace_edges_in_range' above might have removed the
1336     // reference to ophi that we need at _merge_pointer_idx. The line below makes
1337     // sure the reference is maintained.
1338     sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1339 
1340     sfpt->restore_non_debug_edges(non_debug_edges_worklist);
1341 
1342     _igvn->_worklist.push(sfpt);
1343   }
1344 
1345   return true;
1346 }
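// A sketch of the debug info produced above for a merge with one scalar
// replaceable (SR) and one non-scalar-replaceable (NSR) input:
//
//   sfpt debug info: ..., SafePointScalarMerge(selector, SR descriptions), ...
//   appended edges:  nsr_merge_pointer, selector
//
// At deoptimization the selector decides whether to rematerialize an SR
// object or to use the pointer to the NSR object directly.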
1347 
1348 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_worklist) {

1515   return false;
1516 }
1517 
1518 // Returns true if at least one of the arguments to the call is an object
1519 // that does not escape globally.
1520 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1521   if (call->method() != nullptr) {
1522     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1523     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1524       Node* p = call->in(idx);
1525       if (not_global_escape(p)) {
1526         return true;
1527       }
1528     }
1529   } else {
1530     const char* name = call->as_CallStaticJava()->_name;
1531     assert(name != nullptr, "no name");
1532     // no arg escapes through uncommon traps
1533     if (strcmp(name, "uncommon_trap") != 0) {
1534       // process_call_arguments() assumes that all arguments escape globally
1535       const TypeTuple* d = call->tf()->domain();
1536       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1537         const Type* at = d->field_at(i);
1538         if (at->isa_oopptr() != nullptr) {
1539           return true;
1540         }
1541       }
1542     }
1543   }
1544   return false;
1545 }
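// For example (hypothetical snippet), has_arg_escape() returns true for the
// call to bar() below when 'a' is known not to escape globally:
//
//   void foo() {
//     A a = new A();
//     bar(a);        // 'a' is at most ArgEscape
//   }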
1546 
1547 
1548 
1549 // Utility function for nodes that load an object
1550 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1551   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1552   // ThreadLocal has RawPtr type.
1553   const Type* t = _igvn->type(n);
1554   if (t->make_ptr() != nullptr) {
1555     Node* adr = n->in(MemNode::Address);
1556 #ifdef ASSERT
1557     if (!adr->is_AddP()) {
1558       assert(_igvn->type(adr)->isa_rawptr(), "sanity");
1559     } else {
1560       assert((ptnode_adr(adr->_idx) == nullptr ||
1561               ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
1562     }
1563 #endif
1564     add_local_var_and_edge(n, PointsToNode::NoEscape,
1565                            adr, delayed_worklist);
1566   }
1567 }
1568 
1569 // Populate Connection Graph with PointsTo nodes and create simple
1570 // connection graph edges.
1571 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1572   assert(!_verify, "this method should not be called for verification");
1573   PhaseGVN* igvn = _igvn;
1574   uint n_idx = n->_idx;
1575   PointsToNode* n_ptn = ptnode_adr(n_idx);
1576   if (n_ptn != nullptr) {
1577     return; // No need to redefine PointsTo node during first iteration.
1578   }
1579   int opcode = n->Opcode();
1580   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
1581   if (gc_handled) {
1582     return; // Ignore node if already handled by GC.
1583   }
1584 
1585   if (n->is_Call()) {
1586     // Arguments to allocation and locking don't escape.
1587     if (n->is_AbstractLock()) {
1588       // Put Lock and Unlock nodes on IGVN worklist to process them during
1589       // first IGVN optimization when escape information is still available.
1590       record_for_optimizer(n);
1591     } else if (n->is_Allocate()) {
1592       add_call_node(n->as_Call());
1593       record_for_optimizer(n);
1594     } else {
1595       if (n->is_CallStaticJava()) {
1596         const char* name = n->as_CallStaticJava()->_name;
1597         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1598           return; // Skip uncommon traps
1599         }
1600       }
1601       // Don't mark as processed since call's arguments have to be processed.
1602       delayed_worklist->push(n);
1603       // Check if a call returns an object.
1604       if ((n->as_Call()->returns_pointer() &&
1605            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1606           (n->is_CallStaticJava() &&
1607            n->as_CallStaticJava()->is_boxing_method())) {
1608         add_call_node(n->as_Call());
1609       }
1610     }
1611     return;
1612   }
1613   // Put this check here to process call arguments since some call nodes
1614   // point to phantom_obj.
1615   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1616     return; // Skip predefined nodes.
1617   }
1618   switch (opcode) {
1619     case Op_AddP: {
1620       Node* base = get_addp_base(n);
1621       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1622       // Field nodes are created for all field types. They are used in
1623       // adjust_scalar_replaceable_state() and split_unique_types().
1624       // Note, non-oop fields will have only base edges in Connection
1625       // Graph because such fields are not used for oop loads and stores.
1626       int offset = address_offset(n, igvn);
1627       add_field(n, PointsToNode::NoEscape, offset);
1628       if (ptn_base == nullptr) {
1629         delayed_worklist->push(n); // Process it later.
1630       } else {
1631         n_ptn = ptnode_adr(n_idx);
1632         add_base(n_ptn->as_Field(), ptn_base);
1633       }
1634       break;
1635     }
1636     case Op_CastX2P: {
1637       map_ideal_node(n, phantom_obj);
1638       break;
1639     }
1640     case Op_CastPP:
1641     case Op_CheckCastPP:
1642     case Op_EncodeP:
1643     case Op_DecodeN:
1644     case Op_EncodePKlass:
1645     case Op_DecodeNKlass: {
1646       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1647       break;
1648     }
1649     case Op_CMoveP: {
1650       add_local_var(n, PointsToNode::NoEscape);
1651       // Do not add edges during the first iteration because some inputs
1652       // may not be defined yet.
1653       delayed_worklist->push(n);
1654       break;
1655     }
1656     case Op_ConP:
1657     case Op_ConN:
1658     case Op_ConNKlass: {
1659       // assume all oop constants globally escape except for null

1689       break;
1690     }
1691     case Op_PartialSubtypeCheck: {
1692       // Produces Null or notNull and is used only in CmpP, so
1693       // phantom_obj can be used.
1694       map_ideal_node(n, phantom_obj); // Result is unknown
1695       break;
1696     }
1697     case Op_Phi: {
1698       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1699       // ThreadLocal has RawPtr type.
1700       const Type* t = n->as_Phi()->type();
1701       if (t->make_ptr() != nullptr) {
1702         add_local_var(n, PointsToNode::NoEscape);
1703         // Do not add edges during the first iteration because some inputs
1704         // may not be defined yet.
1705         delayed_worklist->push(n);
1706       }
1707       break;
1708     }
1709     case Op_Proj: {
1710       // we are only interested in the oop result projection from a call
1711       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1712           n->in(0)->as_Call()->returns_pointer()) {
1713         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1714       }
1715       break;
1716     }
1717     case Op_Rethrow: // Exception object escapes
1718     case Op_Return: {
1719       if (n->req() > TypeFunc::Parms &&
1720           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1721         // Treat Return value as LocalVar with GlobalEscape escape state.
1722         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1723       }
1724       break;
1725     }
1726     case Op_CompareAndExchangeP:
1727     case Op_CompareAndExchangeN:
1728     case Op_GetAndSetP:
1729     case Op_GetAndSetN: {
1730       add_objload_to_connection_graph(n, delayed_worklist);
1731       // fall-through
1732     }
1733     case Op_StoreP:
1734     case Op_StoreN:

1778       break;
1779     }
1780     default:
1781       ; // Do nothing for nodes not related to EA.
1782   }
1783   return;
1784 }
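// Note: connection graph construction is a two-pass scheme. The method above
// runs during the first pass over the ideal graph and defers nodes (via
// delayed_worklist) whose inputs may not be registered yet; add_final_edges()
// below completes the remaining edges once every input has a PointsTo node.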
1785 
1786 // Add final simple edges to graph.
1787 void ConnectionGraph::add_final_edges(Node *n) {
1788   PointsToNode* n_ptn = ptnode_adr(n->_idx);
1789 #ifdef ASSERT
1790   if (_verify && n_ptn->is_JavaObject())
1791     return; // This method does not change graph for JavaObject.
1792 #endif
1793 
1794   if (n->is_Call()) {
1795     process_call_arguments(n->as_Call());
1796     return;
1797   }
1798   assert(n->is_Store() || n->is_LoadStore() ||
1799          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1800          "node should be registered already");
1801   int opcode = n->Opcode();
1802   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1803   if (gc_handled) {
1804     return; // Ignore node if already handled by GC.
1805   }
1806   switch (opcode) {
1807     case Op_AddP: {
1808       Node* base = get_addp_base(n);
1809       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1810       assert(ptn_base != nullptr, "field's base should be registered");
1811       add_base(n_ptn->as_Field(), ptn_base);
1812       break;
1813     }
1814     case Op_CastPP:
1815     case Op_CheckCastPP:
1816     case Op_EncodeP:
1817     case Op_DecodeN:
1818     case Op_EncodePKlass:
1819     case Op_DecodeNKlass: {
1820       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1821       break;
1822     }
1823     case Op_CMoveP: {
1824       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1825         Node* in = n->in(i);
1826         if (in == nullptr) {
1827           continue;  // ignore null
1828         }
1829         Node* uncast_in = in->uncast();
1830         if (uncast_in->is_top() || uncast_in == n) {
1831           continue;  // ignore top or inputs which go back to this node
1832         }
1833         PointsToNode* ptn = ptnode_adr(in->_idx);

1846     }
1847     case Op_Phi: {
1848       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1849       // ThreadLocal has RawPtr type.
1850       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1851       for (uint i = 1; i < n->req(); i++) {
1852         Node* in = n->in(i);
1853         if (in == nullptr) {
1854           continue;  // ignore null
1855         }
1856         Node* uncast_in = in->uncast();
1857         if (uncast_in->is_top() || uncast_in == n) {
1858           continue;  // ignore top or inputs which go back to this node
1859         }
1860         PointsToNode* ptn = ptnode_adr(in->_idx);
1861         assert(ptn != nullptr, "node should be registered");
1862         add_edge(n_ptn, ptn);
1863       }
1864       break;
1865     }

1866     case Op_Proj: {
1867       // we are only interested in the oop result projection from a call
1868       assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1869              n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
1870       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1871       break;
1872     }
1873     case Op_Rethrow: // Exception object escapes
1874     case Op_Return: {
1875       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1876              "Unexpected node type");
1877       // Treat Return value as LocalVar with GlobalEscape escape state.
1878       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1879       break;
1880     }
1881     case Op_CompareAndExchangeP:
1882     case Op_CompareAndExchangeN:
1883     case Op_GetAndSetP:
1884     case Op_GetAndSetN:{
1885       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1886       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1887       // fall-through
1888     }
1889     case Op_CompareAndSwapP:
1890     case Op_CompareAndSwapN:

2024     Node* val = n->in(MemNode::ValueIn);
2025     PointsToNode* ptn = ptnode_adr(val->_idx);
2026     assert(ptn != nullptr, "node should be registered");
2027     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
2028     // Add edge to object for unsafe access with offset.
2029     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
2030     assert(adr_ptn != nullptr, "node should be registered");
2031     if (adr_ptn->is_Field()) {
2032       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
2033       add_edge(adr_ptn, ptn);
2034     }
2035     return true;
2036   }
2037 #ifdef ASSERT
2038   n->dump(1);
2039   assert(false, "not unsafe");
2040 #endif
2041   return false;
2042 }
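// Illustrative example of the raw-address store handled above (assuming an
// Unsafe-style access):
//
//   UNSAFE.putReference(obj, offset, val);
//
// 'val' is stored through a raw/unknown address, so it is conservatively
// marked GlobalEscape: the graph cannot track where the address points.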
2043 
2044 void ConnectionGraph::add_call_node(CallNode* call) {
2045   assert(call->returns_pointer(), "only for call which returns pointer");
2046   uint call_idx = call->_idx;
2047   if (call->is_Allocate()) {
2048     Node* k = call->in(AllocateNode::KlassNode);
2049     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
2050     assert(kt != nullptr, "TypeKlassPtr required.");
2051     PointsToNode::EscapeState es = PointsToNode::NoEscape;
2052     bool scalar_replaceable = true;
2053     NOT_PRODUCT(const char* nsr_reason = "");
2054     if (call->is_AllocateArray()) {
2055       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
2056         es = PointsToNode::GlobalEscape;
2057       } else {
2058         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2059         if (length < 0) {
2060           // Not scalar replaceable if the length is not constant.
2061           scalar_replaceable = false;
2062           NOT_PRODUCT(nsr_reason = "has a non-constant length");
2063         } else if (length > EliminateAllocationArraySizeLimit) {
2064           // Not scalar replaceable if the length is too big.
2065           scalar_replaceable = false;

2100     //    - mapped to GlobalEscape JavaObject node if oop is returned;
2101     //
2102     //    - all oop arguments are escaping globally;
2103     //
2104     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2105     //
2106     //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
2107     //
2108     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2109     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2110     //      during call is returned;
2111     //    - mapped to ArgEscape LocalVar node pointing to object arguments
2112     //      which are returned and do not escape during the call;
2113     //
2114     //    - oop arguments' escape status is determined by bytecode analysis;
2115     //
2116     // For a static call, we know exactly what method is being called.
2117     // Use bytecode estimator to record whether the call's return value escapes.
2118     ciMethod* meth = call->as_CallJava()->method();
2119     if (meth == nullptr) {
2120       assert(call->as_CallStaticJava()->is_call_to_multianewarray_stub(), "TODO: add failed case check");
2121       // Returns a newly allocated non-escaped object.
2122       add_java_object(call, PointsToNode::NoEscape);
2123       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
2124     } else if (meth->is_boxing_method()) {
2125       // Returns boxing object
2126       PointsToNode::EscapeState es;
2127       vmIntrinsics::ID intr = meth->intrinsic_id();
2128       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2129         // It does not escape if object is always allocated.
2130         es = PointsToNode::NoEscape;
2131       } else {
2132         // It escapes globally if object could be loaded from cache.
2133         es = PointsToNode::GlobalEscape;
2134       }
2135       add_java_object(call, es);
2136       if (es == PointsToNode::GlobalEscape) {
2137         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2138       }
2139     } else {
2140       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2141       call_analyzer->copy_dependencies(_compile->dependencies());
2142       if (call_analyzer->is_return_allocated()) {
2143         // Returns a newly allocated non-escaped object, simply
2144         // update dependency information.
2145         // Mark it as NoEscape so that objects referenced by
2146         // it's fields will be marked as NoEscape at least.
2147         add_java_object(call, PointsToNode::NoEscape);
2148         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2149       } else {
2150         // Determine whether any arguments are returned.
2151         const TypeTuple* d = call->tf()->domain();
2152         bool ret_arg = false;
2153         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2154           if (d->field_at(i)->isa_ptr() != nullptr &&
2155               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2156             ret_arg = true;
2157             break;
2158           }
2159         }
2160         if (ret_arg) {
2161           add_local_var(call, PointsToNode::ArgEscape);
2162         } else {
2163           // Returns unknown object.
2164           map_ideal_node(call, phantom_obj);
2165         }
2166       }
2167     }
2168   } else {
2169     // Another type of call, assume the worst case:
2170     // returned value is unknown and globally escapes.
2171     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
2172     map_ideal_node(call, phantom_obj);
2173   }
2174 }
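// Example of the boxing special case above:
//
//   Integer i = Integer.valueOf(x);  // may come from the Integer cache
//                                    // -> GlobalEscape, not scalar replaceable
//   Double  d = Double.valueOf(y);   // always a fresh allocation -> NoEscape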
2175 
2176 void ConnectionGraph::process_call_arguments(CallNode *call) {
2177     bool is_arraycopy = false;
2178     switch (call->Opcode()) {
2179 #ifdef ASSERT
2180     case Op_Allocate:
2181     case Op_AllocateArray:
2182     case Op_Lock:
2183     case Op_Unlock:
2184       assert(false, "should be done already");
2185       break;
2186 #endif
2187     case Op_ArrayCopy:
2188     case Op_CallLeafNoFP:
2189       // Most array copies are ArrayCopy nodes at this point but there
2190       // are still a few direct calls to the copy subroutines (See
2191       // PhaseStringOpts::copy_string())
2192       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2193         call->as_CallLeaf()->is_call_to_arraycopystub();
2194       // fall through
2195     case Op_CallLeafVector:
2196     case Op_CallLeaf: {
2197       // Stub calls: objects do not escape but they are not scalar replaceable.
2198       // Adjust escape state for outgoing arguments.
2199       const TypeTuple * d = call->tf()->domain();
2200       bool src_has_oops = false;
2201       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2202         const Type* at = d->field_at(i);
2203         Node *arg = call->in(i);
2204         if (arg == nullptr) {
2205           continue;
2206         }
2207         const Type *aat = _igvn->type(arg);
2208         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2209           continue;
2210         }
2211         if (arg->is_AddP()) {
2212           //
2213           // The inline_native_clone() case when the arraycopy stub is called
2214           // after the allocation before Initialize and CheckCastPP nodes.
2215           // Or normal arraycopy for object arrays case.
2216           //
2217           // Set AddP's base (Allocate) as not scalar replaceable since
2218           // pointer to the base (with offset) is passed as argument.
2219           //
2220           arg = get_addp_base(arg);
2221         }
2222         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2223         assert(arg_ptn != nullptr, "should be registered");
2224         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2225         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2226           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2227                  aat->isa_ptr() != nullptr, "expecting a Ptr");
2228           bool arg_has_oops = aat->isa_oopptr() &&
2229                               (aat->isa_instptr() ||
2230                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
2231           if (i == TypeFunc::Parms) {
2232             src_has_oops = arg_has_oops;
2233           }
2234           //
2235           // src or dst could be j.l.Object when other is basic type array:
2236           //
2237           //   arraycopy(char[],0,Object*,0,size);
2238           //   arraycopy(Object*,0,char[],0,size);
2239           //
2240           // Don't add edges in such cases.
2241           //
2242           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2243                                        arg_has_oops && (i > TypeFunc::Parms);
2244 #ifdef ASSERT
2245           if (!(is_arraycopy ||
2246                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2247                 (call->as_CallLeaf()->_name != nullptr &&
2248                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2249                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2250                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

2274                   strcmp(call->as_CallLeaf()->_name, "dilithiumMontMulByConstant") == 0 ||
2275                   strcmp(call->as_CallLeaf()->_name, "dilithiumDecomposePoly") == 0 ||
2276                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2277                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2278                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2279                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2280                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2281                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2282                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2283                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2284                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2285                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2286                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2287                   strcmp(call->as_CallLeaf()->_name, "double_keccak") == 0 ||
2288                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2289                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2290                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2291                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2292                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2293                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2294                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2295                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2296                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2297                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2298                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2299                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2300                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2301                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2302                  ))) {
2303             call->dump();
2304             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2305           }
2306 #endif
2307           // Always process arraycopy's destination object since
2308           // we need to add all possible edges to references in
2309           // source object.
2310           if (arg_esc >= PointsToNode::ArgEscape &&
2311               !arg_is_arraycopy_dest) {
2312             continue;
2313           }

2336           }
2337         }
2338       }
2339       break;
2340     }
2341     case Op_CallStaticJava: {
2342       // For a static call, we know exactly what method is being called.
2343       // Use bytecode estimator to record the call's escape effects
2344 #ifdef ASSERT
2345       const char* name = call->as_CallStaticJava()->_name;
2346       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2347 #endif
2348       ciMethod* meth = call->as_CallJava()->method();
2349       if ((meth != nullptr) && meth->is_boxing_method()) {
2350         break; // Boxing methods do not modify any oops.
2351       }
2352       BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2353       // fall-through if not a Java method or no analyzer information
2354       if (call_analyzer != nullptr) {
2355         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2356         const TypeTuple* d = call->tf()->domain();
2357         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2358           const Type* at = d->field_at(i);
2359           int k = i - TypeFunc::Parms;
2360           Node* arg = call->in(i);
2361           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2362           if (at->isa_ptr() != nullptr &&
2363               call_analyzer->is_arg_returned(k)) {
2364             // The call returns arguments.
2365             if (call_ptn != nullptr) { // Is call's result used?
2366               assert(call_ptn->is_LocalVar(), "node should be registered");
2367               assert(arg_ptn != nullptr, "node should be registered");
2368               add_edge(call_ptn, arg_ptn);
2369             }
2370           }
2371           if (at->isa_oopptr() != nullptr &&
2372               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2373             if (!call_analyzer->is_arg_stack(k)) {
2374               // The argument global escapes
2375               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2376             } else {
2377               set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2378               if (!call_analyzer->is_arg_local(k)) {
2379                 // The argument itself doesn't escape, but any fields might
2380                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2381               }
2382             }
2383           }
2384         }
2385         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2386           // The call returns arguments.
2387           assert(call_ptn->edge_count() > 0, "sanity");
2388           if (!call_analyzer->is_return_local()) {
2389             // Returns also unknown object.
2390             add_edge(call_ptn, phantom_obj);
2391           }
2392         }
2393         break;
2394       }
2395     }
2396     default: {
2397       // Fall-through here if not a Java method or no analyzer information
2398       // or some other type of call, assume the worst case: all arguments
2399       // globally escape.
2400       const TypeTuple* d = call->tf()->domain();
2401       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2402         const Type* at = d->field_at(i);
2403         if (at->isa_oopptr() != nullptr) {
2404           Node* arg = call->in(i);
2405           if (arg->is_AddP()) {
2406             arg = get_addp_base(arg);
2407           }
2408           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2409           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2410         }
2411       }
2412     }
2413   }
2414 }
2415 
2416 
2417 // Finish Graph construction.
2418 bool ConnectionGraph::complete_connection_graph(
2419                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2420                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

2798     PointsToNode* base = i.get();
2799     if (base->is_JavaObject()) {
2800       // Skip Allocate's fields which will be processed later.
2801       if (base->ideal_node()->is_Allocate()) {
2802         return 0;
2803       }
2804       assert(base == null_obj, "only null ptr base expected here");
2805     }
2806   }
2807   if (add_edge(field, phantom_obj)) {
2808     // New edge was added
2809     new_edges++;
2810     add_field_uses_to_worklist(field);
2811   }
2812   return new_edges;
2813 }
2814 
2815 // Find fields initializing values for allocations.
2816 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2817   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2818   Node* alloc = pta->ideal_node();
2819 
2820   // Do nothing for Allocate nodes since their field values are
2821   // "known" unless they are initialized by arraycopy/clone.
2822   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2823     return 0;
2824   }
2825   assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
2826 #ifdef ASSERT
2827   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
2828     assert(alloc->as_CallStaticJava()->is_call_to_multianewarray_stub(), "sanity");
2829   }
2830 #endif
2831   // Non-escaped allocations returned from Java or runtime calls have unknown field values.
2832   int new_edges = 0;
2833   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2834     PointsToNode* field = i.get();
2835     if (field->is_Field() && field->as_Field()->is_oop()) {
2836       if (add_edge(field, phantom_obj)) {
2837         // New edge was added
2838         new_edges++;
2839         add_field_uses_to_worklist(field->as_Field());
2840       }
2841     }
2842   }
2843   return new_edges;
2844 }
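// For instance, an allocation initialized by a clone has field values the
// graph cannot see:
//
//   Object[] copy = (Object[]) src.clone();  // element values come from 'src'
//
// so every oop field of such an object gets an edge to phantom_obj above.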
2845 
2846 // Find fields initializing values for allocations.
2847 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2848   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2849   Node* alloc = pta->ideal_node();
2850   // Do nothing for Call nodes since their field values are unknown.
2851   if (!alloc->is_Allocate()) {
2852     return 0;
2853   }
2854   InitializeNode* ini = alloc->as_Allocate()->initialization();
2855   bool visited_bottom_offset = false;
2856   GrowableArray<int> offsets_worklist;
2857   int new_edges = 0;
2858 
2859   // Check if an oop field's initializing value is recorded and add
2860   // a corresponding null if the field's value is not recorded.
2861   // Connection Graph does not record a default initialization by null
2862   // captured by Initialize node.
2863   //
2864   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2865     PointsToNode* field = i.get(); // Field (AddP)
2866     if (!field->is_Field() || !field->as_Field()->is_oop()) {
2867       continue; // Not oop field
2868     }
2869     int offset = field->as_Field()->offset();
2870     if (offset == Type::OffsetBot) {
2871       if (!visited_bottom_offset) {

2917               } else {
2918                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2919                   tty->print_cr("----------init store has invalid value -----");
2920                   store->dump();
2921                   val->dump();
2922                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2923                 }
2924                 for (EdgeIterator j(val); j.has_next(); j.next()) {
2925                   PointsToNode* obj = j.get();
2926                   if (obj->is_JavaObject()) {
2927                     if (!field->points_to(obj->as_JavaObject())) {
2928                       missed_obj = obj;
2929                       break;
2930                     }
2931                   }
2932                 }
2933               }
2934               if (missed_obj != nullptr) {
2935                 tty->print_cr("----------field---------------------------------");
2936                 field->dump();
2937                 tty->print_cr("----------missed reference to object-----------");
2938                 missed_obj->dump();
2939                 tty->print_cr("----------object referenced by init store -----");
2940                 store->dump();
2941                 val->dump();
2942                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2943               }
2944             }
2945 #endif
2946           } else {
2947             // There could be initializing stores which follow allocation.
2948             // For example, a volatile field store is not collected
2949             // by Initialize node.
2950             //
2951             // Need to check for dependent loads to separate such stores from
2952             // stores which follow loads. For now, add initial value null so
2953             // that compare pointers optimization works correctly.
2954           }
2955         }
2956         if (value == nullptr) {
2957           // A field's initializing value was not recorded. Add null.
2958           if (add_edge(field, null_obj)) {
2959             // New edge was added

3284         assert(field->edge_count() > 0, "sanity");
3285       }
3286     }
3287   }
3288 }
3289 #endif
3290 
3291 // Optimize ideal graph.
3292 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3293                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3294   Compile* C = _compile;
3295   PhaseIterGVN* igvn = _igvn;
3296   if (EliminateLocks) {
3297     // Mark locks before changing ideal graph.
3298     int cnt = C->macro_count();
3299     for (int i = 0; i < cnt; i++) {
3300       Node *n = C->macro_node(i);
3301       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3302         AbstractLockNode* alock = n->as_AbstractLock();
3303         if (!alock->is_non_esc_obj()) {
3304           if (can_eliminate_lock(alock)) {
3305             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3306             // The lock could be marked eliminated by lock coarsening
3307             // code during the first IGVN before EA. Replace the coarsened flag
3308             // so that all associated locks/unlocks are eliminated.
3309 #ifdef ASSERT
3310             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3311 #endif
3312             alock->set_non_esc_obj();
3313           }
3314         }
3315       }
3316     }
3317   }
3318 
3319   if (OptimizePtrCompare) {
3320     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3321       Node *n = ptr_cmp_worklist.at(i);
3322       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3323       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3324       if (tcmp->singleton()) {

3326 #ifndef PRODUCT
3327         if (PrintOptimizePtrCompare) {
3328           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3329           if (Verbose) {
3330             n->dump(1);
3331           }
3332         }
3333 #endif
3334         igvn->replace_node(n, cmp);
3335       }
3336     }
3337   }
3338 
3339   // For MemBarStoreStore nodes added in library_call.cpp, check the
3340   // escape status of the associated AllocateNode and optimize out the
3341   // MemBarStoreStore node if the allocated object never escapes.
3342   for (int i = 0; i < storestore_worklist.length(); i++) {
3343     Node* storestore = storestore_worklist.at(i);
3344     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3345     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3346       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3347       mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3348       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3349       igvn->register_new_node_with_optimizer(mb);
3350       igvn->replace_node(storestore, mb);
3351     }
3352   }
3353 }
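// Sketch of the MemBarStoreStore elimination above: library_call.cpp adds a
// storestore barrier after an allocation (e.g. for Object.clone()) to order
// the initializing stores before the new reference can be published. If the
// object never escapes, no other thread can observe it, so the barrier is
// weakened to a plain MemBarCPUOrder.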
3354 
3355 // Optimize objects compare.
3356 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3357   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0,1]
3358   if (!OptimizePtrCompare) {
3359     return UNKNOWN;
3360   }
3361   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3362   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3363 
3364   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3365   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3366   JavaObjectNode* jobj1 = unique_java_object(left);
3367   JavaObjectNode* jobj2 = unique_java_object(right);
3368 
3369   // The use of this method during allocation merge reduction may cause 'left'
3370   // or 'right' to be something (e.g., a Phi) that isn't in the connection graph
3371   // or that doesn't reference a unique Java object.
3372   if (ptn1 == nullptr || ptn2 == nullptr ||
3373       jobj1 == nullptr || jobj2 == nullptr) {
3374     return UNKNOWN;

3494   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3495   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3496   PointsToNode* ptadr = _nodes.at(n->_idx);
3497   if (ptadr != nullptr) {
3498     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3499     return;
3500   }
3501   Compile* C = _compile;
3502   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3503   map_ideal_node(n, ptadr);
3504   // Add edge from arraycopy node to source object.
3505   (void)add_edge(ptadr, src);
3506   src->set_arraycopy_src();
3507   // Add edge from destination object to arraycopy node.
3508   (void)add_edge(dst, ptadr);
3509   dst->set_arraycopy_dst();
3510 }
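// The resulting connection graph shape for an arraycopy is
//
//   dst -> ArrayCopy -> src
//
// i.e. the destination points to the arraycopy node, which points to the
// source, so objects reachable from 'src' become reachable from 'dst'.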
3511 
3512 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3513   const Type* adr_type = n->as_AddP()->bottom_type();
3514   BasicType bt = T_INT;
3515   if (offset == Type::OffsetBot) {
3516     // Check only oop fields.
3517     if (!adr_type->isa_aryptr() ||
3518         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3519         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3520       // OffsetBot is used to reference an array element. Ignore the first AddP.
3521       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3522         bt = T_OBJECT;
3523       }
3524     }
3525   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3526     if (adr_type->isa_instptr()) {
3527       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
3528       if (field != nullptr) {
3529         bt = field->layout_type();
3530       } else {
3531         // Check for unsafe oop field access
3532         if (has_oop_node_outs(n)) {
3533           bt = T_OBJECT;
3534           (*unsafe) = true;
3535         }
3536       }
3537     } else if (adr_type->isa_aryptr()) {
3538       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3539         // Ignore array length load.
3540       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3541         // Ignore first AddP.
3542       } else {
3543         const Type* elemtype = adr_type->isa_aryptr()->elem();
3544         bt = elemtype->array_element_basic_type();
3545       }
3546     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3547       // Allocation initialization, ThreadLocal field access, unsafe access
3548       if (has_oop_node_outs(n)) {
3549         bt = T_OBJECT;
3550       }
3551     }
3552   }
3553   // Note: T_NARROWOOP is not classed as a real reference type
3554   bool res = (is_reference_type(bt) || bt == T_NARROWOOP);
3555   assert(!has_oop_node_outs(n) || res, "sanity: AddP has oop outs, needs to be treated as oop field");
3556   return res;
3557 }
3558 
3559 bool ConnectionGraph::has_oop_node_outs(Node* n) {
3560   return n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3561          n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3562          n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3563          BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n);
3564 }

3727             return true;
3728           }
3729         }
3730       }
3731     }
3732   }
3733   return false;
3734 }
3735 
3736 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3737   const Type *adr_type = phase->type(adr);
3738   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
3739     // We are computing a raw address for a store captured by an Initialize;
3740     // compute an appropriate address type. AddP cases #3 and #5 (see below).
3741     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3742     assert(offs != Type::OffsetBot ||
3743            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3744            "offset must be a constant or it is initialization of array");
3745     return offs;
3746   }
3747   const TypePtr *t_ptr = adr_type->isa_ptr();
3748   assert(t_ptr != nullptr, "must be a pointer type");
3749   return t_ptr->offset();
3750 }
3751 
3752 Node* ConnectionGraph::get_addp_base(Node *addp) {
3753   assert(addp->is_AddP(), "must be AddP");
3754   //
3755   // AddP cases for Base and Address inputs:
3756   // case #1. Direct object's field reference:
3757   //     Allocate
3758   //       |
3759   //     Proj #5 ( oop result )
3760   //       |
3761   //     CheckCastPP (cast to instance type)
3762   //      | |
3763   //     AddP  ( base == address )
3764   //
3765   // case #2. Indirect object's field reference:
3766   //      Phi
3767   //       |
3768   //     CastPP (cast to instance type)
3769   //      | |
3770   //     AddP  ( base == address )
3771   //
3772   // case #3. Raw object's field reference for Initialize node:
3773   //      Allocate
3774   //        |
3775   //      Proj #5 ( oop result )
3776   //  top   |
3777   //     \  |
3778   //     AddP  ( base == top )
3779   //
3780   // case #4. Array's element reference:
3781   //   {CheckCastPP | CastPP}
3782   //     |  | |
3783   //     |  AddP ( array's element offset )
3784   //     |  |
3785   //     AddP ( array's offset )
3786   //
3787   // case #5. Raw object's field reference for arraycopy stub call:
3788   //          The inline_native_clone() case when the arraycopy stub is called
3789   //          after the allocation before Initialize and CheckCastPP nodes.
3790   //      Allocate
3791   //        |
3792   //      Proj #5 ( oop result )

3803   // case #7. Klass's field reference.
3804   //      LoadKlass
3805   //       | |
3806   //       AddP  ( base == address )
3807   //
3808   // case #8. narrow Klass's field reference.
3809   //      LoadNKlass
3810   //       |
3811   //      DecodeN
3812   //       | |
3813   //       AddP  ( base == address )
3814   //
3815   // case #9. Mixed unsafe access
3816   //    {instance}
3817   //        |
3818   //      CheckCastPP (raw)
3819   //  top   |
3820   //     \  |
3821   //     AddP  ( base == top )
3822   //

3823   Node *base = addp->in(AddPNode::Base);
3824   if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
3825     base = addp->in(AddPNode::Address);
3826     while (base->is_AddP()) {
3827       // Case #6 (unsafe access) may have several chained AddP nodes.
3828       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
3829       base = base->in(AddPNode::Address);
3830     }
3831     if (base->Opcode() == Op_CheckCastPP &&
3832         base->bottom_type()->isa_rawptr() &&
3833         _igvn->type(base->in(1))->isa_oopptr()) {
3834       base = base->in(1); // Case #9
3835     } else {
3836       Node* uncast_base = base->uncast();
3837       int opcode = uncast_base->Opcode();
3838       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
3839              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
3840              (_igvn->C->is_osr_compilation() && uncast_base->is_Parm() && uncast_base->as_Parm()->_con == TypeFunc::Parms)||
3841              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
3842              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_klassptr() != nullptr)) ||
3843              is_captured_store_address(addp), "sanity");
3844     }
3845   }
3846   return base;
3847 }
3848 
3849 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
3850   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
3851   Node* addp2 = addp->raw_out(0);
3852   if (addp->outcnt() == 1 && addp2->is_AddP() &&
3853       addp2->in(AddPNode::Base) == n &&
3854       addp2->in(AddPNode::Address) == addp) {
3855     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
3856     //
3857     // Find the array's offset to push it on the worklist first so that
3858     // the array's element offset (pushed second) is processed first,
3859     // avoiding a CastPP for the array's offset.
3860     // Otherwise the inserted CastPP (LocalVar) will point to what
3861     // the AddP (Field) points to, which would be wrong since
3862     // the algorithm expects the CastPP to have the same points-to set
3863     // as the AddP's base CheckCastPP (LocalVar).
3864     //
3865     //    ArrayAllocation
3866     //     |
3867     //    CheckCastPP
3868     //     |

3885   }
3886   return nullptr;
3887 }
3888 
3889 //
3890 // Adjust the type and inputs of an AddP which computes the
3891 // address of a field of an instance
3892 //
3893 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3894   PhaseGVN* igvn = _igvn;
3895   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3896   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3897   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3898   if (t == nullptr) {
3899     // We are computing a raw address for a store captured by an Initialize;
3900     // compute an appropriate address type (cases #3 and #5).
3901     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3902     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3903     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3904     assert(offs != Type::OffsetBot, "offset must be a constant");
3905     t = base_t->add_offset(offs)->is_oopptr();
3906   }
3907   int inst_id =  base_t->instance_id();
3908   assert(!t->is_known_instance() || t->instance_id() == inst_id,
3909                              "old type must be non-instance or match new type");
3910 
3911   // The type 't' could be a subclass of 'base_t'.
3912   // As a result t->offset() could be larger than base_t's size, which would
3913   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3914   // constructor verifies the correctness of the offset.
3915   //
3916   // This could happen on a subclass's branch (from type profiling
3917   // inlining) which was not eliminated during parsing since the exactness
3918   // of the allocation type was not propagated to the subclass type check.
3919   //
3920   // Or the type 't' could be unrelated to 'base_t' at all.
3921   // That could happen when the CHA type differs from the MDO type on a dead path
3922   // (for example, from an instanceof check) which is not collapsed during parsing.
3923   //
3924   // Do nothing for such AddP node and don't process its users since
3925   // this code branch will go away.
3926   //
3927   if (!t->is_known_instance() &&
3928       !base_t->maybe_java_subtype_of(t)) {
3929      return false; // bail out
3930   }
3931   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();











3932   // Do NOT remove the next line: ensure a new alias index is allocated
3933   // for the instance type. Note: C++ will not remove it since the call
3934   // has a side effect.
3935   int alias_idx = _compile->get_alias_index(tinst);
3936   igvn->set_type(addp, tinst);
3937   // record the allocation in the node map
3938   set_map(addp, get_map(base->_idx));
3939   // Set addp's Base and Address to 'base'.
3940   Node *abase = addp->in(AddPNode::Base);
3941   Node *adr   = addp->in(AddPNode::Address);
3942   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3943       adr->in(0)->_idx == (uint)inst_id) {
3944     // Skip AddP cases #3 and #5.
3945   } else {
3946     assert(!abase->is_top(), "sanity"); // AddP case #3
3947     if (abase != base) {
3948       igvn->hash_delete(addp);
3949       addp->set_req(AddPNode::Base, base);
3950       if (abase == adr) {
3951         addp->set_req(AddPNode::Address, base);

4520       //   - not determined to be ineligible by escape analysis
4521       set_map(alloc, n);
4522       set_map(n, alloc);
4523       const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
4524       igvn->hash_delete(n);
4525       igvn->set_type(n,  tinst);
4526       n->raise_bottom_type(tinst);
4527       igvn->hash_insert(n);
4528       record_for_optimizer(n);
4529       // Allocate an alias index for the header fields. Accesses to
4530       // the header emitted during macro expansion wouldn't have
4531       // correct memory state otherwise.
4532       _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
4533       _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
4534       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
4535         // Add a new NarrowMem projection for each existing NarrowMem projection with new adr type
4536         InitializeNode* init = alloc->as_Allocate()->initialization();
4537         assert(init != nullptr, "can't find Initialization node for this Allocate node");
4538         auto process_narrow_proj = [&](NarrowMemProjNode* proj) {
4539           const TypePtr* adr_type = proj->adr_type();
4540           const TypePtr* new_adr_type = tinst->add_offset(adr_type->offset());





4541           if (adr_type != new_adr_type && !init->already_has_narrow_mem_proj_with_adr_type(new_adr_type)) {
4542             DEBUG_ONLY( uint alias_idx = _compile->get_alias_index(new_adr_type); )
4543             assert(_compile->get_general_index(alias_idx) == _compile->get_alias_index(adr_type), "new adr type should be narrowed down from existing adr type");
4544             NarrowMemProjNode* new_proj = new NarrowMemProjNode(init, new_adr_type);
4545             igvn->set_type(new_proj, new_proj->bottom_type());
4546             record_for_optimizer(new_proj);
4547             set_map(proj, new_proj); // record it so ConnectionGraph::find_inst_mem() can find it
4548           }
4549         };
4550         init->for_each_narrow_mem_proj_with_new_uses(process_narrow_proj);
4551 
4552         // First, put on the worklist all Field edges from the Connection Graph,
4553         // which is more accurate than pushing immediate users from the Ideal Graph.
4554         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
4555           PointsToNode* tgt = e.get();
4556           if (tgt->is_Arraycopy()) {
4557             continue;
4558           }
4559           Node* use = tgt->ideal_node();
4560           assert(tgt->is_Field() && use->is_AddP(),

4637         ptnode_adr(n->_idx)->dump();
4638         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4639 #endif
4640         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4641         return;
4642       } else {
4643         Node *val = get_map(jobj->idx());   // CheckCastPP node
4644         TypeNode *tn = n->as_Type();
4645         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4646         assert(tinst != nullptr && tinst->is_known_instance() &&
4647                tinst->instance_id() == jobj->idx() , "instance type expected.");
4648 
4649         const Type *tn_type = igvn->type(tn);
4650         const TypeOopPtr *tn_t;
4651         if (tn_type->isa_narrowoop()) {
4652           tn_t = tn_type->make_ptr()->isa_oopptr();
4653         } else {
4654           tn_t = tn_type->isa_oopptr();
4655         }
4656         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {







4657           if (tn_type->isa_narrowoop()) {
4658             tn_type = tinst->make_narrowoop();
4659           } else {
4660             tn_type = tinst;
4661           }
4662           igvn->hash_delete(tn);
4663           igvn->set_type(tn, tn_type);
4664           tn->set_type(tn_type);
4665           igvn->hash_insert(tn);
4666           record_for_optimizer(n);
4667         } else {
4668           assert(tn_type == TypePtr::NULL_PTR ||
4669                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4670                  "unexpected type");
4671           continue; // Skip dead path with different type
4672         }
4673       }
4674     } else {
4675       DEBUG_ONLY(n->dump();)
4676       assert(false, "EA: unexpected node");
4677       continue;
4678     }
4679     // push allocation's users on appropriate worklist
4680     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4681       Node *use = n->fast_out(i);
4682       if (use->is_Mem() && use->in(MemNode::Address) == n) {
4683         // Load/store to instance's field
4684         memnode_worklist.append_if_missing(use);
4685       } else if (use->is_MemBar()) {
4686         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4687           memnode_worklist.append_if_missing(use);
4688         }
4689       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4690         Node* addp2 = find_second_addp(use, n);
4691         if (addp2 != nullptr) {
4692           alloc_worklist.append_if_missing(addp2);
4693         }
4694         alloc_worklist.append_if_missing(use);
4695       } else if (use->is_Phi() ||
4696                  use->is_CheckCastPP() ||
4697                  use->is_EncodeNarrowPtr() ||
4698                  use->is_DecodeNarrowPtr() ||
4699                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4700         alloc_worklist.append_if_missing(use);
4701 #ifdef ASSERT
4702       } else if (use->is_Mem()) {
4703         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4704       } else if (use->is_MergeMem()) {
4705         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4706       } else if (use->is_SafePoint()) {
4707         // Look for MergeMem nodes for calls which reference unique allocation
4708         // (through CheckCastPP nodes) even for debug info.
4709         Node* m = use->in(TypeFunc::Memory);
4710         if (m->is_MergeMem()) {
4711           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4712         }
4713       } else if (use->Opcode() == Op_EncodeISOArray) {
4714         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4715           // EncodeISOArray overwrites destination array
4716           memnode_worklist.append_if_missing(use);
4717         }



4718       } else {
4719         uint op = use->Opcode();
4720         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4721             (use->in(MemNode::Memory) == n)) {
4722           // They overwrite the memory edge corresponding to the destination array.
4723           memnode_worklist.append_if_missing(use);
4724         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4725               op == Op_CastP2X ||
4726               op == Op_FastLock || op == Op_AryEq ||
4727               op == Op_StrComp || op == Op_CountPositives ||
4728               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4729               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4730               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4731               op == Op_SubTypeCheck ||
4732               op == Op_ReinterpretS2HF ||
4733               op == Op_ReachabilityFence ||
4734               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4735           n->dump();
4736           use->dump();
4737           assert(false, "EA: missing allocation reference path");
4738         }
4739 #endif
4740       }
4741     }
4742 
4743   }
4744 
4745 #ifdef ASSERT
4746   if (VerifyReduceAllocationMerges) {
4747     for (uint i = 0; i < reducible_merges.size(); i++) {
4748       Node* phi = reducible_merges.at(i);
4749 
4750       if (!reduced_merges.member(phi)) {
4751         phi->dump(2);

4820         n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4821         if (n == nullptr) {
4822           continue;
4823         }
4824       }
4825     } else if (n->is_CallLeaf()) {
4826       // Runtime calls with narrow memory input (no MergeMem node)
4827       // get the memory projection
4828       n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4829       if (n == nullptr) {
4830         continue;
4831       }
4832     } else if (n->Opcode() == Op_StrInflatedCopy) {
4833       // Check direct uses of StrInflatedCopy.
4834       // It is memory type Node - no special SCMemProj node.
4835     } else if (n->Opcode() == Op_StrCompressedCopy ||
4836                n->Opcode() == Op_EncodeISOArray) {
4837       // get the memory projection
4838       n = n->find_out_with(Op_SCMemProj);
4839       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");



4840     } else if (n->is_Proj()) {
4841       assert(n->in(0)->is_Initialize(), "we only push memory projections for Initialize");
4842     } else {
4843 #ifdef ASSERT
4844       if (!n->is_Mem()) {
4845         n->dump();
4846       }
4847       assert(n->is_Mem(), "memory node required.");
4848 #endif
4849       Node *addr = n->in(MemNode::Address);
4850       const Type *addr_t = igvn->type(addr);
4851       if (addr_t == Type::TOP) {
4852         continue;
4853       }
4854       assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4855       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4856       assert ((uint)alias_idx < new_index_end, "wrong alias index");
4857       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4858       if (_compile->failing()) {
4859         return;

4871         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4872       }
4873     }
4874     // push user on appropriate worklist
4875     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4876       Node *use = n->fast_out(i);
4877       if (use->is_Phi() || use->is_ClearArray()) {
4878         memnode_worklist.append_if_missing(use);
4879       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4880         memnode_worklist.append_if_missing(use);
4881       } else if (use->is_MemBar() || use->is_CallLeaf()) {
4882         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4883           memnode_worklist.append_if_missing(use);
4884         }
4885       } else if (use->is_Proj()) {
4886         assert(n->is_Initialize(), "We only push projections of Initialize");
4887         if (use->as_Proj()->_con == TypeFunc::Memory) { // Ignore precedent edge
4888           memnode_worklist.append_if_missing(use);
4889         }
4890 #ifdef ASSERT
4891       } else if (use->is_Mem()) {
4892         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4893       } else if (use->is_MergeMem()) {
4894         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4895       } else if (use->Opcode() == Op_EncodeISOArray) {
4896         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4897           // EncodeISOArray overwrites destination array
4898           memnode_worklist.append_if_missing(use);
4899         }




4900       } else {
4901         uint op = use->Opcode();
4902         if ((use->in(MemNode::Memory) == n) &&
4903             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4904           // They overwrite the memory edge corresponding to the destination array.
4905           memnode_worklist.append_if_missing(use);
4906         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4907               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4908               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4909               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4910           n->dump();
4911           use->dump();
4912           assert(false, "EA: missing memory path");
4913         }
4914 #endif
4915       }
4916     }
4917   }
4918 
4919   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4920   //            Walk each memory slice moving the first node encountered of each
4921   //            instance type to the input corresponding to its alias index.
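  // Hedged sketch of the intent: a MergeMem that so far only carried the
  // generic slice for some field now also gets inputs at the new
  // instance-specific alias indices created above, so memory on a unique
  // instance's field no longer flows through the generic slice.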
4922   uint length = mergemem_worklist.length();
4923   for( uint next = 0; next < length; ++next ) {
4924     MergeMemNode* nmm = mergemem_worklist.at(next);
4925     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4926     // Note: we don't want to use MergeMemStream here because we only want to
4927     // scan inputs which exist at the start, not ones we add during processing.
4928   // Note 2: MergeMem may already contain instance memory slices added
4929   // during the find_inst_mem() call when memory nodes were processed above.

4992         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4993       } else if (_invocation > 0) {
4994         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4995       } else {
4996         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4997       }
4998       return;
4999     }
5000 
5001     igvn->hash_insert(nmm);
5002     record_for_optimizer(nmm);
5003   }
5004 
5005   _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_3, 5);
5006 
5007   //  Phase 4:  Update the inputs of non-instance memory Phis and
5008   //            the Memory input of memnodes
5009   // First update the inputs of any non-instance Phi's from
5010   // which we split out an instance Phi.  Note we don't have
5011   // to recursively process Phi's encountered on the input memory
5012   // chains as is done in split_memory_phi() since they will
5013   // also be processed here.
5014   for (int j = 0; j < orig_phis.length(); j++) {
5015     PhiNode *phi = orig_phis.at(j);
5016     int alias_idx = _compile->get_alias_index(phi->adr_type());
5017     igvn->hash_delete(phi);
5018     for (uint i = 1; i < phi->req(); i++) {
5019       Node *mem = phi->in(i);
5020       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
5021       if (_compile->failing()) {
5022         return;
5023       }
5024       if (mem != new_mem) {
5025         phi->set_req(i, new_mem);
5026       }
5027     }
5028     igvn->hash_insert(phi);
5029     record_for_optimizer(phi);
5030   }
5031 
5032   // Update the memory inputs of MemNodes with the value we computed

  25 #include "ci/bcEscapeAnalyzer.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "gc/shared/barrierSet.hpp"
  28 #include "gc/shared/c2/barrierSetC2.hpp"
  29 #include "libadt/vectset.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/metaspace.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "opto/arraycopynode.hpp"
  34 #include "opto/c2compiler.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/castnode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/compile.hpp"
  39 #include "opto/escape.hpp"
  40 #include "opto/inlinetypenode.hpp"
  41 #include "opto/locknode.hpp"
  42 #include "opto/macro.hpp"
  43 #include "opto/movenode.hpp"
  44 #include "opto/narrowptrnode.hpp"
  45 #include "opto/phaseX.hpp"
  46 #include "opto/rootnode.hpp"
  47 #include "utilities/macros.hpp"
  48 
  49 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  50   // If ReduceAllocationMerges is enabled we might call split_through_phi during
  51   // split_unique_types and that will create additional nodes that need to be
  52   // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  53   // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  54   // the array will be reallocated.
  55   _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  56   _in_worklist(C->comp_arena()),
  57   _next_pidx(0),
  58   _collecting(true),
  59   _verify(false),
  60   _compile(C),

 152   GrowableArray<SafePointNode*>  sfn_worklist;
 153   GrowableArray<MergeMemNode*>   mergemem_worklist;
 154   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 155 
 156   { Compile::TracePhase tp(Phase::_t_connectionGraph);
 157 
 158   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 159   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
 160   // Initialize worklist
 161   if (C->root() != nullptr) {
 162     ideal_nodes.push(C->root());
 163   }
 164   // Processed ideal nodes are unique on ideal_nodes list
 165   // but several ideal nodes are mapped to the phantom_obj.
 166   // To avoid duplicated entries on the following worklists
 167   // add the phantom_obj only once to them.
 168   ptnodes_worklist.append(phantom_obj);
 169   java_objects_worklist.append(phantom_obj);
 170   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 171     Node* n = ideal_nodes.at(next);
 172     if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
 173         !n->in(MemNode::Address)->is_AddP() &&
 174         _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
 175     // Load/Store at the mark word address is at offset 0 and so has no AddP, which confuses EA
 176       Node* addp = AddPNode::make_with_base(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
 177       _igvn->register_new_node_with_optimizer(addp);
 178       _igvn->replace_input_of(n, MemNode::Address, addp);
 179       ideal_nodes.push(addp);
 180       _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
 181     }
 182     // Create PointsTo nodes and add them to Connection Graph. Called
 183     // only once per ideal node since ideal_nodes is Unique_Node list.
 184     add_node_to_connection_graph(n, &delayed_worklist);
 185     PointsToNode* ptn = ptnode_adr(n->_idx);
 186     if (ptn != nullptr && ptn != phantom_obj) {
 187       ptnodes_worklist.append(ptn);
 188       if (ptn->is_JavaObject()) {
 189         java_objects_worklist.append(ptn->as_JavaObject());
 190         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 191             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 192           // Only the results of allocations and java static calls are interesting.
 193           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 194         }
 195       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 196         oop_fields_worklist.append(ptn->as_Field());
 197       }
 198     }
 199     // Collect some interesting nodes for further use.
 200     switch (n->Opcode()) {
 201       case Op_MergeMem:

 419     // scalar replaceable objects.
 420     split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
 421     if (C->failing()) {
 422       NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
 423       return false;
 424     }
 425 
 426 #ifdef ASSERT
 427   } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
 428     tty->print("=== No allocations eliminated for ");
 429     C->method()->print_short_name();
 430     if (!EliminateAllocations) {
 431       tty->print(" since EliminateAllocations is off ===");
 432     } else if (!has_scalar_replaceable_candidates) {
 433       tty->print(" since there are no scalar replaceable candidates ===");
 434     }
 435     tty->cr();
 436 #endif
 437   }
 438 
 439   // 6. Expand flat accesses if the object does not escape. This adds nodes to
 440   // the graph, so it has to be done after split_unique_types. It expands atomic
 441   // mismatched accesses (encapsulated in LoadFlats and StoreFlats) into
 442   // non-mismatched accesses, so it is better done before reducing allocation merges.
 443   if (has_non_escaping_obj) {
 444     optimize_flat_accesses(sfn_worklist);
 445   }
 446 
 447   _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES, 4);
 448 
 449   // 7. Reduce allocation merges used as debug information. This is done after
 450   // split_unique_types because the methods used to create SafePointScalarObject
 451   // need to traverse the memory graph to find values for object fields. We also
 452   // set to null the scalarized inputs of reducible Phis so that the Allocates
 453   // that they point to can later be scalar replaced.
 454   bool delay = _igvn->delay_transform();
 455   _igvn->set_delay_transform(true);
 456   for (uint i = 0; i < reducible_merges.size(); i++) {
 457     Node* n = reducible_merges.at(i);
 458     if (n->outcnt() > 0) {
 459       if (!reduce_phi_on_safepoints(n->as_Phi())) {
 460         NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
 461         C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
 462         return false;
 463       }
 464 
 465       // Now we set the scalar replaceable inputs of ophi to null, which is
 466       // the last piece that would prevent it from being scalar replaceable.
 467       reset_scalar_replaceable_entries(n->as_Phi());
 468     }
 469   }

1315     //  (2) A selector, used to decide if we need to rematerialize an object
1316     //      or use the pointer to a NSR object.
1317     // See more details of these fields in the declaration of SafePointScalarMergeNode.
1318     // It is safe to include them in debug info straight away since create_scalarized_object_description()
1319     // will include all newly added inputs in debug info anyway.
1320     sfpt->add_req(nsr_merge_pointer);
1321     sfpt->add_req(selector);
1322     sfpt->jvms()->set_endoff(sfpt->req());
1323 
1324     for (uint i = 1; i < ophi->req(); i++) {
1325       Node* base = ophi->in(i);
1326       JavaObjectNode* ptn = unique_java_object(base);
1327 
1328       // If the base is not scalar replaceable we don't need to register information about
1329       // it at this time.
1330       if (ptn == nullptr || !ptn->scalar_replaceable()) {
1331         continue;
1332       }
1333 
1334       AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1335       Unique_Node_List value_worklist;
1336 #ifdef ASSERT
1337       const Type* res_type = alloc->result_cast()->bottom_type();
1338       if (res_type->is_inlinetypeptr() && !Compile::current()->has_circular_inline_type()) {
1339         assert(!ophi->as_Phi()->can_push_inline_types_down(_igvn), "missed earlier scalarization opportunity");
1340       }
1341 #endif
1342       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
1343       if (sobj == nullptr) {
1344         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1345         sfpt->restore_non_debug_edges(non_debug_edges_worklist);
1346         return false; // non-recoverable failure; recompile
1347       }
1348 
1349       // Now make a pass over the debug information replacing any references
1350       // to the allocated object with "sobj"
1351       Node* ccpp = alloc->result_cast();
1352       sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1353       non_debug_edges_worklist.remove_edge_if_present(ccpp); // drop scalarized input from non-debug info
1354 
1355       // Register the scalarized object as a candidate for reallocation
1356       smerge->add_req(sobj);
1357 
1358       // Scalarize inline types that were added to the safepoint.
1359       // Don't allow linking a constant oop (if available) for flat array elements
1360       // because Deoptimization::reassign_flat_array_elements needs field values.
1361       const bool allow_oop = !merge_t->is_flat();
1362       for (uint j = 0; j < value_worklist.size(); ++j) {
1363         InlineTypeNode* vt = value_worklist.at(j)->as_InlineType();
1364         vt->make_scalar_in_safepoints(_igvn, allow_oop);
1365       }
1366     }
1367 
1368     // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
1369     sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1370     non_debug_edges_worklist.remove_edge_if_present(original_sfpt_parent); // drop scalarized input from non-debug info
1371 
1372     // The call to 'replace_edges_in_range' above might have removed the
1373     // reference to ophi that we need at _merge_pointer_idx. The line below makes
1374     // sure the reference is maintained.
1375     sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1376 
1377     sfpt->restore_non_debug_edges(non_debug_edges_worklist);
1378 
1379     _igvn->_worklist.push(sfpt);
1380   }
1381 
1382   return true;
1383 }
1384 
1385 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_worklist) {

1552   return false;
1553 }
1554 
1555 // Returns true if at least one of the arguments to the call is an object
1556 // that does not escape globally.
1557 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1558   if (call->method() != nullptr) {
1559     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1560     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1561       Node* p = call->in(idx);
1562       if (not_global_escape(p)) {
1563         return true;
1564       }
1565     }
1566   } else {
1567     const char* name = call->as_CallStaticJava()->_name;
1568     assert(name != nullptr, "no name");
1569     // no arg escapes through uncommon traps
1570     if (strcmp(name, "uncommon_trap") != 0) {
1571       // process_call_arguments() assumes that all arguments escape globally
1572       const TypeTuple* d = call->tf()->domain_sig();
1573       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1574         const Type* at = d->field_at(i);
1575         if (at->isa_oopptr() != nullptr) {
1576           return true;
1577         }
1578       }
1579     }
1580   }
1581   return false;
1582 }
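// Hedged example: for a call site foo(p) where p is an allocation that the
// Connection Graph classified as NoEscape or ArgEscape, not_global_escape(p)
// holds and has_arg_escape() answers true; if every oop argument escapes
// globally it answers false.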
1583 
1584 
1585 
1586 // Utility function for nodes that load an object
1587 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1588   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1589   // ThreadLocal has RawPtr type.
1590   const Type* t = _igvn->type(n);
1591   if (t->make_ptr() != nullptr) {
1592     Node* adr = n->in(MemNode::Address);
1593 #ifdef ASSERT
1594     if (!adr->is_AddP()) {
1595       assert(_igvn->type(adr)->isa_rawptr(), "sanity");
1596     } else {
1597       assert((ptnode_adr(adr->_idx) == nullptr ||
1598               ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
1599     }
1600 #endif
1601     add_local_var_and_edge(n, PointsToNode::NoEscape,
1602                            adr, delayed_worklist);
1603   }
1604 }
1605 
1606 void ConnectionGraph::add_proj(Node* n, Unique_Node_List* delayed_worklist) {
1607   if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && n->in(0)->as_Call()->returns_pointer()) {
1608     add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1609   } else if (n->in(0)->is_LoadFlat()) {
1610     // Treat LoadFlat outputs like a call's return value
1611     add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1612   } else if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() && n->bottom_type()->isa_ptr()) {
1613     CallNode* call = n->in(0)->as_Call();
1614     assert(call->tf()->returns_inline_type_as_fields(), "");
1615     if (n->as_Proj()->_con == TypeFunc::Parms || !returns_an_argument(call)) {
1616       // either:
1617       // - the call does not return an argument
1618       // - the returned buffer for a returned scalarized argument
1619       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1620     } else {
1621       add_local_var(n, PointsToNode::NoEscape);
1622     }
1623   }
1624 }
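// Hedged summary of the cases above: for calls returning an inline type as
// fields, TypeFunc::Parms is the returned-buffer projection and higher
// projections carry the scalarized fields; a projection is linked to the
// call unless it may merely forward a returned argument, in which case only
// a LocalVar is created and process_call_arguments() supplies the edges.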
1625 
1626 // Populate Connection Graph with PointsTo nodes and create simple
1627 // connection graph edges.
1628 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1629   assert(!_verify, "this method should not be called for verification");
1630   PhaseGVN* igvn = _igvn;
1631   uint n_idx = n->_idx;
1632   PointsToNode* n_ptn = ptnode_adr(n_idx);
1633   if (n_ptn != nullptr) {
1634     return; // No need to redefine PointsTo node during first iteration.
1635   }
1636   int opcode = n->Opcode();
1637   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
1638   if (gc_handled) {
1639     return; // Ignore node if already handled by GC.
1640   }
1641 
1642   if (n->is_Call()) {
1643     // Arguments to allocation and locking don't escape.
1644     if (n->is_AbstractLock()) {
1645       // Put Lock and Unlock nodes on IGVN worklist to process them during
1646       // first IGVN optimization when escape information is still available.
1647       record_for_optimizer(n);
1648     } else if (n->is_Allocate()) {
1649       add_call_node(n->as_Call());
1650       record_for_optimizer(n);
1651     } else {
1652       if (n->is_CallStaticJava()) {
1653         const char* name = n->as_CallStaticJava()->_name;
1654         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1655           return; // Skip uncommon traps
1656         }
1657       }
1658       // Don't mark as processed since call's arguments have to be processed.
1659       delayed_worklist->push(n);
1660       // Check if a call returns an object.
1661       if ((n->as_Call()->returns_pointer() &&
1662            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1663           (n->is_CallStaticJava() &&
1664            n->as_CallStaticJava()->is_boxing_method())) {
1665         add_call_node(n->as_Call());
1666       } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
1667         bool returns_oop = false;
1668         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
1669           ProjNode* pn = n->fast_out(i)->as_Proj();
1670           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
1671             returns_oop = true;
1672           }
1673         }
1674         if (returns_oop) {
1675           add_call_node(n->as_Call());
1676         }
1677       }
1678     }
1679     return;
1680   }
1681   // Put this check here to process call arguments since some call nodes
1682   // point to phantom_obj.
1683   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1684     return; // Skip predefined nodes.
1685   }
1686   switch (opcode) {
1687     case Op_AddP: {
1688       Node* base = get_addp_base(n);
1689       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1690       // Field nodes are created for all field types. They are used in
1691       // adjust_scalar_replaceable_state() and split_unique_types().
1692       // Note, non-oop fields will have only base edges in Connection
1693       // Graph because such fields are not used for oop loads and stores.
1694       int offset = address_offset(n, igvn);
1695       add_field(n, PointsToNode::NoEscape, offset);
1696       if (ptn_base == nullptr) {
1697         delayed_worklist->push(n); // Process it later.
1698       } else {
1699         n_ptn = ptnode_adr(n_idx);
1700         add_base(n_ptn->as_Field(), ptn_base);
1701       }
1702       break;
1703     }
1704     case Op_CastX2P:
1705     case Op_CastI2N: {
1706       map_ideal_node(n, phantom_obj);
1707       break;
1708     }
1709     case Op_InlineType:
1710     case Op_CastPP:
1711     case Op_CheckCastPP:
1712     case Op_EncodeP:
1713     case Op_DecodeN:
1714     case Op_EncodePKlass:
1715     case Op_DecodeNKlass: {
1716       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1717       break;
1718     }
1719     case Op_CMoveP: {
1720       add_local_var(n, PointsToNode::NoEscape);
1721       // Do not add edges during the first iteration because some inputs
1722       // may not be defined yet.
1723       delayed_worklist->push(n);
1724       break;
1725     }
1726     case Op_ConP:
1727     case Op_ConN:
1728     case Op_ConNKlass: {
1729       // assume all oop constants globally escape except for null

1759       break;
1760     }
1761     case Op_PartialSubtypeCheck: {
1762       // Produces Null or notNull and is used only in CmpP, so
1763       // phantom_obj could be used.
1764       map_ideal_node(n, phantom_obj); // Result is unknown
1765       break;
1766     }
1767     case Op_Phi: {
1768       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1769       // ThreadLocal has RawPtr type.
1770       const Type* t = n->as_Phi()->type();
1771       if (t->make_ptr() != nullptr) {
1772         add_local_var(n, PointsToNode::NoEscape);
1773         // Do not add edges during the first iteration because some inputs
1774         // may not be defined yet.
1775         delayed_worklist->push(n);
1776       }
1777       break;
1778     }
1779     case Op_LoadFlat:
1780       // Treat LoadFlat like an unknown call that takes no arguments and produces its results
1781       map_ideal_node(n, phantom_obj);
1782       break;
1783     case Op_StoreFlat:
1784       // Treat StoreFlat like a call that escapes the stored flattened fields
1785       delayed_worklist->push(n);
1786       break;
1787     case Op_Proj: {
1788       // we are only interested in the oop result projection from a call
1789       add_proj(n, delayed_worklist);



1790       break;
1791     }
1792     case Op_Rethrow: // Exception object escapes
1793     case Op_Return: {
1794       if (n->req() > TypeFunc::Parms &&
1795           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1796         // Treat Return value as LocalVar with GlobalEscape escape state.
1797         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1798       }
1799       break;
1800     }
1801     case Op_CompareAndExchangeP:
1802     case Op_CompareAndExchangeN:
1803     case Op_GetAndSetP:
1804     case Op_GetAndSetN: {
1805       add_objload_to_connection_graph(n, delayed_worklist);
1806       // fall-through
1807     }
1808     case Op_StoreP:
1809     case Op_StoreN:

1853       break;
1854     }
1855     default:
1856       ; // Do nothing for nodes not related to EA.
1857   }
1858   return;
1859 }
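// Note: nodes whose inputs may not be registered yet during this first pass
// (Phis, CMoves, non-allocation calls, StoreFlats) are parked on
// delayed_worklist; their edges are added by add_final_edges() below once
// every node has a PointsTo entry.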
1860 
1861 // Add final simple edges to graph.
1862 void ConnectionGraph::add_final_edges(Node *n) {
1863   PointsToNode* n_ptn = ptnode_adr(n->_idx);
1864 #ifdef ASSERT
1865   if (_verify && n_ptn->is_JavaObject())
1866     return; // This method does not change graph for JavaObject.
1867 #endif
1868 
1869   if (n->is_Call()) {
1870     process_call_arguments(n->as_Call());
1871     return;
1872   }
1873   assert(n->is_Store() || n->is_LoadStore() || n->is_StoreFlat() ||
1874          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1875          "node should be registered already");
1876   int opcode = n->Opcode();
1877   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1878   if (gc_handled) {
1879     return; // Ignore node if already handled by GC.
1880   }
1881   switch (opcode) {
1882     case Op_AddP: {
1883       Node* base = get_addp_base(n);
1884       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1885       assert(ptn_base != nullptr, "field's base should be registered");
1886       add_base(n_ptn->as_Field(), ptn_base);
1887       break;
1888     }
1889     case Op_InlineType:
1890     case Op_CastPP:
1891     case Op_CheckCastPP:
1892     case Op_EncodeP:
1893     case Op_DecodeN:
1894     case Op_EncodePKlass:
1895     case Op_DecodeNKlass: {
1896       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1897       break;
1898     }
1899     case Op_CMoveP: {
1900       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1901         Node* in = n->in(i);
1902         if (in == nullptr) {
1903           continue;  // ignore null
1904         }
1905         Node* uncast_in = in->uncast();
1906         if (uncast_in->is_top() || uncast_in == n) {
1907           continue;  // ignore top or inputs which go back this node
1908         }
1909         PointsToNode* ptn = ptnode_adr(in->_idx);

1922     }
1923     case Op_Phi: {
1924       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1925       // ThreadLocal has RawPtr type.
1926       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1927       for (uint i = 1; i < n->req(); i++) {
1928         Node* in = n->in(i);
1929         if (in == nullptr) {
1930           continue;  // ignore null
1931         }
1932         Node* uncast_in = in->uncast();
1933         if (uncast_in->is_top() || uncast_in == n) {
1934           continue;  // ignore top or inputs which go back this node
1935         }
1936         PointsToNode* ptn = ptnode_adr(in->_idx);
1937         assert(ptn != nullptr, "node should be registered");
1938         add_edge(n_ptn, ptn);
1939       }
1940       break;
1941     }
1942     case Op_StoreFlat: {
1943       // StoreFlat globally escapes its stored flattened fields
1944       InlineTypeNode* value = n->as_StoreFlat()->value();
1945       ciInlineKlass* vk = _igvn->type(value)->inline_klass();
1946       for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
1947         ciField* field = vk->nonstatic_field_at(i);
1948         if (field->type()->is_primitive_type()) {
1949           continue;
1950         }
1951 
1952         Node* field_value = value->field_value_by_offset(field->offset_in_bytes(), true);
1953         PointsToNode* field_value_ptn = ptnode_adr(field_value->_idx);
1954         set_escape_state(field_value_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "store into a flat field"));
1955       }
1956       break;
1957     }
1958     case Op_Proj: {
1959       add_proj(n, nullptr);



1960       break;
1961     }
1962     case Op_Rethrow: // Exception object escapes
1963     case Op_Return: {
1964       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1965              "Unexpected node type");
1966       // Treat Return value as LocalVar with GlobalEscape escape state.
1967       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1968       break;
1969     }
1970     case Op_CompareAndExchangeP:
1971     case Op_CompareAndExchangeN:
1972     case Op_GetAndSetP:
1973     case Op_GetAndSetN: {
1974       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1975       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1976       // fall-through
1977     }
1978     case Op_CompareAndSwapP:
1979     case Op_CompareAndSwapN:

2113     Node* val = n->in(MemNode::ValueIn);
2114     PointsToNode* ptn = ptnode_adr(val->_idx);
2115     assert(ptn != nullptr, "node should be registered");
2116     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
2117     // Add edge to object for unsafe access with offset.
2118     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
2119     assert(adr_ptn != nullptr, "node should be registered");
2120     if (adr_ptn->is_Field()) {
2121       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
2122       add_edge(adr_ptn, ptn);
2123     }
2124     return true;
2125   }
2126 #ifdef ASSERT
2127   n->dump(1);
2128   assert(false, "not unsafe");
2129 #endif
2130   return false;
2131 }
2132 
2133 // Iterate over the domains for the scalarized and non scalarized calling conventions: Only move to the next element
2134 // in the non scalarized calling convention once all elements of the scalarized calling convention for that parameter
2135 // have been iterated over. So (ignoring hidden arguments such as the null marker) iterating over:
2136 // value class MyValue {
2137 //   int f1;
2138 //   float f2;
2139 // }
2140 // void m(Object o, MyValue v, int i)
2141 // produces the pairs:
2142 // (Object, Object), (MyValue, int), (MyValue, float), (int, int)
2143 class DomainIterator : public StackObj {
2144 private:
2145   const TypeTuple* _domain;
2146   const TypeTuple* _domain_cc;
2147   const GrowableArray<SigEntry>* _sig_cc;
2148 
2149   uint _i_domain;
2150   uint _i_domain_cc;
2151   int _i_sig_cc;
2152   uint _depth;
2153   uint _first_field_pos;
2154   const bool _is_static;
2155 
2156   void next_helper() {
2157     if (_sig_cc == nullptr) {
2158       return;
2159     }
2160     BasicType prev_bt = _i_sig_cc > 0 ? _sig_cc->at(_i_sig_cc-1)._bt : T_ILLEGAL;
2161     BasicType prev_prev_bt = _i_sig_cc > 1 ? _sig_cc->at(_i_sig_cc-2)._bt : T_ILLEGAL;
2162     while (_i_sig_cc < _sig_cc->length()) {
2163       BasicType bt = _sig_cc->at(_i_sig_cc)._bt;
2164       assert(bt != T_VOID || _sig_cc->at(_i_sig_cc-1)._bt == prev_bt, "incorrect prev bt");
2165       if (bt == T_METADATA) {
2166         if (_depth == 0) {
2167           _first_field_pos = _i_domain_cc;
2168         }
2169         _depth++;
2170       } else if (bt == T_VOID && (prev_bt != T_LONG && prev_bt != T_DOUBLE)) {
2171         _depth--;
2172         if (_depth == 0) {
2173           _i_domain++;
2174         }
2175       } else if (bt == T_OBJECT && prev_bt == T_METADATA && (_is_static || _i_domain > 0) && _sig_cc->at(_i_sig_cc)._offset == 0) {
2176         assert(_sig_cc->at(_i_sig_cc)._vt_oop, "buffer expected right after T_METADATA");
2177         assert(_depth == 1, "only root value has buffer");
2178         _i_domain_cc++;
2179         _first_field_pos = _i_domain_cc;
2180       } else if (bt == T_BOOLEAN && prev_prev_bt == T_METADATA && (_is_static || _i_domain > 0) && _sig_cc->at(_i_sig_cc)._offset == -1) {
2181         assert(_sig_cc->at(_i_sig_cc)._null_marker, "null marker expected right after T_METADATA");
2182         assert(_depth == 1, "only root value null marker");
2183         _i_domain_cc++;
2184         _first_field_pos = _i_domain_cc;
2185       } else {
2186         return;
2187       }
2188       prev_prev_bt = prev_bt;
2189       prev_bt = bt;
2190       _i_sig_cc++;
2191     }
2192   }
2193 
2194 public:
2195 
2196   DomainIterator(CallJavaNode* call) :
2197     _domain(call->tf()->domain_sig()),
2198     _domain_cc(call->tf()->domain_cc()),
2199     _sig_cc(call->method()->get_sig_cc()),
2200     _i_domain(TypeFunc::Parms),
2201     _i_domain_cc(TypeFunc::Parms),
2202     _i_sig_cc(0),
2203     _depth(0),
2204     _first_field_pos(0),
2205     _is_static(call->method()->is_static()) {
2206     next_helper();
2207   }
2208 
2209   bool has_next() const {
2210     assert(_sig_cc == nullptr || (_i_sig_cc < _sig_cc->length()) == (_i_domain < _domain->cnt()), "should reach end in sync");
2211     assert((_i_domain < _domain->cnt()) == (_i_domain_cc < _domain_cc->cnt()), "should reach end in sync");
2212     return _i_domain < _domain->cnt();
2213   }
2214 
2215   void next() {
2216     assert(_depth != 0 || _domain->field_at(_i_domain) == _domain_cc->field_at(_i_domain_cc), "should produce same non scalarized elements");
2217     _i_sig_cc++;
2218     if (_depth == 0) {
2219       _i_domain++;
2220     }
2221     _i_domain_cc++;
2222     next_helper();
2223   }
2224 
2225   uint i_domain() const {
2226     return _i_domain;
2227   }
2228 
2229   uint i_domain_cc() const {
2230     return _i_domain_cc;
2231   }
2232 
2233   const Type* current_domain() const {
2234     return _domain->field_at(_i_domain);
2235   }
2236 
2237   const Type* current_domain_cc() const {
2238     return _domain_cc->field_at(_i_domain_cc);
2239   }
2240 
2241   uint first_field_pos() const {
2242     assert(_first_field_pos >= TypeFunc::Parms, "not yet updated?");
2243     return _first_field_pos;
2244   }
2245 };
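// Minimal, hypothetical usage sketch of the iterator above (not from this
// file), pairing each declared parameter slot with its scalarized slots:
//
//   DomainIterator it(call);
//   while (it.has_next()) {
//     const Type* t    = it.current_domain();     // declared signature slot
//     const Type* t_cc = it.current_domain_cc();  // scalarized slot
//     // e.g. relate call->in(it.i_domain_cc()) back to parameter it.i_domain()
//     it.next();
//   }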
2246 
2247 // Determine whether any arguments are returned.
2248 bool ConnectionGraph::returns_an_argument(CallNode* call) {
2249   ciMethod* meth = call->as_CallJava()->method();
2250   BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2251   if (call_analyzer == nullptr) {
2252     return false;
2253   }
2254 
2255   const TypeTuple* d = call->tf()->domain_sig();
2256   bool ret_arg = false;
2257   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2258     if (d->field_at(i)->isa_ptr() != nullptr &&
2259         call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2260       if (meth->is_scalarized_arg(i - TypeFunc::Parms) && !compatible_return(call->as_CallJava(), i)) {
2261         return false;
2262       }
2263       if (call->tf()->returns_inline_type_as_fields() != meth->is_scalarized_arg(i - TypeFunc::Parms)) {
2264         return false;
2265       }
2266       ret_arg = true;
2267     }
2268   }
2269   return ret_arg;
2270 }
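// Hedged example: an identity-style method such as
//   static Object id(Object o) { return o; }   // hypothetical
// makes BCEscapeAnalyzer report argument 0 as returned, so
// returns_an_argument() answers true unless the scalarized-argument checks
// above force a bailout.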
2271 
2272 void ConnectionGraph::add_call_node(CallNode* call) {
2273   assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
2274   uint call_idx = call->_idx;
2275   if (call->is_Allocate()) {
2276     Node* k = call->in(AllocateNode::KlassNode);
2277     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
2278     assert(kt != nullptr, "TypeKlassPtr required.");
2279     PointsToNode::EscapeState es = PointsToNode::NoEscape;
2280     bool scalar_replaceable = true;
2281     NOT_PRODUCT(const char* nsr_reason = "");
2282     if (call->is_AllocateArray()) {
2283       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
2284         es = PointsToNode::GlobalEscape;
2285       } else {
2286         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2287         if (length < 0) {
2288           // Not scalar replaceable if the length is not constant.
2289           scalar_replaceable = false;
2290           NOT_PRODUCT(nsr_reason = "has a non-constant length");
2291         } else if (length > EliminateAllocationArraySizeLimit) {
2292           // Not scalar replaceable if the length is too big.
2293           scalar_replaceable = false;

2328     //    - mapped to GlobalEscape JavaObject node if oop is returned;
2329     //
2330     //    - all oop arguments are escaping globally;
2331     //
2332     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2333     //
2334     //    - the same as CallDynamicJavaNode if bytecode analysis can't be done;
2335     //
2336     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2337     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2338     //      during call is returned;
2339     //    - mapped to ArgEscape LocalVar node pointing to object arguments
2340     //      which are returned and do not escape during the call;
2341     //
2342     //    - oop arguments' escape status is determined by bytecode analysis;
2343     //
2344     // For a static call, we know exactly what method is being called.
2345     // Use bytecode estimator to record whether the call's return value escapes.
2346     ciMethod* meth = call->as_CallJava()->method();
2347     if (meth == nullptr) {
2348       const char* name = call->as_CallStaticJava()->_name;
2349       assert(call->as_CallStaticJava()->is_call_to_multianewarray_stub() ||
2350              strncmp(name, "load_unknown_inline", 19) == 0 ||
2351              strncmp(name, "store_inline_type_fields_to_buf", 31) == 0, "TODO: add failed case check");
2352       // Returns a newly allocated non-escaped object.
2353       add_java_object(call, PointsToNode::NoEscape);
2354       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
2355     } else if (meth->is_boxing_method()) {
2356       // Returns boxing object
2357       PointsToNode::EscapeState es;
2358       vmIntrinsics::ID intr = meth->intrinsic_id();
2359       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2360         // It does not escape if object is always allocated.
2361         es = PointsToNode::NoEscape;
2362       } else {
2363         // It escapes globally if object could be loaded from cache.
2364         es = PointsToNode::GlobalEscape;
2365       }
2366       add_java_object(call, es);
2367       if (es == PointsToNode::GlobalEscape) {
2368         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2369       }
2370     } else {
2371       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2372       call_analyzer->copy_dependencies(_compile->dependencies());
2373       if (call_analyzer->is_return_allocated()) {
2374         // Returns a newly allocated non-escaped object, simply
2375         // update dependency information.
2376         // Mark it as NoEscape so that objects referenced by
2377         // its fields will be marked as NoEscape at least.
2378         add_java_object(call, PointsToNode::NoEscape);
2379         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2380       } else {
2381         // For non scalarized argument/return: add_proj() adds an edge between the return projection and the call,
2382         // process_call_arguments() adds an edge between the call and the argument
2383         // For scalarized argument/return: process_call_arguments() adds an edge between a call projection for a field
2384         // and the argument input to the call for that field. An edge is added between the projection for the returned
2385         // buffer and the call.
2386         if (returns_an_argument(call) && !call->tf()->returns_inline_type_as_fields()) {
2387           // returns non scalarized argument




2388           add_local_var(call, PointsToNode::ArgEscape);
2389         } else {
2390           // Returns unknown object or scalarized argument being returned
2391           map_ideal_node(call, phantom_obj);
2392         }
2393       }
2394     }
2395   } else {
2396     // Another type of call, assume the worst case:
2397     // returned value is unknown and globally escapes.
2398     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
2399     map_ideal_node(call, phantom_obj);
2400   }
2401 }
2402 
2403 // Check that the return type is compatible with the type of the argument being returned, i.e. that there's no cast that
2404 // fails in the method.
2405 bool ConnectionGraph::compatible_return(CallJavaNode* call, uint k) {
2406   return call->tf()->domain_sig()->field_at(k)->is_instptr()->instance_klass() == call->tf()->range_sig()->field_at(TypeFunc::Parms)->is_instptr()->instance_klass();
2407 }
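// For illustration: returns_an_argument() uses this to reject, for a
// scalarized argument, shapes where the method's return type and the
// argument's declared type name different instance klasses, i.e. where a
// cast inside the method could fail before the argument is returned.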
2408 
2409 void ConnectionGraph::process_call_arguments(CallNode *call) {
2410     bool is_arraycopy = false;
2411     switch (call->Opcode()) {
2412 #ifdef ASSERT
2413     case Op_Allocate:
2414     case Op_AllocateArray:
2415     case Op_Lock:
2416     case Op_Unlock:
2417       assert(false, "should be done already");
2418       break;
2419 #endif
2420     case Op_ArrayCopy:
2421     case Op_CallLeafNoFP:
2422       // Most array copies are ArrayCopy nodes at this point but there
2423       // are still a few direct calls to the copy subroutines (See
2424       // PhaseStringOpts::copy_string())
2425       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2426         call->as_CallLeaf()->is_call_to_arraycopystub();
2427       // fall through
2428     case Op_CallLeafVector:
2429     case Op_CallLeaf: {
2430       // Stub calls: objects do not escape but they are not scalar replaceable.
2431       // Adjust escape state for outgoing arguments.
2432       const TypeTuple * d = call->tf()->domain_sig();
2433       bool src_has_oops = false;
2434       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2435         const Type* at = d->field_at(i);
2436         Node *arg = call->in(i);
2437         if (arg == nullptr) {
2438           continue;
2439         }
2440         const Type *aat = _igvn->type(arg);
2441         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2442           continue;
2443         }
2444         if (arg->is_AddP()) {
2445           //
2446           // The inline_native_clone() case when the arraycopy stub is called
2447           // after the allocation before Initialize and CheckCastPP nodes.
2448           // Or normal arraycopy for object arrays case.
2449           //
2450           // Set AddP's base (Allocate) as not scalar replaceable since
2451           // pointer to the base (with offset) is passed as argument.
2452           //
2453           arg = get_addp_base(arg);
2454         }
2455         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2456         assert(arg_ptn != nullptr, "should be registered");
2457         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2458         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2459           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2460                  aat->isa_ptr() != nullptr, "expecting a Ptr");
2461           bool arg_has_oops = aat->isa_oopptr() &&
2462                               (aat->isa_instptr() ||
2463                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
2464                                (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
2465                                                                aat->isa_aryptr()->is_flat() &&
2466                                                                aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
2467           if (i == TypeFunc::Parms) {
2468             src_has_oops = arg_has_oops;
2469           }
2470           //
2471           // src or dst could be j.l.Object when other is basic type array:
2472           //
2473           //   arraycopy(char[],0,Object*,0,size);
2474           //   arraycopy(Object*,0,char[],0,size);
2475           //
2476           // Don't add edges in such cases.
2477           //
2478           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2479                                        arg_has_oops && (i > TypeFunc::Parms);
2480 #ifdef ASSERT
2481           if (!(is_arraycopy ||
2482                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2483                 (call->as_CallLeaf()->_name != nullptr &&
2484                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2485                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2486                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

2510                   strcmp(call->as_CallLeaf()->_name, "dilithiumMontMulByConstant") == 0 ||
2511                   strcmp(call->as_CallLeaf()->_name, "dilithiumDecomposePoly") == 0 ||
2512                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2513                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2514                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2515                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2516                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2517                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2518                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2519                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2520                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2521                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2522                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2523                   strcmp(call->as_CallLeaf()->_name, "double_keccak") == 0 ||
2524                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2525                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2526                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2527                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2528                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2529                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2530                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2531                   strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
2532                   strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
2533                   strcmp(call->as_CallLeaf()->_name, "store_inline_type_fields_to_buf") == 0 ||
2534                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2535                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2537                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2538                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2539                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2540                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2541                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2542                  ))) {
2543             call->dump();
2544             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2545           }
2546 #endif
2547           // Always process arraycopy's destination object since
2548           // we need to add all possible edges to references in
2549           // the source object.
2550           if (arg_esc >= PointsToNode::ArgEscape &&
2551               !arg_is_arraycopy_dest) {
2552             continue;
2553           }

2576           }
2577         }
2578       }
2579       break;
2580     }
2581     case Op_CallStaticJava: {
2582       // For a static call, we know exactly what method is being called.
2583       // Use the bytecode estimator to record the call's escape effects.
2584 #ifdef ASSERT
2585       const char* name = call->as_CallStaticJava()->_name;
2586       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2587 #endif
2588       ciMethod* meth = call->as_CallJava()->method();
2589       if ((meth != nullptr) && meth->is_boxing_method()) {
2590         break; // Boxing methods do not modify any oops.
2591       }
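           // (Example for the break above: Integer.valueOf(int) takes only a
           // primitive argument and does not store oops into any pre-existing
           // object, so no Connection Graph edges are needed for it.)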
2592       BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2593       // fall-through if not a Java method or no analyzer information
2594       if (call_analyzer != nullptr) {
2595         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2596         bool ret_arg = returns_an_argument(call);
2597         for (DomainIterator di(call->as_CallJava()); di.has_next(); di.next()) {
2598           int k = di.i_domain() - TypeFunc::Parms;
2599           const Type* at = di.current_domain_cc();
2600           Node* arg = call->in(di.i_domain_cc());
2601           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2602           assert(!call_analyzer->is_arg_returned(k) || !meth->is_scalarized_arg(k) ||
2603                  !compatible_return(call->as_CallJava(), di.i_domain()) ||
2604                  call->proj_out_or_null(di.i_domain_cc() - di.first_field_pos() + TypeFunc::Parms + 1) == nullptr ||
2605                  _igvn->type(call->proj_out_or_null(di.i_domain_cc() - di.first_field_pos() + TypeFunc::Parms + 1)) == at,
2606                  "scalarized return and scalarized argument should match");
2607           if (at->isa_ptr() != nullptr && call_analyzer->is_arg_returned(k) && ret_arg) {
2608             // The call returns arguments.
2609             if (meth->is_scalarized_arg(k)) {
2610               ProjNode* res_proj = call->proj_out_or_null(di.i_domain_cc() - di.first_field_pos() + TypeFunc::Parms + 1);
2611               if (res_proj != nullptr) {
2612                 assert(_igvn->type(res_proj)->isa_ptr(), "scalarized return and scalarized argument should match");
2613                 if (res_proj->_con != TypeFunc::Parms) {
2614                   // Add an edge between the result projection for a field and the argument input for the same field.
2615                   PointsToNode* proj_ptn = ptnode_adr(res_proj->_idx);
2616                   add_edge(proj_ptn, arg_ptn);
2617                   if (!call_analyzer->is_return_local()) {
2618                     add_edge(proj_ptn, phantom_obj);
2619                   }
2620                 }
2621               }
2622             } else if (call_ptn != nullptr) { // Is call's result used?
2623               assert(call_ptn->is_LocalVar(), "node should be registered");
2624               assert(arg_ptn != nullptr, "node should be registered");
2625               add_edge(call_ptn, arg_ptn);
2626             }
2627           }
2628           if (at->isa_oopptr() != nullptr &&
2629               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2630             if (!call_analyzer->is_arg_stack(k)) {
2631               // The argument global escapes
2632               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2633             } else {
2634               set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2635               if (!call_analyzer->is_arg_local(k)) {
2636                 // The argument itself doesn't escape, but any fields might
2637                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2638               }
2639             }
2640           }
2641         }
2642         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2643           // The call returns arguments.
2644           assert(call_ptn->edge_count() > 0, "sanity");
2645           if (!call_analyzer->is_return_local()) {
2646             // The call also returns an unknown object.
2647             add_edge(call_ptn, phantom_obj);
2648           }
2649         }
2650         break;
2651       }
2652     }
2653     default: {
2654       // Fall through to here if this is not a Java method, there is no
2655       // analyzer information, or it is some other type of call: assume the
2656       // worst case, i.e. all arguments globally escape.
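           // Illustrative (hypothetical): for an opaque call such as
           //
           //   unknownNative(obj);   // no bytecode escape information
           //
           // 'obj' and everything reachable from it must be assumed to
           // escape globally.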
2657       const TypeTuple* d = call->tf()->domain_cc();
2658       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2659         const Type* at = d->field_at(i);
2660         if (at->isa_oopptr() != nullptr) {
2661           Node* arg = call->in(i);
2662           if (arg->is_AddP()) {
2663             arg = get_addp_base(arg);
2664           }
2665           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2666           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2667         }
2668       }
2669     }
2670   }
2671 }
2672 
2673 
2674 // Finish Graph construction.
2675 bool ConnectionGraph::complete_connection_graph(
2676                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2677                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

3055     PointsToNode* base = i.get();
3056     if (base->is_JavaObject()) {
3057       // Skip Allocate's fields which will be processed later.
3058       if (base->ideal_node()->is_Allocate()) {
3059         return 0;
3060       }
3061       assert(base == null_obj, "only null ptr base expected here");
3062     }
3063   }
3064   if (add_edge(field, phantom_obj)) {
3065     // New edge was added
3066     new_edges++;
3067     add_field_uses_to_worklist(field);
3068   }
3069   return new_edges;
3070 }
3071 
3072 // Find fields initializing values for allocations.
3073 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
3074   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
3075   PointsToNode* init_val = phantom_obj;
3076   Node* alloc = pta->ideal_node();
3077 
3078   // Do nothing for Allocate nodes since their field values are
3079   // "known" unless they are initialized by arraycopy/clone.
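       // Illustrative (hypothetical): after
       //
       //   Object[] a = (Object[])other.clone();
       //
       // the element values of 'a' are unknown, so each oop field of the
       // destination gets an edge to phantom_obj (or to the init value for
       // null-free flat arrays).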
3080   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
3081     if (alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) {
3082       // Null-free inline type arrays are initialized with an init value instead of null
3083       init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::InitValue)->_idx);
3084       assert(init_val != nullptr, "init value should be registered");
3085     } else {
3086       return 0;
3087     }
3088   }
3089   // A non-escaped allocation returned from a Java or runtime call has unknown values in its fields.
3090   assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
3091 #ifdef ASSERT
3092   if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
3093     const char* name = alloc->as_CallStaticJava()->_name;
3094     assert(alloc->as_CallStaticJava()->is_call_to_multianewarray_stub() ||
3095            strncmp(name, "load_unknown_inline", 19) == 0 ||
3096            strncmp(name, "store_inline_type_fields_to_buf", 31) == 0, "sanity");
3097   }
3098 #endif
3100   int new_edges = 0;
3101   for (EdgeIterator i(pta); i.has_next(); i.next()) {
3102     PointsToNode* field = i.get();
3103     if (field->is_Field() && field->as_Field()->is_oop()) {
3104       if (add_edge(field, init_val)) {
3105         // New edge was added
3106         new_edges++;
3107         add_field_uses_to_worklist(field->as_Field());
3108       }
3109     }
3110   }
3111   return new_edges;
3112 }
3113 
3114 // Find fields initializing values for allocations.
3115 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
3116   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
3117   Node* alloc = pta->ideal_node();
3118   // Do nothing for Call nodes since their field values are unknown.
3119   if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) {
3120     return 0;
3121   }
3122   InitializeNode* ini = alloc->as_Allocate()->initialization();
3123   bool visited_bottom_offset = false;
3124   GrowableArray<int> offsets_worklist;
3125   int new_edges = 0;
3126 
3127   // Check if an oop field's initializing value is recorded and add
3128   // a corresponding null if the field's value is not recorded.
3129   // The Connection Graph does not record a default initialization by null
3130   // captured by the Initialize node.
3131   //
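       // Illustrative (hypothetical): for
       //
       //   Point p = new Point();   // no explicit store to p.next
       //
       // the Initialize node captures no store for the (hypothetical) oop
       // field 'next', so an edge from that field to null_obj is added here.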
3132   for (EdgeIterator i(pta); i.has_next(); i.next()) {
3133     PointsToNode* field = i.get(); // Field (AddP)
3134     if (!field->is_Field() || !field->as_Field()->is_oop()) {
3135       continue; // Not oop field
3136     }
3137     int offset = field->as_Field()->offset();
3138     if (offset == Type::OffsetBot) {
3139       if (!visited_bottom_offset) {

3185               } else {
3186                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
3187                   tty->print_cr("----------init store has invalid value -----");
3188                   store->dump();
3189                   val->dump();
3190                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
3191                 }
3192                 for (EdgeIterator j(val); j.has_next(); j.next()) {
3193                   PointsToNode* obj = j.get();
3194                   if (obj->is_JavaObject()) {
3195                     if (!field->points_to(obj->as_JavaObject())) {
3196                       missed_obj = obj;
3197                       break;
3198                     }
3199                   }
3200                 }
3201               }
3202               if (missed_obj != nullptr) {
3203                 tty->print_cr("----------field---------------------------------");
3204                 field->dump();
3205                 tty->print_cr("----------missed reference to object------------");
3206                 missed_obj->dump();
3207                 tty->print_cr("----------object referenced by init store-------");
3208                 store->dump();
3209                 val->dump();
3210                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
3211               }
3212             }
3213 #endif
3214           } else {
3215             // There could be initializing stores which follow allocation.
3216             // For example, a volatile field store is not collected
3217             // by the Initialize node.
3218             //
3219             // Need to check for dependent loads to separate such stores from
3220             // stores which follow loads. For now, add initial value null so
3221             // that the pointer comparison optimization works correctly.
3222           }
3223         }
3224         if (value == nullptr) {
3225           // A field's initializing value was not recorded. Add null.
3226           if (add_edge(field, null_obj)) {
3227             // New edge was added

3552         assert(field->edge_count() > 0, "sanity");
3553       }
3554     }
3555   }
3556 }
3557 #endif
3558 
3559 // Optimize ideal graph.
3560 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3561                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3562   Compile* C = _compile;
3563   PhaseIterGVN* igvn = _igvn;
3564   if (EliminateLocks) {
3565     // Mark locks before changing ideal graph.
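         // Illustrative (hypothetical) shape:
         //
         //   Object lock = new Object();  // NoEscape allocation
         //   synchronized (lock) { ... }  // Lock/Unlock can be marked
         //                                // non_esc_obj and eliminated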
3566     int cnt = C->macro_count();
3567     for (int i = 0; i < cnt; i++) {
3568       Node *n = C->macro_node(i);
3569       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3570         AbstractLockNode* alock = n->as_AbstractLock();
3571         if (!alock->is_non_esc_obj()) {
3572           const Type* obj_type = igvn->type(alock->obj_node());
3573           if (can_eliminate_lock(alock) && !obj_type->is_inlinetypeptr()) {
3574             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3575             // The lock could be marked eliminated by lock coarsening
3576             // code during first IGVN before EA. Replace coarsened flag
3577             // to eliminate all associated locks/unlocks.
3578 #ifdef ASSERT
3579             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3580 #endif
3581             alock->set_non_esc_obj();
3582           }
3583         }
3584       }
3585     }
3586   }
3587 
3588   if (OptimizePtrCompare) {
3589     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3590       Node *n = ptr_cmp_worklist.at(i);
3591       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3592       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3593       if (tcmp->singleton()) {

3595 #ifndef PRODUCT
3596         if (PrintOptimizePtrCompare) {
3597           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3598           if (Verbose) {
3599             n->dump(1);
3600           }
3601         }
3602 #endif
3603         igvn->replace_node(n, cmp);
3604       }
3605     }
3606   }
3607 
3608   // For MemBarStoreStore nodes added in library_call.cpp, check
3609   // escape status of associated AllocateNode and optimize out
3610   // MemBarStoreStore node if the allocated object never escapes.
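       // Illustrative: the barrier exists to order the initializing stores
       // with the publication of the new object to other threads; if the
       // object provably never globally escapes, no other thread can observe
       // it, so the cheaper MemBarCPUOrder (or nothing, for inline type
       // buffers) suffices.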
3611   for (int i = 0; i < storestore_worklist.length(); i++) {
3612     Node* storestore = storestore_worklist.at(i);
3613     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3614     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3615       if (alloc->in(AllocateNode::InlineType) != nullptr) {
3616         // Non-escaping inline type buffer allocations don't require a membar
3617         storestore->as_MemBar()->remove(_igvn);
3618       } else {
3619         MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3620         mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3621         mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3622         igvn->register_new_node_with_optimizer(mb);
3623         igvn->replace_node(storestore, mb);
3624       }
3625     }
3626   }
3627 }
3628 
3629 // Atomic flat accesses on non-escaping objects can be optimized to non-atomic accesses
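     // A sketch of the idea (hypothetical Java shape):
     //
     //   v = a[i];   // atomic LoadFlat of a flat element
     //
     // If 'a' never globally escapes, no other thread can race on the element,
     // so tearing is unobservable and the access can be expanded into plain
     // per-field loads/stores.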
3630 void ConnectionGraph::optimize_flat_accesses(GrowableArray<SafePointNode*>& sfn_worklist) {
3631   PhaseIterGVN& igvn = *_igvn;
3632   bool delay = igvn.delay_transform();
3633   igvn.set_delay_transform(true);
3634   igvn.C->for_each_flat_access([&](Node* n) {
3635     Node* base = n->is_LoadFlat() ? n->as_LoadFlat()->base() : n->as_StoreFlat()->base();
3636     if (!not_global_escape(base)) {
3637       return;
3638     }
3639 
3640     bool expanded;
3641     if (n->is_LoadFlat()) {
3642       expanded = n->as_LoadFlat()->expand_non_atomic(igvn);
3643     } else {
3644       expanded = n->as_StoreFlat()->expand_non_atomic(igvn);
3645     }
3646     if (expanded) {
3647       sfn_worklist.remove(n->as_SafePoint());
3648       igvn.C->remove_flat_access(n);
3649     }
3650   });
3651   igvn.set_delay_transform(delay);
3652 }
3653 
3654 // Optimize objects compare.
3655 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3656   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0, 1]
3657   if (!OptimizePtrCompare) {
3658     return UNKNOWN;
3659   }
3660   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3661   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
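       // Illustrative (hypothetical): when 'left' is a unique, non-escaping
       // allocation and 'right' provably never points to it,
       //
       //   Point p = new Point();
       //   if (p == q) { ... }    // CmpP folds to NE
       //
       // the comparison can be constant-folded.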
3662 
3663   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3664   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3665   JavaObjectNode* jobj1 = unique_java_object(left);
3666   JavaObjectNode* jobj2 = unique_java_object(right);
3667 
3668   // The use of this method during allocation merge reduction may cause 'left'
3669   // or 'right' to be something (e.g., a Phi) that isn't in the connection graph
3670   // or that doesn't reference a unique java object.
3671   if (ptn1 == nullptr || ptn2 == nullptr ||
3672       jobj1 == nullptr || jobj2 == nullptr) {
3673     return UNKNOWN;

3793   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3794   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3795   PointsToNode* ptadr = _nodes.at(n->_idx);
3796   if (ptadr != nullptr) {
3797     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3798     return;
3799   }
3800   Compile* C = _compile;
3801   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3802   map_ideal_node(n, ptadr);
3803   // Add edge from arraycopy node to source object.
3804   (void)add_edge(ptadr, src);
3805   src->set_arraycopy_src();
3806   // Add edge from destination object to arraycopy node.
3807   (void)add_edge(dst, ptadr);
3808   dst->set_arraycopy_dst();
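       // Resulting Connection Graph sketch:
       //
       //   dst -> Arraycopy -> src
       //
       // so points-to facts about the source's elements flow into the
       // destination's fields during graph completion.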
3809 }
3810 
3811 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3812   const Type* adr_type = n->as_AddP()->bottom_type();
3813   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
3814   BasicType bt = T_INT;
3815   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
3816     // Check only oop fields.
3817     if (!adr_type->isa_aryptr() ||
3818         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3819         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3820       // OffsetBot is used to reference an array's element. Ignore the first AddP.
3821       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3822         bt = T_OBJECT;
3823       }
3824     }
3825   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3826     if (adr_type->isa_instptr()) {
3827       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
3828       if (field != nullptr) {
3829         bt = field->layout_type();
3830       } else {
3831         // Check for unsafe oop field access
3832         if (has_oop_node_outs(n)) {
3833           bt = T_OBJECT;
3834           (*unsafe) = true;
3835         }
3836       }
3837     } else if (adr_type->isa_aryptr()) {
3838       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3839         // Ignore array length load.
3840       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3841         // Ignore first AddP.
3842       } else {
3843         const Type* elemtype = adr_type->is_aryptr()->elem();
3844         if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
3845           ciInlineKlass* vk = elemtype->inline_klass();
3846           field_offset += vk->payload_offset();
3847           ciField* field = vk->get_field_by_offset(field_offset, false);
3848           if (field != nullptr) {
3849             bt = field->layout_type();
3850           } else {
3851             assert(field_offset == vk->payload_offset() + vk->null_marker_offset_in_payload(), "no field or null marker of %s at offset %d", vk->name()->as_utf8(), field_offset);
3852             bt = T_BOOLEAN;
3853           }
3854         } else {
3855           bt = elemtype->array_element_basic_type();
3856         }
3857       }
3858     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3859       // Allocation initialization, ThreadLocal field access, unsafe access
3860       if (has_oop_node_outs(n)) {
3861         bt = T_OBJECT;
3862       }
3863     }
3864   }
3865   // Note: T_NARROWOOP is not classed as a real reference type
3866   bool res = (is_reference_type(bt) || bt == T_NARROWOOP);
3867   assert(!has_oop_node_outs(n) || res, "sanity: AddP has oop outs, needs to be treated as oop field");
3868   return res;
3869 }
3870 
3871 bool ConnectionGraph::has_oop_node_outs(Node* n) {
3872   return n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3873          n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3874          n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3875          BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n);
3876 }

4039             return true;
4040           }
4041         }
4042       }
4043     }
4044   }
4045   return false;
4046 }
4047 
4048 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
4049   const Type *adr_type = phase->type(adr);
4050   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
4051     // We are computing a raw address for a store captured by an Initialize;
4052     // compute an appropriate address type. AddP cases #3 and #5 (see below).
4053     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
4054     assert(offs != Type::OffsetBot ||
4055            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
4056            "offset must be a constant or it is the initialization of an array");
4057     return offs;
4058   }
4059   return adr_type->is_ptr()->flat_offset();


4060 }
4061 
4062 Node* ConnectionGraph::get_addp_base(Node *addp) {
4063   assert(addp->is_AddP(), "must be AddP");
4064   //
4065   // AddP cases for Base and Address inputs:
4066   // case #1. Direct object's field reference:
4067   //     Allocate
4068   //       |
4069   //     Proj #5 ( oop result )
4070   //       |
4071   //     CheckCastPP (cast to instance type)
4072   //      | |
4073   //     AddP  ( base == address )
4074   //
4075   // case #2. Indirect object's field reference:
4076   //      Phi
4077   //       |
4078   //     CastPP (cast to instance type)
4079   //      | |
4080   //     AddP  ( base == address )
4081   //
4082   // case #3. Raw object's field reference for Initialize node.
4083   //          Could have an additional Phi merging multiple allocations.
4084   //      Allocate
4085   //        |
4086   //      Proj #5 ( oop result )
4087   //  top   |
4088   //     \  |
4089   //     AddP  ( base == top )
4090   //
4091   // case #4. Array's element reference:
4092   //   {CheckCastPP | CastPP}
4093   //     |  | |
4094   //     |  AddP ( array's element offset )
4095   //     |  |
4096   //     AddP ( array's offset )
4097   //
4098   // case #5. Raw object's field reference for arraycopy stub call:
4099   //          The inline_native_clone() case when the arraycopy stub is called
4100   //          after the allocation before Initialize and CheckCastPP nodes.
4101   //      Allocate
4102   //        |
4103   //      Proj #5 ( oop result )

4114   // case #7. Klass's field reference.
4115   //      LoadKlass
4116   //       | |
4117   //       AddP  ( base == address )
4118   //
4119   // case #8. narrow Klass's field reference.
4120   //      LoadNKlass
4121   //       |
4122   //      DecodeN
4123   //       | |
4124   //       AddP  ( base == address )
4125   //
4126   // case #9. Mixed unsafe access
4127   //    {instance}
4128   //        |
4129   //      CheckCastPP (raw)
4130   //  top   |
4131   //     \  |
4132   //     AddP  ( base == top )
4133   //
4134   // case #10. Klass fetched with
4135   //           LibraryCallKit::load_*_refined_array_klass()
4136   //           which has an extra Phi.
4137   //  LoadKlass   LoadKlass
4138   //       |          |
4139   //     CastPP    CastPP
4140   //          \   /
4141   //           Phi
4142   //      top   |
4143   //         \  |
4144   //         AddP  ( base == top )
4145   //
4146   Node *base = addp->in(AddPNode::Base);
4147   if (base->uncast()->is_top()) { // AddP cases #3, #6, #9, and #10.
4148     base = addp->in(AddPNode::Address);
4149     while (base->is_AddP()) {
4150       // Case #6 (unsafe access) may have several chained AddP nodes.
4151       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
4152       base = base->in(AddPNode::Address);
4153     }
4154     if (base->Opcode() == Op_CheckCastPP &&
4155         base->bottom_type()->isa_rawptr() &&
4156         _igvn->type(base->in(1))->isa_oopptr()) {
4157       base = base->in(1); // Case #9
4158     } else {
4159       // Case #3, #6, and #10
4160       Node* uncast_base = base->uncast();
4161       int opcode = uncast_base->Opcode();
4162       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
4163              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
4164              (_igvn->C->is_osr_compilation() && uncast_base->is_Parm() && uncast_base->as_Parm()->_con == TypeFunc::Parms) ||
4165              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
4166              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_klassptr() != nullptr)) ||
4167              is_captured_store_address(addp) ||
4168              is_load_array_klass_related(uncast_base), "sanity");
4169     }
4170   }
4171   return base;
4172 }
4173 
4174 #ifdef ASSERT
4175 // Case #10
4176 bool ConnectionGraph::is_load_array_klass_related(const Node* uncast_base) {
4177   if (!uncast_base->is_Phi() || uncast_base->req() != 3) {
4178     return false;
4179   }
4180   Node* in1 = uncast_base->in(1);
4181   Node* in2 = uncast_base->in(2);
4182   return in1->uncast()->Opcode() == Op_LoadKlass &&
4183          in2->uncast()->Opcode() == Op_LoadKlass;
4184 }
4185 #endif
4186 
4187 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
4188   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
4189   Node* addp2 = addp->raw_out(0);
4190   if (addp->outcnt() == 1 && addp2->is_AddP() &&
4191       addp2->in(AddPNode::Base) == n &&
4192       addp2->in(AddPNode::Address) == addp) {
4193     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
4194     //
4195     // Find the array's offset to push it on the worklist first so that
4196     // the array's element offset (pushed second) is processed first.
4197     // This avoids a CastPP for the array's offset:
4198     // otherwise the inserted CastPP (LocalVar) would point to what
4199     // the AddP (Field) points to, which would be wrong since the
4200     // algorithm expects the CastPP to point to the same objects
4201     // as AddP's base CheckCastPP (LocalVar).
4202     //
4203     //    ArrayAllocation
4204     //     |
4205     //    CheckCastPP
4206     //     |

4223   }
4224   return nullptr;
4225 }
4226 
4227 //
4228 // Adjust the type and inputs of an AddP which computes the
4229 // address of a field of an instance
4230 //
4231 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
4232   PhaseGVN* igvn = _igvn;
4233   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
4234   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
4235   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
4236   if (t == nullptr) {
4237     // We are computing a raw address for a store captured by an Initialize;
4238     // compute an appropriate address type (cases #3 and #5).
4239     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
4240     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
4241     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
4242     assert(offs != Type::OffsetBot, "offset must be a constant");
4243     if (base_t->isa_aryptr() != nullptr) {
4244       // In the case of a flat inline type array, each field has its
4245       // own slice so we need to extract the field being accessed from
4246       // the address computation
4247       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
4248     } else {
4249       t = base_t->add_offset(offs)->is_oopptr();
4250     }
4251   }
4252   int inst_id = base_t->instance_id();
4253   assert(!t->is_known_instance() || t->instance_id() == inst_id,
4254                              "old type must be non-instance or match new type");
4255 
4256   // The type 't' could be a subclass of 'base_t'.
4257   // As a result t->offset() could be larger than base_t's size, which would
4258   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
4259   // constructor verifies the correctness of the offset.
4260   //
4261   // This can happen on a subclass's branch (from type profiling
4262   // inlining) which was not eliminated during parsing since the exactness
4263   // of the allocation type was not propagated to the subclass type check.
4264   //
4265   // Or the type 't' might not be related to 'base_t' at all.
4266   // That can happen when the CHA type differs from the MDO type on a dead path
4267   // (for example, from an instanceof check) which is not collapsed during parsing.
4268   //
4269   // Do nothing for such an AddP node and don't process its users since
4270   // this code branch will go away.
4271   //
4272   if (!t->is_known_instance() &&
4273       !base_t->maybe_java_subtype_of(t)) {
4274      return false; // bail out
4275   }
4276   const TypePtr* tinst = base_t->add_offset(t->offset());
4277   if (tinst->isa_aryptr() && t->isa_aryptr()) {
4278     // In the case of a flat inline type array, each field has its
4279     // own slice so we need to keep track of the field being accessed.
4280     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
4281     // Keep array properties (not flat/null-free)
4282     tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
4283     if (tinst == nullptr) {
4284       return false; // Skip dead path with inconsistent properties
4285     }
4286   }
4287 
4288   // Do NOT remove the next line: ensure a new alias index is allocated
4289   // for the instance type. Note: the C++ compiler will not remove the call
4290   // since it has a side effect.
4291   int alias_idx = _compile->get_alias_index(tinst);
4292   igvn->set_type(addp, tinst);
4293   // record the allocation in the node map
4294   set_map(addp, get_map(base->_idx));
4295   // Set addp's Base and Address to 'base'.
4296   Node *abase = addp->in(AddPNode::Base);
4297   Node *adr   = addp->in(AddPNode::Address);
4298   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
4299       adr->in(0)->_idx == (uint)inst_id) {
4300     // Skip AddP cases #3 and #5.
4301   } else {
4302     assert(!abase->is_top(), "sanity"); // AddP case #3
4303     if (abase != base) {
4304       igvn->hash_delete(addp);
4305       addp->set_req(AddPNode::Base, base);
4306       if (abase == adr) {
4307         addp->set_req(AddPNode::Address, base);

4876       //   - not determined to be ineligible by escape analysis
4877       set_map(alloc, n);
4878       set_map(n, alloc);
4879       const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
4880       igvn->hash_delete(n);
4881       igvn->set_type(n,  tinst);
4882       n->raise_bottom_type(tinst);
4883       igvn->hash_insert(n);
4884       record_for_optimizer(n);
4885       // Allocate an alias index for the header fields. Accesses to
4886       // the header emitted during macro expansion wouldn't have
4887       // correct memory state otherwise.
4888       _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
4889       _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
4890       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
4891         // Add a new NarrowMem projection for each existing NarrowMem projection with new adr type
4892         InitializeNode* init = alloc->as_Allocate()->initialization();
4893         assert(init != nullptr, "can't find Initialization node for this Allocate node");
4894         auto process_narrow_proj = [&](NarrowMemProjNode* proj) {
4895           const TypePtr* adr_type = proj->adr_type();
4896           const TypePtr* new_adr_type = tinst->with_offset(adr_type->offset());
4897           if (adr_type->isa_aryptr()) {
4898             // In the case of a flat inline type array, each field has its own slice so we need a
4899             // NarrowMemProj for each field of the flat array elements
4900             new_adr_type = new_adr_type->is_aryptr()->with_field_offset(adr_type->is_aryptr()->field_offset().get());
4901           }
4902           if (adr_type != new_adr_type && !init->already_has_narrow_mem_proj_with_adr_type(new_adr_type)) {
4903             DEBUG_ONLY( uint alias_idx = _compile->get_alias_index(new_adr_type); )
4904             assert(_compile->get_general_index(alias_idx) == _compile->get_alias_index(adr_type), "new adr type should be narrowed down from existing adr type");
4905             NarrowMemProjNode* new_proj = new NarrowMemProjNode(init, new_adr_type);
4906             igvn->set_type(new_proj, new_proj->bottom_type());
4907             record_for_optimizer(new_proj);
4908             set_map(proj, new_proj); // record it so ConnectionGraph::find_inst_mem() can find it
4909           }
4910         };
4911         init->for_each_narrow_mem_proj_with_new_uses(process_narrow_proj);
4912 
4913         // First, put on the worklist all Field edges from the Connection Graph,
4914         // which is more accurate than putting immediate users from the Ideal Graph.
4915         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
4916           PointsToNode* tgt = e.get();
4917           if (tgt->is_Arraycopy()) {
4918             continue;
4919           }
4920           Node* use = tgt->ideal_node();
4921           assert(tgt->is_Field() && use->is_AddP(),

4998         ptnode_adr(n->_idx)->dump();
4999         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
5000 #endif
5001         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
5002         return;
5003       } else {
5004         Node *val = get_map(jobj->idx());   // CheckCastPP node
5005         TypeNode *tn = n->as_Type();
5006         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
5007         assert(tinst != nullptr && tinst->is_known_instance() &&
5008                tinst->instance_id() == jobj->idx(), "instance type expected.");
5009 
5010         const Type *tn_type = igvn->type(tn);
5011         const TypeOopPtr *tn_t;
5012         if (tn_type->isa_narrowoop()) {
5013           tn_t = tn_type->make_ptr()->isa_oopptr();
5014         } else {
5015           tn_t = tn_type->isa_oopptr();
5016         }
5017         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
5018           if (tn_t->isa_aryptr()) {
5019             // Keep array properties (not flat/null-free)
5020             tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
5021             if (tinst == nullptr) {
5022               continue; // Skip dead path with inconsistent properties
5023             }
5024           }
5025           if (tn_type->isa_narrowoop()) {
5026             tn_type = tinst->make_narrowoop();
5027           } else {
5028             tn_type = tinst;
5029           }
5030           igvn->hash_delete(tn);
5031           igvn->set_type(tn, tn_type);
5032           tn->set_type(tn_type);
5033           igvn->hash_insert(tn);
5034           record_for_optimizer(n);
5035         } else {
5036           assert(tn_type == TypePtr::NULL_PTR ||
5037                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
5038                  "unexpected type");
5039           continue; // Skip dead path with different type
5040         }
5041       }
5042     } else {
5043       DEBUG_ONLY(n->dump();)
5044       assert(false, "EA: unexpected node");
5045       continue;
5046     }
5047     // push allocation's users on appropriate worklist
5048     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5049       Node *use = n->fast_out(i);
5050       if (use->is_Mem() && use->in(MemNode::Address) == n) {
5051         // Load/store to instance's field
5052         memnode_worklist.append_if_missing(use);
5053       } else if (use->is_MemBar()) {
5054         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
5055           memnode_worklist.append_if_missing(use);
5056         }
5057       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
5058         Node* addp2 = find_second_addp(use, n);
5059         if (addp2 != nullptr) {
5060           alloc_worklist.append_if_missing(addp2);
5061         }
5062         alloc_worklist.append_if_missing(use);
5063       } else if (use->is_Phi() ||
5064                  use->is_CheckCastPP() ||
5065                  use->is_EncodeNarrowPtr() ||
5066                  use->is_DecodeNarrowPtr() ||
5067                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
5068         alloc_worklist.append_if_missing(use);
5069 #ifdef ASSERT
5070       } else if (use->is_Mem()) {
5071         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
5072       } else if (use->is_MergeMem()) {
5073         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
5074       } else if (use->is_SafePoint()) {
5075         // Look for MergeMem nodes for calls which reference unique allocation
5076         // (through CheckCastPP nodes) even for debug info.
5077         Node* m = use->in(TypeFunc::Memory);
5078         if (m->is_MergeMem()) {
5079           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
5080         }
5081       } else if (use->Opcode() == Op_EncodeISOArray) {
5082         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
5083           // EncodeISOArray overwrites destination array
5084           memnode_worklist.append_if_missing(use);
5085         }
5086       } else if (use->Opcode() == Op_Return) {
5087         // Allocation is referenced by field of returned inline type
5088         assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
5089       } else {
5090         uint op = use->Opcode();
5091         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
5092             (use->in(MemNode::Memory) == n)) {
5093           // They overwrite the memory edge corresponding to the destination array.
5094           memnode_worklist.append_if_missing(use);
5095         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
5096               op == Op_CastP2X ||
5097               op == Op_FastLock || op == Op_AryEq ||
5098               op == Op_StrComp || op == Op_CountPositives ||
5099               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
5100               op == Op_StrEquals || op == Op_VectorizedHashCode ||
5101               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
5102               op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
5103               op == Op_ReinterpretS2HF ||
5104               op == Op_ReachabilityFence ||
5105               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
5106           n->dump();
5107           use->dump();
5108           assert(false, "EA: missing allocation reference path");
5109         }
5110 #endif
5111       }
5112     }
5113 
5114   }
5115 
5116 #ifdef ASSERT
5117   if (VerifyReduceAllocationMerges) {
5118     for (uint i = 0; i < reducible_merges.size(); i++) {
5119       Node* phi = reducible_merges.at(i);
5120 
5121       if (!reduced_merges.member(phi)) {
5122         phi->dump(2);

5191         n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
5192         if (n == nullptr) {
5193           continue;
5194         }
5195       }
5196     } else if (n->is_CallLeaf()) {
5197       // Runtime calls with narrow memory input (no MergeMem node)
5198       // get the memory projection
5199       n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
5200       if (n == nullptr) {
5201         continue;
5202       }
5203     } else if (n->Opcode() == Op_StrInflatedCopy) {
5204       // Check direct uses of StrInflatedCopy.
5205       // It is a memory type node; there is no special SCMemProj node.
5206     } else if (n->Opcode() == Op_StrCompressedCopy ||
5207                n->Opcode() == Op_EncodeISOArray) {
5208       // get the memory projection
5209       n = n->find_out_with(Op_SCMemProj);
5210       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
5211     } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
5212                strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
5213       n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
5214     } else if (n->is_Proj()) {
5215       assert(n->in(0)->is_Initialize(), "we only push memory projections for Initialize");
5216     } else {
5217 #ifdef ASSERT
5218       if (!n->is_Mem()) {
5219         n->dump();
5220       }
5221       assert(n->is_Mem(), "memory node required.");
5222 #endif
5223       Node *addr = n->in(MemNode::Address);
5224       const Type *addr_t = igvn->type(addr);
5225       if (addr_t == Type::TOP) {
5226         continue;
5227       }
5228       assert(addr_t->isa_ptr() != nullptr, "pointer type required.");
5229       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
5230       assert((uint)alias_idx < new_index_end, "wrong alias index");
5231       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
5232       if (_compile->failing()) {
5233         return;

5245         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
5246       }
5247     }
5248     // push user on appropriate worklist
5249     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5250       Node *use = n->fast_out(i);
5251       if (use->is_Phi() || use->is_ClearArray()) {
5252         memnode_worklist.append_if_missing(use);
5253       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
5254         memnode_worklist.append_if_missing(use);
5255       } else if (use->is_MemBar() || use->is_CallLeaf()) {
5256         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
5257           memnode_worklist.append_if_missing(use);
5258         }
5259       } else if (use->is_Proj()) {
5260         assert(n->is_Initialize(), "We only push projections of Initialize");
5261         if (use->as_Proj()->_con == TypeFunc::Memory) { // Ignore precedent edge
5262           memnode_worklist.append_if_missing(use);
5263         }
5264 #ifdef ASSERT
5265       } else if (use->is_Mem()) {
5266         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
5267       } else if (use->is_MergeMem()) {
5268         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
5269       } else if (use->Opcode() == Op_EncodeISOArray) {
5270         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
5271           // EncodeISOArray overwrites destination array
5272           memnode_worklist.append_if_missing(use);
5273         }
5274       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
5275                  strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
5276         // store_unknown_inline overwrites destination array
5277         memnode_worklist.append_if_missing(use);
5278       } else {
5279         uint op = use->Opcode();
5280         if ((use->in(MemNode::Memory) == n) &&
5281             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
5282           // They overwrite the memory edge corresponding to the destination array.
5283           memnode_worklist.append_if_missing(use);
5284         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
5285               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
5286               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
5287               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
5288           n->dump();
5289           use->dump();
5290           assert(false, "EA: missing memory path");
5291         }
5292 #endif
5293       }
5294     }
5295   }
5296 
5297   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
5298   //            Walk each memory slice moving the first node encountered of each
5299   //            instance type to the input corresponding to its alias index.
5300   uint length = mergemem_worklist.length();
5301   for( uint next = 0; next < length; ++next ) {
5302     MergeMemNode* nmm = mergemem_worklist.at(next);
5303     assert(!visited.test_set(nmm->_idx), "should not be visited before");
5304     // Note: we don't want to use MergeMemStream here because we only want to
5305     // scan inputs which exist at the start, not ones we add during processing.
5306     // Note 2: MergeMem may already contain instance memory slices added
5307     // during find_inst_mem() call when memory nodes were processed above.

5370         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
5371       } else if (_invocation > 0) {
5372         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
5373       } else {
5374         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
5375       }
5376       return;
5377     }
5378 
5379     igvn->hash_insert(nmm);
5380     record_for_optimizer(nmm);
5381   }
5382 
5383   _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_3, 5);
5384 
5385   //  Phase 4:  Update the inputs of non-instance memory Phis and
5386   //            the Memory input of memnodes
5387   // First update the inputs of any non-instance Phi's from
5388   // which we split out an instance Phi.  Note we don't have
5389   // to recursively process Phi's encountered on the input memory
5390   // chains as is done in split_memory_phi() since they will
5391   // also be processed here.
5392   for (int j = 0; j < orig_phis.length(); j++) {
5393     PhiNode *phi = orig_phis.at(j);
5394     int alias_idx = _compile->get_alias_index(phi->adr_type());
5395     igvn->hash_delete(phi);
5396     for (uint i = 1; i < phi->req(); i++) {
5397       Node *mem = phi->in(i);
5398       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
5399       if (_compile->failing()) {
5400         return;
5401       }
5402       if (mem != new_mem) {
5403         phi->set_req(i, new_mem);
5404       }
5405     }
5406     igvn->hash_insert(phi);
5407     record_for_optimizer(phi);
5408   }
5409 
5410   // Update the memory inputs of MemNodes with the value we computed
< prev index next >