src/hotspot/share/opto/escape.cpp

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.hpp"

  32 #include "memory/resourceArea.hpp"
  33 #include "opto/c2compiler.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/escape.hpp"
  39 #include "opto/macro.hpp"
  40 #include "opto/locknode.hpp"
  41 #include "opto/phaseX.hpp"
  42 #include "opto/movenode.hpp"
  43 #include "opto/narrowptrnode.hpp"
  44 #include "opto/castnode.hpp"
  45 #include "opto/rootnode.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  49   // If ReduceAllocationMerges is enabled we might call split_through_phi during
  50   // split_unique_types and that will create additional nodes that need to be
  51   // pushed to the ConnectionGraph. The code below bumps the initial capacity of

 147   GrowableArray<SafePointNode*>  sfn_worklist;
 148   GrowableArray<MergeMemNode*>   mergemem_worklist;
 149   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 150 
 151   { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
 152 
 153   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 154   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
 155   // Initialize worklist
 156   if (C->root() != nullptr) {
 157     ideal_nodes.push(C->root());
 158   }
  159   // Processed ideal nodes are unique on the ideal_nodes list,
  160   // but several ideal nodes are mapped to the phantom_obj.
  161   // To avoid duplicated entries on the following worklists,
  162   // add the phantom_obj only once to them.
 163   ptnodes_worklist.append(phantom_obj);
 164   java_objects_worklist.append(phantom_obj);
 165   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 166     Node* n = ideal_nodes.at(next);










 167     // Create PointsTo nodes and add them to Connection Graph. Called
  168     // only once per ideal node since ideal_nodes is a Unique_Node list.
 169     add_node_to_connection_graph(n, &delayed_worklist);
 170     PointsToNode* ptn = ptnode_adr(n->_idx);
 171     if (ptn != nullptr && ptn != phantom_obj) {
 172       ptnodes_worklist.append(ptn);
 173       if (ptn->is_JavaObject()) {
 174         java_objects_worklist.append(ptn->as_JavaObject());
 175         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 176             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
  177           // Only allocations and Java static call results are interesting.
 178           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 179         }
 180       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 181         oop_fields_worklist.append(ptn->as_Field());
 182       }
 183     }
 184     // Collect some interesting nodes for further use.
 185     switch (n->Opcode()) {
 186       case Op_MergeMem:

 539         NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
 540         return false;
 541       }
 542     } else if (use->is_AddP()) {
 543       Node* addp = use;
 544       for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
 545         Node* use_use = addp->fast_out(j);
 546         const Type* load_type = _igvn->type(use_use);
 547 
 548         if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
 549           NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", n->_idx, _invocation, use_use->Name());)
 550           return false;
 551         } else if (load_type->isa_narrowklass() || load_type->isa_klassptr()) {
 552           NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. [Narrow] Klass Load: %s", n->_idx, _invocation, use_use->Name());)
 553           return false;
 554         }
 555       }
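      // Illustrative Java shape (hypothetical classes A and B, not from the
      // original sources) of one pattern these checks reject: the klass is
      // loaded through the merged pointer, e.g. for getClass() or a virtual
      // dispatch, so the Phi cannot be reduced:
      //
      //   Object o = cond ? new A() : new B();
      //   Class<?> c = o.getClass();   // [narrow] klass load through the merge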
 556     } else if (nesting > 0) {
 557       NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), nesting);)
 558       return false;
 559     } else if (use->is_CastPP()) {

 560       const Type* cast_t = _igvn->type(use);
 561       if (cast_t == nullptr || cast_t->make_ptr()->isa_instptr() == nullptr) {
 562 #ifndef PRODUCT
 563         if (TraceReduceAllocationMerges) {
 564           tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
 565           use->dump();
 566         }
 567 #endif
 568         return false;
 569       }
 570 
 571       bool is_trivial_control = use->in(0) == nullptr || use->in(0) == n->in(0);
 572       if (!is_trivial_control) {
 573         // If it's not a trivial control then we check if we can reduce the
 574         // CmpP/N used by the If controlling the cast.
 575         if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
 576           Node* iff = use->in(0)->in(0);
 577           if (iff->Opcode() == Op_If && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
 578             Node* iff_cmp = iff->in(1)->in(1);
 579             int opc = iff_cmp->Opcode();

1220 
1221     // The next two inputs are:
1222     //  (1) A copy of the original pointer to NSR objects.
1223     //  (2) A selector, used to decide if we need to rematerialize an object
1224     //      or use the pointer to a NSR object.
1225     // See more details of these fields in the declaration of SafePointScalarMergeNode
1226     sfpt->add_req(nsr_merge_pointer);
1227     sfpt->add_req(selector);
1228 
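    // Illustrative Java shape (hypothetical class Point and field 'cached',
    // not from the original sources) of an allocation merge described here:
    // at a safepoint in use(), the merge records the pointer for the
    // non-scalar-replaceable 'cached' path plus a selector telling deopt
    // whether 'p' must instead be rematerialized from the scalarized
    // description of 'new Point(1, 2)':
    //
    //   Point p = cond ? new Point(1, 2) : cached;
    //   use(p);   // safepoint keeps debug info for 'p'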
1229     for (uint i = 1; i < ophi->req(); i++) {
1230       Node* base = ophi->in(i);
1231       JavaObjectNode* ptn = unique_java_object(base);
1232 
1233       // If the base is not scalar replaceable we don't need to register information about
1234       // it at this time.
1235       if (ptn == nullptr || !ptn->scalar_replaceable()) {
1236         continue;
1237       }
1238 
1239       AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1240       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);





1241       if (sobj == nullptr) {
1242         return false;
1243       }
1244 
1245       // Now make a pass over the debug information replacing any references
1246       // to the allocated object with "sobj"
1247       Node* ccpp = alloc->result_cast();
1248       sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1249 
1250       // Register the scalarized object as a candidate for reallocation
1251       smerge->add_req(sobj);
1252     }
1253 
1254     // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
1255     sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1256 
1257     // The call to 'replace_edges_in_range' above might have removed the
 1258     // reference to ophi that we need at _merge_pointer_idx. The line below makes
1259     // sure the reference is maintained.
1260     sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);

1430   return false;
1431 }
1432 
1433 // Returns true if at least one of the arguments to the call is an object
1434 // that does not escape globally.
1435 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1436   if (call->method() != nullptr) {
1437     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1438     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1439       Node* p = call->in(idx);
1440       if (not_global_escape(p)) {
1441         return true;
1442       }
1443     }
1444   } else {
1445     const char* name = call->as_CallStaticJava()->_name;
1446     assert(name != nullptr, "no name");
1447     // no arg escapes through uncommon traps
1448     if (strcmp(name, "uncommon_trap") != 0) {
1449       // process_call_arguments() assumes that all arguments escape globally
1450       const TypeTuple* d = call->tf()->domain();
1451       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1452         const Type* at = d->field_at(i);
1453         if (at->isa_oopptr() != nullptr) {
1454           return true;
1455         }
1456       }
1457     }
1458   }
1459   return false;
1460 }
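// Illustrative Java caller (hypothetical types Point and helper(), not from
// the original sources) for which has_arg_escape() is true: 'p' does not
// escape globally and is passed as an argument (assuming helper() does not
// store 'p' anywhere that would make it escape globally):
//
//   static int f() {
//     Point p = new Point(1, 2);   // not GlobalEscape
//     return helper(p);
//   }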
1461 
1462 
1463 
1464 // Utility function for nodes that load an object
1465 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1466   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1467   // ThreadLocal has RawPtr type.
1468   const Type* t = _igvn->type(n);
1469   if (t->make_ptr() != nullptr) {
1470     Node* adr = n->in(MemNode::Address);

1504       // first IGVN optimization when escape information is still available.
1505       record_for_optimizer(n);
1506     } else if (n->is_Allocate()) {
1507       add_call_node(n->as_Call());
1508       record_for_optimizer(n);
1509     } else {
1510       if (n->is_CallStaticJava()) {
1511         const char* name = n->as_CallStaticJava()->_name;
1512         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1513           return; // Skip uncommon traps
1514         }
1515       }
1516       // Don't mark as processed since call's arguments have to be processed.
1517       delayed_worklist->push(n);
1518       // Check if a call returns an object.
1519       if ((n->as_Call()->returns_pointer() &&
1520            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1521           (n->is_CallStaticJava() &&
1522            n->as_CallStaticJava()->is_boxing_method())) {
1523         add_call_node(n->as_Call());











1524       }
1525     }
1526     return;
1527   }
1528   // Put this check here to process call arguments since some call nodes
1529   // point to phantom_obj.
1530   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1531     return; // Skip predefined nodes.
1532   }
1533   switch (opcode) {
1534     case Op_AddP: {
1535       Node* base = get_addp_base(n);
1536       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1537       // Field nodes are created for all field types. They are used in
1538       // adjust_scalar_replaceable_state() and split_unique_types().
1539       // Note, non-oop fields will have only base edges in Connection
1540       // Graph because such fields are not used for oop loads and stores.
1541       int offset = address_offset(n, igvn);
1542       add_field(n, PointsToNode::NoEscape, offset);
1543       if (ptn_base == nullptr) {
1544         delayed_worklist->push(n); // Process it later.
1545       } else {
1546         n_ptn = ptnode_adr(n_idx);
1547         add_base(n_ptn->as_Field(), ptn_base);
1548       }
1549       break;
1550     }
1551     case Op_CastX2P: {
1552       map_ideal_node(n, phantom_obj);
1553       break;
1554     }

1555     case Op_CastPP:
1556     case Op_CheckCastPP:
1557     case Op_EncodeP:
1558     case Op_DecodeN:
1559     case Op_EncodePKlass:
1560     case Op_DecodeNKlass: {
1561       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1562       break;
1563     }
1564     case Op_CMoveP: {
1565       add_local_var(n, PointsToNode::NoEscape);
 1566       // Do not add edges during the first iteration because some could
 1567       // not be defined yet.
1568       delayed_worklist->push(n);
1569       break;
1570     }
1571     case Op_ConP:
1572     case Op_ConN:
1573     case Op_ConNKlass: {
1574       // assume all oop constants globally escape except for null

1606     case Op_PartialSubtypeCheck: {
 1607       // Produces Null or notNull and is used only in CmpP, so
1608       // phantom_obj could be used.
1609       map_ideal_node(n, phantom_obj); // Result is unknown
1610       break;
1611     }
1612     case Op_Phi: {
1613       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1614       // ThreadLocal has RawPtr type.
1615       const Type* t = n->as_Phi()->type();
1616       if (t->make_ptr() != nullptr) {
1617         add_local_var(n, PointsToNode::NoEscape);
 1618         // Do not add edges during the first iteration because some could
 1619         // not be defined yet.
1620         delayed_worklist->push(n);
1621       }
1622       break;
1623     }
1624     case Op_Proj: {
1625       // we are only interested in the oop result projection from a call
1626       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1627           n->in(0)->as_Call()->returns_pointer()) {


1628         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1629       }
1630       break;
1631     }
1632     case Op_Rethrow: // Exception object escapes
1633     case Op_Return: {
1634       if (n->req() > TypeFunc::Parms &&
1635           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1636         // Treat Return value as LocalVar with GlobalEscape escape state.
1637         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1638       }
1639       break;
1640     }
1641     case Op_CompareAndExchangeP:
1642     case Op_CompareAndExchangeN:
1643     case Op_GetAndSetP:
1644     case Op_GetAndSetN: {
1645       add_objload_to_connection_graph(n, delayed_worklist);
1646       // fall-through
1647     }

1709   if (n->is_Call()) {
1710     process_call_arguments(n->as_Call());
1711     return;
1712   }
1713   assert(n->is_Store() || n->is_LoadStore() ||
1714          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1715          "node should be registered already");
1716   int opcode = n->Opcode();
1717   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1718   if (gc_handled) {
1719     return; // Ignore node if already handled by GC.
1720   }
1721   switch (opcode) {
1722     case Op_AddP: {
1723       Node* base = get_addp_base(n);
1724       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1725       assert(ptn_base != nullptr, "field's base should be registered");
1726       add_base(n_ptn->as_Field(), ptn_base);
1727       break;
1728     }

1729     case Op_CastPP:
1730     case Op_CheckCastPP:
1731     case Op_EncodeP:
1732     case Op_DecodeN:
1733     case Op_EncodePKlass:
1734     case Op_DecodeNKlass: {
1735       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1736       break;
1737     }
1738     case Op_CMoveP: {
1739       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1740         Node* in = n->in(i);
1741         if (in == nullptr) {
1742           continue;  // ignore null
1743         }
1744         Node* uncast_in = in->uncast();
1745         if (uncast_in->is_top() || uncast_in == n) {
 1746           continue;  // ignore top or inputs which go back to this node
1747         }
1748         PointsToNode* ptn = ptnode_adr(in->_idx);

1763       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1764       // ThreadLocal has RawPtr type.
1765       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1766       for (uint i = 1; i < n->req(); i++) {
1767         Node* in = n->in(i);
1768         if (in == nullptr) {
1769           continue;  // ignore null
1770         }
1771         Node* uncast_in = in->uncast();
1772         if (uncast_in->is_top() || uncast_in == n) {
 1773           continue;  // ignore top or inputs which go back to this node
1774         }
1775         PointsToNode* ptn = ptnode_adr(in->_idx);
1776         assert(ptn != nullptr, "node should be registered");
1777         add_edge(n_ptn, ptn);
1778       }
1779       break;
1780     }
1781     case Op_Proj: {
1782       // we are only interested in the oop result projection from a call
1783       assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1784              n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
1785       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1786       break;
1787     }
1788     case Op_Rethrow: // Exception object escapes
1789     case Op_Return: {
1790       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1791              "Unexpected node type");
1792       // Treat Return value as LocalVar with GlobalEscape escape state.
1793       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1794       break;
1795     }
1796     case Op_CompareAndExchangeP:
1797     case Op_CompareAndExchangeN:
1798     case Op_GetAndSetP:
1799     case Op_GetAndSetN:{
1800       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1801       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1802       // fall-through
1803     }
1804     case Op_CompareAndSwapP:

1940     PointsToNode* ptn = ptnode_adr(val->_idx);
1941     assert(ptn != nullptr, "node should be registered");
1942     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1943     // Add edge to object for unsafe access with offset.
1944     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1945     assert(adr_ptn != nullptr, "node should be registered");
1946     if (adr_ptn->is_Field()) {
1947       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
1948       add_edge(adr_ptn, ptn);
1949     }
1950     return true;
1951   }
1952 #ifdef ASSERT
1953   n->dump(1);
1954   assert(false, "not unsafe");
1955 #endif
1956   return false;
1957 }
1958 
1959 void ConnectionGraph::add_call_node(CallNode* call) {
1960   assert(call->returns_pointer(), "only for call which returns pointer");
1961   uint call_idx = call->_idx;
1962   if (call->is_Allocate()) {
1963     Node* k = call->in(AllocateNode::KlassNode);
1964     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
1965     assert(kt != nullptr, "TypeKlassPtr  required.");
1966     PointsToNode::EscapeState es = PointsToNode::NoEscape;
1967     bool scalar_replaceable = true;
1968     NOT_PRODUCT(const char* nsr_reason = "");
1969     if (call->is_AllocateArray()) {
1970       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
1971         es = PointsToNode::GlobalEscape;
1972       } else {
1973         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
1974         if (length < 0) {
1975           // Not scalar replaceable if the length is not constant.
1976           scalar_replaceable = false;
1977           NOT_PRODUCT(nsr_reason = "has a non-constant length");
1978         } else if (length > EliminateAllocationArraySizeLimit) {
1979           // Not scalar replaceable if the length is too big.
1980           scalar_replaceable = false;

2016     //
2017     //    - all oop arguments are escaping globally;
2018     //
2019     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2020     //
 2021     //    - the same as CallDynamicJavaNode if we can't do bytecode analysis;
2022     //
2023     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2024     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2025     //      during call is returned;
 2026     //    - mapped to ArgEscape LocalVar node pointing to object arguments
 2027     //      which are returned and do not escape during the call;
2028     //
2029     //    - oop arguments escaping status is defined by bytecode analysis;
2030     //
2031     // For a static call, we know exactly what method is being called.
2032     // Use bytecode estimator to record whether the call's return value escapes.
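    // Illustrative Java callee shapes (hypothetical, not from the original
    // sources) for the static-call cases above, as the bytecode escape
    // analyzer classifies their return values:
    //
    //   static Point make()        { return new Point(); }   // fresh allocation -> NoEscape
    //   static Point same(Point p) { return p; }             // returns argument -> ArgEscape LocalVar
    //   static Point pick()        { return CACHE.field; }   // unknown oop      -> phantom (GlobalEscape)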
2033     ciMethod* meth = call->as_CallJava()->method();
2034     if (meth == nullptr) {
2035       const char* name = call->as_CallStaticJava()->_name;
2036       assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");

2037       // Returns a newly allocated non-escaped object.
2038       add_java_object(call, PointsToNode::NoEscape);
2039       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
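      // Illustrative Java shape (not from the original sources) that reaches
      // this branch: a multi-dimensional array allocation is lowered to a
      // _multianewarray runtime call, so its result is NoEscape here but not
      // scalar replaceable:
      //
      //   int[][] m = new int[2][3];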
2040     } else if (meth->is_boxing_method()) {
2041       // Returns boxing object
2042       PointsToNode::EscapeState es;
2043       vmIntrinsics::ID intr = meth->intrinsic_id();
2044       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2045         // It does not escape if object is always allocated.
2046         es = PointsToNode::NoEscape;
2047       } else {
2048         // It escapes globally if object could be loaded from cache.
2049         es = PointsToNode::GlobalEscape;
2050       }
2051       add_java_object(call, es);
2052       if (es == PointsToNode::GlobalEscape) {
2053         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2054       }
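      // Illustrative Java shapes (not from the original sources) for the two
      // boxing cases above:
      //
      //   Double  d = Double.valueOf(x);    // always freshly allocated -> NoEscape
      //   Integer i = Integer.valueOf(42);  // may be a cached object   -> GlobalEscape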
2055     } else {
2056       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2057       call_analyzer->copy_dependencies(_compile->dependencies());
2058       if (call_analyzer->is_return_allocated()) {
2059         // Returns a newly allocated non-escaped object, simply
2060         // update dependency information.
2061         // Mark it as NoEscape so that objects referenced by
 2062         // its fields will be marked as NoEscape at least.
2063         add_java_object(call, PointsToNode::NoEscape);
2064         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2065       } else {
2066         // Determine whether any arguments are returned.
2067         const TypeTuple* d = call->tf()->domain();
2068         bool ret_arg = false;
2069         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2070           if (d->field_at(i)->isa_ptr() != nullptr &&
2071               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2072             ret_arg = true;
2073             break;
2074           }
2075         }
2076         if (ret_arg) {
2077           add_local_var(call, PointsToNode::ArgEscape);
2078         } else {
2079           // Returns unknown object.
2080           map_ideal_node(call, phantom_obj);
2081         }
2082       }
2083     }
2084   } else {
 2085     // Another type of call, assume the worst case:
2086     // returned value is unknown and globally escapes.
2087     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");

2095 #ifdef ASSERT
2096     case Op_Allocate:
2097     case Op_AllocateArray:
2098     case Op_Lock:
2099     case Op_Unlock:
2100       assert(false, "should be done already");
2101       break;
2102 #endif
2103     case Op_ArrayCopy:
2104     case Op_CallLeafNoFP:
2105       // Most array copies are ArrayCopy nodes at this point but there
2106       // are still a few direct calls to the copy subroutines (See
2107       // PhaseStringOpts::copy_string())
2108       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2109         call->as_CallLeaf()->is_call_to_arraycopystub();
2110       // fall through
2111     case Op_CallLeafVector:
2112     case Op_CallLeaf: {
 2113       // Stub calls: objects do not escape but they are not scalar replaceable.
2114       // Adjust escape state for outgoing arguments.
2115       const TypeTuple * d = call->tf()->domain();
2116       bool src_has_oops = false;
2117       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2118         const Type* at = d->field_at(i);
2119         Node *arg = call->in(i);
2120         if (arg == nullptr) {
2121           continue;
2122         }
2123         const Type *aat = _igvn->type(arg);
2124         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2125           continue;
2126         }
2127         if (arg->is_AddP()) {
2128           //
2129           // The inline_native_clone() case when the arraycopy stub is called
2130           // after the allocation before Initialize and CheckCastPP nodes.
2131           // Or normal arraycopy for object arrays case.
2132           //
2133           // Set AddP's base (Allocate) as not scalar replaceable since
2134           // pointer to the base (with offset) is passed as argument.
2135           //
2136           arg = get_addp_base(arg);
2137         }
2138         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2139         assert(arg_ptn != nullptr, "should be registered");
2140         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2141         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2142           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
 2143                  aat->isa_ptr() != nullptr, "expecting a Ptr");
2144           bool arg_has_oops = aat->isa_oopptr() &&
2145                               (aat->isa_instptr() ||
2146                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));



2147           if (i == TypeFunc::Parms) {
2148             src_has_oops = arg_has_oops;
2149           }
2150           //
2151           // src or dst could be j.l.Object when other is basic type array:
2152           //
2153           //   arraycopy(char[],0,Object*,0,size);
2154           //   arraycopy(Object*,0,char[],0,size);
2155           //
2156           // Don't add edges in such cases.
2157           //
2158           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2159                                        arg_has_oops && (i > TypeFunc::Parms);
2160 #ifdef ASSERT
2161           if (!(is_arraycopy ||
2162                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2163                 (call->as_CallLeaf()->_name != nullptr &&
2164                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2165                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2166                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

2177                   strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2178                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2179                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2180                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2181                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2182                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2183                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2184                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2185                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2186                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2187                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2188                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2189                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2190                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2191                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2192                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2193                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2194                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2195                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2196                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||



2197                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2198                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2199                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2200                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2201                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2202                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2203                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2204                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2205                  ))) {
2206             call->dump();
2207             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2208           }
2209 #endif
2210           // Always process arraycopy's destination object since
2211           // we need to add all possible edges to references in
2212           // source object.
2213           if (arg_esc >= PointsToNode::ArgEscape &&
2214               !arg_is_arraycopy_dest) {
2215             continue;
2216           }

2243           }
2244         }
2245       }
2246       break;
2247     }
2248     case Op_CallStaticJava: {
2249       // For a static call, we know exactly what method is being called.
 2250       // Use bytecode estimator to record the call's escape effects
2251 #ifdef ASSERT
2252       const char* name = call->as_CallStaticJava()->_name;
2253       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2254 #endif
2255       ciMethod* meth = call->as_CallJava()->method();
2256       if ((meth != nullptr) && meth->is_boxing_method()) {
2257         break; // Boxing methods do not modify any oops.
2258       }
 2259       BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2260       // fall-through if not a Java method or no analyzer information
2261       if (call_analyzer != nullptr) {
2262         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2263         const TypeTuple* d = call->tf()->domain();
2264         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2265           const Type* at = d->field_at(i);
2266           int k = i - TypeFunc::Parms;
2267           Node* arg = call->in(i);
2268           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2269           if (at->isa_ptr() != nullptr &&
2270               call_analyzer->is_arg_returned(k)) {
2271             // The call returns arguments.
2272             if (call_ptn != nullptr) { // Is call's result used?
2273               assert(call_ptn->is_LocalVar(), "node should be registered");
2274               assert(arg_ptn != nullptr, "node should be registered");
2275               add_edge(call_ptn, arg_ptn);
2276             }
2277           }
2278           if (at->isa_oopptr() != nullptr &&
2279               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2280             if (!call_analyzer->is_arg_stack(k)) {
2281               // The argument global escapes
2282               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2283             } else {

2287                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2288               }
2289             }
2290           }
2291         }
2292         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2293           // The call returns arguments.
2294           assert(call_ptn->edge_count() > 0, "sanity");
2295           if (!call_analyzer->is_return_local()) {
 2296             // Also returns an unknown object.
2297             add_edge(call_ptn, phantom_obj);
2298           }
2299         }
2300         break;
2301       }
2302     }
2303     default: {
2304       // Fall-through here if not a Java method or no analyzer information
2305       // or some other type of call, assume the worst case: all arguments
2306       // globally escape.
2307       const TypeTuple* d = call->tf()->domain();
2308       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2309         const Type* at = d->field_at(i);
2310         if (at->isa_oopptr() != nullptr) {
2311           Node* arg = call->in(i);
2312           if (arg->is_AddP()) {
2313             arg = get_addp_base(arg);
2314           }
2315           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2316           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2317         }
2318       }
2319     }
2320   }
2321 }
2322 
2323 
2324 // Finish Graph construction.
2325 bool ConnectionGraph::complete_connection_graph(
2326                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2327                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

2700     PointsToNode* base = i.get();
2701     if (base->is_JavaObject()) {
2702       // Skip Allocate's fields which will be processed later.
2703       if (base->ideal_node()->is_Allocate()) {
2704         return 0;
2705       }
2706       assert(base == null_obj, "only null ptr base expected here");
2707     }
2708   }
2709   if (add_edge(field, phantom_obj)) {
2710     // New edge was added
2711     new_edges++;
2712     add_field_uses_to_worklist(field);
2713   }
2714   return new_edges;
2715 }
2716 
2717 // Find fields initializing values for allocations.
2718 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2719   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");

2720   Node* alloc = pta->ideal_node();
2721 
 2722   // Do nothing for Allocate nodes since their field values are
2723   // "known" unless they are initialized by arraycopy/clone.
2724   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2725     return 0;







2726   }
2727   assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");

2728 #ifdef ASSERT
2729   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
2730     const char* name = alloc->as_CallStaticJava()->_name;
2731     assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");

2732   }
2733 #endif
 2734   // Non-escaped allocations returned from Java or runtime calls have unknown values in their fields.
2735   int new_edges = 0;
2736   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2737     PointsToNode* field = i.get();
2738     if (field->is_Field() && field->as_Field()->is_oop()) {
2739       if (add_edge(field, phantom_obj)) {
2740         // New edge was added
2741         new_edges++;
2742         add_field_uses_to_worklist(field->as_Field());
2743       }
2744     }
2745   }
2746   return new_edges;
2747 }
2748 
2749 // Find fields initializing values for allocations.
2750 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2751   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2752   Node* alloc = pta->ideal_node();
 2753   // Do nothing for Call nodes since their field values are unknown.
2754   if (!alloc->is_Allocate()) {
2755     return 0;
2756   }
2757   InitializeNode* ini = alloc->as_Allocate()->initialization();
2758   bool visited_bottom_offset = false;
2759   GrowableArray<int> offsets_worklist;
2760   int new_edges = 0;
2761 
2762   // Check if an oop field's initializing value is recorded and add
 2763   // a corresponding null value if it is not recorded.
2764   // Connection Graph does not record a default initialization by null
2765   // captured by Initialize node.
2766   //
2767   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2768     PointsToNode* field = i.get(); // Field (AddP)
2769     if (!field->is_Field() || !field->as_Field()->is_oop()) {
2770       continue; // Not oop field
2771     }
2772     int offset = field->as_Field()->offset();
2773     if (offset == Type::OffsetBot) {
2774       if (!visited_bottom_offset) {

2820               } else {
2821                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2822                   tty->print_cr("----------init store has invalid value -----");
2823                   store->dump();
2824                   val->dump();
2825                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2826                 }
2827                 for (EdgeIterator j(val); j.has_next(); j.next()) {
2828                   PointsToNode* obj = j.get();
2829                   if (obj->is_JavaObject()) {
2830                     if (!field->points_to(obj->as_JavaObject())) {
2831                       missed_obj = obj;
2832                       break;
2833                     }
2834                   }
2835                 }
2836               }
2837               if (missed_obj != nullptr) {
2838                 tty->print_cr("----------field---------------------------------");
2839                 field->dump();
 2840                 tty->print_cr("----------missed reference to object-----------");
 2841                 missed_obj->dump();
 2842                 tty->print_cr("----------object referenced by init store -----");
2843                 store->dump();
2844                 val->dump();
2845                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2846               }
2847             }
2848 #endif
2849           } else {
2850             // There could be initializing stores which follow allocation.
2851             // For example, a volatile field store is not collected
2852             // by Initialize node.
2853             //
2854             // Need to check for dependent loads to separate such stores from
2855             // stores which follow loads. For now, add initial value null so
2856             // that compare pointers optimization works correctly.
2857           }
2858         }
2859         if (value == nullptr) {
2860           // A field's initializing value was not recorded. Add null.
2861           if (add_edge(field, null_obj)) {
2862             // New edge was added

3139         assert(field->edge_count() > 0, "sanity");
3140       }
3141     }
3142   }
3143 }
3144 #endif
3145 
3146 // Optimize ideal graph.
3147 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3148                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3149   Compile* C = _compile;
3150   PhaseIterGVN* igvn = _igvn;
3151   if (EliminateLocks) {
3152     // Mark locks before changing ideal graph.
3153     int cnt = C->macro_count();
3154     for (int i = 0; i < cnt; i++) {
3155       Node *n = C->macro_node(i);
3156       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3157         AbstractLockNode* alock = n->as_AbstractLock();
3158         if (!alock->is_non_esc_obj()) {
3159           if (can_eliminate_lock(alock)) {

3160             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3161             // The lock could be marked eliminated by lock coarsening
 3162             // code during the first IGVN before EA. Replace the coarsened flag
 3163             // so that all associated locks/unlocks are eliminated.
3164 #ifdef ASSERT
3165             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3166 #endif
3167             alock->set_non_esc_obj();
3168           }
3169         }
3170       }
3171     }
3172   }
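  // Illustrative Java shape (hypothetical field 'counter', not from the
  // original sources) of a lock accepted by can_eliminate_lock() above: the
  // monitor object never escapes, so its Lock/Unlock pair is marked
  // non-escaping and later eliminated:
  //
  //   Object local = new Object();
  //   synchronized (local) {   // lock on a non-escaping object
  //     counter++;
  //   }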
3173 
3174   if (OptimizePtrCompare) {
3175     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3176       Node *n = ptr_cmp_worklist.at(i);
3177       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3178       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3179       if (tcmp->singleton()) {

3181 #ifndef PRODUCT
3182         if (PrintOptimizePtrCompare) {
3183           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3184           if (Verbose) {
3185             n->dump(1);
3186           }
3187         }
3188 #endif
3189         igvn->replace_node(n, cmp);
3190       }
3191     }
3192   }
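  // Illustrative Java shape (hypothetical class Point and variable 'other',
  // not from the original sources) of a compare that optimize_ptr_compare()
  // can fold: a non-escaping fresh allocation cannot be identical to an
  // unrelated object, so the CmpP result is a constant:
  //
  //   Point p = new Point(1, 2);
  //   if (p == other) { ... }   // always false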
3193 
3194   // For MemBarStoreStore nodes added in library_call.cpp, check
3195   // escape status of associated AllocateNode and optimize out
3196   // MemBarStoreStore node if the allocated object never escapes.
3197   for (int i = 0; i < storestore_worklist.length(); i++) {
3198     Node* storestore = storestore_worklist.at(i);
3199     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3200     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3201       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3202       mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3203       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3204       igvn->register_new_node_with_optimizer(mb);
3205       igvn->replace_node(storestore, mb);





3206     }
3207   }
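  // Illustrative Java shape (hypothetical array 'arr', not from the original
  // sources) for the barrier elimination above: the clone intrinsic in
  // library_call.cpp emits a MemBarStoreStore to publish the copy safely;
  // when the cloned object never escapes, the barrier is replaced by a plain
  // MemBarCPUOrder:
  //
  //   int[] copy = arr.clone();   // 'copy' never escapes this method
  //   return copy[0];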
3208 }
3209 
 3210 // Optimize object pointer compares.
3211 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3212   assert(OptimizePtrCompare, "sanity");
3213   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3214   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3215   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0,1]
3216 
3217   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3218   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3219   JavaObjectNode* jobj1 = unique_java_object(left);
3220   JavaObjectNode* jobj2 = unique_java_object(right);
3221 
3222   // The use of this method during allocation merge reduction may cause 'left'
 3223   // or 'right' to be something (e.g., a Phi) that isn't in the connection graph or
 3224   // that doesn't reference a unique java object.
3225   if (ptn1 == nullptr || ptn2 == nullptr ||

3347   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3348   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3349   PointsToNode* ptadr = _nodes.at(n->_idx);
3350   if (ptadr != nullptr) {
3351     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3352     return;
3353   }
3354   Compile* C = _compile;
3355   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3356   map_ideal_node(n, ptadr);
3357   // Add edge from arraycopy node to source object.
3358   (void)add_edge(ptadr, src);
3359   src->set_arraycopy_src();
3360   // Add edge from destination object to arraycopy node.
3361   (void)add_edge(dst, ptadr);
3362   dst->set_arraycopy_dst();
3363 }
3364 
3365 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3366   const Type* adr_type = n->as_AddP()->bottom_type();

3367   BasicType bt = T_INT;
3368   if (offset == Type::OffsetBot) {
3369     // Check only oop fields.
3370     if (!adr_type->isa_aryptr() ||
3371         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3372         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3373       // OffsetBot is used to reference array's element. Ignore first AddP.
3374       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3375         bt = T_OBJECT;
3376       }
3377     }
3378   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3379     if (adr_type->isa_instptr()) {
3380       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
3381       if (field != nullptr) {
3382         bt = field->layout_type();
3383       } else {
3384         // Check for unsafe oop field access
3385         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3386             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3387             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3388             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3389           bt = T_OBJECT;
3390           (*unsafe) = true;
3391         }
3392       }
3393     } else if (adr_type->isa_aryptr()) {
3394       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3395         // Ignore array length load.
3396       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3397         // Ignore first AddP.
3398       } else {
3399         const Type* elemtype = adr_type->isa_aryptr()->elem();
3400         bt = elemtype->array_element_basic_type();






3401       }
3402     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3403       // Allocation initialization, ThreadLocal field access, unsafe access
3404       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3405           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3406           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3407           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3408         bt = T_OBJECT;
3409       }
3410     }
3411   }
3412   // Note: T_NARROWOOP is not classed as a real reference type
3413   return (is_reference_type(bt) || bt == T_NARROWOOP);
3414 }
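// Illustrative Java accesses (hypothetical variables, not from the original
// sources) and how the classification above sees their AddP offsets:
//
//   objArray[i] = o;            // OffsetBot store into an oop array   -> oop field
//   int n = anyArray.length;    // array length offset                 -> ignored
//   p.next = q;                 // instance field with oop layout type -> oop field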
3415 
 3416 // Returns the unique java object being pointed to, or null.
3417 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3418   // If the node was created after the escape computation we can't answer.
3419   uint idx = n->_idx;
3420   if (idx >= nodes_size()) {

3578             return true;
3579           }
3580         }
3581       }
3582     }
3583   }
3584   return false;
3585 }
3586 
3587 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3588   const Type *adr_type = phase->type(adr);
3589   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
 3590     // We are computing a raw address for a store captured by an Initialize;
3591     // compute an appropriate address type. AddP cases #3 and #5 (see below).
3592     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3593     assert(offs != Type::OffsetBot ||
3594            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3595            "offset must be a constant or it is initialization of array");
3596     return offs;
3597   }
3598   const TypePtr *t_ptr = adr_type->isa_ptr();
3599   assert(t_ptr != nullptr, "must be a pointer type");
3600   return t_ptr->offset();
3601 }
3602 
3603 Node* ConnectionGraph::get_addp_base(Node *addp) {
3604   assert(addp->is_AddP(), "must be AddP");
3605   //
3606   // AddP cases for Base and Address inputs:
3607   // case #1. Direct object's field reference:
3608   //     Allocate
3609   //       |
3610   //     Proj #5 ( oop result )
3611   //       |
3612   //     CheckCastPP (cast to instance type)
3613   //      | |
3614   //     AddP  ( base == address )
3615   //
3616   // case #2. Indirect object's field reference:
3617   //      Phi
3618   //       |
3619   //     CastPP (cast to instance type)
3620   //      | |

3734   }
3735   return nullptr;
3736 }
3737 
3738 //
3739 // Adjust the type and inputs of an AddP which computes the
3740 // address of a field of an instance
3741 //
3742 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3743   PhaseGVN* igvn = _igvn;
3744   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3745   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3746   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3747   if (t == nullptr) {
 3748     // We are computing a raw address for a store captured by an Initialize;
3749     // compute an appropriate address type (cases #3 and #5).
3750     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3751     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3752     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3753     assert(offs != Type::OffsetBot, "offset must be a constant");
3754     t = base_t->add_offset(offs)->is_oopptr();







3755   }
3756   int inst_id =  base_t->instance_id();
3757   assert(!t->is_known_instance() || t->instance_id() == inst_id,
3758                              "old type must be non-instance or match new type");
3759 
3760   // The type 't' could be subclass of 'base_t'.
 3761   // As a result t->offset() could be larger than base_t's size and it will
3762   // cause the failure in add_offset() with narrow oops since TypeOopPtr()
3763   // constructor verifies correctness of the offset.
3764   //
 3765   // It could happen on a subclass's branch (from the type profiling
3766   // inlining) which was not eliminated during parsing since the exactness
3767   // of the allocation type was not propagated to the subclass type check.
3768   //
3769   // Or the type 't' could be not related to 'base_t' at all.
 3770   // It could happen when the CHA type is different from the MDO type on a dead path
3771   // (for example, from instanceof check) which is not collapsed during parsing.
3772   //
3773   // Do nothing for such AddP node and don't process its users since
3774   // this code branch will go away.
3775   //
3776   if (!t->is_known_instance() &&
3777       !base_t->maybe_java_subtype_of(t)) {
3778      return false; // bail out
3779   }
3780   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();











3781   // Do NOT remove the next line: ensure a new alias index is allocated
3782   // for the instance type. Note: C++ will not remove it since the call
3783   // has side effect.
3784   int alias_idx = _compile->get_alias_index(tinst);
3785   igvn->set_type(addp, tinst);
3786   // record the allocation in the node map
3787   set_map(addp, get_map(base->_idx));
3788   // Set addp's Base and Address to 'base'.
3789   Node *abase = addp->in(AddPNode::Base);
3790   Node *adr   = addp->in(AddPNode::Address);
3791   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3792       adr->in(0)->_idx == (uint)inst_id) {
3793     // Skip AddP cases #3 and #5.
3794   } else {
3795     assert(!abase->is_top(), "sanity"); // AddP case #3
3796     if (abase != base) {
3797       igvn->hash_delete(addp);
3798       addp->set_req(AddPNode::Base, base);
3799       if (abase == adr) {
3800         addp->set_req(AddPNode::Address, base);

4465         ptnode_adr(n->_idx)->dump();
4466         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4467 #endif
4468         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4469         return;
4470       } else {
4471         Node *val = get_map(jobj->idx());   // CheckCastPP node
4472         TypeNode *tn = n->as_Type();
4473         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4474         assert(tinst != nullptr && tinst->is_known_instance() &&
4475                tinst->instance_id() == jobj->idx() , "instance type expected.");
4476 
4477         const Type *tn_type = igvn->type(tn);
4478         const TypeOopPtr *tn_t;
4479         if (tn_type->isa_narrowoop()) {
4480           tn_t = tn_type->make_ptr()->isa_oopptr();
4481         } else {
4482           tn_t = tn_type->isa_oopptr();
4483         }
4484         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {







4485           if (tn_type->isa_narrowoop()) {
4486             tn_type = tinst->make_narrowoop();
4487           } else {
4488             tn_type = tinst;
4489           }
4490           igvn->hash_delete(tn);
4491           igvn->set_type(tn, tn_type);
4492           tn->set_type(tn_type);
4493           igvn->hash_insert(tn);
4494           record_for_optimizer(n);
4495         } else {
4496           assert(tn_type == TypePtr::NULL_PTR ||
4497                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4498                  "unexpected type");
4499           continue; // Skip dead path with different type
4500         }
4501       }
4502     } else {
4503       debug_only(n->dump();)
4504       assert(false, "EA: unexpected node");
4505       continue;
4506     }
4507     // push allocation's users on appropriate worklist
4508     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4509       Node *use = n->fast_out(i);
 4510       if (use->is_Mem() && use->in(MemNode::Address) == n) {
4511         // Load/store to instance's field
4512         memnode_worklist.append_if_missing(use);
4513       } else if (use->is_MemBar()) {
4514         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4515           memnode_worklist.append_if_missing(use);
4516         }
4517       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4518         Node* addp2 = find_second_addp(use, n);
4519         if (addp2 != nullptr) {
4520           alloc_worklist.append_if_missing(addp2);
4521         }
4522         alloc_worklist.append_if_missing(use);
4523       } else if (use->is_Phi() ||
4524                  use->is_CheckCastPP() ||
4525                  use->is_EncodeNarrowPtr() ||
4526                  use->is_DecodeNarrowPtr() ||
4527                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4528         alloc_worklist.append_if_missing(use);
4529 #ifdef ASSERT
4530       } else if (use->is_Mem()) {
4531         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4532       } else if (use->is_MergeMem()) {
4533         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4534       } else if (use->is_SafePoint()) {
4535         // Look for MergeMem nodes for calls which reference unique allocation
4536         // (through CheckCastPP nodes) even for debug info.
4537         Node* m = use->in(TypeFunc::Memory);
4538         if (m->is_MergeMem()) {
4539           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4540         }
4541       } else if (use->Opcode() == Op_EncodeISOArray) {
4542         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4543           // EncodeISOArray overwrites destination array
4544           memnode_worklist.append_if_missing(use);
4545         }



4546       } else {
4547         uint op = use->Opcode();
4548         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4549             (use->in(MemNode::Memory) == n)) {
4550           // They overwrite memory edge corresponding to destination array,
4551           memnode_worklist.append_if_missing(use);
4552         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4553               op == Op_CastP2X || op == Op_StoreCM ||
4554               op == Op_FastLock || op == Op_AryEq ||
4555               op == Op_StrComp || op == Op_CountPositives ||
4556               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4557               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4558               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4559               op == Op_SubTypeCheck ||
4560               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4561           n->dump();
4562           use->dump();
4563           assert(false, "EA: missing allocation reference path");
4564         }
4565 #endif
4566       }
4567     }
4568 
4569   }
4570 
4571 #ifdef ASSERT
4572   if (VerifyReduceAllocationMerges) {
4573     for (uint i = 0; i < reducible_merges.size(); i++) {
4574       Node* phi = reducible_merges.at(i);
4575 
4576       if (!reduced_merges.member(phi)) {
4577         phi->dump(2);
4578         phi->dump(-2);
4579         assert(false, "This reducible merge wasn't reduced.");

4639     if (n->is_Phi() || n->is_ClearArray()) {
4640       // we don't need to do anything, but the users must be pushed
4641     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4642       // we don't need to do anything, but the users must be pushed
4643       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4644       if (n == nullptr) {
4645         continue;
4646       }
4647     } else if (n->is_CallLeaf()) {
4648       // Runtime calls with narrow memory input (no MergeMem node)
4649       // get the memory projection
4650       n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4651       if (n == nullptr) {
4652         continue;
4653       }
4654     } else if (n->Opcode() == Op_StrCompressedCopy ||
4655                n->Opcode() == Op_EncodeISOArray) {
4656       // get the memory projection
4657       n = n->find_out_with(Op_SCMemProj);
4658       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");



4659     } else {
4660       assert(n->is_Mem(), "memory node required.");
4661       Node *addr = n->in(MemNode::Address);
4662       const Type *addr_t = igvn->type(addr);
4663       if (addr_t == Type::TOP) {
4664         continue;
4665       }
4666       assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4667       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4668       assert ((uint)alias_idx < new_index_end, "wrong alias index");
4669       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4670       if (_compile->failing()) {
4671         return;
4672       }
4673       if (mem != n->in(MemNode::Memory)) {
4674         // We delay the memory edge update since we need the old one in
4675         // the MergeMem code below when instance memory slices are separated.
4676         set_map(n, mem);
4677       }
4678       if (n->is_Load()) {

4681         // get the memory projection
4682         n = n->find_out_with(Op_SCMemProj);
4683         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4684       }
4685     }
4686     // push user on appropriate worklist
4687     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4688       Node *use = n->fast_out(i);
4689       if (use->is_Phi() || use->is_ClearArray()) {
4690         memnode_worklist.append_if_missing(use);
4691       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4692         if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
4693           continue;
4694         }
4695         memnode_worklist.append_if_missing(use);
4696       } else if (use->is_MemBar() || use->is_CallLeaf()) {
4697         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4698           memnode_worklist.append_if_missing(use);
4699         }
4700 #ifdef ASSERT
4701       } else if (use->is_Mem()) {
4702         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4703       } else if (use->is_MergeMem()) {
4704         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4705       } else if (use->Opcode() == Op_EncodeISOArray) {
4706         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4707           // EncodeISOArray overwrites destination array
4708           memnode_worklist.append_if_missing(use);
4709         }




4710       } else {
4711         uint op = use->Opcode();
4712         if ((use->in(MemNode::Memory) == n) &&
4713             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4714           // They overwrite the memory edge corresponding to the destination array.
4715           memnode_worklist.append_if_missing(use);
4716         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4717               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4718               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4719               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4720           n->dump();
4721           use->dump();
4722           assert(false, "EA: missing memory path");
4723         }
4724 #endif
4725       }
4726     }
4727   }
4728 
4729   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4730   //            Walk each memory slice moving the first node encountered of each
4731   //            instance type to the input corresponding to its alias index.
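  //            A simplified, illustrative sketch: suppose alias index 7 was
  //            created for a field of a unique instance whose memory still
  //            flows through the general slice at index 5. The first memory
  //            node of that instance found while walking slice 5 becomes the
  //            MergeMem input at index 7:
  //
  //              MergeMem(..., 5: mem, ...)  ==>  MergeMem(..., 5: mem, ..., 7: inst_mem, ...)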
4732   uint length = mergemem_worklist.length();
4733   for( uint next = 0; next < length; ++next ) {
4734     MergeMemNode* nmm = mergemem_worklist.at(next);
4735     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4736     // Note: we don't want to use MergeMemStream here because we only want to
4737     // scan inputs which exist at the start, not ones we add during processing.
4738     // Note 2: MergeMem may already contain instance memory slices added
4739     // during the find_inst_mem() call when memory nodes were processed above.

4800     if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
4801       if (_compile->do_reduce_allocation_merges()) {
4802         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4803       } else if (_invocation > 0) {
4804         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4805       } else {
4806         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4807       }
4808       return;
4809     }
4810 
4811     igvn->hash_insert(nmm);
4812     record_for_optimizer(nmm);
4813   }
4814 
4815   //  Phase 4:  Update the inputs of non-instance memory Phis and
4816   //            the Memory input of memnodes
4817   // First update the inputs of any non-instance Phi's from
4818   // which we split out an instance Phi.  Note we don't have
4819   // to recursively process Phi's encountered on the input memory
4820 // chains as is done in split_memory_phi() since they will
4821   // also be processed here.
4822   for (int j = 0; j < orig_phis.length(); j++) {
4823     PhiNode *phi = orig_phis.at(j);
4824     int alias_idx = _compile->get_alias_index(phi->adr_type());
4825     igvn->hash_delete(phi);
4826     for (uint i = 1; i < phi->req(); i++) {
4827       Node *mem = phi->in(i);
4828       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4829       if (_compile->failing()) {
4830         return;
4831       }
4832       if (mem != new_mem) {
4833         phi->set_req(i, new_mem);
4834       }
4835     }
4836     igvn->hash_insert(phi);
4837     record_for_optimizer(phi);
4838   }
4839 
4840   // Update the memory inputs of MemNodes with the value we computed

  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/metaspace.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "opto/c2compiler.hpp"
  35 #include "opto/arraycopynode.hpp"
  36 #include "opto/callnode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/compile.hpp"
  39 #include "opto/escape.hpp"
  40 #include "opto/macro.hpp"
  41 #include "opto/locknode.hpp"
  42 #include "opto/phaseX.hpp"
  43 #include "opto/movenode.hpp"
  44 #include "opto/narrowptrnode.hpp"
  45 #include "opto/castnode.hpp"
  46 #include "opto/rootnode.hpp"
  47 #include "utilities/macros.hpp"
  48 
  49 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  50   // If ReduceAllocationMerges is enabled we might call split_through_phi during
  51   // split_unique_types and that will create additional nodes that need to be
  52   // pushed to the ConnectionGraph. The code below bumps the initial capacity of

 148   GrowableArray<SafePointNode*>  sfn_worklist;
 149   GrowableArray<MergeMemNode*>   mergemem_worklist;
 150   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 151 
 152   { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
 153 
 154   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 155   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
 156   // Initialize worklist
 157   if (C->root() != nullptr) {
 158     ideal_nodes.push(C->root());
 159   }
 160   // Processed ideal nodes are unique on ideal_nodes list
 161   // but several ideal nodes are mapped to the phantom_obj.
 162   // To avoid duplicated entries on the following worklists
 163   // add the phantom_obj only once to them.
 164   ptnodes_worklist.append(phantom_obj);
 165   java_objects_worklist.append(phantom_obj);
 166   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 167     Node* n = ideal_nodes.at(next);
 168     if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
 169         !n->in(MemNode::Address)->is_AddP() &&
 170         _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
 171       // Load/Store at the mark word address is at offset 0, so it has no AddP, which confuses EA
 172       Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
 173       _igvn->register_new_node_with_optimizer(addp);
 174       _igvn->replace_input_of(n, MemNode::Address, addp);
 175       ideal_nodes.push(addp);
 176       _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
 177     }
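         // A simplified sketch of the rewrite above (shown for a StoreX; a LoadX is analogous):
         //
         //   Before:  StoreX(ctl, mem, oop,                   value)
         //   After:   StoreX(ctl, mem, AddP(oop, oop, ConX 0), value)
         //
         // so the address now has the AddP shape the rest of EA expects.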
 178     // Create PointsTo nodes and add them to Connection Graph. Called
 179     // only once per ideal node since ideal_nodes is Unique_Node list.
 180     add_node_to_connection_graph(n, &delayed_worklist);
 181     PointsToNode* ptn = ptnode_adr(n->_idx);
 182     if (ptn != nullptr && ptn != phantom_obj) {
 183       ptnodes_worklist.append(ptn);
 184       if (ptn->is_JavaObject()) {
 185         java_objects_worklist.append(ptn->as_JavaObject());
 186         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 187             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 188           // Only allocations and java static calls results are interesting.
 189           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 190         }
 191       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 192         oop_fields_worklist.append(ptn->as_Field());
 193       }
 194     }
 195     // Collect some interesting nodes for further use.
 196     switch (n->Opcode()) {
 197       case Op_MergeMem:

 550         NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
 551         return false;
 552       }
 553     } else if (use->is_AddP()) {
 554       Node* addp = use;
 555       for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
 556         Node* use_use = addp->fast_out(j);
 557         const Type* load_type = _igvn->type(use_use);
 558 
 559         if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
 560           NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", n->_idx, _invocation, use_use->Name());)
 561           return false;
 562         } else if (load_type->isa_narrowklass() || load_type->isa_klassptr()) {
 563           NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. [Narrow] Klass Load: %s", n->_idx, _invocation, use_use->Name());)
 564           return false;
 565         }
 566       }
 567     } else if (nesting > 0) {
 568       NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), nesting);)
 569       return false;
 570     // TODO 8315003 Re-enable
 571     } else if (use->is_CastPP() && false) {
 572       const Type* cast_t = _igvn->type(use);
 573       if (cast_t == nullptr || cast_t->make_ptr()->isa_instptr() == nullptr) {
 574 #ifndef PRODUCT
 575         if (TraceReduceAllocationMerges) {
 576           tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
 577           use->dump();
 578         }
 579 #endif
 580         return false;
 581       }
 582 
 583       bool is_trivial_control = use->in(0) == nullptr || use->in(0) == n->in(0);
 584       if (!is_trivial_control) {
 585         // If it's not a trivial control then we check if we can reduce the
 586         // CmpP/N used by the If controlling the cast.
 587         if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
 588           Node* iff = use->in(0)->in(0);
 589           if (iff->Opcode() == Op_If && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
 590             Node* iff_cmp = iff->in(1)->in(1);
 591             int opc = iff_cmp->Opcode();

1232 
1233     // The next two inputs are:
1234     //  (1) A copy of the original pointer to NSR objects.
1235     //  (2) A selector, used to decide if we need to rematerialize an object
1236     //      or use the pointer to a NSR object.
1237     // See the declaration of SafePointScalarMergeNode for more details on these fields.
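         // A rough, illustrative sketch of the resulting layout:
         //
         //   sfpt extra inputs:            nsr_merge_pointer, selector
         //   sfpt debug info:              ..., SafePointScalarMerge, ...
         //   SafePointScalarMerge inputs:  one SafePointScalarObject per
         //                                 scalar-replaceable input of the Phi (added below)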
1238     sfpt->add_req(nsr_merge_pointer);
1239     sfpt->add_req(selector);
1240 
1241     for (uint i = 1; i < ophi->req(); i++) {
1242       Node* base = ophi->in(i);
1243       JavaObjectNode* ptn = unique_java_object(base);
1244 
1245       // If the base is not scalar replaceable we don't need to register information about
1246       // it at this time.
1247       if (ptn == nullptr || !ptn->scalar_replaceable()) {
1248         continue;
1249       }
1250 
1251       AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1252       Unique_Node_List value_worklist;
1253       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
1254       // TODO 8315003 Remove this bailout
1255       if (value_worklist.size() != 0) {
1256         return false;
1257       }
1258       if (sobj == nullptr) {
1259         return false;
1260       }
1261 
1262       // Now make a pass over the debug information replacing any references
1263       // to the allocated object with "sobj"
1264       Node* ccpp = alloc->result_cast();
1265       sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1266 
1267       // Register the scalarized object as a candidate for reallocation
1268       smerge->add_req(sobj);
1269     }
1270 
1271     // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
1272     sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1273 
1274     // The call to 'replace_edges_in_range' above might have removed the
1275     // reference to ophi that we need at _merge_pointer_idx. The line below makes
1276     // sure the reference is maintained.
1277     sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);

1447   return false;
1448 }
1449 
1450 // Returns true if at least one of the arguments to the call is an object
1451 // that does not escape globally.
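     // A hypothetical example ('Point' and 'f' are illustrative):
     //
     //   static int f(Point p) { return p.x; }   // p does not escape in f()
     //   ...
     //   Point p = new Point();
     //   f(p);       // not_global_escape(p) is true, so has_arg_escape()
     //               // returns true for this call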
1452 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1453   if (call->method() != nullptr) {
1454     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1455     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1456       Node* p = call->in(idx);
1457       if (not_global_escape(p)) {
1458         return true;
1459       }
1460     }
1461   } else {
1462     const char* name = call->as_CallStaticJava()->_name;
1463     assert(name != nullptr, "no name");
1464     // no arg escapes through uncommon traps
1465     if (strcmp(name, "uncommon_trap") != 0) {
1466       // process_call_arguments() assumes that all arguments escape globally
1467       const TypeTuple* d = call->tf()->domain_sig();
1468       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1469         const Type* at = d->field_at(i);
1470         if (at->isa_oopptr() != nullptr) {
1471           return true;
1472         }
1473       }
1474     }
1475   }
1476   return false;
1477 }
1478 
1479 
1480 
1481 // Utility function for nodes that load an object
1482 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1483   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1484   // ThreadLocal has RawPtr type.
1485   const Type* t = _igvn->type(n);
1486   if (t->make_ptr() != nullptr) {
1487     Node* adr = n->in(MemNode::Address);

1521       // first IGVN optimization when escape information is still available.
1522       record_for_optimizer(n);
1523     } else if (n->is_Allocate()) {
1524       add_call_node(n->as_Call());
1525       record_for_optimizer(n);
1526     } else {
1527       if (n->is_CallStaticJava()) {
1528         const char* name = n->as_CallStaticJava()->_name;
1529         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1530           return; // Skip uncommon traps
1531         }
1532       }
1533       // Don't mark as processed since call's arguments have to be processed.
1534       delayed_worklist->push(n);
1535       // Check if a call returns an object.
1536       if ((n->as_Call()->returns_pointer() &&
1537            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1538           (n->is_CallStaticJava() &&
1539            n->as_CallStaticJava()->is_boxing_method())) {
1540         add_call_node(n->as_Call());
1541       } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
1542         bool returns_oop = false;
1543         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
1544           ProjNode* pn = n->fast_out(i)->as_Proj();
1545           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
1546             returns_oop = true;
1547           }
1548         }
1549         if (returns_oop) {
1550           add_call_node(n->as_Call());
1551         }
1552       }
1553     }
1554     return;
1555   }
1556   // Put this check here to process call arguments since some call nodes
1557   // point to phantom_obj.
1558   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1559     return; // Skip predefined nodes.
1560   }
1561   switch (opcode) {
1562     case Op_AddP: {
1563       Node* base = get_addp_base(n);
1564       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1565       // Field nodes are created for all field types. They are used in
1566       // adjust_scalar_replaceable_state() and split_unique_types().
1567       // Note, non-oop fields will have only base edges in Connection
1568       // Graph because such fields are not used for oop loads and stores.
1569       int offset = address_offset(n, igvn);
1570       add_field(n, PointsToNode::NoEscape, offset);
1571       if (ptn_base == nullptr) {
1572         delayed_worklist->push(n); // Process it later.
1573       } else {
1574         n_ptn = ptnode_adr(n_idx);
1575         add_base(n_ptn->as_Field(), ptn_base);
1576       }
1577       break;
1578     }
1579     case Op_CastX2P: {
1580       map_ideal_node(n, phantom_obj);
1581       break;
1582     }
1583     case Op_InlineType:
1584     case Op_CastPP:
1585     case Op_CheckCastPP:
1586     case Op_EncodeP:
1587     case Op_DecodeN:
1588     case Op_EncodePKlass:
1589     case Op_DecodeNKlass: {
1590       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1591       break;
1592     }
1593     case Op_CMoveP: {
1594       add_local_var(n, PointsToNode::NoEscape);
1595       // Do not add edges during the first iteration because some inputs
1596       // may not be defined yet.
1597       delayed_worklist->push(n);
1598       break;
1599     }
1600     case Op_ConP:
1601     case Op_ConN:
1602     case Op_ConNKlass: {
1603       // assume all oop constants globally escape except for null

1635     case Op_PartialSubtypeCheck: {
1636       // Produces Null or notNull and is used only in CmpP so
1637       // phantom_obj could be used.
1638       map_ideal_node(n, phantom_obj); // Result is unknown
1639       break;
1640     }
1641     case Op_Phi: {
1642       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1643       // ThreadLocal has RawPtr type.
1644       const Type* t = n->as_Phi()->type();
1645       if (t->make_ptr() != nullptr) {
1646         add_local_var(n, PointsToNode::NoEscape);
1647         // Do not add edges during the first iteration because some inputs
1648         // may not be defined yet.
1649         delayed_worklist->push(n);
1650       }
1651       break;
1652     }
1653     case Op_Proj: {
1654       // we are only interested in the oop result projection from a call
1655       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
1656           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
1657         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1658                n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1659         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1660       }
1661       break;
1662     }
1663     case Op_Rethrow: // Exception object escapes
1664     case Op_Return: {
1665       if (n->req() > TypeFunc::Parms &&
1666           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1667         // Treat Return value as LocalVar with GlobalEscape escape state.
1668         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1669       }
1670       break;
1671     }
1672     case Op_CompareAndExchangeP:
1673     case Op_CompareAndExchangeN:
1674     case Op_GetAndSetP:
1675     case Op_GetAndSetN: {
1676       add_objload_to_connection_graph(n, delayed_worklist);
1677       // fall-through
1678     }

1740   if (n->is_Call()) {
1741     process_call_arguments(n->as_Call());
1742     return;
1743   }
1744   assert(n->is_Store() || n->is_LoadStore() ||
1745          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1746          "node should be registered already");
1747   int opcode = n->Opcode();
1748   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1749   if (gc_handled) {
1750     return; // Ignore node if already handled by GC.
1751   }
1752   switch (opcode) {
1753     case Op_AddP: {
1754       Node* base = get_addp_base(n);
1755       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1756       assert(ptn_base != nullptr, "field's base should be registered");
1757       add_base(n_ptn->as_Field(), ptn_base);
1758       break;
1759     }
1760     case Op_InlineType:
1761     case Op_CastPP:
1762     case Op_CheckCastPP:
1763     case Op_EncodeP:
1764     case Op_DecodeN:
1765     case Op_EncodePKlass:
1766     case Op_DecodeNKlass: {
1767       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1768       break;
1769     }
1770     case Op_CMoveP: {
1771       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1772         Node* in = n->in(i);
1773         if (in == nullptr) {
1774           continue;  // ignore null
1775         }
1776         Node* uncast_in = in->uncast();
1777         if (uncast_in->is_top() || uncast_in == n) {
1778           continue;  // ignore top or inputs which go back this node
1779         }
1780         PointsToNode* ptn = ptnode_adr(in->_idx);

1795       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1796       // ThreadLocal has RawPtr type.
1797       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1798       for (uint i = 1; i < n->req(); i++) {
1799         Node* in = n->in(i);
1800         if (in == nullptr) {
1801           continue;  // ignore null
1802         }
1803         Node* uncast_in = in->uncast();
1804         if (uncast_in->is_top() || uncast_in == n) {
1805           continue;  // ignore top or inputs which go back to this node
1806         }
1807         PointsToNode* ptn = ptnode_adr(in->_idx);
1808         assert(ptn != nullptr, "node should be registered");
1809         add_edge(n_ptn, ptn);
1810       }
1811       break;
1812     }
1813     case Op_Proj: {
1814       // we are only interested in the oop result projection from a call
1815       assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1816              n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1817       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1818       break;
1819     }
1820     case Op_Rethrow: // Exception object escapes
1821     case Op_Return: {
1822       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1823              "Unexpected node type");
1824       // Treat Return value as LocalVar with GlobalEscape escape state.
1825       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1826       break;
1827     }
1828     case Op_CompareAndExchangeP:
1829     case Op_CompareAndExchangeN:
1830     case Op_GetAndSetP:
1831     case Op_GetAndSetN:{
1832       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1833       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1834       // fall-through
1835     }
1836     case Op_CompareAndSwapP:

1972     PointsToNode* ptn = ptnode_adr(val->_idx);
1973     assert(ptn != nullptr, "node should be registered");
1974     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1975     // Add edge to object for unsafe access with offset.
1976     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1977     assert(adr_ptn != nullptr, "node should be registered");
1978     if (adr_ptn->is_Field()) {
1979       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
1980       add_edge(adr_ptn, ptn);
1981     }
1982     return true;
1983   }
1984 #ifdef ASSERT
1985   n->dump(1);
1986   assert(false, "not unsafe");
1987 #endif
1988   return false;
1989 }
1990 
1991 void ConnectionGraph::add_call_node(CallNode* call) {
1992   assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
1993   uint call_idx = call->_idx;
1994   if (call->is_Allocate()) {
1995     Node* k = call->in(AllocateNode::KlassNode);
1996     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
1997     assert(kt != nullptr, "TypeKlassPtr required.");
1998     PointsToNode::EscapeState es = PointsToNode::NoEscape;
1999     bool scalar_replaceable = true;
2000     NOT_PRODUCT(const char* nsr_reason = "");
2001     if (call->is_AllocateArray()) {
2002       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
2003         es = PointsToNode::GlobalEscape;
2004       } else {
2005         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2006         if (length < 0) {
2007           // Not scalar replaceable if the length is not constant.
2008           scalar_replaceable = false;
2009           NOT_PRODUCT(nsr_reason = "has a non-constant length");
2010         } else if (length > EliminateAllocationArraySizeLimit) {
2011           // Not scalar replaceable if the length is too big.
2012           scalar_replaceable = false;

2048     //
2049     //    - all oop arguments are escaping globally;
2050     //
2051     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2052     //
2053     //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
2054     //
2055     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2056     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2057     //      during call is returned;
2058     //    - mapped to ArgEscape LocalVar node pointing to object arguments
2059     //      which are returned and do not escape during the call;
2060     //
2061     //    - oop arguments' escaping status is defined by bytecode analysis;
2062     //
2063     // For a static call, we know exactly what method is being called.
2064     // Use bytecode estimator to record whether the call's return value escapes.
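         // Hypothetical examples of the cases above ('Point' and 'CACHE' are illustrative):
         //
         //   static Point make()        { return new Point(); } // fresh allocation returned:
         //                                                      //   NoEscape JavaObject
         //   static Point same(Point p) { return p; }           // an argument is returned:
         //                                                      //   ArgEscape LocalVar
         //   static Point get(int i)    { return CACHE[i]; }    // unknown oop returned:
         //                                                      //   unknown object (phantom_obj)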
2065     ciMethod* meth = call->as_CallJava()->method();
2066     if (meth == nullptr) {
2067       const char* name = call->as_CallStaticJava()->_name;
2068       assert(strncmp(name, "_multianewarray", 15) == 0 ||
2069              strncmp(name, "_load_unknown_inline", 20) == 0, "TODO: add failed case check");
2070       // Returns a newly allocated non-escaped object.
2071       add_java_object(call, PointsToNode::NoEscape);
2072       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
2073     } else if (meth->is_boxing_method()) {
2074       // Returns boxing object
2075       PointsToNode::EscapeState es;
2076       vmIntrinsics::ID intr = meth->intrinsic_id();
2077       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2078         // It does not escape if object is always allocated.
2079         es = PointsToNode::NoEscape;
2080       } else {
2081         // It escapes globally if object could be loaded from cache.
2082         es = PointsToNode::GlobalEscape;
2083       }
2084       add_java_object(call, es);
2085       if (es == PointsToNode::GlobalEscape) {
2086         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2087       }
2088     } else {
2089       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2090       call_analyzer->copy_dependencies(_compile->dependencies());
2091       if (call_analyzer->is_return_allocated()) {
2092         // Returns a newly allocated non-escaped object, so simply
2093         // update dependency information.
2094         // Mark it as NoEscape so that objects referenced by
2095         // its fields will be marked as NoEscape at least.
2096         add_java_object(call, PointsToNode::NoEscape);
2097         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2098       } else {
2099         // Determine whether any arguments are returned.
2100         const TypeTuple* d = call->tf()->domain_cc();
2101         bool ret_arg = false;
2102         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2103           if (d->field_at(i)->isa_ptr() != nullptr &&
2104               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2105             ret_arg = true;
2106             break;
2107           }
2108         }
2109         if (ret_arg) {
2110           add_local_var(call, PointsToNode::ArgEscape);
2111         } else {
2112           // Returns unknown object.
2113           map_ideal_node(call, phantom_obj);
2114         }
2115       }
2116     }
2117   } else {
2118     // Another type of call, assume the worst case:
2119     // returned value is unknown and globally escapes.
2120     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");

2128 #ifdef ASSERT
2129     case Op_Allocate:
2130     case Op_AllocateArray:
2131     case Op_Lock:
2132     case Op_Unlock:
2133       assert(false, "should be done already");
2134       break;
2135 #endif
2136     case Op_ArrayCopy:
2137     case Op_CallLeafNoFP:
2138       // Most array copies are ArrayCopy nodes at this point but there
2139       // are still a few direct calls to the copy subroutines (See
2140       // PhaseStringOpts::copy_string())
2141       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2142         call->as_CallLeaf()->is_call_to_arraycopystub();
2143       // fall through
2144     case Op_CallLeafVector:
2145     case Op_CallLeaf: {
2146       // Stub calls: objects do not escape but they are not scalar replaceable.
2147       // Adjust escape state for outgoing arguments.
2148       const TypeTuple * d = call->tf()->domain_sig();
2149       bool src_has_oops = false;
2150       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2151         const Type* at = d->field_at(i);
2152         Node *arg = call->in(i);
2153         if (arg == nullptr) {
2154           continue;
2155         }
2156         const Type *aat = _igvn->type(arg);
2157         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2158           continue;
2159         }
2160         if (arg->is_AddP()) {
2161           //
2162           // The inline_native_clone() case when the arraycopy stub is called
2163           // after the allocation before Initialize and CheckCastPP nodes.
2164           // Or normal arraycopy for object arrays case.
2165           //
2166           // Set AddP's base (Allocate) as not scalar replaceable since
2167           // pointer to the base (with offset) is passed as argument.
2168           //
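               // A hypothetical example ('Point' is illustrative): for
               //   Point q = (Point) p.clone();
               // the copy stub sees AddP(new_obj, new_obj, header_offset) as its
               // destination (header_offset is illustrative), so the new
               // allocation's base cannot be scalar replaced.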
2169           arg = get_addp_base(arg);
2170         }
2171         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2172         assert(arg_ptn != nullptr, "should be registered");
2173         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2174         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2175           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2176                  aat->isa_ptr() != nullptr, "expecting a Ptr");
2177           bool arg_has_oops = aat->isa_oopptr() &&
2178                               (aat->isa_instptr() ||
2179                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
2180                                (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
2181                                                                aat->isa_aryptr()->is_flat() &&
2182                                                                aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
2183           if (i == TypeFunc::Parms) {
2184             src_has_oops = arg_has_oops;
2185           }
2186           //
2187           // src or dst could be j.l.Object when the other is a basic type array:
2188           //
2189           //   arraycopy(char[],0,Object*,0,size);
2190           //   arraycopy(Object*,0,char[],0,size);
2191           //
2192           // Don't add edges in such cases.
2193           //
2194           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2195                                        arg_has_oops && (i > TypeFunc::Parms);
2196 #ifdef ASSERT
2197           if (!(is_arraycopy ||
2198                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2199                 (call->as_CallLeaf()->_name != nullptr &&
2200                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2201                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2202                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

2213                   strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2214                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2215                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2216                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2217                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2218                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2219                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2220                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2221                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2222                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2223                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2224                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2225                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2226                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2227                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2228                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2229                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2230                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2231                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2232                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2233                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2234                   strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
2235                   strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
2236                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2237                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2238                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2239                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2240                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2241                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2242                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2243                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2244                  ))) {
2245             call->dump();
2246             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2247           }
2248 #endif
2249           // Always process arraycopy's destination object since
2250           // we need to add all possible edges to references in
2251           // source object.
2252           if (arg_esc >= PointsToNode::ArgEscape &&
2253               !arg_is_arraycopy_dest) {
2254             continue;
2255           }

2282           }
2283         }
2284       }
2285       break;
2286     }
2287     case Op_CallStaticJava: {
2288       // For a static call, we know exactly what method is being called.
2289       // Use bytecode estimator to record the call's escape effects
2290 #ifdef ASSERT
2291       const char* name = call->as_CallStaticJava()->_name;
2292       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2293 #endif
2294       ciMethod* meth = call->as_CallJava()->method();
2295       if ((meth != nullptr) && meth->is_boxing_method()) {
2296         break; // Boxing methods do not modify any oops.
2297       }
2298       BCEscapeAnalyzer* call_analyzer = (meth !=nullptr) ? meth->get_bcea() : nullptr;
2299       // fall-through if not a Java method or no analyzer information
2300       if (call_analyzer != nullptr) {
2301         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2302         const TypeTuple* d = call->tf()->domain_cc();
2303         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2304           const Type* at = d->field_at(i);
2305           int k = i - TypeFunc::Parms;
2306           Node* arg = call->in(i);
2307           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2308           if (at->isa_ptr() != nullptr &&
2309               call_analyzer->is_arg_returned(k)) {
2310             // The call returns arguments.
2311             if (call_ptn != nullptr) { // Is call's result used?
2312               assert(call_ptn->is_LocalVar(), "node should be registered");
2313               assert(arg_ptn != nullptr, "node should be registered");
2314               add_edge(call_ptn, arg_ptn);
2315             }
2316           }
2317           if (at->isa_oopptr() != nullptr &&
2318               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2319             if (!call_analyzer->is_arg_stack(k)) {
2320               // The argument escapes globally
2321               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2322             } else {

2326                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2327               }
2328             }
2329           }
2330         }
2331         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2332           // The call returns arguments.
2333           assert(call_ptn->edge_count() > 0, "sanity");
2334           if (!call_analyzer->is_return_local()) {
2335             // Also returns an unknown object.
2336             add_edge(call_ptn, phantom_obj);
2337           }
2338         }
2339         break;
2340       }
2341     }
2342     default: {
2343       // Fall-through here if not a Java method or no analyzer information
2344       // or some other type of call, assume the worst case: all arguments
2345       // globally escape.
2346       const TypeTuple* d = call->tf()->domain_cc();
2347       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2348         const Type* at = d->field_at(i);
2349         if (at->isa_oopptr() != nullptr) {
2350           Node* arg = call->in(i);
2351           if (arg->is_AddP()) {
2352             arg = get_addp_base(arg);
2353           }
2354           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2355           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2356         }
2357       }
2358     }
2359   }
2360 }
2361 
2362 
2363 // Finish Graph construction.
2364 bool ConnectionGraph::complete_connection_graph(
2365                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2366                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

2739     PointsToNode* base = i.get();
2740     if (base->is_JavaObject()) {
2741       // Skip Allocate's fields which will be processed later.
2742       if (base->ideal_node()->is_Allocate()) {
2743         return 0;
2744       }
2745       assert(base == null_obj, "only null ptr base expected here");
2746     }
2747   }
2748   if (add_edge(field, phantom_obj)) {
2749     // New edge was added
2750     new_edges++;
2751     add_field_uses_to_worklist(field);
2752   }
2753   return new_edges;
2754 }
2755 
2756 // Find fields initializing values for allocations.
2757 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2758   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2759   PointsToNode* init_val = phantom_obj;
2760   Node* alloc = pta->ideal_node();
2761 
2762   // Do nothing for Allocate nodes since their field values are
2763   // "known" unless they are initialized by arraycopy/clone.
2764   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2765     if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2766       // Non-flat inline type arrays are initialized with
2767       // the default value instead of null. Handle them here.
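           // A hypothetical example ('MyValue' is illustrative):
           //
           //   MyValue[] a = new MyValue[8];  // elements start out as MyValue's
           //                                  // default instance rather than null,
           //                                  // so that is the fields' initial value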
2768       init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
2769       assert(init_val != nullptr, "default value should be registered");
2770     } else {
2771       return 0;
2772     }
2773   }
2774   // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
2775   assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
2776 #ifdef ASSERT
2777   if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
2778     const char* name = alloc->as_CallStaticJava()->_name;
2779     assert(strncmp(name, "_multianewarray", 15) == 0 ||
2780            strncmp(name, "_load_unknown_inline", 20) == 0, "sanity");
2781   }
2782 #endif
2783   // Non-escaped allocations returned from Java or runtime calls have unknown values in fields.
2784   int new_edges = 0;
2785   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2786     PointsToNode* field = i.get();
2787     if (field->is_Field() && field->as_Field()->is_oop()) {
2788       if (add_edge(field, init_val)) {
2789         // New edge was added
2790         new_edges++;
2791         add_field_uses_to_worklist(field->as_Field());
2792       }
2793     }
2794   }
2795   return new_edges;
2796 }
2797 
2798 // Find fields initializing values for allocations.
2799 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2800   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2801   Node* alloc = pta->ideal_node();
2802   // Do nothing for Call nodes since their field values are unknown.
2803   if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2804     return 0;
2805   }
2806   InitializeNode* ini = alloc->as_Allocate()->initialization();
2807   bool visited_bottom_offset = false;
2808   GrowableArray<int> offsets_worklist;
2809   int new_edges = 0;
2810 
2811   // Check if an oop field's initializing value is recorded and add
2812   // a corresponding null if the field's value is not recorded.
2813   // Connection Graph does not record a default initialization by null
2814   // captured by Initialize node.
2815   //
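       // A hypothetical example ('Node' is illustrative):
       //
       //   Node n = new Node();      // n.next is never written, so no initializing
       //   if (n.next == p) { ... }  // value is recorded; adding an edge to null
       //                             // lets this pointer compare be optimized correctly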
2816   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2817     PointsToNode* field = i.get(); // Field (AddP)
2818     if (!field->is_Field() || !field->as_Field()->is_oop()) {
2819       continue; // Not oop field
2820     }
2821     int offset = field->as_Field()->offset();
2822     if (offset == Type::OffsetBot) {
2823       if (!visited_bottom_offset) {

2869               } else {
2870                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2871                   tty->print_cr("----------init store has invalid value -----");
2872                   store->dump();
2873                   val->dump();
2874                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2875                 }
2876                 for (EdgeIterator j(val); j.has_next(); j.next()) {
2877                   PointsToNode* obj = j.get();
2878                   if (obj->is_JavaObject()) {
2879                     if (!field->points_to(obj->as_JavaObject())) {
2880                       missed_obj = obj;
2881                       break;
2882                     }
2883                   }
2884                 }
2885               }
2886               if (missed_obj != nullptr) {
2887                 tty->print_cr("----------field---------------------------------");
2888                 field->dump();
2889                 tty->print_cr("----------missed reference to object------------");
2890                 missed_obj->dump();
2891                 tty->print_cr("----------object referenced by init store-------");
2892                 store->dump();
2893                 val->dump();
2894                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2895               }
2896             }
2897 #endif
2898           } else {
2899             // There could be initializing stores which follow allocation.
2900             // For example, a volatile field store is not collected
2901             // by Initialize node.
2902             //
2903             // Need to check for dependent loads to separate such stores from
2904             // stores which follow loads. For now, add initial value null so
2905             // that compare pointers optimization works correctly.
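                 // A hypothetical example ('A' is illustrative):
                 //
                 //   class A { volatile Object f; }
                 //   A a = new A();
                 //   a.f = x;   // volatile store: not captured by the Initialize
                 //              // node, so no initializing value is found here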
2906           }
2907         }
2908         if (value == nullptr) {
2909           // A field's initializing value was not recorded. Add null.
2910           if (add_edge(field, null_obj)) {
2911             // New edge was added

3188         assert(field->edge_count() > 0, "sanity");
3189       }
3190     }
3191   }
3192 }
3193 #endif
3194 
3195 // Optimize ideal graph.
3196 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3197                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3198   Compile* C = _compile;
3199   PhaseIterGVN* igvn = _igvn;
3200   if (EliminateLocks) {
3201     // Mark locks before changing ideal graph.
3202     int cnt = C->macro_count();
3203     for (int i = 0; i < cnt; i++) {
3204       Node *n = C->macro_node(i);
3205       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3206         AbstractLockNode* alock = n->as_AbstractLock();
3207         if (!alock->is_non_esc_obj()) {
3208           const Type* obj_type = igvn->type(alock->obj_node());
3209           if (can_eliminate_lock(alock) && !obj_type->is_inlinetypeptr()) {
3210             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3211             // The lock could be marked eliminated by lock coarsening
3212             // code during first IGVN before EA. Replace coarsened flag
3213             // to eliminate all associated locks/unlocks.
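                 // A hypothetical example ('counter' is illustrative):
                 //
                 //   Object lock = new Object();         // never escapes
                 //   synchronized (lock) { counter++; }  // Lock/Unlock marked as
                 //                                       // non-escaping and eliminated later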
3214 #ifdef ASSERT
3215             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3216 #endif
3217             alock->set_non_esc_obj();
3218           }
3219         }
3220       }
3221     }
3222   }
3223 
3224   if (OptimizePtrCompare) {
3225     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3226       Node *n = ptr_cmp_worklist.at(i);
3227       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3228       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3229       if (tcmp->singleton()) {

3231 #ifndef PRODUCT
3232         if (PrintOptimizePtrCompare) {
3233           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3234           if (Verbose) {
3235             n->dump(1);
3236           }
3237         }
3238 #endif
3239         igvn->replace_node(n, cmp);
3240       }
3241     }
3242   }
3243 
3244   // For MemBarStoreStore nodes added in library_call.cpp, check
3245   // escape status of associated AllocateNode and optimize out
3246   // MemBarStoreStore node if the allocated object never escapes.
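       // A hypothetical example ('src' is illustrative; assuming the intrinsic in
       // library_call.cpp added the barrier right after the allocation):
       //
       //   int[] copy = src.clone();  // allocation followed by a MemBarStoreStore
       //   return copy[0];            // 'copy' never escapes, so the barrier is
       //                              // removed or replaced by MemBarCPUOrder below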
3247   for (int i = 0; i < storestore_worklist.length(); i++) {
3248     Node* storestore = storestore_worklist.at(i);
3249     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3250     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3251       if (alloc->in(AllocateNode::InlineType) != nullptr) {
3252         // Non-escaping inline type buffer allocations don't require a membar
3253         storestore->as_MemBar()->remove(_igvn);
3254       } else {
3255         MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3256         mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3257         mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3258         igvn->register_new_node_with_optimizer(mb);
3259         igvn->replace_node(storestore, mb);
3260       }
3261     }
3262   }
3263 }
3264 
3265 // Optimize object pointer compares.
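     // A hypothetical example of a compare that may be folded here:
     //
     //   Object a = new Object();
     //   Object b = new Object();
     //   if (a == b) { ... }  // 'a' and 'b' are distinct non-escaping allocations,
     //                        // so the CmpP result is known to be "not equal"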
3266 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3267   assert(OptimizePtrCompare, "sanity");
3268   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3269   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3270   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0,1]
3271 
3272   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3273   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3274   JavaObjectNode* jobj1 = unique_java_object(left);
3275   JavaObjectNode* jobj2 = unique_java_object(right);
3276 
3277   // The use of this method during allocation merge reduction may cause 'left'
3278   // or 'right' to be something (e.g., a Phi) that isn't in the connection graph or
3279   // that doesn't reference a unique java object.
3280   if (ptn1 == nullptr || ptn2 == nullptr ||

3402   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3403   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3404   PointsToNode* ptadr = _nodes.at(n->_idx);
3405   if (ptadr != nullptr) {
3406     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3407     return;
3408   }
3409   Compile* C = _compile;
3410   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3411   map_ideal_node(n, ptadr);
3412   // Add edge from arraycopy node to source object.
3413   (void)add_edge(ptadr, src);
3414   src->set_arraycopy_src();
3415   // Add edge from destination object to arraycopy node.
3416   (void)add_edge(dst, ptadr);
3417   dst->set_arraycopy_dst();
3418 }
3419 
3420 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3421   const Type* adr_type = n->as_AddP()->bottom_type();
3422   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
3423   BasicType bt = T_INT;
3424   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
3425     // Check only oop fields.
3426     if (!adr_type->isa_aryptr() ||
3427         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3428         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3429       // OffsetBot is used to reference an array element. Ignore the first AddP.
3430       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3431         bt = T_OBJECT;
3432       }
3433     }
3434   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3435     if (adr_type->isa_instptr()) {
3436       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
3437       if (field != nullptr) {
3438         bt = field->layout_type();
3439       } else {
3440         // Check for unsafe oop field access
3441         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3442             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3443             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3444             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3445           bt = T_OBJECT;
3446           (*unsafe) = true;
3447         }
3448       }
3449     } else if (adr_type->isa_aryptr()) {
3450       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3451         // Ignore array length load.
3452       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3453         // Ignore first AddP.
3454       } else {
3455         const Type* elemtype = adr_type->is_aryptr()->elem();
3456         if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
3457           ciInlineKlass* vk = elemtype->inline_klass();
3458           field_offset += vk->first_field_offset();
3459           bt = vk->get_field_by_offset(field_offset, false)->layout_type();
3460         } else {
3461           bt = elemtype->array_element_basic_type();
3462         }
3463       }
3464     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3465       // Allocation initialization, ThreadLocal field access, unsafe access
3466       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3467           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3468           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3469           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3470         bt = T_OBJECT;
3471       }
3472     }
3473   }
3474   // Note: T_NARROWOOP is not classed as a real reference type
3475   return (is_reference_type(bt) || bt == T_NARROWOOP);
3476 }
3477 
3478 // Returns the unique pointed-to java object or null.
3479 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3480   // If the node was created after the escape computation we can't answer.
3481   uint idx = n->_idx;
3482   if (idx >= nodes_size()) {

3640             return true;
3641           }
3642         }
3643       }
3644     }
3645   }
3646   return false;
3647 }
3648 
3649 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3650   const Type *adr_type = phase->type(adr);
3651   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
3652     // We are computing a raw address for a store captured by an Initialize;
3653     // compute an appropriate address type. AddP cases #3 and #5 (see below).
3654     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3655     assert(offs != Type::OffsetBot ||
3656            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3657            "offset must be a constant or this is the initialization of an array");
3658     return offs;
3659   }
3660   return adr_type->is_ptr()->flat_offset();


3661 }
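// A rough illustration, assuming a hypothetical class Point with an int field x:
// for an initializing store captured by an Initialize node, e.g.
//
//   Point p = new Point();
//   p.x = 5;               // store may be captured by the Initialize
//
// the address is still a raw pointer, so the offset is taken from the constant
// on the AddP's Offset input rather than from an oop address type.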
3662 
3663 Node* ConnectionGraph::get_addp_base(Node *addp) {
3664   assert(addp->is_AddP(), "must be AddP");
3665   //
3666   // AddP cases for Base and Address inputs:
3667   // case #1. Direct object's field reference:
3668   //     Allocate
3669   //       |
3670   //     Proj #5 ( oop result )
3671   //       |
3672   //     CheckCastPP (cast to instance type)
3673   //      | |
3674   //     AddP  ( base == address )
3675   //
3676   // case #2. Indirect object's field reference:
3677   //      Phi
3678   //       |
3679   //     CastPP (cast to instance type)
3680   //      | |

3794   }
3795   return nullptr;
3796 }
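// A rough illustration of AddP case #1 above, assuming a hypothetical class
// Point with an int field x:
//
//   Point p = new Point();   // Allocate -> Proj #5 (oop result) -> CheckCastPP
//   p.x = 1;                 // AddP whose Base and Address are that CheckCastPP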
3797 
3798 //
3799 // Adjust the type and inputs of an AddP which computes the
3800 // address of a field of an instance
3801 //
3802 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3803   PhaseGVN* igvn = _igvn;
3804   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3805   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3806   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3807   if (t == nullptr) {
3808     // We are computing a raw address for a store captured by an Initialize;
3809     // compute an appropriate address type (cases #3 and #5).
3810     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3811     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3812     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3813     assert(offs != Type::OffsetBot, "offset must be a constant");
3814     if (base_t->isa_aryptr() != nullptr) {
3815       // In the case of a flat inline type array, each field has its
3816       // own slice so we need to extract the field being accessed from
3817       // the address computation
3818       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
3819     } else {
3820       t = base_t->add_offset(offs)->is_oopptr();
3821     }
3822   }
3823   int inst_id = base_t->instance_id();
3824   assert(!t->is_known_instance() || t->instance_id() == inst_id,
3825                              "old type must be non-instance or match new type");
3826 
3827   // The type 't' could be a subclass of 'base_t'.
3828   // As a result, t->offset() could be larger than base_t's size, which would
3829   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3830   // constructor verifies the correctness of the offset.
3831   //
3832   // This can happen on a subclass's branch (from type profiling
3833   // inlining) which was not eliminated during parsing because the exactness
3834   // of the allocation type was not propagated to the subclass type check.
3835   //
3836   // Or the type 't' might not be related to 'base_t' at all.
3837   // That can happen when the CHA type differs from the MDO type on a dead path
3838   // (for example, from an instanceof check) which is not collapsed during parsing.
3839   //
3840   // Do nothing for such an AddP node and don't process its users since
3841   // this code branch will go away.
3842   //
3843   if (!t->is_known_instance() &&
3844       !base_t->maybe_java_subtype_of(t)) {
3845      return false; // bail out
3846   }
3847   const TypePtr* tinst = base_t->add_offset(t->offset());
3848   if (tinst->isa_aryptr() && t->isa_aryptr()) {
3849     // In the case of a flat inline type array, each field has its
3850     // own slice so we need to keep track of the field being accessed.
3851     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
3852     // Keep array properties (not flat/null-free)
3853     tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
3854     if (tinst == nullptr) {
3855       return false; // Skip dead path with inconsistent properties
3856     }
3857   }
3858 
3859   // Do NOT remove the next line: it ensures a new alias index is allocated
3860   // for the instance type. Note: the C++ compiler will not remove the call
3861   // since it has a side effect.
3862   int alias_idx = _compile->get_alias_index(tinst);
3863   igvn->set_type(addp, tinst);
3864   // record the allocation in the node map
3865   set_map(addp, get_map(base->_idx));
3866   // Set addp's Base and Address to 'base'.
3867   Node *abase = addp->in(AddPNode::Base);
3868   Node *adr   = addp->in(AddPNode::Address);
3869   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3870       adr->in(0)->_idx == (uint)inst_id) {
3871     // Skip AddP cases #3 and #5.
3872   } else {
3873     assert(!abase->is_top(), "sanity"); // AddP case #3
3874     if (abase != base) {
3875       igvn->hash_delete(addp);
3876       addp->set_req(AddPNode::Base, base);
3877       if (abase == adr) {
3878         addp->set_req(AddPNode::Address, base);

4543         ptnode_adr(n->_idx)->dump();
4544         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4545 #endif
4546         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4547         return;
4548       } else {
4549         Node *val = get_map(jobj->idx());   // CheckCastPP node
4550         TypeNode *tn = n->as_Type();
4551         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4552         assert(tinst != nullptr && tinst->is_known_instance() &&
4553                tinst->instance_id() == jobj->idx(), "instance type expected.");
4554 
4555         const Type *tn_type = igvn->type(tn);
4556         const TypeOopPtr *tn_t;
4557         if (tn_type->isa_narrowoop()) {
4558           tn_t = tn_type->make_ptr()->isa_oopptr();
4559         } else {
4560           tn_t = tn_type->isa_oopptr();
4561         }
4562         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
4563           if (tn_t->isa_aryptr()) {
4564             // Keep array properties (not flat/null-free)
4565             tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
4566             if (tinst == nullptr) {
4567               continue; // Skip dead path with inconsistent properties
4568             }
4569           }
4570           if (tn_type->isa_narrowoop()) {
4571             tn_type = tinst->make_narrowoop();
4572           } else {
4573             tn_type = tinst;
4574           }
4575           igvn->hash_delete(tn);
4576           igvn->set_type(tn, tn_type);
4577           tn->set_type(tn_type);
4578           igvn->hash_insert(tn);
4579           record_for_optimizer(n);
4580         } else {
4581           assert(tn_type == TypePtr::NULL_PTR ||
4582                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4583                  "unexpected type");
4584           continue; // Skip dead path with different type
4585         }
4586       }
4587     } else {
4588       debug_only(n->dump();)
4589       assert(false, "EA: unexpected node");
4590       continue;
4591     }
4592     // push allocation's users on appropriate worklist
4593     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4594       Node *use = n->fast_out(i);
4595       if (use->is_Mem() && use->in(MemNode::Address) == n) {
4596         // Load/store to instance's field
4597         memnode_worklist.append_if_missing(use);
4598       } else if (use->is_MemBar()) {
4599         if (use->in(TypeFunc::Memory) == n) { // Ignore precedence edge
4600           memnode_worklist.append_if_missing(use);
4601         }
4602       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4603         Node* addp2 = find_second_addp(use, n);
4604         if (addp2 != nullptr) {
4605           alloc_worklist.append_if_missing(addp2);
4606         }
4607         alloc_worklist.append_if_missing(use);
4608       } else if (use->is_Phi() ||
4609                  use->is_CheckCastPP() ||
4610                  use->is_EncodeNarrowPtr() ||
4611                  use->is_DecodeNarrowPtr() ||
4612                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4613         alloc_worklist.append_if_missing(use);
4614 #ifdef ASSERT
4615       } else if (use->is_Mem()) {
4616         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4617       } else if (use->is_MergeMem()) {
4618         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4619       } else if (use->is_SafePoint()) {
4620         // Look for MergeMem nodes for calls which reference a unique allocation
4621         // (through CheckCastPP nodes) even for debug info.
4622         Node* m = use->in(TypeFunc::Memory);
4623         if (m->is_MergeMem()) {
4624           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4625         }
4626       } else if (use->Opcode() == Op_EncodeISOArray) {
4627         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4628           // EncodeISOArray overwrites destination array
4629           memnode_worklist.append_if_missing(use);
4630         }
4631       } else if (use->Opcode() == Op_Return) {
4632         // The allocation is referenced by a field of the returned inline type
4633         assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
4634       } else {
4635         uint op = use->Opcode();
4636         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4637             (use->in(MemNode::Memory) == n)) {
4638           // They overwrite the memory edge corresponding to the destination array.
4639           memnode_worklist.append_if_missing(use);
4640         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4641               op == Op_CastP2X || op == Op_StoreCM ||
4642               op == Op_FastLock || op == Op_AryEq ||
4643               op == Op_StrComp || op == Op_CountPositives ||
4644               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4645               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4646               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4647               op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
4648               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4649           n->dump();
4650           use->dump();
4651           assert(false, "EA: missing allocation reference path");
4652         }
4653 #endif
4654       }
4655     }
4656 
4657   }
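  // A rough illustration of the worklist pushes above, assuming a hypothetical
  // class Point with an int field x: for
  //
  //   Point p = new Point();  p.x = 1;
  //
  // the AddP computing the address of p.x is a pointer use and goes back onto
  // alloc_worklist; when that AddP is processed, the StoreI addressed through
  // it is a memory use and lands on memnode_worklist.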
4658 
4659 #ifdef ASSERT
4660   if (VerifyReduceAllocationMerges) {
4661     for (uint i = 0; i < reducible_merges.size(); i++) {
4662       Node* phi = reducible_merges.at(i);
4663 
4664       if (!reduced_merges.member(phi)) {
4665         phi->dump(2);
4666         phi->dump(-2);
4667         assert(false, "This reducible merge wasn't reduced.");

4727     if (n->is_Phi() || n->is_ClearArray()) {
4728       // we don't need to do anything, but the users must be pushed
4729     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4730       // we don't need to do anything, but the users must be pushed
4731       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4732       if (n == nullptr) {
4733         continue;
4734       }
4735     } else if (n->is_CallLeaf()) {
4736       // Runtime calls with narrow memory input (no MergeMem node)
4737       // get the memory projection
4738       n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4739       if (n == nullptr) {
4740         continue;
4741       }
4742     } else if (n->Opcode() == Op_StrCompressedCopy ||
4743                n->Opcode() == Op_EncodeISOArray) {
4744       // get the memory projection
4745       n = n->find_out_with(Op_SCMemProj);
4746       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4747     } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
4748                strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4749       n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
4750     } else {
4751       assert(n->is_Mem(), "memory node required.");
4752       Node *addr = n->in(MemNode::Address);
4753       const Type *addr_t = igvn->type(addr);
4754       if (addr_t == Type::TOP) {
4755         continue;
4756       }
4757       assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4758       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4759       assert ((uint)alias_idx < new_index_end, "wrong alias index");
4760       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4761       if (_compile->failing()) {
4762         return;
4763       }
4764       if (mem != n->in(MemNode::Memory)) {
4765         // We delay the memory edge update since we need the old one in the
4766         // MergeMem code below when instance memory slices are separated.
4767         set_map(n, mem);
4768       }
4769       if (n->is_Load()) {

4772         // get the memory projection
4773         n = n->find_out_with(Op_SCMemProj);
4774         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4775       }
4776     }
4777     // push user on appropriate worklist
4778     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4779       Node *use = n->fast_out(i);
4780       if (use->is_Phi() || use->is_ClearArray()) {
4781         memnode_worklist.append_if_missing(use);
4782       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4783         if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
4784           continue;
4785         }
4786         memnode_worklist.append_if_missing(use);
4787       } else if (use->is_MemBar() || use->is_CallLeaf()) {
4788         if (use->in(TypeFunc::Memory) == n) { // Ignore precedence edge
4789           memnode_worklist.append_if_missing(use);
4790         }
4791 #ifdef ASSERT
4792       } else if (use->is_Mem()) {
4793         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4794       } else if (use->is_MergeMem()) {
4795         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4796       } else if (use->Opcode() == Op_EncodeISOArray) {
4797         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4798           // EncodeISOArray overwrites destination array
4799           memnode_worklist.append_if_missing(use);
4800         }
4801       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
4802                  strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4803         // store_unknown_inline overwrites destination array
4804         memnode_worklist.append_if_missing(use);
4805       } else {
4806         uint op = use->Opcode();
4807         if ((use->in(MemNode::Memory) == n) &&
4808             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4809           // They overwrite the memory edge corresponding to the destination array.
4810           memnode_worklist.append_if_missing(use);
4811         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4812               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4813               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4814               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
4815           n->dump();
4816           use->dump();
4817           assert(false, "EA: missing memory path");
4818         }
4819 #endif
4820       }
4821     }
4822   }
4823 
4824   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4825   //            Walk each memory slice moving the first node encountered of each
4826   //            instance type to the input corresponding to its alias index.
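  // A rough illustration with made-up indexes: if a field of a unique instance
  // now maps to alias index 7, the first memory node for that field found on a
  // MergeMem's wide slice is moved to the MergeMem input at index 7, so later
  // accesses of that field use their own narrow slice.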
4827   uint length = mergemem_worklist.length();
4828   for( uint next = 0; next < length; ++next ) {
4829     MergeMemNode* nmm = mergemem_worklist.at(next);
4830     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4831     // Note: we don't want to use MergeMemStream here because we only want to
4832     // scan inputs which exist at the start, not ones we add during processing.
4833     // Note 2: MergeMem may already contain instance memory slices added
4834     // during the find_inst_mem() call when memory nodes were processed above.

4895     if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
4896       if (_compile->do_reduce_allocation_merges()) {
4897         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4898       } else if (_invocation > 0) {
4899         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4900       } else {
4901         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4902       }
4903       return;
4904     }
4905 
4906     igvn->hash_insert(nmm);
4907     record_for_optimizer(nmm);
4908   }
4909 
4910   //  Phase 4:  Update the inputs of non-instance memory Phis and
4911   //            the Memory input of memnodes
4912   // First update the inputs of any non-instance Phi's from
4913   // which we split out an instance Phi.  Note we don't have
4914   // to recursively process Phi's encountered on the input memory
4915   // chains as is done in split_memory_phi() since they will
4916   // also be processed here.
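  // A rough illustration with a made-up index: if an instance Phi was split out
  // of a wide memory Phi whose adr_type maps to alias index 5, the original Phi
  // is revisited here and each of its memory inputs is re-resolved with
  // find_inst_mem() for index 5, so its inputs refer to memory of its own slice.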
4917   for (int j = 0; j < orig_phis.length(); j++) {
4918     PhiNode *phi = orig_phis.at(j);
4919     int alias_idx = _compile->get_alias_index(phi->adr_type());
4920     igvn->hash_delete(phi);
4921     for (uint i = 1; i < phi->req(); i++) {
4922       Node *mem = phi->in(i);
4923       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4924       if (_compile->failing()) {
4925         return;
4926       }
4927       if (mem != new_mem) {
4928         phi->set_req(i, new_mem);
4929       }
4930     }
4931     igvn->hash_insert(phi);
4932     record_for_optimizer(phi);
4933   }
4934 
4935   // Update the memory inputs of MemNodes with the value we computed