/*
 * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/locknode.hpp"
#include "opto/macro.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  // If ReduceAllocationMerges is enabled we might call split_through_phi during
  // split_unique_types and that will create additional nodes that need to be
  // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  // the array will be reallocated.
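  // (For illustration: with C->unique() == 1000 the initial capacity is 1100.)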
  _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  set_not_scalar_replaceable(phantom_obj NOT_PRODUCT(COMMA "Phantom object"));
  // Add ConP and ConN null oop nodes
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  set_not_scalar_replaceable(null_obj NOT_PRODUCT(COMMA "Null object"));
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
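  //
  // For example (illustrative Java source, not tied to any particular method):
  //
  //   Object o = new Object();         // Allocate                  -> candidate
  //   synchronized (local) { ... }     // Lock on non-Parm/non-Con  -> candidate
  //   Integer b = Integer.valueOf(x);  // boxing method call        -> candidate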
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp(Phase::_t_escapeAnalysis);
  ResourceMark rm;

  // Add ConP and ConN null oop nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != nullptr) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  NOT_PRODUCT(if (C->should_print_igv(/* Any level */ 1)) C->igv_printer()->set_congraph(congraph);)
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non-escaping objects.
    C->set_congraph(congraph);
  }
  NOT_PRODUCT(if (C->should_print_igv(/* Any level */ 1)) C->igv_printer()->set_congraph(nullptr);)
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }

  C->print_method(PHASE_AFTER_EA, 2);
}
bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  Unique_Node_List reducible_merges;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*>  arraycopy_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  GrowableArray<SafePointNode*>  sfn_worklist;
  GrowableArray<MergeMemNode*>   mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp(Phase::_t_connectionGraph);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
  // Initialize worklist
  if (C->root() != nullptr) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on the ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
        !n->in(MemNode::Address)->is_AddP() &&
        _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
      // A Load/Store at the mark word address is at offset 0 and so has no AddP, which confuses EA.
      Node* addp = AddPNode::make_with_base(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
      _igvn->register_new_node_with_optimizer(addp);
      _igvn->replace_input_of(n, MemNode::Address, addp);
      ideal_nodes.push(addp);
      _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
    }
    // Create PointsTo nodes and add them to the Connection Graph. Called
    // only once per ideal node since ideal_nodes is a Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != nullptr && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only allocation and java static call results are interesting.
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect pointer compare nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that, depending on the
        // escape status of the associated Allocate node, some of them
        // may be eliminated.
        if (!UseStoreStoreForCtor || n->req() > MemBarNode::Precedent) {
          storestore_worklist.append(n->as_MemBarStoreStore());
        }
        // If the MemBarStoreStore has a precedent edge, also add it to the
        // optimizer worklist (like MemBarRelease); fall through.
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so that if one of their inputs is
        // non-escaping, we can record a unique type.
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to the graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and that all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for Call node processing,
  // makes calls to CI to resolve symbols (types, fields, methods) referenced
  // in bytecode. During symbol resolution the VM may throw an exception which
  // CI cleans up and converts to a compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  _compile->print_method(PHASE_EA_AFTER_INITIAL_CONGRAPH, 4);

  // 2. Finish graph construction by propagating references to all
  //    java objects through the graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or we hit the time or iteration limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  _compile->print_method(PHASE_EA_AFTER_COMPLETE_CONGRAPH, 4);

  // 3. Adjust the scalar_replaceable state of non-escaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn, reducible_merges);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
    _compile->print_method(PHASE_EA_ADJUST_SCALAR_REPLACEABLE_ITER, 6, n);
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist, reducible_merges);
  }

  // alloc_worklist will be processed in reverse push order.
  // Therefore the reducible Phis will be processed last, and that's what we
  // want because by then the scalarizable inputs of the merge will already
  // have a unique instance type.
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    alloc_worklist.append(n);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that the graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  _compile->print_method(PHASE_EA_AFTER_PROPAGATE_NSR, 4);
  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }

  if (VerifyReduceAllocationMerges) {
    for (uint i = 0; i < reducible_merges.size(); i++) {
      Node* n = reducible_merges.at(i);
      if (!can_reduce_phi(n->as_Phi())) {
        TraceReduceAllocationMerges = true;
        n->dump(2);
        n->dump(-2);
        assert(can_reduce_phi(n->as_Phi()), "Sanity: previous reducible Phi is no longer reducible before SUT.");
      }
    }
  }
#endif

  _compile->print_method(PHASE_EA_AFTER_GRAPH_OPTIMIZATION, 4);

  // 5. Separate the memory graph for scalar replaceable allocations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // 6. Expand flat accesses if the object does not escape. This adds nodes to
  // the graph, so it has to run after split_unique_types. It expands atomic
  // mismatched accesses (encapsulated in LoadFlats and StoreFlats) into
  // non-mismatched accesses, so it is better done before reducing allocation
  // merges.
  if (has_non_escaping_obj) {
    optimize_flat_accesses(sfn_worklist);
  }

  _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES, 4);

  // 7. Reduce allocation merges used as debug information. This is done after
  // split_unique_types because the methods used to create SafePointScalarObjects
  // need to traverse the memory graph to find values for object fields. We also
  // set the scalarized inputs of reducible Phis to null so that the Allocates
  // they point to can later be scalar replaced.
  bool delay = _igvn->delay_transform();
  _igvn->set_delay_transform(true);
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    if (n->outcnt() > 0) {
      if (!reduce_phi_on_safepoints(n->as_Phi())) {
        NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
        C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return false;
      }

      // Now we set the scalar replaceable inputs of ophi to null, which is
      // the last piece that would prevent it from being scalar replaceable.
      reset_scalar_replaceable_entries(n->as_Phi());
    }
  }
  _igvn->set_delay_transform(delay);

  // Annotate safepoints if they have <= ArgEscape objects in their scope, and
  // annotate java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  _compile->print_method(PHASE_EA_AFTER_REDUCE_PHI_ON_SAFEPOINTS, 4);

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}

// Check if it's profitable to reduce the Phi passed as parameter. Returns true
// if at least one scalar replaceable allocation participates in the merge.
bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
  bool found_sr_allocate = false;

  for (uint i = 1; i < ophi->req(); i++) {
    JavaObjectNode* ptn = unique_java_object(ophi->in(i));
    if (ptn != nullptr && ptn->scalar_replaceable()) {
      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();

      // Don't handle arrays.
      if (alloc->Opcode() != Op_Allocate) {
        assert(alloc->Opcode() == Op_AllocateArray, "Unexpected type of allocation.");
        continue;
      }

      if (PhaseMacroExpand::can_eliminate_allocation(_igvn, alloc, nullptr)) {
        found_sr_allocate = true;
      } else {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("%dth input of Phi %d is SR but can't be eliminated.", i, ophi->_idx);)
        ptn->set_scalar_replaceable(false);
      }
    }
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
  return found_sr_allocate;
}

// We can reduce the Cmp if it's a comparison between the Phi and a constant.
// We require the 'other' input to be a constant so that we can move the Cmp
// around safely.
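//
// For example, a shape like "if (phi == null)" - i.e. a CmpP(phi, ConP#null)
// with a single Bool user - is reducible, while "if (phi == otherPtr)" is not,
// because neither input of the Cmp is a constant.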
bool ConnectionGraph::can_reduce_cmp(Node* n, Node* cmp) const {
  assert(cmp->Opcode() == Op_CmpP || cmp->Opcode() == Op_CmpN, "not expected node: %s", cmp->Name());
  Node* left = cmp->in(1);
  Node* right = cmp->in(2);

  return (left == n || right == n) &&
         (left->is_Con() || right->is_Con()) &&
         cmp->outcnt() == 1;
}

// We are going to check if any of the SafePointScalarMerge entries
// in the SafePoint reference the Phi that we are checking.
bool ConnectionGraph::has_been_reduced(PhiNode* n, SafePointNode* sfpt) const {
  JVMState *jvms = sfpt->jvms();

  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* sfpt_in = sfpt->in(i);
    if (sfpt_in->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* smerge = sfpt_in->as_SafePointScalarMerge();
      Node* nsr_ptr = sfpt->in(smerge->merge_pointer_idx(jvms));
      if (nsr_ptr == n) {
        return true;
      }
    }
  }

  return false;
}

// Check if we are able to untangle the merge. The following patterns are
// supported:
//  - Phi -> SafePoints
//  - Phi -> CmpP/N
//  - Phi -> AddP -> Load
//  - Phi -> CastPP -> SafePoints
//  - Phi -> CastPP -> AddP -> Load
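//
// As an illustration (a sketch, not from this compilation unit), Java source
// like the following typically produces the "Phi -> CastPP -> AddP -> Load"
// shape above, assuming 'p' merges a fresh allocation with some other value:
//
//   Point p = cond ? new Point() : cached;
//   int x = p.x;   // CastPP (after null check) -> AddP -> LoadI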
bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);

    if (use->is_SafePoint()) {
      if (use->is_Call() && use->as_Call()->has_non_debug_use(n)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", n->_idx, _invocation);)
        return false;
      } else if (has_been_reduced(n->is_Phi() ? n->as_Phi() : n->as_CastPP()->in(1)->as_Phi(), use->as_SafePoint())) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
        return false;
      }
    } else if (use->is_AddP()) {
      Node* addp = use;
      for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
        Node* use_use = addp->fast_out(j);
        const Type* load_type = _igvn->type(use_use);

        if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", n->_idx, _invocation, use_use->Name());)
          return false;
        } else if (load_type->isa_narrowklass() || load_type->isa_klassptr()) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. [Narrow] Klass Load: %s", n->_idx, _invocation, use_use->Name());)
          return false;
        }
      }
    } else if (nesting > 0) {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), nesting);)
      return false;
    } else if (use->is_CastPP()) {
      const Type* cast_t = _igvn->type(use);
      if (cast_t == nullptr || cast_t->make_ptr()->isa_instptr() == nullptr) {
#ifndef PRODUCT
        if (TraceReduceAllocationMerges) {
          tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
          use->dump();
        }
#endif
        return false;
      }

      bool is_trivial_control = use->in(0) == nullptr || use->in(0) == n->in(0);
      if (!is_trivial_control) {
        // If it's not a trivial control then we check if we can reduce the
        // CmpP/N used by the If controlling the cast.
        if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
          Node* iff = use->in(0)->in(0);
          // We may have an OpaqueConstantBool node between the If and Bool nodes. But we could also have a subclass
          // of IfNode, for example an OuterStripMinedLoopEnd or a Parse Predicate. Bail out in all these cases.
          bool can_reduce = (iff->Opcode() == Op_If) && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp();
          if (can_reduce) {
            Node* iff_cmp = iff->in(1)->in(1);
            int opc = iff_cmp->Opcode();
            can_reduce = (opc == Op_CmpP || opc == Op_CmpN) && can_reduce_cmp(n, iff_cmp);
          }
          if (!can_reduce) {
#ifndef PRODUCT
            if (TraceReduceAllocationMerges) {
              tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
              n->dump(5);
            }
#endif
            return false;
          }
        }
      }

      if (!can_reduce_check_users(use, nesting+1)) {
        return false;
      }
    } else if (use->Opcode() == Op_CmpP || use->Opcode() == Op_CmpN) {
      if (!can_reduce_cmp(n, use)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. CmpP/N %d isn't reducible.", n->_idx, _invocation, use->_idx);)
        return false;
      }
    } else {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", n->_idx, _invocation, use->_idx, use->Name());)
      return false;
    }
  }

  return true;
}

// Returns true if: 1) It's profitable to reduce the merge, and 2) The Phi is
// only used in certain code shapes. Check the comments in
// 'can_reduce_phi_check_inputs' and 'can_reduce_check_users' for more
// details.
bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
  // If there was an error attempting to reduce allocation merges for this
  // method we might have disabled the compilation and be retrying with
  // ReduceAllocationMerges (RAM) disabled.
  if (!_compile->do_reduce_allocation_merges() || ophi->region()->Opcode() != Op_Region) {
    return false;
  }

  const Type* phi_t = _igvn->type(ophi);
  if (phi_t == nullptr ||
      phi_t->make_ptr() == nullptr ||
      phi_t->make_ptr()->isa_aryptr() != nullptr) {
    return false;
  }

  if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_check_users(ophi, /* nesting: */ 0)) {
    return false;
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can reduce Phi %d during invocation %d: ", ophi->_idx, _invocation); })
  return true;
}

// This method will return a CmpP/N that we need to use on the If controlling a
// CastPP after it was split. This method is only called on bases that are
// nullable, therefore we always need a controlling If for the split CastPP.
//
// 'curr_ctrl' is the control of the CastPP that we want to split through phi.
// If the CastPP currently doesn't have a control then the CmpP/N will be
// against the null constant, otherwise it will be against the constant input of
// the existing CmpP/N. It's guaranteed that there will be a CmpP/N in the latter
// case because we have constraints on it and because the CastPP has a control
// input.
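//
// For example (a sketch of just the no-control case): for a base 'B' whose
// CastPP has no control input, the node returned is CmpP(B, ConP#null);
// the caller, specialize_castpp(), then wraps it in a Bool#ne and an If to
// guard the specialized cast.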
Node* ConnectionGraph::specialize_cmp(Node* base, Node* curr_ctrl) {
  const Type* t = base->bottom_type();
  Node* con = nullptr;

  if (curr_ctrl == nullptr || curr_ctrl->is_Region()) {
    con = _igvn->zerocon(t->basic_type());
  } else {
    // can_reduce_check_users() verified graph: true/false -> if -> bool -> cmp
    assert(curr_ctrl->in(0)->Opcode() == Op_If, "unexpected node %s", curr_ctrl->in(0)->Name());
    Node* bol = curr_ctrl->in(0)->in(1);
    assert(bol->is_Bool(), "unexpected node %s", bol->Name());
    Node* curr_cmp = bol->in(1);
    assert(curr_cmp->Opcode() == Op_CmpP || curr_cmp->Opcode() == Op_CmpN, "unexpected node %s", curr_cmp->Name());
    con = curr_cmp->in(1)->is_Con() ? curr_cmp->in(1) : curr_cmp->in(2);
  }

  return CmpNode::make(base, con, t->basic_type());
}

// This method 'specializes' the CastPP passed as parameter to the base passed
// as parameter. Note that the existing CastPP input is a Phi. "Specialize"
// means that the CastPP now will be specific for a given base instead of a Phi.
// An If-Then-Else-Region block is inserted to control the CastPP. The control
// of the CastPP is a copy of the current one (if there is one) or a check
// against null.
//
// Before:
//
//    C1     C2  ... Cn
//     \      |      /
//      \     |     /
//       \    |    /
//        \   |   /
//         \  |  /
//          \ | /
//           \|/
//          Region     B1      B2  ... Bn
//            |          \      |      /
//            |           \     |     /
//            |            \    |    /
//            |             \   |   /
//            |              \  |  /
//            |               \ | /
//            ---------------> Phi
//                              |
//                      X       |
//                      |       |
//                      |       |
//                      ------> CastPP
//
// After (only partial illustration; base = B2, current_control = C2):
//
//                      C2
//                      |
//                      If
//                     / \
//                    /   \
//                   T     F
//                  /\     /
//                 /  \   /
//                /    \ /
//      C1    CastPP   Reg        Cn
//       |              |          |
//       |              |          |
//       |              |          |
//       -------------- | ----------
//                    | | |
//                    Region
//
Node* ConnectionGraph::specialize_castpp(Node* castpp, Node* base, Node* current_control) {
  Node* control_successor  = current_control->unique_ctrl_out();
  Node* cmp                = _igvn->transform(specialize_cmp(base, castpp->in(0)));
  Node* bol                = _igvn->transform(new BoolNode(cmp, BoolTest::ne));
  IfNode* if_ne            = _igvn->transform(new IfNode(current_control, bol, PROB_MIN, COUNT_UNKNOWN))->as_If();
  Node* not_eq_control     = _igvn->transform(new IfTrueNode(if_ne));
  Node* yes_eq_control     = _igvn->transform(new IfFalseNode(if_ne));
  Node* end_region         = _igvn->transform(new RegionNode(3));

  // Insert the new if-else-region block into the graph
  end_region->set_req(1, not_eq_control);
  end_region->set_req(2, yes_eq_control);
  control_successor->replace_edge(current_control, end_region, _igvn);

  _igvn->_worklist.push(current_control);
  _igvn->_worklist.push(control_successor);

  return _igvn->transform(ConstraintCastNode::make_cast_for_type(not_eq_control, base, _igvn->type(castpp), ConstraintCastNode::DependencyType::NonFloatingNonNarrowing, nullptr));
}

Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *> &alloc_worklist) {
  const Type* load_type = _igvn->type(curr_load);
  Node* nsr_value = _igvn->zerocon(load_type->basic_type());
  Node* memory = curr_load->in(MemNode::Memory);

  // The data_phi merging the loads needs to be nullable if
  // we are loading pointers.
  if (load_type->make_ptr() != nullptr) {
    if (load_type->isa_narrowoop()) {
      load_type = load_type->meet(TypeNarrowOop::NULL_PTR);
    } else if (load_type->isa_ptr()) {
      load_type = load_type->meet(TypePtr::NULL_PTR);
    } else {
      assert(false, "Unexpected load ptr type.");
    }
  }

  Node* data_phi = PhiNode::make(region, nsr_value, load_type);

  for (int i = 1; i < bases_for_loads->length(); i++) {
    Node* base = bases_for_loads->at(i);
    Node* cmp_region = nullptr;
    if (base != nullptr) {
      if (base->is_CFG()) { // means that we added a CastPP as a child of this CFG node
        cmp_region = base->unique_ctrl_out_or_null();
        assert(cmp_region != nullptr, "There should be.");
        base = base->find_out_with(Op_CastPP);
      }

      Node* addr = _igvn->transform(AddPNode::make_with_base(base, curr_addp->in(AddPNode::Offset)));
      Node* mem = (memory->is_Phi() && (memory->in(0) == region)) ? memory->in(i) : memory;
      Node* load = curr_load->clone();
      load->set_req(0, nullptr);
      load->set_req(1, mem);
      load->set_req(2, addr);

      if (cmp_region != nullptr) { // see comment on previous if
        Node* intermediate_phi = PhiNode::make(cmp_region, nsr_value, load_type);
        intermediate_phi->set_req(1, _igvn->transform(load));
        load = intermediate_phi;
      }

      data_phi->set_req(i, _igvn->transform(load));
    } else {
      // Just use the default, which is already in the phi
    }
  }

  // Takes care of updating the CG and split_unique_types worklists due
  // to the cloned AddP->Load.
  updates_after_load_split(data_phi, curr_load, alloc_worklist);

  return _igvn->transform(data_phi);
}

// This method only reduces CastPP field loads; SafePoints are handled
// separately. The idea here is basically to clone the CastPP and place copies
// on each input of the Phi, including non-scalar replaceable inputs.
// Experimentation shows that the resulting IR graph is simpler that way than if
// we just split the cast through scalar-replaceable inputs.
//
// The reduction process requires that CastPP's control be one of:
//  1) no control,
//  2) the same region as Ophi, or
//  3) an IfTrue/IfFalse coming from a CmpP/N between Ophi and a constant.
//
// After splitting the CastPP we'll put it under an If-Then-Else-Region control
// flow. If the CastPP originally had an IfTrue/False control input then we'll
// use a similar CmpP/N to control the new If-Then-Else-Region. Otherwise, we'll
// just use a CmpP/N against the null constant.
//
// The If-Then-Else-Region isn't always needed. For instance, if the input to
// the split cast was not nullable (or if it was the null constant) then we
// don't need (and shouldn't) use a CastPP at all.
//
// After the casts are split we'll split the AddP->Loads through the Phi and
// connect them to the just split CastPPs.
//
// Before (CastPP control is same as Phi):
//
//          Region     Allocate   Null    Call
//            |             \      |      /
//            |              \     |     /
//            |               \    |    /
//            |                \   |   /
//            |                 \  |  /
//            |                  \ | /
//            ------------------> Phi            # Oop Phi
//            |                    |
//            |                    |
//            |                    |
//            |                    |
//            ----------------> CastPP
//                                 |
//                               AddP
//                                 |
//                               Load
//
// After (Very much simplified):
//
//                         Call  Null
//                            \  /
//                            CmpP
//                             |
//                           Bool#NE
//                             |
//                             If
//                            / \
//                           T   F
//                          / \ /
//                         /   R
//                     CastPP  |
//                       |     |
//                     AddP    |
//                       |     |
//                     Load    |
//                         \   |   0
//            Allocate      \  |  /
//                \          \ | /
//               AddP         Phi
//                  \         /
//                 Load      /
//                    \  0  /
//                     \ | /
//                      \|/
//                      Phi        # "Field" Phi
//
void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node*> &alloc_worklist) {
  Node* ophi = curr_castpp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  // Identify which base should be used for the AddP->Load later when splitting
  // the CastPP->Loads through ophi. Three kinds of values may be stored in
  // this array, depending on the nullability status of the corresponding input
  // in ophi.
  //
  //  - nullptr:    Meaning that the base is actually the null constant and therefore
  //                we won't try to load from it.
  //
  //  - CFG Node:   Meaning that the base is a CastPP that was specialized for
  //                this input of Ophi. I.e., we added an If->Then->Else-Region
  //                that will 'activate' the CastPP only when the input is not Null.
  //
  //  - Other Node: Meaning that the base is not nullable and therefore we'll try
  //                to load directly from it.
  GrowableArray<Node*> bases_for_loads(ophi->req(), ophi->req(), nullptr);

  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    const Type* base_t = _igvn->type(base);

    if (base_t->maybe_null()) {
      if (base->is_Con()) {
        // Nothing to do as bases_for_loads[i] is already null
      } else {
        Node* new_castpp = specialize_castpp(curr_castpp, base, ophi->in(0)->in(i));
        bases_for_loads.at_put(i, new_castpp->in(0)); // Use the ctrl of the new node just as a flag
      }
    } else {
      bases_for_loads.at_put(i, base);
    }
  }

  // Now let's split the CastPP->Loads through the Phi
  for (int i = curr_castpp->outcnt()-1; i >= 0;) {
    Node* use = curr_castpp->raw_out(i);
    if (use->is_AddP()) {
      for (int j = use->outcnt()-1; j >= 0;) {
        Node* use_use = use->raw_out(j);
        assert(use_use->is_Load(), "Expected this to be a Load node.");

        // We can't make an unconditional load from a nullable input. The
        // 'split_castpp_load_through_phi' method will add an
        // 'If-Then-Else-Region' around nullable bases and only load from them
        // when the input is not null.
        Node* phi = split_castpp_load_through_phi(use, use_use, ophi->in(0), &bases_for_loads, alloc_worklist);
        _igvn->replace_node(use_use, phi);

        --j;
        j = MIN2(j, (int)use->outcnt()-1);
      }

      _igvn->remove_dead_node(use);
    }
    --i;
    i = MIN2(i, (int)curr_castpp->outcnt()-1);
  }
}

// This method splits a given CmpP/N through the Phi used in one of its inputs.
// As a result we convert a comparison with a pointer to a comparison with an
// integer.
// The only requirement is that one of the inputs of the CmpP/N must be a Phi
// while the other must be a constant.
// The splitting process is basically just cloning the CmpP/N above the input
// Phi. However, some (most) of the cloned CmpP/Ns won't be required because we
// can prove the result of the comparison at compile time.
//
// Before:
//
//             in1    in2 ... inN
//              \      |      /
//               \     |     /
//                \    |    /
//                 \   |   /
//                  \  |  /
//                   \ | /
//                    Phi
//                     |   Other
//                     |    /
//                     |   /
//                     |  /
//                    CmpP/N
//
// After:
//
//        in1  Other   in2 Other  inN  Other
//         |    |      |   |      |    |
//         \    |      |   |      |    |
//          \  /       |   /      |    /
//          CmpP/N    CmpP/N     CmpP/N
//          Bool      Bool       Bool
//            \        |        /
//             \       |       /
//              \      |      /
//               \     |     /
//                \    |    /
//                 \   |   /
//                  \  |  /
//                   \ | /
//                    Phi
//                     |
//                     |   Zero
//                     |    /
//                     |   /
//                     |  /
//                     CmpI
//
//
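// A Java-level illustration (a sketch; assume 'p' merges a scalar replaceable
// allocation with some other value):
//
//   Point p = cond ? new Point() : other;
//   boolean isNull = (p == null);
//
// The input coming from 'new Point()' can be folded at compile time because
// optimize_ptr_compare() returns a singleton type for it (a fresh
// non-escaping allocation is never null), so only the 'other' input needs a
// real CmpP after the split.
//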
void ConnectionGraph::reduce_phi_on_cmp(Node* cmp) {
  Node* ophi = cmp->in(1)->is_Con() ? cmp->in(2) : cmp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  Node* other = cmp->in(1)->is_Con() ? cmp->in(1) : cmp->in(2);
  Node* zero = _igvn->intcon(0);
  Node* one = _igvn->intcon(1);
  BoolTest::mask mask = cmp->unique_out()->as_Bool()->_test._test;

  // This Phi will merge the result of the Cmps split through the Phi
  Node* res_phi = PhiNode::make(ophi->in(0), zero, TypeInt::INT);

  for (uint i = 1; i < ophi->req(); i++) {
    Node* ophi_input = ophi->in(i);
    Node* res_phi_input = nullptr;

    const TypeInt* tcmp = optimize_ptr_compare(ophi_input, other);
    if (tcmp->singleton()) {
      if ((mask == BoolTest::mask::eq && tcmp == TypeInt::CC_EQ) ||
          (mask == BoolTest::mask::ne && tcmp == TypeInt::CC_GT)) {
        res_phi_input = one;
      } else {
        res_phi_input = zero;
      }
    } else {
      Node* ncmp = _igvn->transform(cmp->clone());
      ncmp->set_req(1, ophi_input);
      ncmp->set_req(2, other);
      Node* bol = _igvn->transform(new BoolNode(ncmp, mask));
      res_phi_input = bol->as_Bool()->as_int_value(_igvn);
    }

    res_phi->set_req(i, res_phi_input);
  }

  // This CmpI checks whether the output of "res_phi" is TRUE with respect to "mask".
  Node* new_cmp = _igvn->transform(new CmpINode(_igvn->transform(res_phi), (mask == BoolTest::mask::eq) ? one : zero));
  _igvn->replace_node(cmp, new_cmp);
}

// Push the newly created AddP on alloc_worklist and patch
// the connection graph. Note that the changes in the CG below
// won't affect the ES of objects since the new nodes have the
// same status as the old ones.
void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_load, GrowableArray<Node *> &alloc_worklist) {
  assert(data_phi != nullptr, "Output of split_through_phi is null.");
  assert(data_phi != previous_load, "Output of split_through_phi is same as input.");
  assert(data_phi->is_Phi(), "Output of split_through_phi isn't a Phi.");

  if (data_phi == nullptr || !data_phi->is_Phi()) {
    // Make this a retry?
    return;
  }

  Node* previous_addp = previous_load->in(MemNode::Address);
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();
  for (uint i = 1; i < data_phi->req(); i++) {
    Node* new_load = data_phi->in(i);

    if (new_load->is_Phi()) {
      // new_load is currently the "intermediate_phi" from a specialized
      // CastPP.
      new_load = new_load->in(1);
    }

    // "new_load" might actually be a constant, parameter, etc.
    if (new_load->is_Load()) {
      Node* new_addp = new_load->in(MemNode::Address);

      // If new_load is a Load but not from an AddP, it means that the load is folded into another
      // load. And since this load is not from a field, we cannot create a unique type for it.
      // For example:
      //
      //   if (b) {
      //       Holder h1 = new Holder();
      //       Object o = ...;
      //       h1.o = o.getClass();
      //   } else {
      //       Holder h2 = ...;
      //   }
      //   Holder h = Phi(h1, h2);
      //   Object r = h.o;
      //
      // Then, splitting r through the merge point results in:
      //
      //   if (b) {
      //       Holder h1 = new Holder();
      //       Object o = ...;
      //       h1.o = o.getClass();
      //       Object o1 = h1.o;
      //   } else {
      //       Holder h2 = ...;
      //       Object o2 = h2.o;
      //   }
      //   Object r = Phi(o1, o2);
      //
      // In this case, o1 is folded to o.getClass(), which is a Load but not from an AddP; rather, it is
      // from an OopHandle that is loaded from the Klass of o.
      if (!new_addp->is_AddP()) {
        continue;
      }
      Node* base = get_addp_base(new_addp);

      // The base might not be something that we can create a unique
      // type for. If that's the case we are done with that input.
      PointsToNode* jobj_ptn = unique_java_object(base);
      if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
        continue;
      }

      // Push to alloc_worklist since the base has a unique type
      alloc_worklist.append_if_missing(new_addp);

      // Now let's add the node to the connection graph
      _nodes.at_grow(new_addp->_idx, nullptr);
      add_field(new_addp, fn->escape_state(), fn->offset());
      add_base(ptnode_adr(new_addp->_idx)->as_Field(), ptnode_adr(base->_idx));

      // If the load doesn't load an object then it won't be
      // part of the connection graph
      PointsToNode* curr_load_ptn = ptnode_adr(previous_load->_idx);
      if (curr_load_ptn != nullptr) {
        _nodes.at_grow(new_load->_idx, nullptr);
        add_local_var(new_load, curr_load_ptn->escape_state());
        add_edge(ptnode_adr(new_load->_idx), ptnode_adr(new_addp->_idx)->as_Field());
      }
    }
  }
}

void ConnectionGraph::reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *> &alloc_worklist) {
  // We'll pass this to 'split_through_phi' so that it'll do the split even
  // though the load doesn't have a unique instance type.
  bool ignore_missing_instance_id = true;

  // All AddPs are present in the connection graph
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();

  // Iterate over the AddP looking for a Load
  for (int k = previous_addp->outcnt()-1; k >= 0;) {
    Node* previous_load = previous_addp->raw_out(k);
    if (previous_load->is_Load()) {
      Node* data_phi = previous_load->as_Load()->split_through_phi(_igvn, ignore_missing_instance_id);

      // Takes care of updating the CG and split_unique_types worklists due to
      // the cloned AddP->Load.
      updates_after_load_split(data_phi, previous_load, alloc_worklist);

      _igvn->replace_node(previous_load, data_phi);
    }
    --k;
    k = MIN2(k, (int)previous_addp->outcnt()-1);
  }

  // Remove the old AddP from the processing list because it's dead now
  assert(previous_addp->outcnt() == 0, "AddP should be dead now.");
  alloc_worklist.remove_if_existing(previous_addp);
}

// Create a 'selector' Phi based on the inputs of 'ophi'. If index 'i' of the
// selector is:
//    -> a '-1' constant, the i'th input of the original Phi is NSR.
//    -> an 'x' constant >= 0, the i'th input of the original Phi will be SR and
//       the info about the scalarized object will be at index x of ObjectMergeValue::possible_objects
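//
// For example (a sketch): given Ophi = Phi(Region, Alloc1, SomeGlobal, Alloc2)
// where only Alloc1 and Alloc2 are scalar replaceable, the selector would be
// Phi(Region, 0, -1, 1): inputs 1 and 3 map to entries 0 and 1 of
// ObjectMergeValue::possible_objects, while input 2 is NSR.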
PhiNode* ConnectionGraph::create_selector(PhiNode* ophi) const {
  Node* minus_one = _igvn->register_new_node_with_optimizer(ConINode::make(-1));
  Node* selector  = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), minus_one, TypeInt::INT));
  uint number_of_sr_objects = 0;
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      Node* sr_obj_idx = _igvn->register_new_node_with_optimizer(ConINode::make(number_of_sr_objects));
      selector->set_req(i, sr_obj_idx);
      number_of_sr_objects++;
    }
  }

  return selector->as_Phi();
}

// Returns true if the AddP node 'n' has at least one base that is a reducible
// merge. If the base is a CastPP/CheckCastPP then the input of the cast is
// checked instead.
bool ConnectionGraph::has_reducible_merge_base(AddPNode* n, Unique_Node_List &reducible_merges) {
  PointsToNode* ptn = ptnode_adr(n->_idx);
  if (ptn == nullptr || !ptn->is_Field() || ptn->as_Field()->base_count() < 2) {
    return false;
  }

  for (BaseIterator i(ptn->as_Field()); i.has_next(); i.next()) {
    Node* base = i.get()->ideal_node();

    if (reducible_merges.member(base)) {
      return true;
    }

    if (base->is_CastPP() || base->is_CheckCastPP()) {
      base = base->in(1);
      if (reducible_merges.member(base)) {
        return true;
      }
    }
  }

  return false;
}

// This method will call its helper method to reduce SafePoint nodes that use
// 'ophi' or a casted version of 'ophi'. All SafePoint nodes using the same
// "version" of the Phi use the same debug information (regarding the Phi).
// Therefore, we collect all such safepoints and patch them all at once.
//
// The safepoints using the Phi node have to be processed before the safepoints
// of CastPP nodes. The reason is that when reducing a CastPP we add a reference
// (the NSR merge pointer) to the input of the CastPP (i.e., the Phi) in the
// safepoint. If we processed CastPP safepoints before Phi safepoints, the
// algorithm that processes Phi safepoints would mistake the added Phi
// reference for a regular reference.
bool ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi) {
  PhiNode* selector = create_selector(ophi);
  Unique_Node_List safepoints;
  Unique_Node_List casts;

  // Just collect the users of the Phis for later processing
  // in the needed order.
  for (uint i = 0; i < ophi->outcnt(); i++) {
    Node* use = ophi->raw_out(i);
    if (use->is_SafePoint()) {
      safepoints.push(use);
    } else if (use->is_CastPP()) {
      casts.push(use);
    } else {
      assert(use->outcnt() == 0, "Only CastPP & SafePoint users should be left.");
    }
  }

  // Need to process safepoints using the Phi first
  if (!reduce_phi_on_safepoints_helper(ophi, nullptr, selector, safepoints)) {
    return false;
  }

  // Now process CastPP->safepoints
  for (uint i = 0; i < casts.size(); i++) {
    Node* cast = casts.at(i);
    Unique_Node_List cast_sfpts;

    for (DUIterator_Fast jmax, j = cast->fast_outs(jmax); j < jmax; j++) {
      Node* use_use = cast->fast_out(j);
      if (use_use->is_SafePoint()) {
        cast_sfpts.push(use_use);
      } else {
        assert(use_use->outcnt() == 0, "Only SafePoint users should be left.");
      }
    }

    if (!reduce_phi_on_safepoints_helper(ophi, cast, selector, cast_sfpts)) {
      return false;
    }
  }

  return true;
}

1278 // This method will create a SafePointScalarMERGEnode for each SafePoint in
1279 // 'safepoints'. It then will iterate on the inputs of 'ophi' and create a
1280 // SafePointScalarObjectNode for each scalar replaceable input. Each
1281 // SafePointScalarMergeNode may describe multiple scalar replaced objects -
1282 // check detailed description in SafePointScalarMergeNode class header.
1283 bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, Node* selector, Unique_Node_List& safepoints) {
1284   PhaseMacroExpand mexp(*_igvn);
1285   Node* original_sfpt_parent =  cast != nullptr ? cast : ophi;
1286   const TypeOopPtr* merge_t = _igvn->type(original_sfpt_parent)->make_oopptr();
1287 
1288   Node* nsr_merge_pointer = ophi;
1289   if (cast != nullptr) {
1290     const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
1291     nsr_merge_pointer = _igvn->transform(ConstraintCastNode::make_cast_for_type(cast->in(0), cast->in(1), new_t, ConstraintCastNode::DependencyType::FloatingNarrowing, nullptr));
1292   }
1293 
1294   for (uint spi = 0; spi < safepoints.size(); spi++) {
1295     SafePointNode* sfpt = safepoints.at(spi)->as_SafePoint();
1296     JVMState *jvms      = sfpt->jvms();
1297     uint merge_idx      = (sfpt->req() - jvms->scloff());
1298     int debug_start     = jvms->debug_start();
1299 
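         // 'merge_idx' is the position of the merge pointer relative to the
         // start of the scalarized-object section (scloff) of the safepoint's
         // inputs; the merge pointer and the selector are appended below.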
1300     SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx);
1301     smerge->init_req(0, _compile->root());
1302     _igvn->register_new_node_with_optimizer(smerge);
1303 
1304     // The next two inputs are:
1305     //  (1) A copy of the original pointer to NSR objects.
1306     //  (2) A selector, used to decide if we need to rematerialize an object
1307     //      or use the pointer to a NSR object.
1308     // See more details about these fields in the declaration of SafePointScalarMergeNode.
1309     sfpt->add_req(nsr_merge_pointer);
1310     sfpt->add_req(selector);
1311 
1312     for (uint i = 1; i < ophi->req(); i++) {
1313       Node* base = ophi->in(i);
1314       JavaObjectNode* ptn = unique_java_object(base);
1315 
1316       // If the base is not scalar replaceable we don't need to register information about
1317       // it at this time.
1318       if (ptn == nullptr || !ptn->scalar_replaceable()) {
1319         continue;
1320       }
1321 
1322       AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1323       Unique_Node_List value_worklist;
1324 #ifdef ASSERT
1325       const Type* res_type = alloc->result_cast()->bottom_type();
1326       if (res_type->is_inlinetypeptr() && !Compile::current()->has_circular_inline_type()) {
1327         PhiNode* phi = ophi->as_Phi();
1328         assert(!phi->can_push_inline_types_down(_igvn), "missed earlier scalarization opportunity");
1329       }
1330 #endif
1331       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
1332       if (sobj == nullptr) {
1333         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1334         return false;
1335       }
1336 
1337       // Now make a pass over the debug information replacing any references
1338       // to the allocated object with "sobj"
1339       Node* ccpp = alloc->result_cast();
1340       sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1341 
1342       // Register the scalarized object as a candidate for reallocation
1343       smerge->add_req(sobj);
1344 
1345       // Scalarize inline types that were added to the safepoint.
1346       // Don't allow linking a constant oop (if available) for flat array elements
1347       // because Deoptimization::reassign_flat_array_elements needs field values.
1348       const bool allow_oop = !merge_t->is_flat();
1349       for (uint j = 0; j < value_worklist.size(); ++j) {
1350         InlineTypeNode* vt = value_worklist.at(j)->as_InlineType();
1351         vt->make_scalar_in_safepoints(_igvn, allow_oop);
1352       }
1353     }
1354 
1355     // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
1356     sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1357 
1358     // The call to 'replace_edges_in_range' above might have removed the
1359     // reference to ophi that we need at _merge_pointer_idx. The line below makes
1360     // sure the reference is maintained.
1361     sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1362     _igvn->_worklist.push(sfpt);
1363   }
1364 
1365   return true;
1366 }
1367 
1368 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_worklist) {
1369   bool delay = _igvn->delay_transform();
1370   _igvn->set_delay_transform(true);
1371   _igvn->hash_delete(ophi);
1372 
1373   // Copy all users first because some will be removed and others won't.
1374   // Ophi may also acquire new users as part of Cast reduction.
1375   // CastPPs also need to be processed before CmpPs.
1376   Unique_Node_List castpps;
1377   Unique_Node_List others;
1378   for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
1379     Node* use = ophi->fast_out(i);
1380 
1381     if (use->is_CastPP()) {
1382       castpps.push(use);
1383     } else if (use->is_AddP() || use->is_Cmp()) {
1384       others.push(use);
1385     } else {
1386       // Safepoints to be processed later; other users aren't expected here
1387       assert(use->is_SafePoint(), "Unexpected user of reducible Phi %d -> %d:%s:%d", ophi->_idx, use->_idx, use->Name(), use->outcnt());
1388     }
1389   }
1390 
1391   _compile->print_method(PHASE_EA_BEFORE_PHI_REDUCTION, 5, ophi);
1392 
1393   // CastPPs need to be processed before Cmps because during the process of
1394   // splitting CastPPs we make reference to the inputs of the Cmp that is used
1395   // by the If controlling the CastPP.
1396   for (uint i = 0; i < castpps.size(); i++) {
1397     reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist);
1398     _compile->print_method(PHASE_EA_AFTER_PHI_CASTPP_REDUCTION, 6, castpps.at(i));
1399   }
1400 
1401   for (uint i = 0; i < others.size(); i++) {
1402     Node* use = others.at(i);
1403 
1404     if (use->is_AddP()) {
1405       reduce_phi_on_field_access(use, alloc_worklist);
1406       _compile->print_method(PHASE_EA_AFTER_PHI_ADDP_REDUCTION, 6, use);
1407     } else if (use->is_Cmp()) {
1408       reduce_phi_on_cmp(use);
1409       _compile->print_method(PHASE_EA_AFTER_PHI_CMP_REDUCTION, 6, use);
1410     }
1411   }
1412 
1413   _igvn->set_delay_transform(delay);
1414 }
1415 
1416 void ConnectionGraph::reset_scalar_replaceable_entries(PhiNode* ophi) {
1417   Node* null_ptr            = _igvn->makecon(TypePtr::NULL_PTR);
1418   const TypeOopPtr* merge_t = _igvn->type(ophi)->make_oopptr();
1419   const Type* new_t         = merge_t->meet(TypePtr::NULL_PTR);
1420   Node* new_phi             = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), null_ptr, new_t));
1421 
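       // Inputs feeding from scalar replaceable allocations are replaced by
       // null (those objects can be rematerialized from debug information at
       // deoptimization); NSR inputs keep the original value.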
1422   for (uint i = 1; i < ophi->req(); i++) {
1423     Node* base          = ophi->in(i);
1424     JavaObjectNode* ptn = unique_java_object(base);
1425 
1426     if (ptn != nullptr && ptn->scalar_replaceable()) {
1427       new_phi->set_req(i, null_ptr);
1428     } else {
1429       new_phi->set_req(i, ophi->in(i));
1430     }
1431   }
1432 
1433   for (int i = ophi->outcnt()-1; i >= 0;) {
1434     Node* out = ophi->raw_out(i);
1435 
1436     if (out->is_ConstraintCast()) {
1437       const Type* out_t = _igvn->type(out)->make_ptr();
1438       const Type* out_new_t = out_t->meet(TypePtr::NULL_PTR);
1439       bool change = out_new_t != out_t;
1440 
1441       for (int j = out->outcnt()-1; change && j >= 0; --j) {
1442         Node* out2 = out->raw_out(j);
1443         if (!out2->is_SafePoint()) {
1444           change = false;
1445           break;
1446         }
1447       }
1448 
1449       if (change) {
1450         Node* new_cast = ConstraintCastNode::make_cast_for_type(out->in(0), out->in(1), out_new_t, ConstraintCastNode::DependencyType::NonFloatingNarrowing, nullptr);
1451         _igvn->replace_node(out, new_cast);
1452         _igvn->register_new_node_with_optimizer(new_cast);
1453       }
1454     }
1455 
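         // 'replace_node' above may remove more than one user of 'ophi' at
         // once, so clamp the cursor to the current (possibly smaller)
         // output count.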
1456     --i;
1457     i = MIN2(i, (int)ophi->outcnt()-1);
1458   }
1459 
1460   _igvn->replace_node(ophi, new_phi);
1461 }
1462 
1463 void ConnectionGraph::verify_ram_nodes(Compile* C, Node* root) {
1464   if (!C->do_reduce_allocation_merges()) return;
1465 
1466   Unique_Node_List ideal_nodes;
1467   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
1468   ideal_nodes.push(root);
1469 
1470   for (uint next = 0; next < ideal_nodes.size(); ++next) {
1471     Node* n = ideal_nodes.at(next);
1472 
1473     if (n->is_SafePointScalarMerge()) {
1474       SafePointScalarMergeNode* merge = n->as_SafePointScalarMerge();
1475 
1476       // Validate inputs of merge
1477       for (uint i = 1; i < merge->req(); i++) {
1478         if (merge->in(i) != nullptr && !merge->in(i)->is_top() && !merge->in(i)->is_SafePointScalarObject()) {
1479           assert(false, "SafePointScalarMerge inputs should be null/top or SafePointScalarObject.");
1480           C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1481         }
1482       }
1483 
1484       // Validate users of merge
1485       for (DUIterator_Fast imax, i = merge->fast_outs(imax); i < imax; i++) {
1486         Node* sfpt = merge->fast_out(i);
1487         if (sfpt->is_SafePoint()) {
1488           int merge_idx = merge->merge_pointer_idx(sfpt->as_SafePoint()->jvms());
1489 
1490           if (sfpt->in(merge_idx) != nullptr && sfpt->in(merge_idx)->is_SafePointScalarMerge()) {
1491             assert(false, "SafePointScalarMerge nodes can't be nested.");
1492             C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1493           }
1494         } else {
1495           assert(false, "Only safepoints can use SafePointScalarMerge nodes.");
1496           C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1497         }
1498       }
1499     }
1500 
1501     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1502       Node* m = n->fast_out(i);
1503       ideal_nodes.push(m);
1504     }
1505   }
1506 }
1507 
1508 // Returns true if there is an object in the scope of sfn that does not escape globally.
1509 bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
1510   Compile* C = _compile;
1511   for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) {
1512     if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
1513         DeoptimizeObjectsALot) {
1514       // Jvmti agents can access locals. Must provide info about local objects at runtime.
1515       int num_locs = jvms->loc_size();
1516       for (int idx = 0; idx < num_locs; idx++) {
1517         Node* l = sfn->local(jvms, idx);
1518         if (not_global_escape(l)) {
1519           return true;
1520         }
1521       }
1522     }
1523     if (C->env()->jvmti_can_get_owned_monitor_info() ||
1524         C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
1525       // Jvmti agents can read monitors. Must provide info about locked objects at runtime.
1526       int num_mon = jvms->nof_monitors();
1527       for (int idx = 0; idx < num_mon; idx++) {
1528         Node* m = sfn->monitor_obj(jvms, idx);
1529         if (m != nullptr && not_global_escape(m)) {
1530           return true;
1531         }
1532       }
1533     }
1534   }
1535   return false;
1536 }
1537 
1538 // Returns true if at least one of the arguments to the call is an object
1539 // that does not escape globally.
1540 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1541   if (call->method() != nullptr) {
1542     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1543     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1544       Node* p = call->in(idx);
1545       if (not_global_escape(p)) {
1546         return true;
1547       }
1548     }
1549   } else {
1550     const char* name = call->as_CallStaticJava()->_name;
1551     assert(name != nullptr, "no name");
1552     // no arg escapes through uncommon traps
1553     if (strcmp(name, "uncommon_trap") != 0) {
1554       // process_call_arguments() assumes that all arguments escape globally
1555       const TypeTuple* d = call->tf()->domain_sig();
1556       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1557         const Type* at = d->field_at(i);
1558         if (at->isa_oopptr() != nullptr) {
1559           return true;
1560         }
1561       }
1562     }
1563   }
1564   return false;
1565 }
1566 
1567 
1569 // Utility function for nodes that load an object
1570 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1571   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1572   // ThreadLocal has RawPtr type.
1573   const Type* t = _igvn->type(n);
1574   if (t->make_ptr() != nullptr) {
1575     Node* adr = n->in(MemNode::Address);
1576 #ifdef ASSERT
1577     if (!adr->is_AddP()) {
1578       assert(_igvn->type(adr)->isa_rawptr(), "sanity");
1579     } else {
1580       assert((ptnode_adr(adr->_idx) == nullptr ||
1581               ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
1582     }
1583 #endif
1584     add_local_var_and_edge(n, PointsToNode::NoEscape,
1585                            adr, delayed_worklist);
1586   }
1587 }
1588 
1589 void ConnectionGraph::add_proj(Node* n, Unique_Node_List* delayed_worklist) {
1590   if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && n->in(0)->as_Call()->returns_pointer()) {
1591     add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1592   } else if (n->in(0)->is_LoadFlat()) {
1593     // Treat LoadFlat outputs similarly to a call's return value
1594     add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1595   } else if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() && n->bottom_type()->isa_ptr()) {
1596     CallNode* call = n->in(0)->as_Call();
1597     assert(call->tf()->returns_inline_type_as_fields(), "");
1598     if (n->as_Proj()->_con == TypeFunc::Parms || !returns_an_argument(call)) {
1599       // Either:
1600       // - the call does not return one of its arguments, or
1601       // - this is the returned buffer for a scalarized argument that is returned.
1602       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1603     } else {
1604       add_local_var(n, PointsToNode::NoEscape);
1605     }
1606   }
1607 }
1608 
1609 // Populate Connection Graph with PointsTo nodes and create simple
1610 // connection graph edges.
1611 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1612   assert(!_verify, "this method should not be called for verification");
1613   PhaseGVN* igvn = _igvn;
1614   uint n_idx = n->_idx;
1615   PointsToNode* n_ptn = ptnode_adr(n_idx);
1616   if (n_ptn != nullptr) {
1617     return; // No need to redefine PointsTo node during first iteration.
1618   }
1619   int opcode = n->Opcode();
1620   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
1621   if (gc_handled) {
1622     return; // Ignore node if already handled by GC.
1623   }
1624 
1625   if (n->is_Call()) {
1626     // Arguments to allocation and locking don't escape.
1627     if (n->is_AbstractLock()) {
1628       // Put Lock and Unlock nodes on IGVN worklist to process them during
1629       // first IGVN optimization when escape information is still available.
1630       record_for_optimizer(n);
1631     } else if (n->is_Allocate()) {
1632       add_call_node(n->as_Call());
1633       record_for_optimizer(n);
1634     } else {
1635       if (n->is_CallStaticJava()) {
1636         const char* name = n->as_CallStaticJava()->_name;
1637         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1638           return; // Skip uncommon traps
1639         }
1640       }
1641       // Don't mark as processed since call's arguments have to be processed.
1642       delayed_worklist->push(n);
1643       // Check if a call returns an object.
1644       if ((n->as_Call()->returns_pointer() &&
1645            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1646           (n->is_CallStaticJava() &&
1647            n->as_CallStaticJava()->is_boxing_method())) {
1648         add_call_node(n->as_Call());
1649       } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
1650         bool returns_oop = false;
1651         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
1652           ProjNode* pn = n->fast_out(i)->as_Proj();
1653           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
1654             returns_oop = true;
1655           }
1656         }
1657         if (returns_oop) {
1658           add_call_node(n->as_Call());
1659         }
1660       }
1661     }
1662     return;
1663   }
1664   // Put this check here to process call arguments since some call nodes
1665   // point to phantom_obj.
1666   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1667     return; // Skip predefined nodes.
1668   }
1669   switch (opcode) {
1670     case Op_AddP: {
1671       Node* base = get_addp_base(n);
1672       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1673       // Field nodes are created for all field types. They are used in
1674       // adjust_scalar_replaceable_state() and split_unique_types().
1675       // Note, non-oop fields will have only base edges in Connection
1676       // Graph because such fields are not used for oop loads and stores.
1677       int offset = address_offset(n, igvn);
1678       add_field(n, PointsToNode::NoEscape, offset);
1679       if (ptn_base == nullptr) {
1680         delayed_worklist->push(n); // Process it later.
1681       } else {
1682         n_ptn = ptnode_adr(n_idx);
1683         add_base(n_ptn->as_Field(), ptn_base);
1684       }
1685       break;
1686     }
1687     case Op_CastX2P:
1688     case Op_CastI2N: {
1689       map_ideal_node(n, phantom_obj);
1690       break;
1691     }
1692     case Op_InlineType:
1693     case Op_CastPP:
1694     case Op_CheckCastPP:
1695     case Op_EncodeP:
1696     case Op_DecodeN:
1697     case Op_EncodePKlass:
1698     case Op_DecodeNKlass: {
1699       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1700       break;
1701     }
1702     case Op_CMoveP: {
1703       add_local_var(n, PointsToNode::NoEscape);
1704       // Do not add edges during the first iteration because some
1705       // inputs may not be defined yet.
1706       delayed_worklist->push(n);
1707       break;
1708     }
1709     case Op_ConP:
1710     case Op_ConN:
1711     case Op_ConNKlass: {
1712       // assume all oop constants globally escape except for null
1713       PointsToNode::EscapeState es;
1714       const Type* t = igvn->type(n);
1715       if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
1716         es = PointsToNode::NoEscape;
1717       } else {
1718         es = PointsToNode::GlobalEscape;
1719       }
1720       PointsToNode* ptn_con = add_java_object(n, es);
1721       set_not_scalar_replaceable(ptn_con NOT_PRODUCT(COMMA "Constant pointer"));
1722       break;
1723     }
1724     case Op_CreateEx: {
1725       // assume that all exception objects globally escape
1726       map_ideal_node(n, phantom_obj);
1727       break;
1728     }
1729     case Op_LoadKlass:
1730     case Op_LoadNKlass: {
1731       // Unknown class is loaded
1732       map_ideal_node(n, phantom_obj);
1733       break;
1734     }
1735     case Op_LoadP:
1736     case Op_LoadN: {
1737       add_objload_to_connection_graph(n, delayed_worklist);
1738       break;
1739     }
1740     case Op_Parm: {
1741       map_ideal_node(n, phantom_obj);
1742       break;
1743     }
1744     case Op_PartialSubtypeCheck: {
1745       // Produces Null or notNull and is used only in CmpP so
1746       // phantom_obj could be used.
1747       map_ideal_node(n, phantom_obj); // Result is unknown
1748       break;
1749     }
1750     case Op_Phi: {
1751       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1752       // ThreadLocal has RawPtr type.
1753       const Type* t = n->as_Phi()->type();
1754       if (t->make_ptr() != nullptr) {
1755         add_local_var(n, PointsToNode::NoEscape);
1756         // Do not add edges during the first iteration because some
1757         // inputs may not be defined yet.
1758         delayed_worklist->push(n);
1759       }
1760       break;
1761     }
1762     case Op_LoadFlat:
1763       // Treat LoadFlat similarly to an unknown call that takes no arguments and produces its results
1764       map_ideal_node(n, phantom_obj);
1765       break;
1766     case Op_StoreFlat:
1767       // Treat StoreFlat similarly to a call that lets the stored flattened fields escape
1768       delayed_worklist->push(n);
1769       break;
1770     case Op_Proj: {
1771       // we are only interested in the oop result projection from a call
1772       add_proj(n, delayed_worklist);
1773       break;
1774     }
1775     case Op_Rethrow: // Exception object escapes
1776     case Op_Return: {
1777       if (n->req() > TypeFunc::Parms &&
1778           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1779         // Treat Return value as LocalVar with GlobalEscape escape state.
1780         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1781       }
1782       break;
1783     }
1784     case Op_CompareAndExchangeP:
1785     case Op_CompareAndExchangeN:
1786     case Op_GetAndSetP:
1787     case Op_GetAndSetN: {
1788       add_objload_to_connection_graph(n, delayed_worklist);
1789       // fall-through
1790     }
1791     case Op_StoreP:
1792     case Op_StoreN:
1793     case Op_StoreNKlass:
1794     case Op_WeakCompareAndSwapP:
1795     case Op_WeakCompareAndSwapN:
1796     case Op_CompareAndSwapP:
1797     case Op_CompareAndSwapN: {
1798       add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
1799       break;
1800     }
1801     case Op_AryEq:
1802     case Op_CountPositives:
1803     case Op_StrComp:
1804     case Op_StrEquals:
1805     case Op_StrIndexOf:
1806     case Op_StrIndexOfChar:
1807     case Op_StrInflatedCopy:
1808     case Op_StrCompressedCopy:
1809     case Op_VectorizedHashCode:
1810     case Op_EncodeISOArray: {
1811       add_local_var(n, PointsToNode::ArgEscape);
1812       delayed_worklist->push(n); // Process it later.
1813       break;
1814     }
1815     case Op_ThreadLocal: {
1816       PointsToNode* ptn_thr = add_java_object(n, PointsToNode::ArgEscape);
1817       set_not_scalar_replaceable(ptn_thr NOT_PRODUCT(COMMA "ThreadLocal pointer"));
1818       break;
1819     }
1820     case Op_Blackhole: {
1821       // All blackhole pointer arguments are globally escaping.
1822       // Only do this if there is at least one pointer argument.
1823       // Do not add edges during the first iteration because some inputs
1824       // may not be defined yet; defer to the final step.
1825       for (uint i = 0; i < n->req(); i++) {
1826         Node* in = n->in(i);
1827         if (in != nullptr) {
1828           const Type* at = _igvn->type(in);
1829           if (!at->isa_ptr()) continue;
1830 
1831           add_local_var(n, PointsToNode::GlobalEscape);
1832           delayed_worklist->push(n);
1833           break;
1834         }
1835       }
1836       break;
1837     }
1838     default:
1839       ; // Do nothing for nodes not related to EA.
1840   }
1841   return;
1842 }
1843 
1844 // Add final simple edges to graph.
1845 void ConnectionGraph::add_final_edges(Node *n) {
1846   PointsToNode* n_ptn = ptnode_adr(n->_idx);
1847 #ifdef ASSERT
1848   if (_verify && n_ptn->is_JavaObject())
1849     return; // This method does not change graph for JavaObject.
1850 #endif
1851 
1852   if (n->is_Call()) {
1853     process_call_arguments(n->as_Call());
1854     return;
1855   }
1856   assert(n->is_Store() || n->is_LoadStore() || n->is_StoreFlat() ||
1857          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1858          "node should be registered already");
1859   int opcode = n->Opcode();
1860   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1861   if (gc_handled) {
1862     return; // Ignore node if already handled by GC.
1863   }
1864   switch (opcode) {
1865     case Op_AddP: {
1866       Node* base = get_addp_base(n);
1867       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1868       assert(ptn_base != nullptr, "field's base should be registered");
1869       add_base(n_ptn->as_Field(), ptn_base);
1870       break;
1871     }
1872     case Op_InlineType:
1873     case Op_CastPP:
1874     case Op_CheckCastPP:
1875     case Op_EncodeP:
1876     case Op_DecodeN:
1877     case Op_EncodePKlass:
1878     case Op_DecodeNKlass: {
1879       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1880       break;
1881     }
1882     case Op_CMoveP: {
1883       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1884         Node* in = n->in(i);
1885         if (in == nullptr) {
1886           continue;  // ignore null
1887         }
1888         Node* uncast_in = in->uncast();
1889         if (uncast_in->is_top() || uncast_in == n) {
1890           continue;  // ignore top or inputs which go back this node
1891         }
1892         PointsToNode* ptn = ptnode_adr(in->_idx);
1893         assert(ptn != nullptr, "node should be registered");
1894         add_edge(n_ptn, ptn);
1895       }
1896       break;
1897     }
1898     case Op_LoadP:
1899     case Op_LoadN: {
1900       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1901       // ThreadLocal has RawPtr type.
1902       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1903       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1904       break;
1905     }
1906     case Op_Phi: {
1907       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1908       // ThreadLocal has RawPtr type.
1909       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1910       for (uint i = 1; i < n->req(); i++) {
1911         Node* in = n->in(i);
1912         if (in == nullptr) {
1913           continue;  // ignore null
1914         }
1915         Node* uncast_in = in->uncast();
1916         if (uncast_in->is_top() || uncast_in == n) {
1917           continue;  // ignore top or inputs which go back this node
1918         }
1919         PointsToNode* ptn = ptnode_adr(in->_idx);
1920         assert(ptn != nullptr, "node should be registered");
1921         add_edge(n_ptn, ptn);
1922       }
1923       break;
1924     }
1925     case Op_StoreFlat: {
1926       // StoreFlat globally escapes its stored flattened fields
1927       InlineTypeNode* value = n->as_StoreFlat()->value();
1928       ciInlineKlass* vk = _igvn->type(value)->inline_klass();
1929       for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
1930         ciField* field = vk->nonstatic_field_at(i);
1931         if (field->type()->is_primitive_type()) {
1932           continue;
1933         }
1934 
1935         Node* field_value = value->field_value_by_offset(field->offset_in_bytes(), true);
1936         PointsToNode* field_value_ptn = ptnode_adr(field_value->_idx);
1937         set_escape_state(field_value_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "store into a flat field"));
1938       }
1939       break;
1940     }
1941     case Op_Proj: {
1942       add_proj(n, nullptr);
1943       break;
1944     }
1945     case Op_Rethrow: // Exception object escapes
1946     case Op_Return: {
1947       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1948              "Unexpected node type");
1949       // Treat Return value as LocalVar with GlobalEscape escape state.
1950       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1951       break;
1952     }
1953     case Op_CompareAndExchangeP:
1954     case Op_CompareAndExchangeN:
1955     case Op_GetAndSetP:
1956     case Op_GetAndSetN:{
1957       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1958       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1959       // fall-through
1960     }
1961     case Op_CompareAndSwapP:
1962     case Op_CompareAndSwapN:
1963     case Op_WeakCompareAndSwapP:
1964     case Op_WeakCompareAndSwapN:
1965     case Op_StoreP:
1966     case Op_StoreN:
1967     case Op_StoreNKlass:{
1968       add_final_edges_unsafe_access(n, opcode);
1969       break;
1970     }
1971     case Op_VectorizedHashCode:
1972     case Op_AryEq:
1973     case Op_CountPositives:
1974     case Op_StrComp:
1975     case Op_StrEquals:
1976     case Op_StrIndexOf:
1977     case Op_StrIndexOfChar:
1978     case Op_StrInflatedCopy:
1979     case Op_StrCompressedCopy:
1980     case Op_EncodeISOArray: {
1981       // char[]/byte[] arrays passed to string intrinsic do not escape but
1982       // they are not scalar replaceable. Adjust escape state for them.
1983       // Start from in(2) edge since in(1) is memory edge.
1984       for (uint i = 2; i < n->req(); i++) {
1985         Node* adr = n->in(i);
1986         const Type* at = _igvn->type(adr);
1987         if (!adr->is_top() && at->isa_ptr()) {
1988           assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
1989                  at->isa_ptr() != nullptr, "expecting a pointer");
1990           if (adr->is_AddP()) {
1991             adr = get_addp_base(adr);
1992           }
1993           PointsToNode* ptn = ptnode_adr(adr->_idx);
1994           assert(ptn != nullptr, "node should be registered");
1995           add_edge(n_ptn, ptn);
1996         }
1997       }
1998       break;
1999     }
2000     case Op_Blackhole: {
2001       // All blackhole pointer arguments are globally escaping.
2002       for (uint i = 0; i < n->req(); i++) {
2003         Node* in = n->in(i);
2004         if (in != nullptr) {
2005           const Type* at = _igvn->type(in);
2006           if (!at->isa_ptr()) continue;
2007 
2008           if (in->is_AddP()) {
2009             in = get_addp_base(in);
2010           }
2011 
2012           PointsToNode* ptn = ptnode_adr(in->_idx);
2013           assert(ptn != nullptr, "should be defined already");
2014           set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
2015           add_edge(n_ptn, ptn);
2016         }
2017       }
2018       break;
2019     }
2020     default: {
2021       // This method should be called only for EA-specific nodes which may
2022       // have missed some edges when they were created.
2023 #ifdef ASSERT
2024       n->dump(1);
2025 #endif
2026       guarantee(false, "unknown node");
2027     }
2028   }
2029   return;
2030 }
2031 
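     // Record an unsafe or raw-pointer memory access in the connection graph.
     // Edge creation is deferred to add_final_edges_unsafe_access().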
2032 void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
2033   Node* adr = n->in(MemNode::Address);
2034   const Type* adr_type = _igvn->type(adr);
2035   adr_type = adr_type->make_ptr();
2036   if (adr_type == nullptr) {
2037     return; // skip dead nodes
2038   }
2039   if (adr_type->isa_oopptr()
2040       || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
2041           && adr_type == TypeRawPtr::NOTNULL
2042           && is_captured_store_address(adr))) {
2043     delayed_worklist->push(n); // Process it later.
2044 #ifdef ASSERT
2045     assert(adr->is_AddP(), "expecting an AddP");
2046     if (adr_type == TypeRawPtr::NOTNULL) {
2047       // Verify a raw address for a store captured by Initialize node.
2048       int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2049       assert(offs != Type::OffsetBot, "offset must be a constant");
2050     }
2051 #endif
2052   } else {
2053     // Ignore copying the displaced header to the BoxNode (OSR compilation).
2054     if (adr->is_BoxLock()) {
2055       return;
2056     }
2057     // Stored value escapes in unsafe access.
2058     if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
2059       delayed_worklist->push(n); // Process unsafe access later.
2060       return;
2061     }
2062 #ifdef ASSERT
2063     n->dump(1);
2064     assert(false, "not unsafe");
2065 #endif
2066   }
2067 }
2068 
2069 bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
2070   Node* adr = n->in(MemNode::Address);
2071   const Type *adr_type = _igvn->type(adr);
2072   adr_type = adr_type->make_ptr();
2073 #ifdef ASSERT
2074   if (adr_type == nullptr) {
2075     n->dump(1);
2076     assert(adr_type != nullptr, "dead node should not be on list");
2077     return true;
2078   }
2079 #endif
2080 
2081   if (adr_type->isa_oopptr()
2082       || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
2083            && adr_type == TypeRawPtr::NOTNULL
2084            && is_captured_store_address(adr))) {
2085     // Point Address to Value
2086     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
2087     assert(adr_ptn != nullptr &&
2088            adr_ptn->as_Field()->is_oop(), "node should be registered");
2089     Node* val = n->in(MemNode::ValueIn);
2090     PointsToNode* ptn = ptnode_adr(val->_idx);
2091     assert(ptn != nullptr, "node should be registered");
2092     add_edge(adr_ptn, ptn);
2093     return true;
2094   } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
2095     // Stored value escapes in unsafe access.
2096     Node* val = n->in(MemNode::ValueIn);
2097     PointsToNode* ptn = ptnode_adr(val->_idx);
2098     assert(ptn != nullptr, "node should be registered");
2099     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
2100     // Add edge to object for unsafe access with offset.
2101     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
2102     assert(adr_ptn != nullptr, "node should be registered");
2103     if (adr_ptn->is_Field()) {
2104       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
2105       add_edge(adr_ptn, ptn);
2106     }
2107     return true;
2108   }
2109 #ifdef ASSERT
2110   n->dump(1);
2111   assert(false, "not unsafe");
2112 #endif
2113   return false;
2114 }
2115 
2116 // Iterate over the domains for the scalarized and non-scalarized calling conventions: only move to the next element
2117 // in the non-scalarized calling convention once all elements of the scalarized calling convention for that parameter
2118 // have been iterated over. So (ignoring hidden arguments such as the null marker) iterating over:
2119 // value class MyValue {
2120 //   int f1;
2121 //   float f2;
2122 // }
2123 // void m(Object o, MyValue v, int i)
2124 // produces the pairs:
2125 // (Object, Object), (MyValue, int), (MyValue, float), (int, int)
2126 class DomainIterator : public StackObj {
2127 private:
2128   const TypeTuple* _domain;
2129   const TypeTuple* _domain_cc;
2130   const GrowableArray<SigEntry>* _sig_cc;
2131 
2132   uint _i_domain;
2133   uint _i_domain_cc;
2134   int _i_sig_cc;
2135   uint _depth;
2136   uint _first_field_pos;
2137   const bool _is_static;
2138 
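       // Advance '_i_sig_cc' past the bookkeeping entries of the scalarized
       // signature (T_METADATA entries that open a scalarized value, T_VOID
       // entries that close one, and the root value's buffer oop and null
       // marker), updating '_depth', '_i_domain', '_i_domain_cc' and
       // '_first_field_pos' along the way, until it points at a real field
       // entry.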
2139   void next_helper() {
2140     if (_sig_cc == nullptr) {
2141       return;
2142     }
2143     BasicType prev_bt = _i_sig_cc > 0 ? _sig_cc->at(_i_sig_cc-1)._bt : T_ILLEGAL;
2144     BasicType prev_prev_bt = _i_sig_cc > 1 ? _sig_cc->at(_i_sig_cc-2)._bt : T_ILLEGAL;
2145     while (_i_sig_cc < _sig_cc->length()) {
2146       BasicType bt = _sig_cc->at(_i_sig_cc)._bt;
2147       assert(bt != T_VOID || _sig_cc->at(_i_sig_cc-1)._bt == prev_bt, "incorrect prev bt");
2148       if (bt == T_METADATA) {
2149         if (_depth == 0) {
2150           _first_field_pos = _i_domain_cc;
2151         }
2152         _depth++;
2153       } else if (bt == T_VOID && (prev_bt != T_LONG && prev_bt != T_DOUBLE)) {
2154         _depth--;
2155         if (_depth == 0) {
2156           _i_domain++;
2157         }
2158       } else if (bt == T_OBJECT && prev_bt == T_METADATA && (_is_static || _i_domain > 0) && _sig_cc->at(_i_sig_cc)._offset == 0) {
2159         assert(_sig_cc->at(_i_sig_cc)._vt_oop, "buffer expected right after T_METADATA");
2160         assert(_depth == 1, "only root value has buffer");
2161         _i_domain_cc++;
2162         _first_field_pos = _i_domain_cc;
2163       } else if (bt == T_BOOLEAN && prev_prev_bt == T_METADATA && (_is_static || _i_domain > 0) && _sig_cc->at(_i_sig_cc)._offset == -1) {
2164         assert(_sig_cc->at(_i_sig_cc)._null_marker, "null marker expected right after T_METADATA");
2165         assert(_depth == 1, "only root value null marker");
2166         _i_domain_cc++;
2167         _first_field_pos = _i_domain_cc;
2168       } else {
2169         return;
2170       }
2171       prev_prev_bt = prev_bt;
2172       prev_bt = bt;
2173       _i_sig_cc++;
2174     }
2175   }
2176 
2177 public:
2178 
2179   DomainIterator(CallJavaNode* call) :
2180     _domain(call->tf()->domain_sig()),
2181     _domain_cc(call->tf()->domain_cc()),
2182     _sig_cc(call->method()->get_sig_cc()),
2183     _i_domain(TypeFunc::Parms),
2184     _i_domain_cc(TypeFunc::Parms),
2185     _i_sig_cc(0),
2186     _depth(0),
2187     _first_field_pos(0),
2188     _is_static(call->method()->is_static()) {
2189     next_helper();
2190   }
2191 
2192   bool has_next() const {
2193     assert(_sig_cc == nullptr || (_i_sig_cc < _sig_cc->length()) == (_i_domain < _domain->cnt()), "should reach end in sync");
2194     assert((_i_domain < _domain->cnt()) == (_i_domain_cc < _domain_cc->cnt()), "should reach end in sync");
2195     return _i_domain < _domain->cnt();
2196   }
2197 
2198   void next() {
2199     assert(_depth != 0 || _domain->field_at(_i_domain) == _domain_cc->field_at(_i_domain_cc), "should produce the same non-scalarized elements");
2200     _i_sig_cc++;
2201     if (_depth == 0) {
2202       _i_domain++;
2203     }
2204     _i_domain_cc++;
2205     next_helper();
2206   }
2207 
2208   uint i_domain() const {
2209     return _i_domain;
2210   }
2211 
2212   uint i_domain_cc() const {
2213     return _i_domain_cc;
2214   }
2215 
2216   const Type* current_domain() const {
2217     return _domain->field_at(_i_domain);
2218   }
2219 
2220   const Type* current_domain_cc() const {
2221     return _domain_cc->field_at(_i_domain_cc);
2222   }
2223 
2224   uint first_field_pos() const {
2225     assert(_first_field_pos >= TypeFunc::Parms, "not yet updated?");
2226     return _first_field_pos;
2227   }
2228 };
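
     // Illustrative use of DomainIterator (mirrors process_call_arguments() below):
     //
     //   for (DomainIterator di(call->as_CallJava()); di.has_next(); di.next()) {
     //     const Type* at = di.current_domain_cc();
     //     Node* arg      = call->in(di.i_domain_cc());
     //     ...
     //   }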
2229 
2230 // Determine whether the call returns any of its arguments.
2231 bool ConnectionGraph::returns_an_argument(CallNode* call) {
2232   ciMethod* meth = call->as_CallJava()->method();
2233   BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2234   if (call_analyzer == nullptr) {
2235     return false;
2236   }
2237 
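       // A pointer argument is treated as returned only if the bytecode
       // analyzer reports it as returned and, for a scalarized argument, the
       // return type is compatible and the return is scalarized as well.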
2238   const TypeTuple* d = call->tf()->domain_sig();
2239   bool ret_arg = false;
2240   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2241     if (d->field_at(i)->isa_ptr() != nullptr &&
2242         call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2243       if (meth->is_scalarized_arg(i - TypeFunc::Parms) && !compatible_return(call->as_CallJava(), i)) {
2244         return false;
2245       }
2246       if (call->tf()->returns_inline_type_as_fields() != meth->is_scalarized_arg(i - TypeFunc::Parms)) {
2247         return false;
2248       }
2249       ret_arg = true;
2250     }
2251   }
2252   return ret_arg;
2253 }
2254 
2255 void ConnectionGraph::add_call_node(CallNode* call) {
2256   assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for calls which return a pointer");
2257   uint call_idx = call->_idx;
2258   if (call->is_Allocate()) {
2259     Node* k = call->in(AllocateNode::KlassNode);
2260     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
2261     assert(kt != nullptr, "TypeKlassPtr required.");
2262     PointsToNode::EscapeState es = PointsToNode::NoEscape;
2263     bool scalar_replaceable = true;
2264     NOT_PRODUCT(const char* nsr_reason = "");
2265     if (call->is_AllocateArray()) {
2266       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
2267         es = PointsToNode::GlobalEscape;
2268       } else {
2269         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2270         if (length < 0) {
2271           // Not scalar replaceable if the length is not constant.
2272           scalar_replaceable = false;
2273           NOT_PRODUCT(nsr_reason = "has a non-constant length");
2274         } else if (length > EliminateAllocationArraySizeLimit) {
2275           // Not scalar replaceable if the length is too big.
2276           scalar_replaceable = false;
2277           NOT_PRODUCT(nsr_reason = "has a length that is too big");
2278         }
2279       }
2280     } else {  // Allocate instance
2281       if (!kt->isa_instklassptr()) { // StressReflectiveCode
2282         es = PointsToNode::GlobalEscape;
2283       } else {
2284         const TypeInstKlassPtr* ikt = kt->is_instklassptr();
2285         ciInstanceKlass* ik = ikt->klass_is_exact() ? ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
2286         if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
2287             ik->is_subclass_of(_compile->env()->Reference_klass()) ||
2288             !ik->can_be_instantiated() ||
2289             ik->has_finalizer()) {
2290           es = PointsToNode::GlobalEscape;
2291         } else {
2292           int nfields = ik->nof_nonstatic_fields();
2293           if (nfields > EliminateAllocationFieldsLimit) {
2294             // Not scalar replaceable if there are too many fields.
2295             scalar_replaceable = false;
2296             NOT_PRODUCT(nsr_reason = "has too many fields");
2297           }
2298         }
2299       }
2300     }
2301     add_java_object(call, es);
2302     PointsToNode* ptn = ptnode_adr(call_idx);
2303     if (!scalar_replaceable && ptn->scalar_replaceable()) {
2304       set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
2305     }
2306   } else if (call->is_CallStaticJava()) {
2307     // Call nodes could be different types:
2308     //
2309     // 1. CallDynamicJavaNode (what happened during call is unknown):
2310     //
2311     //    - mapped to GlobalEscape JavaObject node if oop is returned;
2312     //
2313     //    - all oop arguments are escaping globally;
2314     //
2315     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2316     //
2317     //    - the same as CallDynamicJavaNode if bytecode analysis is not possible;
2318     //
2319     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2320     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2321     //      during call is returned;
2322     //    - mapped to ArgEscape LocalVar node pointing to object arguments
2323     //      which are returned and do not escape during the call;
2324     //
2325     //    - the escape status of oop arguments is determined by bytecode analysis;
2326     //
2327     // For a static call, we know exactly what method is being called.
2328     // Use bytecode estimator to record whether the call's return value escapes.
2329     ciMethod* meth = call->as_CallJava()->method();
2330     if (meth == nullptr) {
2331       const char* name = call->as_CallStaticJava()->_name;
2332       assert(call->as_CallStaticJava()->is_call_to_multianewarray_stub() ||
2333              strncmp(name, "load_unknown_inline", 19) == 0 ||
2334              strncmp(name, "store_inline_type_fields_to_buf", 31) == 0, "TODO: add failed case check");
2335       // Returns a newly allocated non-escaped object.
2336       add_java_object(call, PointsToNode::NoEscape);
2337       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multianewarray"));
2338     } else if (meth->is_boxing_method()) {
2339       // Returns boxing object
2340       PointsToNode::EscapeState es;
2341       vmIntrinsics::ID intr = meth->intrinsic_id();
2342       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2343         // It does not escape if object is always allocated.
2344         es = PointsToNode::NoEscape;
2345       } else {
2346         // It escapes globally if object could be loaded from cache.
2347         es = PointsToNode::GlobalEscape;
2348       }
2349       add_java_object(call, es);
2350       if (es == PointsToNode::GlobalEscape) {
2351         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2352       }
2353     } else {
2354       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2355       call_analyzer->copy_dependencies(_compile->dependencies());
2356       if (call_analyzer->is_return_allocated()) {
2357         // Returns a newly allocated non-escaped object, simply
2358         // update dependency information.
2359         // Mark it as NoEscape so that objects referenced by
2360         // its fields will be marked as NoEscape at least.
2361         add_java_object(call, PointsToNode::NoEscape);
2362         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2363       } else {
2364         // For a non-scalarized argument/return: add_proj() adds an edge between the return projection and the call,
2365         // and process_call_arguments() adds an edge between the call and the argument.
2366         // For a scalarized argument/return: process_call_arguments() adds an edge between a call projection for a field
2367         // and the argument input to the call for that field. An edge is added between the projection for the returned
2368         // buffer and the call.
2369         if (returns_an_argument(call) && !call->tf()->returns_inline_type_as_fields()) {
2370           // Returns a non-scalarized argument
2371           add_local_var(call, PointsToNode::ArgEscape);
2372         } else {
2373           // Returns an unknown object, or a scalarized argument is being returned
2374           map_ideal_node(call, phantom_obj);
2375         }
2376       }
2377     }
2378   } else {
2379     // Some other type of call; assume the worst case:
2380     // the returned value is unknown and globally escapes.
2381     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
2382     map_ideal_node(call, phantom_obj);
2383   }
2384 }
2385 
2386 // Check that the return type is compatible with the type of the argument being returned, i.e. that there's no cast
2387 // in the method that fails.
2388 bool ConnectionGraph::compatible_return(CallJavaNode* call, uint k) {
2389   return call->tf()->domain_sig()->field_at(k)->is_instptr()->instance_klass() == call->tf()->range_sig()->field_at(TypeFunc::Parms)->is_instptr()->instance_klass();
2390 }
2391 
2392 void ConnectionGraph::process_call_arguments(CallNode *call) {
2393     bool is_arraycopy = false;
2394     switch (call->Opcode()) {
2395 #ifdef ASSERT
2396     case Op_Allocate:
2397     case Op_AllocateArray:
2398     case Op_Lock:
2399     case Op_Unlock:
2400       assert(false, "should be done already");
2401       break;
2402 #endif
2403     case Op_ArrayCopy:
2404     case Op_CallLeafNoFP:
2405       // Most array copies are ArrayCopy nodes at this point but there
2406       // are still a few direct calls to the copy subroutines (See
2407       // PhaseStringOpts::copy_string())
2408       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2409         call->as_CallLeaf()->is_call_to_arraycopystub();
2410       // fall through
2411     case Op_CallLeafVector:
2412     case Op_CallLeaf: {
2413       // Stub calls: objects do not escape but they are not scalar replaceable.
2414       // Adjust escape state for outgoing arguments.
2415       const TypeTuple * d = call->tf()->domain_sig();
2416       bool src_has_oops = false;
2417       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2418         const Type* at = d->field_at(i);
2419         Node *arg = call->in(i);
2420         if (arg == nullptr) {
2421           continue;
2422         }
2423         const Type *aat = _igvn->type(arg);
2424         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2425           continue;
2426         }
2427         if (arg->is_AddP()) {
2428           //
2429           // The inline_native_clone() case when the arraycopy stub is called
2430           // after the allocation before Initialize and CheckCastPP nodes.
2431           // Or normal arraycopy for object arrays case.
2432           //
2433           // Set AddP's base (Allocate) as not scalar replaceable since
2434           // pointer to the base (with offset) is passed as argument.
2435           //
2436           arg = get_addp_base(arg);
2437         }
2438         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2439         assert(arg_ptn != nullptr, "should be registered");
2440         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2441         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2442           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2443                  aat->isa_ptr() != nullptr, "expecting a pointer");
2444           bool arg_has_oops = aat->isa_oopptr() &&
2445                               (aat->isa_instptr() ||
2446                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
2447                                (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
2448                                                                aat->isa_aryptr()->is_flat() &&
2449                                                                aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
2450           if (i == TypeFunc::Parms) {
2451             src_has_oops = arg_has_oops;
2452           }
2453           //
2454           // src or dst could be j.l.Object when other is basic type array:
2455           //
2456           //   arraycopy(char[],0,Object*,0,size);
2457           //   arraycopy(Object*,0,char[],0,size);
2458           //
2459           // Don't add edges in such cases.
2460           //
2461           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2462                                        arg_has_oops && (i > TypeFunc::Parms);
2463 #ifdef ASSERT
2464           if (!(is_arraycopy ||
2465                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2466                 (call->as_CallLeaf()->_name != nullptr &&
2467                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2468                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2469                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
2470                   strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
2471                   strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
2472                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
2473                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
2474                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
2475                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
2476                   strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
2477                   strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
2478                   strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
2479                   strcmp(call->as_CallLeaf()->_name, "intpoly_montgomeryMult_P256") == 0 ||
2480                   strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2481                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2482                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2483                   strcmp(call->as_CallLeaf()->_name, "kyberNtt") == 0 ||
2484                   strcmp(call->as_CallLeaf()->_name, "kyberInverseNtt") == 0 ||
2485                   strcmp(call->as_CallLeaf()->_name, "kyberNttMult") == 0 ||
2486                   strcmp(call->as_CallLeaf()->_name, "kyberAddPoly_2") == 0 ||
2487                   strcmp(call->as_CallLeaf()->_name, "kyberAddPoly_3") == 0 ||
2488                   strcmp(call->as_CallLeaf()->_name, "kyber12To16") == 0 ||
2489                   strcmp(call->as_CallLeaf()->_name, "kyberBarrettReduce") == 0 ||
2490                   strcmp(call->as_CallLeaf()->_name, "dilithiumAlmostNtt") == 0 ||
2491                   strcmp(call->as_CallLeaf()->_name, "dilithiumAlmostInverseNtt") == 0 ||
2492                   strcmp(call->as_CallLeaf()->_name, "dilithiumNttMult") == 0 ||
2493                   strcmp(call->as_CallLeaf()->_name, "dilithiumMontMulByConstant") == 0 ||
2494                   strcmp(call->as_CallLeaf()->_name, "dilithiumDecomposePoly") == 0 ||
2495                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2496                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2497                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2498                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2499                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2500                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2501                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2502                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2503                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2504                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2505                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2506                   strcmp(call->as_CallLeaf()->_name, "double_keccak") == 0 ||
2507                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2508                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2509                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2510                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2511                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2512                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2513                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2514                   strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
2515                   strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
2516                   strcmp(call->as_CallLeaf()->_name, "store_inline_type_fields_to_buf") == 0 ||
2517                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2518                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2520                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2521                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2522                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2523                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2524                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2525                  ))) {
2526             call->dump();
2527             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2528           }
2529 #endif
2530           // Always process arraycopy's destination object since
2531           // we need to add all possible edges to references in
2532           // source object.
2533           if (arg_esc >= PointsToNode::ArgEscape &&
2534               !arg_is_arraycopy_dest) {
2535             continue;
2536           }
2537           PointsToNode::EscapeState es = PointsToNode::ArgEscape;
2538           if (call->is_ArrayCopy()) {
2539             ArrayCopyNode* ac = call->as_ArrayCopy();
2540             if (ac->is_clonebasic() ||
2541                 ac->is_arraycopy_validated() ||
2542                 ac->is_copyof_validated() ||
2543                 ac->is_copyofrange_validated()) {
2544               es = PointsToNode::NoEscape;
2545             }
2546           }
2547           set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2548           if (arg_is_arraycopy_dest) {
2549             Node* src = call->in(TypeFunc::Parms);
2550             if (src->is_AddP()) {
2551               src = get_addp_base(src);
2552             }
2553             PointsToNode* src_ptn = ptnode_adr(src->_idx);
2554             assert(src_ptn != nullptr, "should be registered");
2555             // Special arraycopy edge:
2556             // Only the escape state of the destination object's fields
2557             // affects the escape state of fields in the source object.
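                 // Illustrative example (assumed Java source, not from this
                 // compilation): after
                 //   System.arraycopy(src, 0, dst, 0, len);
                 // values stored in src's elements become loadable through dst,
                 // so escaping dst fields force src's field contents to escape,
                 // but not the other way around.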
2558             add_arraycopy(call, es, src_ptn, arg_ptn);
2559           }
2560         }
2561       }
2562       break;
2563     }
2564     case Op_CallStaticJava: {
2565       // For a static call, we know exactly what method is being called.
2566       // Use the bytecode estimator to record the call's escape effects.
2567 #ifdef ASSERT
2568       const char* name = call->as_CallStaticJava()->_name;
2569       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2570 #endif
2571       ciMethod* meth = call->as_CallJava()->method();
2572       if ((meth != nullptr) && meth->is_boxing_method()) {
2573         break; // Boxing methods do not modify any oops.
2574       }
2575       BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2576       // Fall through if this is not a Java method or there is no analyzer information.
2577       if (call_analyzer != nullptr) {
2578         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2579         bool ret_arg = returns_an_argument(call);
2580         for (DomainIterator di(call->as_CallJava()); di.has_next(); di.next()) {
2581           int k = di.i_domain() - TypeFunc::Parms;
2582           const Type* at = di.current_domain_cc();
2583           Node* arg = call->in(di.i_domain_cc());
2584           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2585           assert(!call_analyzer->is_arg_returned(k) || !meth->is_scalarized_arg(k) ||
2586                  !compatible_return(call->as_CallJava(), di.i_domain()) ||
2587                  call->proj_out_or_null(di.i_domain_cc() - di.first_field_pos() + TypeFunc::Parms + 1) == nullptr ||
2588                  _igvn->type(call->proj_out_or_null(di.i_domain_cc() - di.first_field_pos() + TypeFunc::Parms + 1)) == at,
2589                  "scalarized return and scalarized argument should match");
2590           if (at->isa_ptr() != nullptr && call_analyzer->is_arg_returned(k) && ret_arg) {
2591             // The call returns arguments.
2592             if (meth->is_scalarized_arg(k)) {
2593               ProjNode* res_proj = call->proj_out_or_null(di.i_domain_cc() - di.first_field_pos() + TypeFunc::Parms + 1);
2594               if (res_proj != nullptr) {
2595                 assert(_igvn->type(res_proj)->isa_ptr(), "scalarized return and scalarized argument should match");
2596                 if (res_proj->_con != TypeFunc::Parms) {
2597                   // Add an edge between the result projection for a field and the argument projection for the same argument field.
2598                   PointsToNode* proj_ptn = ptnode_adr(res_proj->_idx);
2599                   add_edge(proj_ptn, arg_ptn);
2600                   if (!call_analyzer->is_return_local()) {
2601                     add_edge(proj_ptn, phantom_obj);
2602                   }
2603                 }
2604               }
2605             } else if (call_ptn != nullptr) { // Is call's result used?
2606               assert(call_ptn->is_LocalVar(), "node should be registered");
2607               assert(arg_ptn != nullptr, "node should be registered");
2608               add_edge(call_ptn, arg_ptn);
2609             }
2610           }
2611           if (at->isa_oopptr() != nullptr &&
2612               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2613             if (!call_analyzer->is_arg_stack(k)) {
2614               // The argument global escapes
2615               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2616             } else {
2617               set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2618               if (!call_analyzer->is_arg_local(k)) {
2619                 // The argument itself doesn't escape, but any fields might
2620                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2621               }
2622             }
2623           }
2624         }
2625         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2626           // The call returns arguments.
2627           assert(call_ptn->edge_count() > 0, "sanity");
2628           if (!call_analyzer->is_return_local()) {
2629             // Returns also unknown object.
2630             add_edge(call_ptn, phantom_obj);
2631           }
2632         }
2633         break;
2634       }
2635     }
2636     default: {
2637       // Fall through to here if this is not a Java method, there is no
2638       // analyzer information, or it is some other type of call. Assume the
2639       // worst case: all arguments globally escape.
2640       const TypeTuple* d = call->tf()->domain_cc();
2641       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2642         const Type* at = d->field_at(i);
2643         if (at->isa_oopptr() != nullptr) {
2644           Node* arg = call->in(i);
2645           if (arg->is_AddP()) {
2646             arg = get_addp_base(arg);
2647           }
2648           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2649           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2650         }
2651       }
2652     }
2653   }
2654 }
2655 
2656 
2657 // Finish Graph construction.
2658 bool ConnectionGraph::complete_connection_graph(
2659                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2660                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2661                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
2662                          GrowableArray<FieldNode*>&      oop_fields_worklist) {
2663   // Normally only 1-3 passes are needed to build the Connection Graph,
2664   // depending on graph complexity. 8 passes were observed in jvm2008 compiler.compiler.
2665   // Set the limit to 20 to catch the situation when something went wrong and
2666   // bail out of Escape Analysis.
2667   // Also limit the build time to 20 sec (60 in a debug VM); see the EscapeAnalysisTimeout flag.
2668 #define GRAPH_BUILD_ITER_LIMIT 20
2669 
2670   // Propagate GlobalEscape and ArgEscape escape states and check that
2671   // we still have non-escaping objects. The method pushes Field nodes
2672   // which reference phantom_object onto _worklist.
2673   if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2674     return false; // Nothing to do.
2675   }
2676   // Now propagate references to all JavaObject nodes.
2677   int java_objects_length = java_objects_worklist.length();
2678   elapsedTimer build_time;
2679   build_time.start();
2680   elapsedTimer time;
2681   bool timeout = false;
2682   int new_edges = 1;
2683   int iterations = 0;
2684   do {
2685     while ((new_edges > 0) &&
2686            (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
2687       double start_time = time.seconds();
2688       time.start();
2689       new_edges = 0;
2690       // Propagate references to phantom_object for nodes pushed on _worklist
2691       // by find_non_escaped_objects() and find_field_value().
2692       new_edges += add_java_object_edges(phantom_obj, false);
2693       for (int next = 0; next < java_objects_length; ++next) {
2694         JavaObjectNode* ptn = java_objects_worklist.at(next);
2695         new_edges += add_java_object_edges(ptn, true);
2696 
2697 #define SAMPLE_SIZE 4
2698         if ((next % SAMPLE_SIZE) == 0) {
2699           // Every SAMPLE_SIZE iterations, estimate how much time it will
2700           // take to complete graph construction.
2701           time.stop();
2702           // Poll for requests from the shutdown mechanism to quiesce the
2703           // compiler because Connection Graph construction may take a long time.
2704           CompileBroker::maybe_block();
2705           double stop_time = time.seconds();
2706           double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
2707           double time_until_end = time_per_iter * (double)(java_objects_length - next);
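               // Worked example with assumed numbers: if the last SAMPLE_SIZE == 4
               // objects took 0.2 sec, time_per_iter is 0.05 sec; with 1000 objects
               // remaining, time_until_end is 50 sec, which would exceed the 20 sec
               // limit mentioned above and trigger the timeout bailout below.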
2708           if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
2709             timeout = true;
2710             break; // Timeout
2711           }
2712           start_time = stop_time;
2713           time.start();
2714         }
2715 #undef SAMPLE_SIZE
2716 
2717       }
2718       if (timeout) break;
2719       if (new_edges > 0) {
2720         // Update escape states on each iteration if graph was updated.
2721         if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2722           return false; // Nothing to do.
2723         }
2724       }
2725       time.stop();
2726       if (time.seconds() >= EscapeAnalysisTimeout) {
2727         timeout = true;
2728         break;
2729       }
2730       _compile->print_method(PHASE_EA_COMPLETE_CONNECTION_GRAPH_ITER, 5);
2731     }
2732     if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
2733       time.start();
2734       // Find fields which have unknown value.
2735       int fields_length = oop_fields_worklist.length();
2736       for (int next = 0; next < fields_length; next++) {
2737         FieldNode* field = oop_fields_worklist.at(next);
2738         if (field->edge_count() == 0) {
2739           new_edges += find_field_value(field);
2740           // This code may have added new edges to phantom_object.
2741           // Another iteration is needed to propagate references to phantom_object.
2742         }
2743       }
2744       time.stop();
2745       if (time.seconds() >= EscapeAnalysisTimeout) {
2746         timeout = true;
2747         break;
2748       }
2749     } else {
2750       new_edges = 0; // Bailout
2751     }
2752   } while (new_edges > 0);
2753 
2754   build_time.stop();
2755   _build_time = build_time.seconds();
2756   _build_iterations = iterations;
2757 
2758   // Bailout if passed limits.
2759   if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
2760     Compile* C = _compile;
2761     if (C->log() != nullptr) {
2762       C->log()->begin_elem("connectionGraph_bailout reason='reached ");
2763       C->log()->text("%s", timeout ? "time" : "iterations");
2764       C->log()->end_elem(" limit'");
2765     }
2766     assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
2767            _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
2768     // Possible infinite build_connection_graph loop,
2769     // bailout (no changes to ideal graph were made).
2770     return false;
2771   }
2772 
2773 #undef GRAPH_BUILD_ITER_LIMIT
2774 
2775   // Find fields initialized by null for non-escaping Allocations.
2776   int non_escaped_length = non_escaped_allocs_worklist.length();
2777   for (int next = 0; next < non_escaped_length; next++) {
2778     JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2779     PointsToNode::EscapeState es = ptn->escape_state();
2780     assert(es <= PointsToNode::ArgEscape, "sanity");
2781     if (es == PointsToNode::NoEscape) {
2782       if (find_init_values_null(ptn, _igvn) > 0) {
2783         // Adding references to null object does not change escape states
2784         // since it does not escape. Also no fields are added to null object.
2785         add_java_object_edges(null_obj, false);
2786       }
2787     }
2788     Node* n = ptn->ideal_node();
2789     if (n->is_Allocate()) {
2790       // The object allocated by this Allocate node will never be
2791       // seen by another thread. Mark it so that when it is
2792       // expanded, no MemBarStoreStore is added.
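           // Illustrative example (assumed): for a strictly thread-local
           //   A a = new A(); ... a.f ...
           // no other thread can observe the initializing stores, so the
           // barrier that would order them for safe publication is unnecessary.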
2793       InitializeNode* ini = n->as_Allocate()->initialization();
2794       if (ini != nullptr)
2795         ini->set_does_not_escape();
2796     }
2797   }
2798   return true; // Finished graph construction.
2799 }
2800 
2801 // Propagate GlobalEscape and ArgEscape escape states to all nodes
2802 // and check that we still have non-escaping java objects.
2803 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
2804                                                GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2805                                                bool print_method) {
2806   GrowableArray<PointsToNode*> escape_worklist;
2807   // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
2808   int ptnodes_length = ptnodes_worklist.length();
2809   for (int next = 0; next < ptnodes_length; ++next) {
2810     PointsToNode* ptn = ptnodes_worklist.at(next);
2811     if (ptn->escape_state() >= PointsToNode::ArgEscape ||
2812         ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
2813       escape_worklist.push(ptn);
2814     }
2815   }
2816   // Set escape states to referenced nodes (edges list).
2817   while (escape_worklist.length() > 0) {
2818     PointsToNode* ptn = escape_worklist.pop();
2819     PointsToNode::EscapeState es  = ptn->escape_state();
2820     PointsToNode::EscapeState field_es = ptn->fields_escape_state();
2821     if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
2822         es >= PointsToNode::ArgEscape) {
2823       // GlobalEscape or ArgEscape state of field means it has unknown value.
2824       if (add_edge(ptn, phantom_obj)) {
2825         // New edge was added
2826         add_field_uses_to_worklist(ptn->as_Field());
2827       }
2828     }
2829     for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2830       PointsToNode* e = i.get();
2831       if (e->is_Arraycopy()) {
2832         assert(ptn->arraycopy_dst(), "sanity");
2833         // Propagate only fields escape state through arraycopy edge.
2834         if (e->fields_escape_state() < field_es) {
2835           set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2836           escape_worklist.push(e);
2837         }
2838       } else if (es >= field_es) {
2839         // fields_escape_state is also set to 'es' if it is less than 'es'.
2840         if (e->escape_state() < es) {
2841           set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2842           escape_worklist.push(e);
2843         }
2844       } else {
2845         // Propagate field escape state.
2846         bool es_changed = false;
2847         if (e->fields_escape_state() < field_es) {
2848           set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2849           es_changed = true;
2850         }
2851         if ((e->escape_state() < field_es) &&
2852             e->is_Field() && ptn->is_JavaObject() &&
2853             e->as_Field()->is_oop()) {
2854           // Change escape state of referenced fields.
2855           set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2856           es_changed = true;
2857         } else if (e->escape_state() < es) {
2858           set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2859           es_changed = true;
2860         }
2861         if (es_changed) {
2862           escape_worklist.push(e);
2863         }
2864       }
2865       if (print_method) {
2866         _compile->print_method(PHASE_EA_CONNECTION_GRAPH_PROPAGATE_ITER, 6, e->ideal_node());
2867       }
2868     }
2869   }
2870   // Remove escaped objects from non_escaped list.
2871   for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) {
2872     JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2873     if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
2874       non_escaped_allocs_worklist.delete_at(next);
2875     }
2876     if (ptn->escape_state() == PointsToNode::NoEscape) {
2877       // Find fields in non-escaped allocations which have unknown value.
2878       find_init_values_phantom(ptn);
2879     }
2880   }
2881   return (non_escaped_allocs_worklist.length() > 0);
2882 }
2883 
2884 // Add all references to JavaObject node by walking over all uses.
2885 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
2886   int new_edges = 0;
2887   if (populate_worklist) {
2888     // Populate _worklist by uses of jobj's uses.
2889     for (UseIterator i(jobj); i.has_next(); i.next()) {
2890       PointsToNode* use = i.get();
2891       if (use->is_Arraycopy()) {
2892         continue;
2893       }
2894       add_uses_to_worklist(use);
2895       if (use->is_Field() && use->as_Field()->is_oop()) {
2896         // Put on worklist all field's uses (loads) and
2897         // related field nodes (same base and offset).
2898         add_field_uses_to_worklist(use->as_Field());
2899       }
2900     }
2901   }
2902   for (int l = 0; l < _worklist.length(); l++) {
2903     PointsToNode* use = _worklist.at(l);
2904     if (PointsToNode::is_base_use(use)) {
2905       // Add reference from jobj to field and from field to jobj (field's base).
2906       use = PointsToNode::get_use_node(use)->as_Field();
2907       if (add_base(use->as_Field(), jobj)) {
2908         new_edges++;
2909       }
2910       continue;
2911     }
2912     assert(!use->is_JavaObject(), "sanity");
2913     if (use->is_Arraycopy()) {
2914       if (jobj == null_obj) { // null object does not have field edges
2915         continue;
2916       }
2917       // Added edge from Arraycopy node to arraycopy's source java object
2918       if (add_edge(use, jobj)) {
2919         jobj->set_arraycopy_src();
2920         new_edges++;
2921       }
2922       // and stop here.
2923       continue;
2924     }
2925     if (!add_edge(use, jobj)) {
2926       continue; // No new edge added, there was such edge already.
2927     }
2928     new_edges++;
2929     if (use->is_LocalVar()) {
2930       add_uses_to_worklist(use);
2931       if (use->arraycopy_dst()) {
2932         for (EdgeIterator i(use); i.has_next(); i.next()) {
2933           PointsToNode* e = i.get();
2934           if (e->is_Arraycopy()) {
2935             if (jobj == null_obj) { // null object does not have field edges
2936               continue;
2937             }
2938             // Add edge from arraycopy's destination java object to Arraycopy node.
2939             if (add_edge(jobj, e)) {
2940               new_edges++;
2941               jobj->set_arraycopy_dst();
2942             }
2943           }
2944         }
2945       }
2946     } else {
2947       // Added a new edge to values stored in a field.
2948       // Put on worklist all field's uses (loads) and
2949       // related field nodes (same base and offset).
2950       add_field_uses_to_worklist(use->as_Field());
2951     }
2952   }
2953   _worklist.clear();
2954   _in_worklist.reset();
2955   return new_edges;
2956 }
2957 
2958 // Put on worklist all related field nodes.
2959 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
2960   assert(field->is_oop(), "sanity");
2961   int offset = field->offset();
2962   add_uses_to_worklist(field);
2963   // Loop over all bases of this field and push onto the worklist Field nodes
2964   // with the same offset and base (since they may reference the same field).
2965   for (BaseIterator i(field); i.has_next(); i.next()) {
2966     PointsToNode* base = i.get();
2967     add_fields_to_worklist(field, base);
2968     // Check if the base was the source object of an arraycopy and go over the
2969     // arraycopy's destination objects, since values stored to a field of the
2970     // source object are accessible by uses (loads) of fields of the destination objects.
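         // Illustrative example (assumed Java source): after
         //   System.arraycopy(src, 0, dst, 0, len);
         // a load of dst[i] may observe a value previously stored to src[j],
         // so Field nodes based on the destination objects are revisited too.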
2971     if (base->arraycopy_src()) {
2972       for (UseIterator j(base); j.has_next(); j.next()) {
2973         PointsToNode* arycp = j.get();
2974         if (arycp->is_Arraycopy()) {
2975           for (UseIterator k(arycp); k.has_next(); k.next()) {
2976             PointsToNode* abase = k.get();
2977             if (abase->arraycopy_dst() && abase != base) {
2978               // Look for the same arraycopy reference.
2979               add_fields_to_worklist(field, abase);
2980             }
2981           }
2982         }
2983       }
2984     }
2985   }
2986 }
2987 
2988 // Put on worklist all related field nodes.
2989 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
2990   int offset = field->offset();
2991   if (base->is_LocalVar()) {
2992     for (UseIterator j(base); j.has_next(); j.next()) {
2993       PointsToNode* f = j.get();
2994       if (PointsToNode::is_base_use(f)) { // Field
2995         f = PointsToNode::get_use_node(f);
2996         if (f == field || !f->as_Field()->is_oop()) {
2997           continue;
2998         }
2999         int offs = f->as_Field()->offset();
3000         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
3001           add_to_worklist(f);
3002         }
3003       }
3004     }
3005   } else {
3006     assert(base->is_JavaObject(), "sanity");
3007     if (// Skip phantom_object since it is only used to indicate that
3008         // this field's content globally escapes.
3009         (base != phantom_obj) &&
3010         // null object node does not have fields.
3011         (base != null_obj)) {
3012       for (EdgeIterator i(base); i.has_next(); i.next()) {
3013         PointsToNode* f = i.get();
3014         // Skip arraycopy edges since a store to a destination object's field
3015         // does not update the value in the source object's field.
3016         if (f->is_Arraycopy()) {
3017           assert(base->arraycopy_dst(), "sanity");
3018           continue;
3019         }
3020         if (f == field || !f->as_Field()->is_oop()) {
3021           continue;
3022         }
3023         int offs = f->as_Field()->offset();
3024         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
3025           add_to_worklist(f);
3026         }
3027       }
3028     }
3029   }
3030 }
3031 
3032 // Find fields which have unknown value.
3033 int ConnectionGraph::find_field_value(FieldNode* field) {
3034   // Escaped fields should have init value already.
3035   assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
3036   int new_edges = 0;
3037   for (BaseIterator i(field); i.has_next(); i.next()) {
3038     PointsToNode* base = i.get();
3039     if (base->is_JavaObject()) {
3040       // Skip Allocate's fields which will be processed later.
3041       if (base->ideal_node()->is_Allocate()) {
3042         return 0;
3043       }
3044       assert(base == null_obj, "only null ptr base expected here");
3045     }
3046   }
3047   if (add_edge(field, phantom_obj)) {
3048     // New edge was added
3049     new_edges++;
3050     add_field_uses_to_worklist(field);
3051   }
3052   return new_edges;
3053 }
3054 
3055 // Find fields initializing values for allocations.
3056 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
3057   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
3058   PointsToNode* init_val = phantom_obj;
3059   Node* alloc = pta->ideal_node();
3060 
3061   // Do nothing for Allocate nodes since their field values are
3062   // "known" unless they are initialized by arraycopy/clone.
3063   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
3064     if (alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) {
3065       // Null-free inline type arrays are initialized with an init value instead of null
3066       init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::InitValue)->_idx);
3067       assert(init_val != nullptr, "init value should be registered");
3068     } else {
3069       return 0;
3070     }
3071   }
3072   // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
3073   assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
3074 #ifdef ASSERT
3075   if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
3076     const char* name = alloc->as_CallStaticJava()->_name;
3077     assert(alloc->as_CallStaticJava()->is_call_to_multianewarray_stub() ||
3078            strncmp(name, "load_unknown_inline", 19) == 0 ||
3079            strncmp(name, "store_inline_type_fields_to_buf", 31) == 0, "sanity");
3080   }
3081 #endif
3083   int new_edges = 0;
3084   for (EdgeIterator i(pta); i.has_next(); i.next()) {
3085     PointsToNode* field = i.get();
3086     if (field->is_Field() && field->as_Field()->is_oop()) {
3087       if (add_edge(field, init_val)) {
3088         // New edge was added
3089         new_edges++;
3090         add_field_uses_to_worklist(field->as_Field());
3091       }
3092     }
3093   }
3094   return new_edges;
3095 }
3096 
3097 // Find fields initializing values for allocations.
3098 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
3099   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
3100   Node* alloc = pta->ideal_node();
3101   // Do nothing for Call nodes since their field values are unknown.
3102   if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) {
3103     return 0;
3104   }
3105   InitializeNode* ini = alloc->as_Allocate()->initialization();
3106   bool visited_bottom_offset = false;
3107   GrowableArray<int> offsets_worklist;
3108   int new_edges = 0;
3109 
3110   // Check if an oop field's initializing value is recorded and add
3111   // a corresponding null value for the field if it is not recorded.
3112   // Connection Graph does not record a default initialization by null
3113   // captured by Initialize node.
3114   //
3115   for (EdgeIterator i(pta); i.has_next(); i.next()) {
3116     PointsToNode* field = i.get(); // Field (AddP)
3117     if (!field->is_Field() || !field->as_Field()->is_oop()) {
3118       continue; // Not oop field
3119     }
3120     int offset = field->as_Field()->offset();
3121     if (offset == Type::OffsetBot) {
3122       if (!visited_bottom_offset) {
3123         // OffsetBot is used to reference an array's element, so
3124         // always add a reference to null to all Field nodes since we don't
3125         // know which element is referenced.
3126         if (add_edge(field, null_obj)) {
3127           // New edge was added
3128           new_edges++;
3129           add_field_uses_to_worklist(field->as_Field());
3130           visited_bottom_offset = true;
3131         }
3132       }
3133     } else {
3134       // Check only oop fields.
3135       const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
3136       if (adr_type->isa_rawptr()) {
3137 #ifdef ASSERT
3138         // Raw pointers are used for initializing stores, so skip this field
3139         // since its value should be recorded already.
3140         Node* base = get_addp_base(field->ideal_node());
3141         assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
3142 #endif
3143         continue;
3144       }
3145       if (!offsets_worklist.contains(offset)) {
3146         offsets_worklist.append(offset);
3147         Node* value = nullptr;
3148         if (ini != nullptr) {
3149           // StoreP::value_basic_type() == T_ADDRESS
3150           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
3151           Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
3152           // Make sure initializing store has the same type as this AddP.
3153           // This AddP may reference a non-existing field because it is on a
3154           // dead branch of a bimorphic call which has not been eliminated yet.
3155           if (store != nullptr && store->is_Store() &&
3156               store->as_Store()->value_basic_type() == ft) {
3157             value = store->in(MemNode::ValueIn);
3158 #ifdef ASSERT
3159             if (VerifyConnectionGraph) {
3160               // Verify that AddP already points to all objects the value points to.
3161               PointsToNode* val = ptnode_adr(value->_idx);
3162               assert((val != nullptr), "should be processed already");
3163               PointsToNode* missed_obj = nullptr;
3164               if (val->is_JavaObject()) {
3165                 if (!field->points_to(val->as_JavaObject())) {
3166                   missed_obj = val;
3167                 }
3168               } else {
3169                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
3170                   tty->print_cr("----------init store has invalid value -----");
3171                   store->dump();
3172                   val->dump();
3173                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
3174                 }
3175                 for (EdgeIterator j(val); j.has_next(); j.next()) {
3176                   PointsToNode* obj = j.get();
3177                   if (obj->is_JavaObject()) {
3178                     if (!field->points_to(obj->as_JavaObject())) {
3179                       missed_obj = obj;
3180                       break;
3181                     }
3182                   }
3183                 }
3184               }
3185               if (missed_obj != nullptr) {
3186                 tty->print_cr("----------field---------------------------------");
3187                 field->dump();
3188                 tty->print_cr("----------missed reference to object------------");
3189                 missed_obj->dump();
3190                 tty->print_cr("----------object referenced by init store-------");
3191                 store->dump();
3192                 val->dump();
3193                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
3194               }
3195             }
3196 #endif
3197           } else {
3198             // There could be initializing stores which follow the allocation.
3199             // For example, a volatile field store is not collected
3200             // by the Initialize node.
3201             //
3202             // We would need to check for dependent loads to separate such stores
3203             // from stores which follow loads. For now, add the initial value null
3204             // so that the pointer-compare optimization works correctly.
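                 // Illustrative example (assumed Java source):
                 //   class A { volatile Object f; }
                 //   A a = new A(); a.f = x;
                 // The volatile store to 'f' is not captured by the Initialize
                 // node, so 'value' stays null and a null edge is added below.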
3205           }
3206         }
3207         if (value == nullptr) {
3208           // A field's initializing value was not recorded. Add null.
3209           if (add_edge(field, null_obj)) {
3210             // New edge was added
3211             new_edges++;
3212             add_field_uses_to_worklist(field->as_Field());
3213           }
3214         }
3215       }
3216     }
3217   }
3218   return new_edges;
3219 }
3220 
3221 // Adjust scalar_replaceable state after Connection Graph is built.
3222 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges) {
3223   // A Phi 'x' is a _candidate_ to be reducible if 'can_reduce_phi(x)'
3224   // returns true. If one of the constraints in this method sets 'jobj' to NSR
3225   // then the candidate Phi is discarded. If the Phi has another SR 'jobj' as
3226   // input, 'adjust_scalar_replaceable_state' will eventually be called with
3227   // that other object and the Phi will become a reducible Phi.
3228   // There could be multiple merges involving the same jobj.
3229   Unique_Node_List candidates;
3230 
3231   // Search for non-escaping objects which are not scalar replaceable
3232   // and mark them to propagate the state to referenced objects.
3233 
3234   for (UseIterator i(jobj); i.has_next(); i.next()) {
3235     PointsToNode* use = i.get();
3236     if (use->is_Arraycopy()) {
3237       continue;
3238     }
3239     if (use->is_Field()) {
3240       FieldNode* field = use->as_Field();
3241       assert(field->is_oop() && field->scalar_replaceable(), "sanity");
3242       // 1. An object is not scalar replaceable if the field into which it is
3243       // stored has unknown offset (stored into unknown element of an array).
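           // Illustrative example (assumed): in "a[i] = obj" with a
           // non-constant index 'i', the store's Field node gets OffsetBot,
           // so 'obj' is conservatively marked NSR here.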
3244       if (field->offset() == Type::OffsetBot) {
3245         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
3246         return;
3247       }
3248       for (BaseIterator i(field); i.has_next(); i.next()) {
3249         PointsToNode* base = i.get();
3250         // 2. An object is not scalar replaceable if the field into which it is
3251         // stored has multiple bases, one of which is null.
3252         if ((base == null_obj) && (field->base_count() > 1)) {
3253           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
3254           return;
3255         }
3256         // 2.5. An object is not scalar replaceable if the field into which it is
3257         // stored has NSR base.
3258         if (!base->scalar_replaceable()) {
3259           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
3260           return;
3261         }
3262       }
3263     }
3264     assert(use->is_Field() || use->is_LocalVar(), "sanity");
3265     // 3. An object is not scalar replaceable if it is merged with other objects
3266     // and we can't remove the merge
3267     for (EdgeIterator j(use); j.has_next(); j.next()) {
3268       PointsToNode* ptn = j.get();
3269       if (ptn->is_JavaObject() && ptn != jobj) {
3270         Node* use_n = use->ideal_node();
3271 
3272         // These other local vars may point to multiple objects through a Phi.
3273         // In this case we skip them and see if we can reduce the Phi.
3274         if (use_n->is_CastPP() || use_n->is_CheckCastPP()) {
3275           use_n = use_n->in(1);
3276         }
3277 
3278         // If it's already a candidate or confirmed reducible merge we can skip verification
3279         if (candidates.member(use_n) || reducible_merges.member(use_n)) {
3280           continue;
3281         }
3282 
3283         if (use_n->is_Phi() && can_reduce_phi(use_n->as_Phi())) {
3284           candidates.push(use_n);
3285         } else {
3286           // Mark all objects as NSR if we can't remove the merge
3287           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
3288           set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
3289         }
3290       }
3291     }
3292     if (!jobj->scalar_replaceable()) {
3293       return;
3294     }
3295   }
3296 
3297   for (EdgeIterator j(jobj); j.has_next(); j.next()) {
3298     if (j.get()->is_Arraycopy()) {
3299       continue;
3300     }
3301 
3302     // Non-escaping object node should point only to field nodes.
3303     FieldNode* field = j.get()->as_Field();
3304     int offset = field->as_Field()->offset();
3305 
3306     // 4. An object is not scalar replaceable if it has a field with unknown
3307     // offset (an array's element is accessed in a loop).
3308     if (offset == Type::OffsetBot) {
3309       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
3310       return;
3311     }
3312     // 5. Currently an object is not scalar replaceable if a LoadStore node
3313     // accesses its field since the field value is unknown after it.
3314     //
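         // Illustrative example (assumed): an Unsafe/VarHandle compareAndSet
         // on the object's field parses to a LoadStore node; the stored value
         // cannot be tracked after it, so the object is marked NSR below.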
3315     Node* n = field->ideal_node();
3316 
3317     // Test for an unsafe access that was parsed as maybe off heap
3318     // (with a CheckCastPP to raw memory).
3319     assert(n->is_AddP(), "expect an address computation");
3320     if (n->in(AddPNode::Base)->is_top() &&
3321         n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
3322       assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
3323       assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
3324       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
3325       return;
3326     }
3327 
3328     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3329       Node* u = n->fast_out(i);
3330       if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
3331         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
3332         return;
3333       }
3334     }
3335 
3336     // 6. Or the address may point to more than one object. This may produce
3337     // a false positive result (marking the object not scalar replaceable)
3338     // since the flow-insensitive escape analysis can't separate
3339     // the case when stores overwrite the field's value from the case
3340     // when stores happen on different control branches.
3341     //
3342     // Note: it will disable scalar replacement in some cases:
3343     //
3344     //    Point p[] = new Point[1];
3345     //    p[0] = new Point(); // Will not be scalar replaced
3346     //
3347     // but it will save us from incorrect optimizations in the following cases:
3348     //
3349     //    Point p[] = new Point[1];
3350     //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
3351     //
3352     if (field->base_count() > 1 && candidates.size() == 0) {
3353       if (has_non_reducible_merge(field, reducible_merges)) {
3354         for (BaseIterator i(field); i.has_next(); i.next()) {
3355           PointsToNode* base = i.get();
3356           // Don't take into account LocalVar nodes which
3357           // may point to only one object which should also be
3358           // this field's base by now.
3359           if (base->is_JavaObject() && base != jobj) {
3360             // Mark all bases.
3361             set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
3362             set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
3363           }
3364         }
3365 
3366         if (!jobj->scalar_replaceable()) {
3367           return;
3368         }
3369       }
3370     }
3371   }
3372 
3373   // The candidate is truly a reducible merge only if none of the other
3374   // constraints ruled it as NSR. There could be multiple merges involving the
3375   // same jobj.
3376   assert(jobj->scalar_replaceable(), "sanity");
3377   for (uint i = 0; i < candidates.size(); i++ ) {
3378     Node* candidate = candidates.at(i);
3379     reducible_merges.push(candidate);
3380   }
3381 }
3382 
3383 bool ConnectionGraph::has_non_reducible_merge(FieldNode* field, Unique_Node_List& reducible_merges) {
3384   for (BaseIterator i(field); i.has_next(); i.next()) {
3385     Node* base = i.get()->ideal_node();
3386     if (base->is_Phi() && !reducible_merges.member(base)) {
3387       return true;
3388     }
3389   }
3390   return false;
3391 }
3392 
3393 void ConnectionGraph::revisit_reducible_phi_status(JavaObjectNode* jobj, Unique_Node_List& reducible_merges) {
3394   assert(jobj != nullptr && !jobj->scalar_replaceable(), "jobj should be set as NSR before calling this function.");
3395 
3396   // Look for 'phis' that refer to 'jobj' as the last
3397   // remaining scalar replaceable input.
3398   uint reducible_merges_cnt = reducible_merges.size();
3399   for (uint i = 0; i < reducible_merges_cnt; i++) {
3400     Node* phi = reducible_merges.at(i);
3401 
3402     // This Phi is a 'good' one if it still points to
3403     // at least one scalar replaceable object. Note that 'jobj'
3404     // should have been marked as NSR before calling this function.
3405     bool good_phi = false;
3406 
3407     for (uint j = 1; j < phi->req(); j++) {
3408       JavaObjectNode* phi_in_obj = unique_java_object(phi->in(j));
3409       if (phi_in_obj != nullptr && phi_in_obj->scalar_replaceable()) {
3410         good_phi = true;
3411         break;
3412       }
3413     }
3414 
3415     if (!good_phi) {
3416       NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Phi %d became non-reducible after node %d became NSR.", phi->_idx, jobj->ideal_node()->_idx);)
3417       reducible_merges.remove(i);
3418 
3419       // Decrement the index because the 'remove' call above actually
3420       // moves the last entry of the list to position 'i'.
3421       i--;
3422 
3423       reducible_merges_cnt--;
3424     }
3425   }
3426 }
3427 
3428 // Propagate NSR (Not scalar replaceable) state.
3429 void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist, Unique_Node_List &reducible_merges) {
3430   int jobj_length = jobj_worklist.length();
3431   bool found_nsr_alloc = true;
3432   while (found_nsr_alloc) {
3433     found_nsr_alloc = false;
3434     for (int next = 0; next < jobj_length; ++next) {
3435       JavaObjectNode* jobj = jobj_worklist.at(next);
3436       for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) {
3437         PointsToNode* use = i.get();
3438         if (use->is_Field()) {
3439           FieldNode* field = use->as_Field();
3440           assert(field->is_oop() && field->scalar_replaceable(), "sanity");
3441           assert(field->offset() != Type::OffsetBot, "sanity");
3442           for (BaseIterator i(field); i.has_next(); i.next()) {
3443             PointsToNode* base = i.get();
3444             // An object is not scalar replaceable if the field into which
3445             // it is stored has NSR base.
3446             if ((base != null_obj) && !base->scalar_replaceable()) {
3447               set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
3448               // Any merge that had only 'jobj' as scalar-replaceable will now be non-reducible,
3449               // because there is no point in reducing a Phi that won't improve the number of SR
3450               // objects.
3451               revisit_reducible_phi_status(jobj, reducible_merges);
3452               found_nsr_alloc = true;
3453               break;
3454             }
3455           }
3456         } else if (use->is_LocalVar()) {
3457           Node* phi = use->ideal_node();
3458           if (phi->Opcode() == Op_Phi && reducible_merges.member(phi) && !can_reduce_phi(phi->as_Phi())) {
3459             set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is merged in a non-reducible phi"));
3460             reducible_merges.yank(phi);
3461             found_nsr_alloc = true;
3462             break;
3463           }
3464         }
3465         _compile->print_method(PHASE_EA_PROPAGATE_NSR_ITER, 5, jobj->ideal_node());
3466       }
3467     }
3468   }
3469 }
3470 
3471 #ifdef ASSERT
3472 void ConnectionGraph::verify_connection_graph(
3473                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
3474                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
3475                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
3476                          GrowableArray<Node*>& addp_worklist) {
3477   // Verify that graph is complete - no new edges could be added.
3478   int java_objects_length = java_objects_worklist.length();
3479   int non_escaped_length  = non_escaped_allocs_worklist.length();
3480   int new_edges = 0;
3481   for (int next = 0; next < java_objects_length; ++next) {
3482     JavaObjectNode* ptn = java_objects_worklist.at(next);
3483     new_edges += add_java_object_edges(ptn, true);
3484   }
3485   assert(new_edges == 0, "graph was not complete");
3486   // Verify that escape state is final.
3487   int length = non_escaped_allocs_worklist.length();
3488   find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist, /*print_method=*/ false);
3489   assert((non_escaped_length == non_escaped_allocs_worklist.length()) &&
3490          (non_escaped_length == length) &&
3491          (_worklist.length() == 0), "escape state was not final");
3492 
3493   // Verify fields information.
3494   int addp_length = addp_worklist.length();
3495   for (int next = 0; next < addp_length; ++next ) {
3496     Node* n = addp_worklist.at(next);
3497     FieldNode* field = ptnode_adr(n->_idx)->as_Field();
3498     if (field->is_oop()) {
3499       // Verify that field has all bases
3500       Node* base = get_addp_base(n);
3501       PointsToNode* ptn = ptnode_adr(base->_idx);
3502       if (ptn->is_JavaObject()) {
3503         assert(field->has_base(ptn->as_JavaObject()), "sanity");
3504       } else {
3505         assert(ptn->is_LocalVar(), "sanity");
3506         for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3507           PointsToNode* e = i.get();
3508           if (e->is_JavaObject()) {
3509             assert(field->has_base(e->as_JavaObject()), "sanity");
3510           }
3511         }
3512       }
3513       // Verify that all fields have initializing values.
3514       if (field->edge_count() == 0) {
3515         tty->print_cr("----------field does not have references----------");
3516         field->dump();
3517         for (BaseIterator i(field); i.has_next(); i.next()) {
3518           PointsToNode* base = i.get();
3519           tty->print_cr("----------field has next base---------------------");
3520           base->dump();
3521           if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
3522             tty->print_cr("----------base has fields-------------------------");
3523             for (EdgeIterator j(base); j.has_next(); j.next()) {
3524               j.get()->dump();
3525             }
3526             tty->print_cr("----------base has references---------------------");
3527             for (UseIterator j(base); j.has_next(); j.next()) {
3528               j.get()->dump();
3529             }
3530           }
3531         }
3532         for (UseIterator i(field); i.has_next(); i.next()) {
3533           i.get()->dump();
3534         }
3535         assert(field->edge_count() > 0, "sanity");
3536       }
3537     }
3538   }
3539 }
3540 #endif
3541 
3542 // Optimize ideal graph.
3543 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3544                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3545   Compile* C = _compile;
3546   PhaseIterGVN* igvn = _igvn;
3547   if (EliminateLocks) {
3548     // Mark locks before changing ideal graph.
3549     int cnt = C->macro_count();
3550     for (int i = 0; i < cnt; i++) {
3551       Node *n = C->macro_node(i);
3552       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3553         AbstractLockNode* alock = n->as_AbstractLock();
3554         if (!alock->is_non_esc_obj()) {
3555           const Type* obj_type = igvn->type(alock->obj_node());
3556           if (can_eliminate_lock(alock) && !obj_type->is_inlinetypeptr()) {
3557             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3558             // The lock could have been marked eliminated by the lock coarsening
3559             // code during the first IGVN before EA. Replace the coarsened flag
3560             // to eliminate all associated locks/unlocks.
3561 #ifdef ASSERT
3562             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3563 #endif
3564             alock->set_non_esc_obj();
3565           }
3566         }
3567       }
3568     }
3569   }
3570 
3571   if (OptimizePtrCompare) {
3572     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3573       Node *n = ptr_cmp_worklist.at(i);
3574       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3575       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3576       if (tcmp->singleton()) {
3577         Node* cmp = igvn->makecon(tcmp);
3578 #ifndef PRODUCT
3579         if (PrintOptimizePtrCompare) {
3580           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3581           if (Verbose) {
3582             n->dump(1);
3583           }
3584         }
3585 #endif
3586         igvn->replace_node(n, cmp);
3587       }
3588     }
3589   }
3590 
3591   // For MemBarStoreStore nodes added in library_call.cpp, check
3592   // escape status of associated AllocateNode and optimize out
3593   // MemBarStoreStore node if the allocated object never escapes.
3594   for (int i = 0; i < storestore_worklist.length(); i++) {
3595     Node* storestore = storestore_worklist.at(i);
3596     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3597     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3598       if (alloc->in(AllocateNode::InlineType) != nullptr) {
3599         // Non-escaping inline type buffer allocations don't require a membar
3600         storestore->as_MemBar()->remove(_igvn);
3601       } else {
3602         MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3603         mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3604         mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3605         igvn->register_new_node_with_optimizer(mb);
3606         igvn->replace_node(storestore, mb);
3607       }
3608     }
3609   }
3610 }
3611 
3612 // Atomic flat accesses on non-escaping objects can be optimized to non-atomic accesses
3613 void ConnectionGraph::optimize_flat_accesses(GrowableArray<SafePointNode*>& sfn_worklist) {
3614   PhaseIterGVN& igvn = *_igvn;
3615   bool delay = igvn.delay_transform();
3616   igvn.set_delay_transform(true);
3617   igvn.C->for_each_flat_access([&](Node* n) {
3618     Node* base = n->is_LoadFlat() ? n->as_LoadFlat()->base() : n->as_StoreFlat()->base();
3619     if (!not_global_escape(base)) {
3620       return;
3621     }
3622 
3623     bool expanded;
3624     if (n->is_LoadFlat()) {
3625       expanded = n->as_LoadFlat()->expand_non_atomic(igvn);
3626     } else {
3627       expanded = n->as_StoreFlat()->expand_non_atomic(igvn);
3628     }
3629     if (expanded) {
3630       sfn_worklist.remove(n->as_SafePoint());
3631       igvn.C->remove_flat_access(n);
3632     }
3633   });
3634   igvn.set_delay_transform(delay);
3635 }
3636 
3637 // Optimize object compares.
3638 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3639   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0,1]
3640   if (!OptimizePtrCompare) {
3641     return UNKNOWN;
3642   }
3643   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3644   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3645 
3646   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3647   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3648   JavaObjectNode* jobj1 = unique_java_object(left);
3649   JavaObjectNode* jobj2 = unique_java_object(right);
3650 
3651   // The use of this method during allocation merge reduction may cause 'left'
3652   // or 'right' to be something (e.g., a Phi) that isn't in the connection graph
3653   // or that doesn't reference a unique java object.
3654   if (ptn1 == nullptr || ptn2 == nullptr ||
3655       jobj1 == nullptr || jobj2 == nullptr) {
3656     return UNKNOWN;
3657   }
3658 
3659   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
3660   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
3661 
3662   // Check simple cases first.
3663   if (jobj1 != nullptr) {
3664     if (jobj1->escape_state() == PointsToNode::NoEscape) {
3665       if (jobj1 == jobj2) {
3666         // Comparing the same non-escaping object.
3667         return EQ;
3668       }
3669       Node* obj = jobj1->ideal_node();
3670       // Comparing a non-escaping allocation.
3671       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3672           !ptn2->points_to(jobj1)) {
3673         return NE; // This includes nullness check.
3674       }
3675     }
3676   }
3677   if (jobj2 != nullptr) {
3678     if (jobj2->escape_state() == PointsToNode::NoEscape) {
3679       Node* obj = jobj2->ideal_node();
3680       // Comparing a non-escaping allocation.
3681       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3682           !ptn1->points_to(jobj2)) {
3683         return NE; // This includes nullness check.
3684       }
3685     }
3686   }
3687   if (jobj1 != nullptr && jobj1 != phantom_obj &&
3688       jobj2 != nullptr && jobj2 != phantom_obj &&
3689       jobj1->ideal_node()->is_Con() &&
3690       jobj2->ideal_node()->is_Con()) {
3691     // Comparing Klass or String constants. Be careful with compressed
3692     // pointers: compare the types of ConN and ConP instead of the nodes.
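         // Illustrative example (assumed): a ConN and a ConP wrapping the same
         // constant are different ideal nodes, but make_ptr() maps both to the
         // same pointer type, so the compare can still fold to EQ.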
3693     const Type* t1 = jobj1->ideal_node()->get_ptr_type();
3694     const Type* t2 = jobj2->ideal_node()->get_ptr_type();
3695     if (t1->make_ptr() == t2->make_ptr()) {
3696       return EQ;
3697     } else {
3698       return NE;
3699     }
3700   }
3701   if (ptn1->meet(ptn2)) {
3702     return UNKNOWN; // Sets are not disjoint
3703   }
3704 
3705   // Sets are disjoint.
3706   bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
3707   bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
3708   bool set1_has_null_ptr    = ptn1->points_to(null_obj);
3709   bool set2_has_null_ptr    = ptn2->points_to(null_obj);
3710   if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
3711       (set2_has_unknown_ptr && set1_has_null_ptr)) {
3712     // Check nullness of unknown object.
3713     return UNKNOWN;
3714   }
3715 
3716   // Disjointness by itself is not sufficient since
3717   // alias analysis is not complete for escaped objects.
3718   // Disjoint sets are definitely unrelated only when
3719   // at least one set has only non-escaping allocations.
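       // Illustrative reasoning: if ptn1 points only to allocations that never
       // escape this compilation, no escaped pointer can alias them, so
       // disjoint points-to sets imply the compare folds to NE.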
3720   if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
3721     if (ptn1->non_escaping_allocation()) {
3722       return NE;
3723     }
3724   }
3725   if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
3726     if (ptn2->non_escaping_allocation()) {
3727       return NE;
3728     }
3729   }
3730   return UNKNOWN;
3731 }
3732 
3733 // Connection Graph construction functions.
3734 
3735 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
3736   PointsToNode* ptadr = _nodes.at(n->_idx);
3737   if (ptadr != nullptr) {
3738     assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
3739     return;
3740   }
3741   Compile* C = _compile;
3742   ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
3743   map_ideal_node(n, ptadr);
3744 }
3745 
3746 PointsToNode* ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
3747   PointsToNode* ptadr = _nodes.at(n->_idx);
3748   if (ptadr != nullptr) {
3749     assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
3750     return ptadr;
3751   }
3752   Compile* C = _compile;
3753   ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
3754   map_ideal_node(n, ptadr);
3755   return ptadr;
3756 }
3757 
3758 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
3759   PointsToNode* ptadr = _nodes.at(n->_idx);
3760   if (ptadr != nullptr) {
3761     assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
3762     return;
3763   }
3764   bool unsafe = false;
3765   bool is_oop = is_oop_field(n, offset, &unsafe);
3766   if (unsafe) {
3767     es = PointsToNode::GlobalEscape;
3768   }
3769   Compile* C = _compile;
3770   FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
3771   map_ideal_node(n, field);
3772 }
3773 
3774 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
3775                                     PointsToNode* src, PointsToNode* dst) {
3776   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3777   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3778   PointsToNode* ptadr = _nodes.at(n->_idx);
3779   if (ptadr != nullptr) {
3780     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3781     return;
3782   }
3783   Compile* C = _compile;
3784   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3785   map_ideal_node(n, ptadr);
3786   // Add edge from arraycopy node to source object.
3787   (void)add_edge(ptadr, src);
3788   src->set_arraycopy_src();
3789   // Add edge from destination object to arraycopy node.
3790   (void)add_edge(dst, ptadr);
3791   dst->set_arraycopy_dst();
3792 }
3793 
3794 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3795   const Type* adr_type = n->as_AddP()->bottom_type();
3796   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
3797   BasicType bt = T_INT;
3798   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
3799     // Check only oop fields.
3800     if (!adr_type->isa_aryptr() ||
3801         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3802         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3803       // OffsetBot is used to reference an array's element. Ignore the first AddP.
3804       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3805         bt = T_OBJECT;
3806       }
3807     }
3808   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3809     if (adr_type->isa_instptr()) {
3810       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
3811       if (field != nullptr) {
3812         bt = field->layout_type();
3813       } else {
3814         // Check for unsafe oop field access
3815         if (has_oop_node_outs(n)) {
3816           bt = T_OBJECT;
3817           (*unsafe) = true;
3818         }
3819       }
3820     } else if (adr_type->isa_aryptr()) {
3821       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3822         // Ignore array length load.
3823       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3824         // Ignore first AddP.
3825       } else {
3826         const Type* elemtype = adr_type->is_aryptr()->elem();
3827         if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
3828           ciInlineKlass* vk = elemtype->inline_klass();
3829           field_offset += vk->payload_offset();
3830           ciField* field = vk->get_field_by_offset(field_offset, false);
3831           if (field != nullptr) {
3832             bt = field->layout_type();
3833           } else {
3834             assert(field_offset == vk->payload_offset() + vk->null_marker_offset_in_payload(), "no field or null marker of %s at offset %d", vk->name()->as_utf8(), field_offset);
3835             bt = T_BOOLEAN;
3836           }
3837         } else {
3838           bt = elemtype->array_element_basic_type();
3839         }
3840       }
3841     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3842       // Allocation initialization, ThreadLocal field access, unsafe access
3843       if (has_oop_node_outs(n)) {
3844         bt = T_OBJECT;
3845       }
3846     }
3847   }
3848   // Note: T_NARROWOOP is not classed as a real reference type
3849   bool res = (is_reference_type(bt) || bt == T_NARROWOOP);
3850   assert(!has_oop_node_outs(n) || res, "sanity: AddP has oop outs, needs to be treated as oop field");
3851   return res;
3852 }
3853 
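     // Conservatively check whether 'n' is used as the address of an oop
     // load/store or an atomic oop access, including accesses that only the
     // GC barrier set knows about.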
3854 bool ConnectionGraph::has_oop_node_outs(Node* n) {
3855   return n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3856          n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3857          n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3858          BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n);
3859 }
3860 
3861 // Return the unique Java object that 'n' points to, or null if there is no unique one.
3862 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3863   // If the node was created after the escape computation we can't answer.
3864   uint idx = n->_idx;
3865   if (idx >= nodes_size()) {
3866     return nullptr;
3867   }
3868   PointsToNode* ptn = ptnode_adr(idx);
3869   if (ptn == nullptr) {
3870     return nullptr;
3871   }
3872   if (ptn->is_JavaObject()) {
3873     return ptn->as_JavaObject();
3874   }
3875   assert(ptn->is_LocalVar(), "sanity");
3876   // Check all java objects it points to.
3877   JavaObjectNode* jobj = nullptr;
3878   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3879     PointsToNode* e = i.get();
3880     if (e->is_JavaObject()) {
3881       if (jobj == nullptr) {
3882         jobj = e->as_JavaObject();
3883       } else if (jobj != e) {
3884         return nullptr;
3885       }
3886     }
3887   }
3888   return jobj;
3889 }
3890 
3891 // Return true if this node points only to non-escaping allocations.
3892 bool PointsToNode::non_escaping_allocation() {
3893   if (is_JavaObject()) {
3894     Node* n = ideal_node();
3895     if (n->is_Allocate() || n->is_CallStaticJava()) {
3896       return (escape_state() == PointsToNode::NoEscape);
3897     } else {
3898       return false;
3899     }
3900   }
3901   assert(is_LocalVar(), "sanity");
3902   // Check all java objects it points to.
3903   for (EdgeIterator i(this); i.has_next(); i.next()) {
3904     PointsToNode* e = i.get();
3905     if (e->is_JavaObject()) {
3906       Node* n = e->ideal_node();
3907       if ((e->escape_state() != PointsToNode::NoEscape) ||
3908           !(n->is_Allocate() || n->is_CallStaticJava())) {
3909         return false;
3910       }
3911     }
3912   }
3913   return true;
3914 }
3915 
3916 // Return true if we know the node does not escape globally.
3917 bool ConnectionGraph::not_global_escape(Node *n) {
3918   assert(!_collecting, "should not call during graph construction");
3919   // If the node was created after the escape computation we can't answer.
3920   uint idx = n->_idx;
3921   if (idx >= nodes_size()) {
3922     return false;
3923   }
3924   PointsToNode* ptn = ptnode_adr(idx);
3925   if (ptn == nullptr) {
3926     return false; // not in congraph (e.g. ConI)
3927   }
3928   PointsToNode::EscapeState es = ptn->escape_state();
3929   // If we have already computed a value, return it.
3930   if (es >= PointsToNode::GlobalEscape) {
3931     return false;
3932   }
3933   if (ptn->is_JavaObject()) {
3934     return true; // (es < PointsToNode::GlobalEscape);
3935   }
3936   assert(ptn->is_LocalVar(), "sanity");
3937   // Check all java objects it points to.
3938   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3939     if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
3940       return false;
3941     }
3942   }
3943   return true;
3944 }
3945 
3946 // Return true if locked object does not escape globally
3947 // and locked code region (identified by BoxLockNode) is balanced:
3948 // all compiled code paths have corresponding Lock/Unlock pairs.
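     // A sketch of the target pattern: in Java source such as
     //   synchronized (new Object()) { ... }
     // the lock object never escapes, so once the region is known to be
     // balanced the Lock/Unlock pair can be elided.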
3949 bool ConnectionGraph::can_eliminate_lock(AbstractLockNode* alock) {
3950   if (alock->is_balanced() && not_global_escape(alock->obj_node())) {
3951     if (EliminateNestedLocks) {
3952       // We can mark the whole locking region as Local only when
3953       // a single object is used for locking.
3954       alock->box_node()->as_BoxLock()->set_local();
3955     }
3956     return true;
3957   }
3958   return false;
3959 }
3960 
3961 // Helper functions
3962 
3963 // Return true if this node is 'ptn' (for a JavaObject) or has an edge to it.
3964 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
3965   if (is_JavaObject()) {
3966     return (this == ptn);
3967   }
3968   assert(is_LocalVar() || is_Field(), "sanity");
3969   for (EdgeIterator i(this); i.has_next(); i.next()) {
3970     if (i.get() == ptn) {
3971       return true;
3972     }
3973   }
3974   return false;
3975 }
3976 
3977 // Return true if the points-to sets of this node and 'ptn' intersect.
3978 bool PointsToNode::meet(PointsToNode* ptn) {
3979   if (this == ptn) {
3980     return true;
3981   } else if (ptn->is_JavaObject()) {
3982     return this->points_to(ptn->as_JavaObject());
3983   } else if (this->is_JavaObject()) {
3984     return ptn->points_to(this->as_JavaObject());
3985   }
3986   assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
3987   int ptn_count =  ptn->edge_count();
3988   for (EdgeIterator i(this); i.has_next(); i.next()) {
3989     PointsToNode* this_e = i.get();
3990     for (int j = 0; j < ptn_count; j++) {
3991       if (this_e == ptn->edge(j)) {
3992         return true;
3993       }
3994     }
3995   }
3996   return false;
3997 }
3998 
3999 #ifdef ASSERT
4000 // Return true if bases point to this java object.
4001 bool FieldNode::has_base(JavaObjectNode* jobj) const {
4002   for (BaseIterator i(this); i.has_next(); i.next()) {
4003     if (i.get() == jobj) {
4004       return true;
4005     }
4006   }
4007   return false;
4008 }
4009 #endif
4010 
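     // Return true if 'addp' computes a raw address for a store captured by
     // an Initialize node: either directly below an Allocate's result
     // projection, or behind a Phi whose Store users feed an Initialize.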
4011 bool ConnectionGraph::is_captured_store_address(Node* addp) {
4012   // Handle simple case first.
4013   assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access");
4014   if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) {
4015     return true;
4016   } else if (addp->in(AddPNode::Address)->is_Phi()) {
4017     for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
4018       Node* addp_use = addp->fast_out(i);
4019       if (addp_use->is_Store()) {
4020         for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) {
4021           if (addp_use->fast_out(j)->is_Initialize()) {
4022             return true;
4023           }
4024         }
4025       }
4026     }
4027   }
4028   return false;
4029 }
4030 
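     // Return the field/element offset encoded in the address 'adr'. For raw
     // addresses of captured stores, read the constant offset input of the
     // AddP directly; otherwise use the flat_offset() of the pointer type.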
4031 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
4032   const Type *adr_type = phase->type(adr);
4033   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
4034     // We are computing a raw address for a store captured by an Initialize;
4035     // compute an appropriate address type. AddP cases #3 and #5 (see below).
4036     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
4037     assert(offs != Type::OffsetBot ||
4038            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
4039            "offset must be a constant or it is initialization of array");
4040     return offs;
4041   }
4042   return adr_type->is_ptr()->flat_offset();
4043 }
4044 
4045 Node* ConnectionGraph::get_addp_base(Node *addp) {
4046   assert(addp->is_AddP(), "must be AddP");
4047   //
4048   // AddP cases for Base and Address inputs:
4049   // case #1. Direct object's field reference:
4050   //     Allocate
4051   //       |
4052   //     Proj #5 ( oop result )
4053   //       |
4054   //     CheckCastPP (cast to instance type)
4055   //      | |
4056   //     AddP  ( base == address )
4057   //
4058   // case #2. Indirect object's field reference:
4059   //      Phi
4060   //       |
4061   //     CastPP (cast to instance type)
4062   //      | |
4063   //     AddP  ( base == address )
4064   //
4065   // case #3. Raw object's field reference for Initialize node.
4066   //          Could have an additional Phi merging multiple allocations.
4067   //      Allocate
4068   //        |
4069   //      Proj #5 ( oop result )
4070   //  top   |
4071   //     \  |
4072   //     AddP  ( base == top )
4073   //
4074   // case #4. Array's element reference:
4075   //   {CheckCastPP | CastPP}
4076   //     |  | |
4077   //     |  AddP ( array's element offset )
4078   //     |  |
4079   //     AddP ( array's offset )
4080   //
4081   // case #5. Raw object's field reference for arraycopy stub call:
4082   //          The inline_native_clone() case when the arraycopy stub is called
4083   //          after the allocation before Initialize and CheckCastPP nodes.
4084   //      Allocate
4085   //        |
4086   //      Proj #5 ( oop result )
4087   //       | |
4088   //       AddP  ( base == address )
4089   //
4090   // case #6. Constant Pool, ThreadLocal, CastX2P, Klass, OSR buffer, or
4091   //          Raw object's field reference:
4092   //      {ConP, ThreadLocal, CastX2P, raw Load, Parm0}
4093   //  top   |
4094   //     \  |
4095   //     AddP  ( base == top )
4096   //
4097   // case #7. Klass's field reference.
4098   //      LoadKlass
4099   //       | |
4100   //       AddP  ( base == address )
4101   //
4102   // case #8. narrow Klass's field reference.
4103   //      LoadNKlass
4104   //       |
4105   //      DecodeN
4106   //       | |
4107   //       AddP  ( base == address )
4108   //
4109   // case #9. Mixed unsafe access
4110   //    {instance}
4111   //        |
4112   //      CheckCastPP (raw)
4113   //  top   |
4114   //     \  |
4115   //     AddP  ( base == top )
4116   //
4117   // case #10. Klass fetched with
4118   //           LibraryCallKit::load_*_refined_array_klass()
4119   //           which has an extra Phi.
4120   //  LoadKlass   LoadKlass
4121   //       |          |
4122   //     CastPP    CastPP
4123   //          \   /
4124   //           Phi
4125   //      top   |
4126   //         \  |
4127   //         AddP  ( base == top )
4128   //
4129   Node *base = addp->in(AddPNode::Base);
4130   if (base->uncast()->is_top()) { // AddP cases #3, #6, #9, and #10.
4131     base = addp->in(AddPNode::Address);
4132     while (base->is_AddP()) {
4133       // Case #6 (unsafe access) may have several chained AddP nodes.
4134       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
4135       base = base->in(AddPNode::Address);
4136     }
4137     if (base->Opcode() == Op_CheckCastPP &&
4138         base->bottom_type()->isa_rawptr() &&
4139         _igvn->type(base->in(1))->isa_oopptr()) {
4140       base = base->in(1); // Case #9
4141     } else {
4142       // Case #3, #6, and #10
4143       Node* uncast_base = base->uncast();
4144       int opcode = uncast_base->Opcode();
4145       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
4146              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
4147              (_igvn->C->is_osr_compilation() && uncast_base->is_Parm() && uncast_base->as_Parm()->_con == TypeFunc::Parms) ||
4148              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
4149              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_klassptr() != nullptr)) ||
4150              is_captured_store_address(addp) ||
4151              is_load_array_klass_related(uncast_base), "sanity");
4152     }
4153   }
4154   return base;
4155 }
4156 
4157 #ifdef ASSERT
4158 // Case #10
4159 bool ConnectionGraph::is_load_array_klass_related(const Node* uncast_base) {
4160   if (!uncast_base->is_Phi() || uncast_base->req() != 3) {
4161     return false;
4162   }
4163   Node* in1 = uncast_base->in(1);
4164   Node* in2 = uncast_base->in(2);
4165   return in1->uncast()->Opcode() == Op_LoadKlass &&
4166          in2->uncast()->Opcode() == Op_LoadKlass;
4167 }
4168 #endif
4169 
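     // Case #4 helper: if 'addp' (the element-offset AddP) has a unique user
     // that is a second AddP with the same base 'n' (the one adding the
     // constant array header offset), return that second AddP; otherwise
     // return null. See the diagram below.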
4170 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
4171   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
4172   Node* addp2 = addp->raw_out(0);
4173   if (addp->outcnt() == 1 && addp2->is_AddP() &&
4174       addp2->in(AddPNode::Base) == n &&
4175       addp2->in(AddPNode::Address) == addp) {
4176     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
4177     //
4178     // Find the array's offset to push it on the worklist first so
4179     // that the array's element offset is processed first (pushed
4180     // second), avoiding a CastPP for the array's offset.
4181     // Otherwise the inserted CastPP (LocalVar) would point to what
4182     // the AddP (Field) points to, which would be wrong since the
4183     // algorithm expects the CastPP to point to the same objects as
4184     // the AddP's base CheckCastPP (LocalVar).
4185     //
4186     //    ArrayAllocation
4187     //     |
4188     //    CheckCastPP
4189     //     |
4190     //    memProj (from ArrayAllocation CheckCastPP)
4191     //     |  ||
4192     //     |  ||   Int (element index)
4193     //     |  ||    |   ConI (log(element size))
4194     //     |  ||    |   /
4195     //     |  ||   LShift
4196     //     |  ||  /
4197     //     |  AddP (array's element offset)
4198     //     |  |
4199     //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
4200     //     | / /
4201     //     AddP (array's offset)
4202     //      |
4203     //     Load/Store (memory operation on array's element)
4204     //
4205     return addp2;
4206   }
4207   return nullptr;
4208 }
4209 
4210 //
4211 // Adjust the type and inputs of an AddP which computes the
4212 // address of a field of an instance
4213 //
4214 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
4215   PhaseGVN* igvn = _igvn;
4216   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
4217   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
4218   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
4219   if (t == nullptr) {
4220     // We are computing a raw address for a store captured by an Initialize;
4221     // compute an appropriate address type (cases #3 and #5).
4222     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
4223     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
4224     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
4225     assert(offs != Type::OffsetBot, "offset must be a constant");
4226     if (base_t->isa_aryptr() != nullptr) {
4227       // In the case of a flat inline type array, each field has its
4228       // own slice so we need to extract the field being accessed from
4229       // the address computation
4230       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
4231     } else {
4232       t = base_t->add_offset(offs)->is_oopptr();
4233     }
4234   }
4235   int inst_id = base_t->instance_id();
4236   assert(!t->is_known_instance() || t->instance_id() == inst_id,
4237                              "old type must be non-instance or match new type");
4238 
4239   // The type 't' could be a subclass of 'base_t'.
4240   // As a result t->offset() could be larger than base_t's size, which will
4241   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
4242   // constructor verifies correctness of the offset.
4243   //
4244   // It could happen on a subclass's branch (from type profiling
4245   // inlining) which was not eliminated during parsing since the exactness
4246   // of the allocation type was not propagated to the subclass type check.
4247   //
4248   // Or the type 't' might not be related to 'base_t' at all.
4249   // It could happen when the CHA type differs from the MDO type on a dead path
4250   // (for example, from an instanceof check) which is not collapsed during parsing.
4251   //
4252   // Do nothing for such AddP node and don't process its users since
4253   // this code branch will go away.
4254   //
4255   if (!t->is_known_instance() &&
4256       !base_t->maybe_java_subtype_of(t)) {
4257      return false; // bail out
4258   }
4259   const TypePtr* tinst = base_t->add_offset(t->offset());
4260   if (tinst->isa_aryptr() && t->isa_aryptr()) {
4261     // In the case of a flat inline type array, each field has its
4262     // own slice so we need to keep track of the field being accessed.
4263     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
4264     // Keep array properties (not flat/null-free)
4265     tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
4266     if (tinst == nullptr) {
4267       return false; // Skip dead path with inconsistent properties
4268     }
4269   }
4270 
4271   // Do NOT remove the next line: ensure a new alias index is allocated
4272   // for the instance type. Note: C++ will not remove it since the call
4273   // has a side effect.
4274   int alias_idx = _compile->get_alias_index(tinst);
4275   igvn->set_type(addp, tinst);
4276   // record the allocation in the node map
4277   set_map(addp, get_map(base->_idx));
4278   // Set addp's Base and Address to 'base'.
4279   Node *abase = addp->in(AddPNode::Base);
4280   Node *adr   = addp->in(AddPNode::Address);
4281   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
4282       adr->in(0)->_idx == (uint)inst_id) {
4283     // Skip AddP cases #3 and #5.
4284   } else {
4285     assert(!abase->is_top(), "sanity"); // AddP case #3
4286     if (abase != base) {
4287       igvn->hash_delete(addp);
4288       addp->set_req(AddPNode::Base, base);
4289       if (abase == adr) {
4290         addp->set_req(AddPNode::Address, base);
4291       } else {
4292         // AddP case #4 (adr is array's element offset AddP node)
4293 #ifdef ASSERT
4294         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
4295         assert(adr->is_AddP() && atype != nullptr &&
4296                atype->instance_id() == inst_id, "array's element offset should be processed first");
4297 #endif
4298       }
4299       igvn->hash_insert(addp);
4300     }
4301   }
4302   // Put on IGVN worklist since at least addp's type was changed above.
4303   record_for_optimizer(addp);
4304   return true;
4305 }
4306 
4307 //
4308 // Create a new version of orig_phi if necessary. Returns either the newly
4309 // created phi or an existing phi.  Sets create_new to indicate whether a new
4310 // phi was created.  Cache the last newly created phi in the node map.
4311 //
4312 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created) {
4313   Compile *C = _compile;
4314   PhaseGVN* igvn = _igvn;
4315   new_created = false;
4316   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
4317   // nothing to do if orig_phi is bottom memory or matches alias_idx
4318   if (phi_alias_idx == alias_idx) {
4319     return orig_phi;
4320   }
4321   // Have we recently created a Phi for this alias index?
4322   PhiNode *result = get_map_phi(orig_phi->_idx);
4323   if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) {
4324     return result;
4325   }
4326   // Previous check may fail when the same wide memory Phi was split into Phis
4327   // for different memory slices. Search all Phis for this region.
4328   if (result != nullptr) {
4329     Node* region = orig_phi->in(0);
4330     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
4331       Node* phi = region->fast_out(i);
4332       if (phi->is_Phi() &&
4333           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
4334         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
4335         return phi->as_Phi();
4336       }
4337     }
4338   }
4339   if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
4340     if (C->do_escape_analysis() == true && !C->failing()) {
4341       // Retry compilation without escape analysis.
4342       // If this is the first failure, the sentinel string will "stick"
4343       // to the Compile object, and the C2Compiler will see it and retry.
4344       C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4345     }
4346     return nullptr;
4347   }
4348   orig_phi_worklist.append_if_missing(orig_phi);
4349   const TypePtr *atype = C->get_adr_type(alias_idx);
4350   result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype);
4351   C->copy_node_notes_to(result, orig_phi);
4352   igvn->set_type(result, result->bottom_type());
4353   record_for_optimizer(result);
4354   set_map(orig_phi, result);
4355   new_created = true;
4356   return result;
4357 }
4358 
4359 //
4360 // Return a new version of Memory Phi "orig_phi" with the inputs having the
4361 // specified alias index.
4362 //
4363 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, uint rec_depth) {
4364   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
4365   Compile *C = _compile;
4366   PhaseGVN* igvn = _igvn;
4367   bool new_phi_created;
4368   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
4369   if (!new_phi_created) {
4370     return result;
4371   }
4372   GrowableArray<PhiNode *>  phi_list;
4373   GrowableArray<uint>  cur_input;
4374   PhiNode *phi = orig_phi;
4375   uint idx = 1;
4376   bool finished = false;
4377   while(!finished) {
4378     while (idx < phi->req()) {
4379       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, rec_depth + 1);
4380       if (mem != nullptr && mem->is_Phi()) {
4381         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
4382         if (new_phi_created) {
4383           // Found a Phi for which we created a new split; push the current
4384           // one on the worklist and begin processing the new one.
4385           phi_list.push(phi);
4386           cur_input.push(idx);
4387           phi = mem->as_Phi();
4388           result = newphi;
4389           idx = 1;
4390           continue;
4391         } else {
4392           mem = newphi;
4393         }
4394       }
4395       if (C->failing()) {
4396         return nullptr;
4397       }
4398       result->set_req(idx++, mem);
4399     }
4400 #ifdef ASSERT
4401     // verify that the new Phi has an input for each input of the original
4402     assert( phi->req() == result->req(), "must have same number of inputs.");
4403     assert( result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match");
4404 #endif
4405     // Check if all new phi's inputs have specified alias index.
4406     // Otherwise use old phi.
4407     for (uint i = 1; i < phi->req(); i++) {
4408       Node* in = result->in(i);
4409       assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond.");
4410     }
4411     // we have finished processing a Phi, see if there are any more to do
4412     finished = (phi_list.length() == 0 );
4413     if (!finished) {
4414       phi = phi_list.pop();
4415       idx = cur_input.pop();
4416       PhiNode *prev_result = get_map_phi(phi->_idx);
4417       prev_result->set_req(idx++, result);
4418       result = prev_result;
4419     }
4420   }
4421   return result;
4422 }
4423 
4424 //
4425 // The next methods are derived from methods in MemNode.
4426 //
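     // Pick the memory state for 'alias_idx' out of a MergeMem, except for
     // pointer types too imprecise to have a useful slice (AnyPtr, or
     // java.lang.Object with unknown offset), for which the MergeMem itself
     // is returned.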
4427 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
4428   Node *mem = mmem;
4429   // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
4430   // means an array I have not precisely typed yet.  Do not do any
4431   // alias stuff with it any time soon.
4432   if (toop->base() != Type::AnyPtr &&
4433       !(toop->isa_instptr() &&
4434         toop->is_instptr()->instance_klass()->is_java_lang_Object() &&
4435         toop->offset() == Type::OffsetBot)) {
4436     mem = mmem->memory_at(alias_idx);
4437     // Update input if it is progress over what we have now
4438   }
4439   return mem;
4440 }
4441 
4442 //
4443 // Move memory users to their memory slices.
4444 //
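     // Roughly: once 'n' lives on an instance memory slice, MergeMem and
     // MemBar users that still reference it on the general slice are
     // redirected to the memory state that find_inst_mem() computes for the
     // general slice.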
4445 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis) {
4446   Compile* C = _compile;
4447   PhaseGVN* igvn = _igvn;
4448   const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
4449   assert(tp != nullptr, "ptr type");
4450   int alias_idx = C->get_alias_index(tp);
4451   int general_idx = C->get_general_index(alias_idx);
4452 
4453   // Move users first
4454   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4455     Node* use = n->fast_out(i);
4456     if (use->is_MergeMem()) {
4457       MergeMemNode* mmem = use->as_MergeMem();
4458       assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
4459       if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
4460         continue; // Nothing to do
4461       }
4462       // Replace previous general reference to mem node.
4463       uint orig_uniq = C->unique();
4464       Node* m = find_inst_mem(n, general_idx, orig_phis);
4465       assert(orig_uniq == C->unique(), "no new nodes");
4466       mmem->set_memory_at(general_idx, m);
4467       --imax;
4468       --i;
4469     } else if (use->is_MemBar()) {
4470       assert(!use->is_Initialize(), "initializing stores should not be moved");
4471       if (use->req() > MemBarNode::Precedent &&
4472           use->in(MemBarNode::Precedent) == n) {
4473         // Don't move related membars.
4474         record_for_optimizer(use);
4475         continue;
4476       }
4477       tp = use->as_MemBar()->adr_type()->isa_ptr();
4478       if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) ||
4479           alias_idx == general_idx) {
4480         continue; // Nothing to do
4481       }
4482       // Move to general memory slice.
4483       uint orig_uniq = C->unique();
4484       Node* m = find_inst_mem(n, general_idx, orig_phis);
4485       assert(orig_uniq == C->unique(), "no new nodes");
4486       igvn->hash_delete(use);
4487       imax -= use->replace_edge(n, m, igvn);
4488       igvn->hash_insert(use);
4489       record_for_optimizer(use);
4490       --i;
4491 #ifdef ASSERT
4492     } else if (use->is_Mem()) {
4493       // Memory nodes should have new memory input.
4494       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
4495       assert(tp != nullptr, "ptr type");
4496       int idx = C->get_alias_index(tp);
4497       assert(get_map(use->_idx) != nullptr || idx == alias_idx,
4498              "Following memory nodes should have new memory input or be on the same memory slice");
4499     } else if (use->is_Phi()) {
4500       // Phi nodes should be split and moved already.
4501       tp = use->as_Phi()->adr_type()->isa_ptr();
4502       assert(tp != nullptr, "ptr type");
4503       int idx = C->get_alias_index(tp);
4504       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
4505     } else {
4506       use->dump();
4507       assert(false, "should not be here");
4508 #endif
4509     }
4510   }
4511 }
4512 
4513 //
4514 // Search memory chain of "mem" to find a MemNode whose address
4515 // is the specified alias index.
4516 //
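     // If the memory chain walk recurses deeper than the limit below, bail
     // out and retry the compilation without (iterative) escape analysis
     // rather than risk a native stack overflow.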
4517 #define FIND_INST_MEM_RECURSION_DEPTH_LIMIT 1000
4518 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis, uint rec_depth) {
4519   if (rec_depth > FIND_INST_MEM_RECURSION_DEPTH_LIMIT) {
4520     _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4521     return nullptr;
4522   }
4523   if (orig_mem == nullptr) {
4524     return orig_mem;
4525   }
4526   Compile* C = _compile;
4527   PhaseGVN* igvn = _igvn;
4528   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
4529   bool is_instance = (toop != nullptr) && toop->is_known_instance();
4530   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
4531   Node *prev = nullptr;
4532   Node *result = orig_mem;
4533   while (prev != result) {
4534     prev = result;
4535     if (result == start_mem) {
4536       break;  // hit one of our sentinels
4537     }
4538     if (result->is_Mem()) {
4539       const Type *at = igvn->type(result->in(MemNode::Address));
4540       if (at == Type::TOP) {
4541         break; // Dead
4542       }
4543       assert (at->isa_ptr() != nullptr, "pointer type required.");
4544       int idx = C->get_alias_index(at->is_ptr());
4545       if (idx == alias_idx) {
4546         break; // Found
4547       }
4548       if (!is_instance && (at->isa_oopptr() == nullptr ||
4549                            !at->is_oopptr()->is_known_instance())) {
4550         break; // Do not skip store to general memory slice.
4551       }
4552       result = result->in(MemNode::Memory);
4553     }
4554     if (!is_instance) {
4555       continue;  // don't search further for non-instance types
4556     }
4557     // skip over a call which does not affect this memory slice
4558     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
4559       Node *proj_in = result->in(0);
4560       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
4561         break;  // hit one of our sentinels
4562       } else if (proj_in->is_Call()) {
4563         // ArrayCopy node processed here as well
4564         CallNode *call = proj_in->as_Call();
4565         if (!call->may_modify(toop, igvn)) {
4566           result = call->in(TypeFunc::Memory);
4567         }
4568       } else if (proj_in->is_Initialize()) {
4569         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
4570         // Stop if this is the initialization for the object instance that
4571         // contains this memory slice; otherwise skip over it.
4572         if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
4573           result = proj_in->in(TypeFunc::Memory);
4574         } else if (C->get_alias_index(result->adr_type()) != alias_idx) {
4575           assert(C->get_general_index(alias_idx) == C->get_alias_index(result->adr_type()), "should be projection for the same field/array element");
4576           result = get_map(result->_idx);
4577           assert(result != nullptr, "new projection should have been allocated");
4578           break;
4579         }
4580       } else if (proj_in->is_MemBar()) {
4581         // Check if there is an array copy for a clone
4582         // Step over GC barrier when ReduceInitialCardMarks is disabled
4583         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4584         Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
4585 
4586         if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
4587           // Stop if it is a clone
4588           ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
4589           if (ac->may_modify(toop, igvn)) {
4590             break;
4591           }
4592         }
4593         result = proj_in->in(TypeFunc::Memory);
4594       }
4595     } else if (result->is_MergeMem()) {
4596       MergeMemNode *mmem = result->as_MergeMem();
4597       result = step_through_mergemem(mmem, alias_idx, toop);
4598       if (result == mmem->base_memory()) {
4599         // Didn't find instance memory, search through general slice recursively.
4600         result = mmem->memory_at(C->get_general_index(alias_idx));
4601         result = find_inst_mem(result, alias_idx, orig_phis, rec_depth + 1);
4602         if (C->failing()) {
4603           return nullptr;
4604         }
4605         mmem->set_memory_at(alias_idx, result);
4606       }
4607     } else if (result->is_Phi() &&
4608                C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
4609       Node *un = result->as_Phi()->unique_input(igvn);
4610       if (un != nullptr) {
4611         orig_phis.append_if_missing(result->as_Phi());
4612         result = un;
4613       } else {
4614         break;
4615       }
4616     } else if (result->is_ClearArray()) {
4617       if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
4618         // Cannot bypass the initialization of the instance
4619         // we are looking for.
4620         break;
4621       }
4622       // Otherwise skip it (the call updated 'result' value).
4623     } else if (result->Opcode() == Op_SCMemProj) {
4624       Node* mem = result->in(0);
4625       Node* adr = nullptr;
4626       if (mem->is_LoadStore()) {
4627         adr = mem->in(MemNode::Address);
4628       } else {
4629         assert(mem->Opcode() == Op_EncodeISOArray ||
4630                mem->Opcode() == Op_StrCompressedCopy, "sanity");
4631         adr = mem->in(3); // Memory edge corresponds to destination array
4632       }
4633       const Type *at = igvn->type(adr);
4634       if (at != Type::TOP) {
4635         assert(at->isa_ptr() != nullptr, "pointer type required.");
4636         int idx = C->get_alias_index(at->is_ptr());
4637         if (idx == alias_idx) {
4638           // Assert in debug mode
4639           assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
4640           break; // In product mode return SCMemProj node
4641         }
4642       }
4643       result = mem->in(MemNode::Memory);
4644     } else if (result->Opcode() == Op_StrInflatedCopy) {
4645       Node* adr = result->in(3); // Memory edge corresponds to destination array
4646       const Type *at = igvn->type(adr);
4647       if (at != Type::TOP) {
4648         assert(at->isa_ptr() != nullptr, "pointer type required.");
4649         int idx = C->get_alias_index(at->is_ptr());
4650         if (idx == alias_idx) {
4651           // Assert in debug mode
4652           assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
4653           break; // In product mode return SCMemProj node
4654         }
4655       }
4656       result = result->in(MemNode::Memory);
4657     }
4658   }
4659   if (result->is_Phi()) {
4660     PhiNode *mphi = result->as_Phi();
4661     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
4662     const TypePtr *t = mphi->adr_type();
4663     if (!is_instance) {
4664       // Push all non-instance Phis on the orig_phis worklist to update inputs
4665       // during Phase 4 if needed.
4666       orig_phis.append_if_missing(mphi);
4667     } else if (C->get_alias_index(t) != alias_idx) {
4668       // Create a new Phi with the specified alias index type.
4669       result = split_memory_phi(mphi, alias_idx, orig_phis, rec_depth + 1);
4670     }
4671   }
4672   // The result is either a MemNode, a PhiNode or an InitializeNode.
4673   return result;
4674 }
4675 
4676 //
4677 //  Convert the types of non-escaped objects to instance types where possible,
4678 //  propagate the new type information through the graph, and update memory
4679 //  edges and MergeMem inputs to reflect the new type.
4680 //
4681 //  We start with allocations (and calls which may be allocations)  on alloc_worklist.
4682 //  The processing is done in 4 phases:
4683 //
4684 //  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
4685 //            types for the CheckCastPP for allocations where possible.
4686 //            Propagate the new types through users as follows:
4687 //               casts and Phi:  push users on alloc_worklist
4688 //               AddP:  cast Base and Address inputs to the instance type
4689 //                      push any AddP users on alloc_worklist and push any memnode
4690 //                      users onto memnode_worklist.
4691 //  Phase 2:  Process MemNodes from memnode_worklist. Compute a new address type and
4692 //            search the Memory chain for a store with the appropriate
4693 //            address type.  If a Phi is found, create a new version with
4694 //            the appropriate memory slices from each of the Phi inputs.
4695 //            For stores, process the users as follows:
4696 //               MemNode:  push on memnode_worklist
4697 //               MergeMem: push on mergemem_worklist
4698 //  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory slice,
4699 //            moving the first node encountered of each instance type to the
4700 //            input corresponding to its alias index (its memory slice).
4702 //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
4703 //
4704 // In the following example, the CheckCastPP nodes are the cast of allocation
4705 // results and the allocation of node 29 is non-escaped and eligible to be an
4706 // instance type.
4707 //
4708 // We start with:
4709 //
4710 //     7 Parm #memory
4711 //    10  ConI  "12"
4712 //    19  CheckCastPP   "Foo"
4713 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
4714 //    29  CheckCastPP   "Foo"
4715 //    30  AddP  _ 29 29 10  Foo+12  alias_index=4
4716 //
4717 //    40  StoreP  25   7  20   ... alias_index=4
4718 //    50  StoreP  35  40  30   ... alias_index=4
4719 //    60  StoreP  45  50  20   ... alias_index=4
4720 //    70  LoadP    _  60  30   ... alias_index=4
4721 //    80  Phi     75  50  60   Memory alias_index=4
4722 //    90  LoadP    _  80  30   ... alias_index=4
4723 //   100  LoadP    _  80  20   ... alias_index=4
4724 //
4725 //
4726 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
4727 // and creating a new alias index for node 30.  This gives:
4728 //
4729 //     7 Parm #memory
4730 //    10  ConI  "12"
4731 //    19  CheckCastPP   "Foo"
4732 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
4733 //    29  CheckCastPP   "Foo"  iid=24
4734 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
4735 //
4736 //    40  StoreP  25   7  20   ... alias_index=4
4737 //    50  StoreP  35  40  30   ... alias_index=6
4738 //    60  StoreP  45  50  20   ... alias_index=4
4739 //    70  LoadP    _  60  30   ... alias_index=6
4740 //    80  Phi     75  50  60   Memory alias_index=4
4741 //    90  LoadP    _  80  30   ... alias_index=6
4742 //   100  LoadP    _  80  20   ... alias_index=4
4743 //
4744 // In phase 2, new memory inputs are computed for the loads and stores,
4745 // and a new version of the phi is created.  In phase 4, the inputs to
4746 // node 80 are updated and then the memory nodes are updated with the
4747 // values computed in phase 2.  This results in:
4748 //
4749 //     7 Parm #memory
4750 //    10  ConI  "12"
4751 //    19  CheckCastPP   "Foo"
4752 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
4753 //    29  CheckCastPP   "Foo"  iid=24
4754 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
4755 //
4756 //    40  StoreP  25  7   20   ... alias_index=4
4757 //    50  StoreP  35  7   30   ... alias_index=6
4758 //    60  StoreP  45  40  20   ... alias_index=4
4759 //    70  LoadP    _  50  30   ... alias_index=6
4760 //    80  Phi     75  40  60   Memory alias_index=4
4761 //   120  Phi     75  50  50   Memory alias_index=6
4762 //    90  LoadP    _ 120  30   ... alias_index=6
4763 //   100  LoadP    _  80  20   ... alias_index=4
4764 //
4765 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist,
4766                                          GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
4767                                          GrowableArray<MergeMemNode*> &mergemem_worklist,
4768                                          Unique_Node_List &reducible_merges) {
4769   DEBUG_ONLY(Unique_Node_List reduced_merges;)
4770   GrowableArray<Node *>  memnode_worklist;
4771   GrowableArray<PhiNode *>  orig_phis;
4772   PhaseIterGVN  *igvn = _igvn;
4773   uint new_index_start = (uint) _compile->num_alias_types();
4774   VectorSet visited;
4775   ideal_nodes.clear(); // Reset for use with set_map/get_map.
4776 
4777   //  Phase 1:  Process possible allocations from alloc_worklist.
4778   //  Create instance types for the CheckCastPP for allocations where possible.
4779   //
4780   // (Note: don't forget to change the order of the second AddP node on
4781   //  the alloc_worklist if the order of the worklist processing is changed,
4782   //  see the comment in find_second_addp().)
4783   //
4784   while (alloc_worklist.length() != 0) {
4785     Node *n = alloc_worklist.pop();
4786     uint ni = n->_idx;
4787     if (n->is_Call()) {
4788       CallNode *alloc = n->as_Call();
4789       // copy escape information to call node
4790       PointsToNode* ptn = ptnode_adr(alloc->_idx);
4791       PointsToNode::EscapeState es = ptn->escape_state();
4792       // We have an allocation or call which returns a Java object,
4793       // see if it is non-escaped.
4794       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
4795         continue;
4796       }
4797       // Find CheckCastPP for the allocate or for the return value of a call
4798       n = alloc->result_cast();
4799       if (n == nullptr) {            // No uses except Initialize node
4800         if (alloc->is_Allocate()) {
4801           // Set the scalar_replaceable flag for allocation
4802           // so it could be eliminated if it has no uses.
4803           alloc->as_Allocate()->_is_scalar_replaceable = true;
4804         }
4805         continue;
4806       }
4807       if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
4808         // We can reach here in the Allocate case if one Initialize is associated with many allocations.
4809         if (alloc->is_Allocate()) {
4810           alloc->as_Allocate()->_is_scalar_replaceable = false;
4811         }
4812         continue;
4813       }
4814 
4815       // The inline code for Object.clone() casts the allocation result to
4816       // java.lang.Object and then to the actual type of the allocated
4817       // object. Detect this case and use the second cast.
4818       // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
4819       // the allocation result is cast to java.lang.Object and then
4820       // to the actual Array type.
4821       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
4822           && (alloc->is_AllocateArray() ||
4823               igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) {
4824         Node *cast2 = nullptr;
4825         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4826           Node *use = n->fast_out(i);
4827           if (use->is_CheckCastPP()) {
4828             cast2 = use;
4829             break;
4830           }
4831         }
4832         if (cast2 != nullptr) {
4833           n = cast2;
4834         } else {
4835           // Not scalar-replaceable if the allocation type is statically unknown
4836           // (reflection allocation): the object can't be restored during
4837           // deoptimization without a precise type.
4838           continue;
4839         }
4840       }
4841 
4842       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
4843       if (t == nullptr) {
4844         continue;  // not a TypeOopPtr
4845       }
4846       if (!t->klass_is_exact()) {
4847         continue; // not a unique type
4848       }
4849       if (alloc->is_Allocate()) {
4850         // Set the scalar_replaceable flag for allocation
4851         // so it could be eliminated.
4852         alloc->as_Allocate()->_is_scalar_replaceable = true;
4853       }
4854       set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
4855       // in order for an object to be scalar-replaceable, it must be:
4856       //   - a direct allocation (not a call returning an object)
4857       //   - non-escaping
4858       //   - eligible to be a unique type
4859       //   - not determined to be ineligible by escape analysis
4860       set_map(alloc, n);
4861       set_map(n, alloc);
4862       const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
4863       igvn->hash_delete(n);
4864       igvn->set_type(n,  tinst);
4865       n->raise_bottom_type(tinst);
4866       igvn->hash_insert(n);
4867       record_for_optimizer(n);
4868       // Allocate an alias index for the header fields. Accesses to
4869       // the header emitted during macro expansion wouldn't have
4870       // correct memory state otherwise.
4871       _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
4872       _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
4873       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
4874         // Add a new NarrowMem projection for each existing NarrowMem projection with new adr type
4875         InitializeNode* init = alloc->as_Allocate()->initialization();
4876         assert(init != nullptr, "can't find Initialization node for this Allocate node");
4877         auto process_narrow_proj = [&](NarrowMemProjNode* proj) {
4878           const TypePtr* adr_type = proj->adr_type();
4879           const TypePtr* new_adr_type = tinst->with_offset(adr_type->offset());
4880           if (adr_type->isa_aryptr()) {
4881             // In the case of a flat inline type array, each field has its own slice so we need a
4882             // NarrowMemProj for each field of the flat array elements
4883             new_adr_type = new_adr_type->is_aryptr()->with_field_offset(adr_type->is_aryptr()->field_offset().get());
4884           }
4885           if (adr_type != new_adr_type && !init->already_has_narrow_mem_proj_with_adr_type(new_adr_type)) {
4886             DEBUG_ONLY( uint alias_idx = _compile->get_alias_index(new_adr_type); )
4887             assert(_compile->get_general_index(alias_idx) == _compile->get_alias_index(adr_type), "new adr type should be narrowed down from existing adr type");
4888             NarrowMemProjNode* new_proj = new NarrowMemProjNode(init, new_adr_type);
4889             igvn->set_type(new_proj, new_proj->bottom_type());
4890             record_for_optimizer(new_proj);
4891             set_map(proj, new_proj); // record it so ConnectionGraph::find_inst_mem() can find it
4892           }
4893         };
4894         init->for_each_narrow_mem_proj_with_new_uses(process_narrow_proj);
4895 
4896         // First, put on the worklist all Field edges from the Connection Graph,
4897         // which is more accurate than using immediate users from the Ideal Graph.
4898         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
4899           PointsToNode* tgt = e.get();
4900           if (tgt->is_Arraycopy()) {
4901             continue;
4902           }
4903           Node* use = tgt->ideal_node();
4904           assert(tgt->is_Field() && use->is_AddP(),
4905                  "only AddP nodes are Field edges in CG");
4906           if (use->outcnt() > 0) { // Don't process dead nodes
4907             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
4908             if (addp2 != nullptr) {
4909               assert(alloc->is_AllocateArray(),"array allocation was expected");
4910               alloc_worklist.append_if_missing(addp2);
4911             }
4912             alloc_worklist.append_if_missing(use);
4913           }
4914         }
4915 
4916         // An allocation may have an Initialize which has raw stores. Scan
4917         // the users of the raw allocation result and push AddP users
4918         // on alloc_worklist.
4919         Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
4920         assert (raw_result != nullptr, "must have an allocation result");
4921         for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
4922           Node *use = raw_result->fast_out(i);
4923           if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
4924             Node* addp2 = find_second_addp(use, raw_result);
4925             if (addp2 != nullptr) {
4926               assert(alloc->is_AllocateArray(),"array allocation was expected");
4927               alloc_worklist.append_if_missing(addp2);
4928             }
4929             alloc_worklist.append_if_missing(use);
4930           } else if (use->is_MemBar()) {
4931             memnode_worklist.append_if_missing(use);
4932           }
4933         }
4934       }
4935     } else if (n->is_AddP()) {
4936       if (has_reducible_merge_base(n->as_AddP(), reducible_merges)) {
4937         // This AddP will go away when we reduce the Phi
4938         continue;
4939       }
4940       Node* addp_base = get_addp_base(n);
4941       JavaObjectNode* jobj = unique_java_object(addp_base);
4942       if (jobj == nullptr || jobj == phantom_obj) {
4943 #ifdef ASSERT
4944         ptnode_adr(get_addp_base(n)->_idx)->dump();
4945         ptnode_adr(n->_idx)->dump();
4946         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4947 #endif
4948         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4949         return;
4950       }
4951       Node *base = get_map(jobj->idx());  // CheckCastPP node
4952       if (!split_AddP(n, base)) continue; // wrong type from dead path
4953     } else if (n->is_Phi() ||
4954                n->is_CheckCastPP() ||
4955                n->is_EncodeP() ||
4956                n->is_DecodeN() ||
4957                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
4958       if (visited.test_set(n->_idx)) {
4959         assert(n->is_Phi(), "loops only through Phi's");
4960         continue;  // already processed
4961       }
4962       // Reducible Phi's will be removed from the graph after split_unique_types
4963       // finishes. For now we just try to split out the SR inputs of the merge.
4964       Node* parent = n->in(1);
4965       if (reducible_merges.member(n)) {
4966         reduce_phi(n->as_Phi(), alloc_worklist);
4967 #ifdef ASSERT
4968         if (VerifyReduceAllocationMerges) {
4969           reduced_merges.push(n);
4970         }
4971 #endif
4972         continue;
4973       } else if (reducible_merges.member(parent)) {
4974         // 'n' is a user of a reducible merge (a Phi). It will be simplified as
4975         // part of reduce_merge.
4976         continue;
4977       }
4978       JavaObjectNode* jobj = unique_java_object(n);
4979       if (jobj == nullptr || jobj == phantom_obj) {
4980 #ifdef ASSERT
4981         ptnode_adr(n->_idx)->dump();
4982         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4983 #endif
4984         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4985         return;
4986       } else {
4987         Node *val = get_map(jobj->idx());   // CheckCastPP node
4988         TypeNode *tn = n->as_Type();
4989         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4990         assert(tinst != nullptr && tinst->is_known_instance() &&
4991                tinst->instance_id() == jobj->idx() , "instance type expected.");
4992 
4993         const Type *tn_type = igvn->type(tn);
4994         const TypeOopPtr *tn_t;
4995         if (tn_type->isa_narrowoop()) {
4996           tn_t = tn_type->make_ptr()->isa_oopptr();
4997         } else {
4998           tn_t = tn_type->isa_oopptr();
4999         }
5000         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
5001           if (tn_t->isa_aryptr()) {
5002             // Keep array properties (not flat/null-free)
5003             tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
5004             if (tinst == nullptr) {
5005               continue; // Skip dead path with inconsistent properties
5006             }
5007           }
5008           if (tn_type->isa_narrowoop()) {
5009             tn_type = tinst->make_narrowoop();
5010           } else {
5011             tn_type = tinst;
5012           }
5013           igvn->hash_delete(tn);
5014           igvn->set_type(tn, tn_type);
5015           tn->set_type(tn_type);
5016           igvn->hash_insert(tn);
5017           record_for_optimizer(n);
5018         } else {
5019           assert(tn_type == TypePtr::NULL_PTR ||
5020                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
5021                  "unexpected type");
5022           continue; // Skip dead path with different type
5023         }
5024       }
5025     } else {
5026       DEBUG_ONLY(n->dump();)
5027       assert(false, "EA: unexpected node");
5028       continue;
5029     }
5030     // push allocation's users on appropriate worklist
5031     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5032       Node *use = n->fast_out(i);
5033       if (use->is_Mem() && use->in(MemNode::Address) == n) {
5034         // Load/store to instance's field
5035         memnode_worklist.append_if_missing(use);
5036       } else if (use->is_MemBar()) {
5037         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
5038           memnode_worklist.append_if_missing(use);
5039         }
5040       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
5041         Node* addp2 = find_second_addp(use, n);
5042         if (addp2 != nullptr) {
5043           alloc_worklist.append_if_missing(addp2);
5044         }
5045         alloc_worklist.append_if_missing(use);
5046       } else if (use->is_Phi() ||
5047                  use->is_CheckCastPP() ||
5048                  use->is_EncodeNarrowPtr() ||
5049                  use->is_DecodeNarrowPtr() ||
5050                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
5051         alloc_worklist.append_if_missing(use);
5052 #ifdef ASSERT
5053       } else if (use->is_Mem()) {
5054         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
5055       } else if (use->is_MergeMem()) {
5056         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
5057       } else if (use->is_SafePoint()) {
5058         // Look for MergeMem nodes for calls which reference unique allocation
5059         // (through CheckCastPP nodes) even for debug info.
5060         Node* m = use->in(TypeFunc::Memory);
5061         if (m->is_MergeMem()) {
5062           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
5063         }
5064       } else if (use->Opcode() == Op_EncodeISOArray) {
5065         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
5066           // EncodeISOArray overwrites destination array
5067           memnode_worklist.append_if_missing(use);
5068         }
5069       } else if (use->Opcode() == Op_Return) {
5070         // Allocation is referenced by field of returned inline type
5071         assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
5072       } else {
5073         uint op = use->Opcode();
5074         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
5075             (use->in(MemNode::Memory) == n)) {
          // They overwrite the memory edge corresponding to the destination array.
5077           memnode_worklist.append_if_missing(use);
5078         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
5079               op == Op_CastP2X ||
5080               op == Op_FastLock || op == Op_AryEq ||
5081               op == Op_StrComp || op == Op_CountPositives ||
5082               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
5083               op == Op_StrEquals || op == Op_VectorizedHashCode ||
5084               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
5085               op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
5086               op == Op_ReinterpretS2HF ||
5087               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
5088           n->dump();
5089           use->dump();
5090           assert(false, "EA: missing allocation reference path");
5091         }
5092 #endif
5093       }
5094     }
5095 
5096   }
5097 
5098 #ifdef ASSERT
5099   if (VerifyReduceAllocationMerges) {
5100     for (uint i = 0; i < reducible_merges.size(); i++) {
5101       Node* phi = reducible_merges.at(i);
5102 
5103       if (!reduced_merges.member(phi)) {
5104         phi->dump(2);
5105         phi->dump(-2);
5106         assert(false, "This reducible merge wasn't reduced.");
5107       }
5108 
5109       // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints or Casts.
5110       for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) {
5111         Node* use = phi->fast_out(j);
5112         if (!use->is_SafePoint() && !use->is_CastPP()) {
5113           phi->dump(2);
5114           phi->dump(-2);
5115           assert(false, "Unexpected user of reducible Phi -> %d:%s:%d", use->_idx, use->Name(), use->outcnt());
5116         }
5117       }
5118     }
5119   }
5120 #endif
5121 
  // Go over all ArrayCopy nodes and if one of the inputs has a unique
  // type, record it in the ArrayCopy node so we know what memory this
  // node uses/modifies.
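  // (Recording the unique instance type in _src_type/_dest_type lets later
  // alias queries treat the copy as touching only that instance's memory.)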
5125   for (int next = 0; next < arraycopy_worklist.length(); next++) {
5126     ArrayCopyNode* ac = arraycopy_worklist.at(next);
5127     Node* dest = ac->in(ArrayCopyNode::Dest);
5128     if (dest->is_AddP()) {
5129       dest = get_addp_base(dest);
5130     }
5131     JavaObjectNode* jobj = unique_java_object(dest);
5132     if (jobj != nullptr) {
5133       Node *base = get_map(jobj->idx());
5134       if (base != nullptr) {
5135         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
5136         ac->_dest_type = base_t;
5137       }
5138     }
5139     Node* src = ac->in(ArrayCopyNode::Src);
5140     if (src->is_AddP()) {
5141       src = get_addp_base(src);
5142     }
5143     jobj = unique_java_object(src);
5144     if (jobj != nullptr) {
5145       Node* base = get_map(jobj->idx());
5146       if (base != nullptr) {
5147         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
5148         ac->_src_type = base_t;
5149       }
5150     }
5151   }
5152 
5153   // New alias types were created in split_AddP().
5154   uint new_index_end = (uint) _compile->num_alias_types();
5155 
5156   _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_1, 5);
5157 
  //  Phase 2:  Process MemNodes from memnode_worklist. Compute the new address
  //            type and the new value for each Memory input (the Memory inputs
  //            are not actually updated until Phase 4).
  if (memnode_worklist.length() == 0) {
    return;  // nothing to do
  }
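  // One Phase 2 step, sketched for a store into a NoEscape allocation
  // (instance id and types are hypothetical, arguments abbreviated):
  //   StoreI(mem, addr, val)  where type(addr) == int[]:exact+16 *,iid=24
  //   alias_idx = get_alias_index(...)               // instance-specific alias class
  //   mem'      = find_inst_mem(mem, alias_idx, ...) // skip unrelated memory state
  //   set_map(StoreI, mem')                          // edge itself rewired in Phase 4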
5163   while (memnode_worklist.length() != 0) {
5164     Node *n = memnode_worklist.pop();
5165     if (visited.test_set(n->_idx)) {
5166       continue;
5167     }
5168     if (n->is_Phi() || n->is_ClearArray()) {
5169       // we don't need to do anything, but the users must be pushed
5170     } else if (n->is_MemBar()) { // MemBar nodes
5171       if (!n->is_Initialize()) { // memory projections for Initialize pushed below (so we get to all their uses)
5172         // we don't need to do anything, but the users must be pushed
5173         n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
5174         if (n == nullptr) {
5175           continue;
5176         }
5177       }
    } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
               strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
      // Check this before the generic CallLeaf case below, which would
      // otherwise make this branch unreachable.
      n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
    } else if (n->is_CallLeaf()) {
      // Runtime calls with narrow memory input (no MergeMem node)
      // get the memory projection
      n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
      if (n == nullptr) {
        continue;
      }
    } else if (n->Opcode() == Op_StrInflatedCopy) {
      // Check direct uses of StrInflatedCopy.
      // It is a memory-producing node itself - no separate SCMemProj node.
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
5196     } else if (n->is_Proj()) {
5197       assert(n->in(0)->is_Initialize(), "we only push memory projections for Initialize");
5198     } else {
5199 #ifdef ASSERT
5200       if (!n->is_Mem()) {
5201         n->dump();
5202       }
5203       assert(n->is_Mem(), "memory node required.");
5204 #endif
5205       Node *addr = n->in(MemNode::Address);
5206       const Type *addr_t = igvn->type(addr);
5207       if (addr_t == Type::TOP) {
5208         continue;
5209       }
      assert(addr_t->isa_ptr() != nullptr, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert((uint)alias_idx < new_index_end, "wrong alias index");
5213       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
5214       if (_compile->failing()) {
5215         return;
5216       }
5217       if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need the old one in
        // the MergeMem code below when instance memory slices are separated.
5220         set_map(n, mem);
5221       }
5222       if (n->is_Load()) {
5223         continue;  // don't push users
5224       } else if (n->is_LoadStore()) {
5225         // get the memory projection
5226         n = n->find_out_with(Op_SCMemProj);
5227         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
5228       }
5229     }
    // Push the node's users onto the appropriate worklists.
5231     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5232       Node *use = n->fast_out(i);
5233       if (use->is_Phi() || use->is_ClearArray()) {
5234         memnode_worklist.append_if_missing(use);
5235       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
5236         memnode_worklist.append_if_missing(use);
5237       } else if (use->is_MemBar() || use->is_CallLeaf()) {
5238         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
5239           memnode_worklist.append_if_missing(use);
5240         }
5241       } else if (use->is_Proj()) {
5242         assert(n->is_Initialize(), "We only push projections of Initialize");
        if (use->as_Proj()->_con == TypeFunc::Memory) { // Only push the memory projection
5244           memnode_worklist.append_if_missing(use);
5245         }
5246 #ifdef ASSERT
5247       } else if (use->is_Mem()) {
5248         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
5249       } else if (use->is_MergeMem()) {
5250         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
5251       } else if (use->Opcode() == Op_EncodeISOArray) {
5252         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
5253           // EncodeISOArray overwrites destination array
5254           memnode_worklist.append_if_missing(use);
5255         }
5256       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
5257                  strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
5258         // store_unknown_inline overwrites destination array
5259         memnode_worklist.append_if_missing(use);
5260       } else {
5261         uint op = use->Opcode();
5262         if ((use->in(MemNode::Memory) == n) &&
5263             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite the memory edge corresponding to the destination array.
5265           memnode_worklist.append_if_missing(use);
5266         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
5267               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
5268               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
5269               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
5270           n->dump();
5271           use->dump();
5272           assert(false, "EA: missing memory path");
5273         }
5274 #endif
5275       }
5276     }
5277   }
5278 
5279   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
5280   //            Walk each memory slice moving the first node encountered of each
5281   //            instance type to the input corresponding to its alias index.
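  // Sketch: if general slice i of a MergeMem is the chain
  //   StoreI(iid=24) -> StoreB -> base    (iid=24 is hypothetical)
  // and StoreI's address type now maps to a new alias index ni != i, the
  // loops below move StoreI to input ni, keep StoreB on input i, and fill
  // still-empty instance inputs via find_inst_mem().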
5282   uint length = mergemem_worklist.length();
  for (uint next = 0; next < length; ++next) {
5284     MergeMemNode* nmm = mergemem_worklist.at(next);
5285     assert(!visited.test_set(nmm->_idx), "should not be visited before");
5286     // Note: we don't want to use MergeMemStream here because we only want to
5287     // scan inputs which exist at the start, not ones we add during processing.
    // Note 2: MergeMem may already contain instance memory slices added
5289     // during find_inst_mem() call when memory nodes were processed above.
5290     igvn->hash_delete(nmm);
5291     uint nslices = MIN2(nmm->req(), new_index_start);
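    // Only scan slices that existed before splitting; the new instance
    // slices (alias indices >= new_index_start) are handled below.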
5292     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
5293       Node* mem = nmm->in(i);
5294       Node* cur = nullptr;
5295       if (mem == nullptr || mem->is_top()) {
5296         continue;
5297       }
5298       // First, update mergemem by moving memory nodes to corresponding slices
5299       // if their type became more precise since this mergemem was created.
5300       while (mem->is_Mem()) {
5301         const Type* at = igvn->type(mem->in(MemNode::Address));
5302         if (at != Type::TOP) {
          assert(at->isa_ptr() != nullptr, "pointer type required.");
5304           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
5305           if (idx == i) {
5306             if (cur == nullptr) {
5307               cur = mem;
5308             }
5309           } else {
5310             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
5311               nmm->set_memory_at(idx, mem);
5312             }
5313           }
5314         }
5315         mem = mem->in(MemNode::Memory);
5316       }
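      // Keep the first node found of this slice's own type; otherwise fall
      // back to the node that ended the walk (a non-Mem node such as a Phi).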
5317       nmm->set_memory_at(i, (cur != nullptr) ? cur : mem);
      // For each instance alias index derived from this slice, find its
      // memory value unless a slice for that instance was already found
      // along the memory chain above.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
5322           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
5323           if (nmm->is_empty_memory(m)) {
5324             Node* result = find_inst_mem(mem, ni, orig_phis);
5325             if (_compile->failing()) {
5326               return;
5327             }
5328             nmm->set_memory_at(ni, result);
5329           }
5330         }
5331       }
5332     }
    // Find memory values for the remaining instance alias indices.
5334     for (uint ni = new_index_start; ni < new_index_end; ni++) {
5335       const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
5336       Node* result = step_through_mergemem(nmm, ni, tinst);
5337       if (result == nmm->base_memory()) {
5338         // Didn't find instance memory, search through general slice recursively.
5339         result = nmm->memory_at(_compile->get_general_index(ni));
5340         result = find_inst_mem(result, ni, orig_phis);
5341         if (_compile->failing()) {
5342           return;
5343         }
5344         nmm->set_memory_at(ni, result);
5345       }
5346     }
5347 
    // If we have crossed the 3/4 point of the max node limit, it's too risky
    // to continue with EA/SR: we might hit the limit during further splitting.
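    // record_failure() with a retry reason abandons this compilation and lets
    // C2 retry it with the corresponding optimization disabled.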
5350     if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
5351       if (_compile->do_reduce_allocation_merges()) {
5352         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
5353       } else if (_invocation > 0) {
5354         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
5355       } else {
5356         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
5357       }
5358       return;
5359     }
5360 
5361     igvn->hash_insert(nmm);
5362     record_for_optimizer(nmm);
5363   }
5364 
5365   _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_3, 5);
5366 
5367   //  Phase 4:  Update the inputs of non-instance memory Phis and
5368   //            the Memory input of memnodes
  // First update the inputs of any non-instance Phis from
  // which we split out an instance Phi.  Note we don't have
  // to recursively process Phis encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
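  // (orig_phis is filled by find_inst_mem() whenever it encounters a
  // non-instance memory Phi while searching for instance memory.)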
5374   for (int j = 0; j < orig_phis.length(); j++) {
5375     PhiNode *phi = orig_phis.at(j);
5376     int alias_idx = _compile->get_alias_index(phi->adr_type());
5377     igvn->hash_delete(phi);
5378     for (uint i = 1; i < phi->req(); i++) {
5379       Node *mem = phi->in(i);
5380       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
5381       if (_compile->failing()) {
5382         return;
5383       }
5384       if (mem != new_mem) {
5385         phi->set_req(i, new_mem);
5386       }
5387     }
5388     igvn->hash_insert(phi);
5389     record_for_optimizer(phi);
5390   }
5391 
  // Update the memory inputs of MemNodes with the values we computed
  // in Phase 2 and move the memory users of stores to the corresponding
  // memory slices.
5394   // Disable memory split verification code until the fix for 6984348.
5395   // Currently it produces false negative results since it does not cover all cases.
5396 #if 0 // ifdef ASSERT
5397   visited.Reset();
5398   Node_Stack old_mems(arena, _compile->unique() >> 2);
5399 #endif
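  // ideal_nodes holds every node recorded with set_map() during splitting;
  // for memory nodes the mapped value is the new Memory input computed in
  // Phase 2.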
5400   for (uint i = 0; i < ideal_nodes.size(); i++) {
5401     Node*    n = ideal_nodes.at(i);
5402     Node* nmem = get_map(n->_idx);
5403     assert(nmem != nullptr, "sanity");
5404     if (n->is_Mem()) {
5405 #if 0 // ifdef ASSERT
5406       Node* old_mem = n->in(MemNode::Memory);
5407       if (!visited.test_set(old_mem->_idx)) {
5408         old_mems.push(old_mem, old_mem->outcnt());
5409       }
5410 #endif
5411       assert(n->in(MemNode::Memory) != nmem, "sanity");
5412       if (!n->is_Load()) {
5413         // Move memory users of a store first.
5414         move_inst_mem(n, orig_phis);
5415       }
5416       // Now update memory input
5417       igvn->hash_delete(n);
5418       n->set_req(MemNode::Memory, nmem);
5419       igvn->hash_insert(n);
5420       record_for_optimizer(n);
5421     } else {
5422       assert(n->is_Allocate() || n->is_CheckCastPP() ||
5423              n->is_AddP() || n->is_Phi() || n->is_NarrowMemProj(), "unknown node used for set_map()");
5424     }
5425   }
5426 #if 0 // ifdef ASSERT
5427   // Verify that memory was split correctly
5428   while (old_mems.is_nonempty()) {
5429     Node* old_mem = old_mems.node();
5430     uint  old_cnt = old_mems.index();
5431     old_mems.pop();
5432     assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
5433   }
5434 #endif
5435   _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_4, 5);
5436 }
5437 
5438 #ifndef PRODUCT
5439 int ConnectionGraph::_no_escape_counter = 0;
5440 int ConnectionGraph::_arg_escape_counter = 0;
5441 int ConnectionGraph::_global_escape_counter = 0;
5442 
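// Keep these name tables in sync with the PointsToNode::NodeType and
// PointsToNode::EscapeState enums; the dump code below indexes them by
// enum value.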
5443 static const char *node_type_names[] = {
5444   "UnknownType",
5445   "JavaObject",
5446   "LocalVar",
5447   "Field",
5448   "Arraycopy"
5449 };
5450 
5451 static const char *esc_names[] = {
5452   "UnknownEscape",
5453   "NoEscape",
5454   "ArgEscape",
5455   "GlobalEscape"
5456 };
5457 
5458 const char* PointsToNode::esc_name() const {
5459   return esc_names[(int)escape_state()];
5460 }
5461 
5462 void PointsToNode::dump_header(bool print_state, outputStream* out) const {
5463   NodeType nt = node_type();
5464   out->print("%s(%d) ", node_type_names[(int) nt], _pidx);
5465   if (print_state) {
5466     EscapeState es = escape_state();
5467     EscapeState fields_es = fields_escape_state();
5468     out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
5469     if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) {
5470       out->print("NSR ");
5471     }
5472   }
5473 }
5474 
5475 void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const {
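  // Output sketch (ids and trailing node dump hypothetical):
  //   JavaObject(5) NoEscape(NoEscape) [ 8F 12F [ 3b ]]  27  CheckCastPP ...
  // Suffixes: "P" = JavaObject, "F" = Field, "cp" = Arraycopy; "b" marks a
  // Field that uses this node as its base.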
5476   dump_header(print_state, out);
5477   if (is_Field()) {
5478     FieldNode* f = (FieldNode*)this;
5479     if (f->is_oop()) {
5480       out->print("oop ");
5481     }
5482     if (f->offset() > 0) {
5483       out->print("+%d ", f->offset());
5484     }
5485     out->print("(");
5486     for (BaseIterator i(f); i.has_next(); i.next()) {
5487       PointsToNode* b = i.get();
      out->print(" %d%s", b->idx(), (b->is_JavaObject() ? "P" : ""));
5489     }
5490     out->print(" )");
5491   }
5492   out->print("[");
5493   for (EdgeIterator i(this); i.has_next(); i.next()) {
5494     PointsToNode* e = i.get();
    out->print(" %d%s%s", e->idx(), (e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
5496   }
5497   out->print(" [");
5498   for (UseIterator i(this); i.has_next(); i.next()) {
5499     PointsToNode* u = i.get();
5500     bool is_base = false;
5501     if (PointsToNode::is_base_use(u)) {
5502       is_base = true;
5503       u = PointsToNode::get_use_node(u)->as_Field();
5504     }
5505     out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
5506   }
5507   out->print(" ]]  ");
5508   if (_node == nullptr) {
5509     out->print("<null>%s", newline ? "\n" : "");
5510   } else {
5511     _node->dump(newline ? "\n" : "", false, out);
5512   }
5513 }
5514 
5515 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
5516   bool first = true;
5517   int ptnodes_length = ptnodes_worklist.length();
5518   for (int i = 0; i < ptnodes_length; i++) {
5519     PointsToNode *ptn = ptnodes_worklist.at(i);
5520     if (ptn == nullptr || !ptn->is_JavaObject()) {
5521       continue;
5522     }
5523     PointsToNode::EscapeState es = ptn->escape_state();
5524     if ((es != PointsToNode::NoEscape) && !Verbose) {
5525       continue;
5526     }
5527     Node* n = ptn->ideal_node();
5528     if (n->is_Allocate() || (n->is_CallStaticJava() &&
5529                              n->as_CallStaticJava()->is_boxing_method())) {
5530       if (first) {
5531         tty->cr();
5532         tty->print("======== Connection graph for ");
5533         _compile->method()->print_short_name();
5534         tty->cr();
5535         tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d",
5536                       _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length());
5537         tty->cr();
5538         first = false;
5539       }
5540       ptn->dump();
5541       // Print all locals and fields which reference this allocation
5542       for (UseIterator j(ptn); j.has_next(); j.next()) {
5543         PointsToNode* use = j.get();
5544         if (use->is_LocalVar()) {
5545           use->dump(Verbose);
5546         } else if (Verbose) {
5547           use->dump();
5548         }
5549       }
5550       tty->cr();
5551     }
5552   }
5553 }
5554 
5555 void ConnectionGraph::print_statistics() {
5556   tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", AtomicAccess::load(&_no_escape_counter), AtomicAccess::load(&_arg_escape_counter), AtomicAccess::load(&_global_escape_counter));
5557 }
5558 
5559 void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) {
5560   if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation
5561     return;
5562   }
5563   for (int next = 0; next < java_objects_worklist.length(); ++next) {
5564     JavaObjectNode* ptn = java_objects_worklist.at(next);
5565     if (ptn->ideal_node()->is_Allocate()) {
5566       if (ptn->escape_state() == PointsToNode::NoEscape) {
5567         AtomicAccess::inc(&ConnectionGraph::_no_escape_counter);
5568       } else if (ptn->escape_state() == PointsToNode::ArgEscape) {
5569         AtomicAccess::inc(&ConnectionGraph::_arg_escape_counter);
5570       } else if (ptn->escape_state() == PointsToNode::GlobalEscape) {
5571         AtomicAccess::inc(&ConnectionGraph::_global_escape_counter);
5572       } else {
5573         assert(false, "Unexpected Escape State");
5574       }
5575     }
5576   }
5577 }
5578 
5579 void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const {
5580   if (_compile->directive()->TraceEscapeAnalysisOption) {
5581     assert(ptn != nullptr, "should not be null");
5582     assert(reason != nullptr, "should not be null");
5583     ptn->dump_header(true);
5584     PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es;
5585     PointsToNode::EscapeState new_fields_es = fields ? es : ptn->fields_escape_state();
5586     tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason);
5587   }
5588 }
5589 
5590 const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const {
5591   if (_compile->directive()->TraceEscapeAnalysisOption) {
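    // as_string() returns a ResourceArea-allocated copy, so the caller need
    // not free it (it lives until the enclosing ResourceMark is released).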
5592     stringStream ss;
5593     ss.print("propagated from: ");
5594     from->dump(true, &ss, false);
5595     return ss.as_string();
5596   } else {
5597     return nullptr;
5598   }
5599 }
5600 
5601 const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const {
5602   if (_compile->directive()->TraceEscapeAnalysisOption) {
5603     stringStream ss;
5604     ss.print("escapes as arg to:");
5605     call->dump("", false, &ss);
5606     return ss.as_string();
5607   } else {
5608     return nullptr;
5609   }
5610 }
5611 
5612 const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const {
5613   if (_compile->directive()->TraceEscapeAnalysisOption) {
5614     stringStream ss;
5615     ss.print("is merged with other object: ");
5616     other->dump_header(true, &ss);
5617     return ss.as_string();
5618   } else {
5619     return nullptr;
5620   }
5621 }
5622 
5623 #endif
5624 
5625 void ConnectionGraph::record_for_optimizer(Node *n) {
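  // Have IGVN revisit the rewired node and its users, since escape analysis
  // changed types and edges without going through the normal transform path.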
5626   _igvn->_worklist.push(n);
5627   _igvn->add_users_to_worklist(n);
5628 }