1 /*
   2  * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "opto/c2compiler.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/escape.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/movenode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "utilities/macros.hpp"
  43 
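// The constructor pre-sizes the _nodes array (indexed by ideal node _idx) and
// registers the predefined JavaObject nodes: phantom_obj, representing an
// unknown, globally escaping object, and null_obj for the NULL constant.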
  44 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  45   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  46   _in_worklist(C->comp_arena()),
  47   _next_pidx(0),
  48   _collecting(true),
  49   _verify(false),
  50   _compile(C),
  51   _igvn(igvn),
  52   _invocation(invocation),
  53   _build_iterations(0),
  54   _build_time(0.),
  55   _node_map(C->comp_arena()) {
  56   // Add unknown java object.
  57   add_java_object(C->top(), PointsToNode::GlobalEscape);
  58   phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  59   // Add ConP(#NULL) and ConN(#NULL) nodes.
  60   Node* oop_null = igvn->zerocon(T_OBJECT);
  61   assert(oop_null->_idx < nodes_size(), "should be created already");
  62   add_java_object(oop_null, PointsToNode::NoEscape);
  63   null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  64   if (UseCompressedOops) {
  65     Node* noop_null = igvn->zerocon(T_NARROWOOP);
  66     assert(noop_null->_idx < nodes_size(), "should be created already");
  67     map_ideal_node(noop_null, null_obj);
  68   }
  69 }
  70 
  71 bool ConnectionGraph::has_candidates(Compile *C) {
  72   // EA brings benefits only when the code has allocations and/or locks which
  73   // are represented by ideal Macro nodes.
  74   int cnt = C->macro_count();
  75   for (int i = 0; i < cnt; i++) {
  76     Node *n = C->macro_node(i);
  77     if (n->is_Allocate()) {
  78       return true;
  79     }
  80     if (n->is_Lock()) {
  81       Node* obj = n->as_Lock()->obj_node()->uncast();
  82       if (!(obj->is_Parm() || obj->is_Con())) {
  83         return true;
  84       }
  85     }
  86     if (n->is_CallStaticJava() &&
  87         n->as_CallStaticJava()->is_boxing_method()) {
  88       return true;
  89     }
  90   }
  91   return false;
  92 }
  93 
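// Entry point of the escape analysis pass: build a ConnectionGraph for the
// current compilation and attach it to the Compile object when non-escaping
// objects are found.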
  94 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  95   Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  96   ResourceMark rm;
  97 
  98   // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  99   // to create space for them in ConnectionGraph::_nodes[].
 100   Node* oop_null = igvn->zerocon(T_OBJECT);
 101   Node* noop_null = igvn->zerocon(T_NARROWOOP);
 102   int invocation = 0;
 103   if (C->congraph() != NULL) {
 104     invocation = C->congraph()->_invocation + 1;
 105   }
 106   ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
 107   // Perform escape analysis
 108   if (congraph->compute_escape()) {
  109     // There are non-escaping objects.
 110     C->set_congraph(congraph);
 111   }
 112   // Cleanup.
 113   if (oop_null->outcnt() == 0) {
 114     igvn->hash_delete(oop_null);
 115   }
 116   if (noop_null->outcnt() == 0) {
 117     igvn->hash_delete(noop_null);
 118   }
 119 }
 120 
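// Build the connection graph, compute escape states and scalar replaceability,
// and optimize the ideal graph based on the collected escape information.
// Returns true if at least one object does not escape globally.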
 121 bool ConnectionGraph::compute_escape() {
 122   Compile* C = _compile;
 123   PhaseGVN* igvn = _igvn;
 124 
 125   // Worklists used by EA.
 126   Unique_Node_List delayed_worklist;
 127   GrowableArray<Node*> alloc_worklist;
 128   GrowableArray<Node*> ptr_cmp_worklist;
 129   GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
 130   GrowableArray<ArrayCopyNode*>  arraycopy_worklist;
 131   GrowableArray<PointsToNode*>   ptnodes_worklist;
 132   GrowableArray<JavaObjectNode*> java_objects_worklist;
 133   GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
 134   GrowableArray<FieldNode*>      oop_fields_worklist;
 135   GrowableArray<SafePointNode*>  sfn_worklist;
 136   GrowableArray<MergeMemNode*>   mergemem_worklist;
 137   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 138 
 139   { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
 140 
 141   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 142   ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
 143   // Initialize worklist
 144   if (C->root() != NULL) {
 145     ideal_nodes.push(C->root());
 146   }
  147   // Processed ideal nodes are unique on the ideal_nodes list
  148   // but several ideal nodes are mapped to the phantom_obj.
  149   // To avoid duplicated entries on the following worklists,
  150   // add the phantom_obj to them only once.
 151   ptnodes_worklist.append(phantom_obj);
 152   java_objects_worklist.append(phantom_obj);
 153   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 154     Node* n = ideal_nodes.at(next);
  155     // Create PointsTo nodes and add them to the Connection Graph. Called
  156     // only once per ideal node since ideal_nodes is a Unique_Node_List.
 157     add_node_to_connection_graph(n, &delayed_worklist);
 158     PointsToNode* ptn = ptnode_adr(n->_idx);
 159     if (ptn != NULL && ptn != phantom_obj) {
 160       ptnodes_worklist.append(ptn);
 161       if (ptn->is_JavaObject()) {
 162         java_objects_worklist.append(ptn->as_JavaObject());
 163         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 164             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
  165           // Only the results of allocations and java static calls are interesting.
 166           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 167         }
 168       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 169         oop_fields_worklist.append(ptn->as_Field());
 170       }
 171     }
  172     // Collect some interesting nodes for further use.
 173     switch (n->Opcode()) {
 174       case Op_MergeMem:
 175         // Collect all MergeMem nodes to add memory slices for
 176         // scalar replaceable objects in split_unique_types().
 177         mergemem_worklist.append(n->as_MergeMem());
 178         break;
 179       case Op_CmpP:
 180       case Op_CmpN:
 181         // Collect compare pointers nodes.
 182         if (OptimizePtrCompare) {
 183           ptr_cmp_worklist.append(n);
 184         }
 185         break;
 186       case Op_MemBarStoreStore:
 187         // Collect all MemBarStoreStore nodes so that depending on the
 188         // escape status of the associated Allocate node some of them
 189         // may be eliminated.
 190         storestore_worklist.append(n->as_MemBarStoreStore());
 191         break;
 192       case Op_MemBarRelease:
 193         if (n->req() > MemBarNode::Precedent) {
 194           record_for_optimizer(n);
 195         }
 196         break;
 197 #ifdef ASSERT
 198       case Op_AddP:
 199         // Collect address nodes for graph verification.
 200         addp_worklist.append(n);
 201         break;
 202 #endif
 203       case Op_ArrayCopy:
  204         // Keep a list of ArrayCopy nodes so that, if one of their inputs
  205         // is non-escaping, we can record a unique type.
 206         arraycopy_worklist.append(n->as_ArrayCopy());
 207         break;
 208       default:
 209         // not interested now, ignore...
 210         break;
 211     }
 212     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 213       Node* m = n->fast_out(i);   // Get user
 214       ideal_nodes.push(m);
 215     }
 216     if (n->is_SafePoint()) {
 217       sfn_worklist.append(n->as_SafePoint());
 218     }
 219   }
 220 
 221 #ifndef PRODUCT
 222   if (_compile->directive()->TraceEscapeAnalysisOption) {
 223     tty->print("+++++ Initial worklist for ");
 224     _compile->method()->print_name();
 225     tty->print_cr(" (ea_inv=%d)", _invocation);
 226     for (int i = 0; i < ptnodes_worklist.length(); i++) {
 227       PointsToNode* ptn = ptnodes_worklist.at(i);
 228       ptn->dump();
 229     }
 230     tty->print_cr("+++++ Calculating escape states and scalar replaceability");
 231   }
 232 #endif
 233 
 234   if (non_escaped_allocs_worklist.length() == 0) {
 235     _collecting = false;
 236     return false; // Nothing to do.
 237   }
 238   // Add final simple edges to graph.
 239   while(delayed_worklist.size() > 0) {
 240     Node* n = delayed_worklist.pop();
 241     add_final_edges(n);
 242   }
 243 
 244 #ifdef ASSERT
 245   if (VerifyConnectionGraph) {
  246     // Verify that no new simple edges could be created and that all
  247     // local vars have edges.
 248     _verify = true;
 249     int ptnodes_length = ptnodes_worklist.length();
 250     for (int next = 0; next < ptnodes_length; ++next) {
 251       PointsToNode* ptn = ptnodes_worklist.at(next);
 252       add_final_edges(ptn->ideal_node());
 253       if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
 254         ptn->dump();
 255         assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
 256       }
 257     }
 258     _verify = false;
 259   }
 260 #endif
  261   // The bytecode analyzer BCEscapeAnalyzer, used for Call node
  262   // processing, calls into CI to resolve symbols (types, fields, methods)
  263   // referenced in bytecode. During symbol resolution the VM may throw
  264   // an exception which CI cleans up and converts to a compilation failure.
 265   if (C->failing())  return false;
 266 
 267   // 2. Finish Graph construction by propagating references to all
 268   //    java objects through graph.
 269   if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
 270                                  java_objects_worklist, oop_fields_worklist)) {
 271     // All objects escaped or hit time or iterations limits.
 272     _collecting = false;
 273     return false;
 274   }
 275 
  276   // 3. Adjust scalar_replaceable state of non-escaping objects and push
 277   //    scalar replaceable allocations on alloc_worklist for processing
 278   //    in split_unique_types().
 279   int non_escaped_length = non_escaped_allocs_worklist.length();
 280   for (int next = 0; next < non_escaped_length; next++) {
 281     JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
 282     bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
 283     Node* n = ptn->ideal_node();
 284     if (n->is_Allocate()) {
 285       n->as_Allocate()->_is_non_escaping = noescape;
 286     }
 287     if (noescape && ptn->scalar_replaceable()) {
 288       adjust_scalar_replaceable_state(ptn);
 289       if (ptn->scalar_replaceable()) {
 290         alloc_worklist.append(ptn->ideal_node());
 291       }
 292     }
 293   }
 294 
 295 #ifdef ASSERT
 296   if (VerifyConnectionGraph) {
 297     // Verify that graph is complete - no new edges could be added or needed.
 298     verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
 299                             java_objects_worklist, addp_worklist);
 300   }
 301   assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
 302   assert(null_obj->escape_state() == PointsToNode::NoEscape &&
 303          null_obj->edge_count() == 0 &&
 304          !null_obj->arraycopy_src() &&
 305          !null_obj->arraycopy_dst(), "sanity");
 306 #endif
 307 
 308   _collecting = false;
 309 
 310   } // TracePhase t3("connectionGraph")
 311 
 312   // 4. Optimize ideal graph based on EA information.
 313   bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
 314   if (has_non_escaping_obj) {
 315     optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
 316   }
 317 
 318 #ifndef PRODUCT
 319   if (PrintEscapeAnalysis) {
 320     dump(ptnodes_worklist); // Dump ConnectionGraph
 321   }
 322 #endif
 323 
 324 #ifdef ASSERT
 325   if (VerifyConnectionGraph) {
 326     int alloc_length = alloc_worklist.length();
 327     for (int next = 0; next < alloc_length; ++next) {
 328       Node* n = alloc_worklist.at(next);
 329       PointsToNode* ptn = ptnode_adr(n->_idx);
 330       assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
 331     }
 332   }
 333 #endif
 334 
  335   // 5. Separate memory graph for scalar replaceable allocations.
 336   bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
 337   if (has_scalar_replaceable_candidates &&
 338       C->AliasLevel() >= 3 && EliminateAllocations) {
 339     // Now use the escape information to create unique types for
 340     // scalar replaceable objects.
 341     split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist);
 342     if (C->failing())  return false;
 343     C->print_method(PHASE_AFTER_EA, 2);
 344 
 345 #ifdef ASSERT
 346   } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
 347     tty->print("=== No allocations eliminated for ");
 348     C->method()->print_short_name();
 349     if (!EliminateAllocations) {
 350       tty->print(" since EliminateAllocations is off ===");
 351     } else if(!has_scalar_replaceable_candidates) {
 352       tty->print(" since there are no scalar replaceable candidates ===");
 353     } else if(C->AliasLevel() < 3) {
 354       tty->print(" since AliasLevel < 3 ===");
 355     }
 356     tty->cr();
 357 #endif
 358   }
 359 
  360   // Annotate safepoints if they have <= ArgEscape objects in their scope, and
  361   // java calls if they pass ArgEscape objects as parameters.
 362   if (has_non_escaping_obj &&
 363       (C->env()->should_retain_local_variables() ||
 364        C->env()->jvmti_can_get_owned_monitor_info() ||
 365        C->env()->jvmti_can_walk_any_space() ||
 366        DeoptimizeObjectsALot)) {
 367     int sfn_length = sfn_worklist.length();
 368     for (int next = 0; next < sfn_length; next++) {
 369       SafePointNode* sfn = sfn_worklist.at(next);
 370       sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
 371       if (sfn->is_CallJava()) {
 372         CallJavaNode* call = sfn->as_CallJava();
 373         call->set_arg_escape(has_arg_escape(call));
 374       }
 375     }
 376   }
 377 
 378   return has_non_escaping_obj;
 379 }
 380 
 381 // Returns true if there is an object in the scope of sfn that does not escape globally.
 382 bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
 383   Compile* C = _compile;
 384   for (JVMState* jvms = sfn->jvms(); jvms != NULL; jvms = jvms->caller()) {
 385     if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
 386         DeoptimizeObjectsALot) {
 387       // Jvmti agents can access locals. Must provide info about local objects at runtime.
 388       int num_locs = jvms->loc_size();
 389       for (int idx = 0; idx < num_locs; idx++) {
 390         Node* l = sfn->local(jvms, idx);
 391         if (not_global_escape(l)) {
 392           return true;
 393         }
 394       }
 395     }
 396     if (C->env()->jvmti_can_get_owned_monitor_info() ||
 397         C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
 398       // Jvmti agents can read monitors. Must provide info about locked objects at runtime.
 399       int num_mon = jvms->nof_monitors();
 400       for (int idx = 0; idx < num_mon; idx++) {
 401         Node* m = sfn->monitor_obj(jvms, idx);
 402         if (m != NULL && not_global_escape(m)) {
 403           return true;
 404         }
 405       }
 406     }
 407   }
 408   return false;
 409 }
 410 
 411 // Returns true if at least one of the arguments to the call is an object
 412 // that does not escape globally.
 413 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
 414   if (call->method() != NULL) {
 415     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
 416     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
 417       Node* p = call->in(idx);
 418       if (not_global_escape(p)) {
 419         return true;
 420       }
 421     }
 422   } else {
 423     const char* name = call->as_CallStaticJava()->_name;
 424     assert(name != NULL, "no name");
 425     // no arg escapes through uncommon traps
 426     if (strcmp(name, "uncommon_trap") != 0) {
 427       // process_call_arguments() assumes that all arguments escape globally
 428       const TypeTuple* d = call->tf()->domain();
 429       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 430         const Type* at = d->field_at(i);
 431         if (at->isa_oopptr() != NULL) {
 432           return true;
 433         }
 434       }
 435     }
 436   }
 437   return false;
 438 }
 439 
 440 
 441 
 442 // Utility function for nodes that load an object
 443 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
 444   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 445   // ThreadLocal has RawPtr type.
 446   const Type* t = _igvn->type(n);
 447   if (t->make_ptr() != NULL) {
 448     Node* adr = n->in(MemNode::Address);
 449 #ifdef ASSERT
 450     if (!adr->is_AddP()) {
 451       assert(_igvn->type(adr)->isa_rawptr(), "sanity");
 452     } else {
 453       assert((ptnode_adr(adr->_idx) == NULL ||
 454               ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
 455     }
 456 #endif
 457     add_local_var_and_edge(n, PointsToNode::NoEscape,
 458                            adr, delayed_worklist);
 459   }
 460 }
 461 
 462 // Populate Connection Graph with PointsTo nodes and create simple
 463 // connection graph edges.
 464 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
 465   assert(!_verify, "this method should not be called for verification");
 466   PhaseGVN* igvn = _igvn;
 467   uint n_idx = n->_idx;
 468   PointsToNode* n_ptn = ptnode_adr(n_idx);
 469   if (n_ptn != NULL) {
 470     return; // No need to redefine PointsTo node during first iteration.
 471   }
 472   int opcode = n->Opcode();
 473   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
 474   if (gc_handled) {
 475     return; // Ignore node if already handled by GC.
 476   }
 477 
 478   if (n->is_Call()) {
 479     // Arguments to allocation and locking don't escape.
 480     if (n->is_AbstractLock()) {
 481       // Put Lock and Unlock nodes on IGVN worklist to process them during
 482       // first IGVN optimization when escape information is still available.
 483       record_for_optimizer(n);
 484     } else if (n->is_Allocate()) {
 485       add_call_node(n->as_Call());
 486       record_for_optimizer(n);
 487     } else {
 488       if (n->is_CallStaticJava()) {
 489         const char* name = n->as_CallStaticJava()->_name;
 490         if (name != NULL && strcmp(name, "uncommon_trap") == 0) {
 491           return; // Skip uncommon traps
 492         }
 493       }
 494       // Don't mark as processed since call's arguments have to be processed.
 495       delayed_worklist->push(n);
 496       // Check if a call returns an object.
 497       if ((n->as_Call()->returns_pointer() &&
 498            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
 499           (n->is_CallStaticJava() &&
 500            n->as_CallStaticJava()->is_boxing_method())) {
 501         add_call_node(n->as_Call());
 502       }
 503     }
 504     return;
 505   }
 506   // Put this check here to process call arguments since some call nodes
 507   // point to phantom_obj.
 508   if (n_ptn == phantom_obj || n_ptn == null_obj) {
 509     return; // Skip predefined nodes.
 510   }
 511   switch (opcode) {
 512     case Op_AddP: {
 513       Node* base = get_addp_base(n);
 514       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 515       // Field nodes are created for all field types. They are used in
 516       // adjust_scalar_replaceable_state() and split_unique_types().
 517       // Note, non-oop fields will have only base edges in Connection
 518       // Graph because such fields are not used for oop loads and stores.
 519       int offset = address_offset(n, igvn);
 520       add_field(n, PointsToNode::NoEscape, offset);
 521       if (ptn_base == NULL) {
 522         delayed_worklist->push(n); // Process it later.
 523       } else {
 524         n_ptn = ptnode_adr(n_idx);
 525         add_base(n_ptn->as_Field(), ptn_base);
 526       }
 527       break;
 528     }
 529     case Op_CastX2P: {
 530       map_ideal_node(n, phantom_obj);
 531       break;
 532     }
 533     case Op_CastPP:
 534     case Op_CheckCastPP:
 535     case Op_EncodeP:
 536     case Op_DecodeN:
 537     case Op_EncodePKlass:
 538     case Op_DecodeNKlass: {
 539       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
 540       break;
 541     }
 542     case Op_CMoveP: {
 543       add_local_var(n, PointsToNode::NoEscape);
  544       // Do not add edges during the first iteration because some inputs
  545       // may not be defined yet.
 546       delayed_worklist->push(n);
 547       break;
 548     }
 549     case Op_ConP:
 550     case Op_ConN:
 551     case Op_ConNKlass: {
 552       // assume all oop constants globally escape except for null
 553       PointsToNode::EscapeState es;
 554       const Type* t = igvn->type(n);
 555       if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
 556         es = PointsToNode::NoEscape;
 557       } else {
 558         es = PointsToNode::GlobalEscape;
 559       }
 560       add_java_object(n, es);
 561       break;
 562     }
 563     case Op_CreateEx: {
 564       // assume that all exception objects globally escape
 565       map_ideal_node(n, phantom_obj);
 566       break;
 567     }
 568     case Op_LoadKlass:
 569     case Op_LoadNKlass: {
 570       // Unknown class is loaded
 571       map_ideal_node(n, phantom_obj);
 572       break;
 573     }
 574     case Op_LoadP:
 575     case Op_LoadN:
 576     case Op_LoadPLocked: {
 577       add_objload_to_connection_graph(n, delayed_worklist);
 578       break;
 579     }
 580     case Op_Parm: {
 581       map_ideal_node(n, phantom_obj);
 582       break;
 583     }
 584     case Op_PartialSubtypeCheck: {
  585       // Produces Null or notNull and is used only in CmpP, so
  586       // phantom_obj could be used.
 587       map_ideal_node(n, phantom_obj); // Result is unknown
 588       break;
 589     }
 590     case Op_Phi: {
 591       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 592       // ThreadLocal has RawPtr type.
 593       const Type* t = n->as_Phi()->type();
 594       if (t->make_ptr() != NULL) {
 595         add_local_var(n, PointsToNode::NoEscape);
  596         // Do not add edges during the first iteration because some inputs
  597         // may not be defined yet.
 598         delayed_worklist->push(n);
 599       }
 600       break;
 601     }
 602     case Op_Proj: {
 603       // we are only interested in the oop result projection from a call
 604       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 605           n->in(0)->as_Call()->returns_pointer()) {
 606         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
 607       }
 608       break;
 609     }
 610     case Op_Rethrow: // Exception object escapes
 611     case Op_Return: {
 612       if (n->req() > TypeFunc::Parms &&
 613           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 614         // Treat Return value as LocalVar with GlobalEscape escape state.
 615         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
 616       }
 617       break;
 618     }
 619     case Op_CompareAndExchangeP:
 620     case Op_CompareAndExchangeN:
 621     case Op_GetAndSetP:
 622     case Op_GetAndSetN: {
 623       add_objload_to_connection_graph(n, delayed_worklist);
 624       // fall-through
 625     }
 626     case Op_StoreP:
 627     case Op_StoreN:
 628     case Op_StoreNKlass:
 629     case Op_StorePConditional:
 630     case Op_WeakCompareAndSwapP:
 631     case Op_WeakCompareAndSwapN:
 632     case Op_CompareAndSwapP:
 633     case Op_CompareAndSwapN: {
 634       add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
 635       break;
 636     }
 637     case Op_AryEq:
 638     case Op_CountPositives:
 639     case Op_StrComp:
 640     case Op_StrEquals:
 641     case Op_StrIndexOf:
 642     case Op_StrIndexOfChar:
 643     case Op_StrInflatedCopy:
 644     case Op_StrCompressedCopy:
 645     case Op_EncodeISOArray: {
 646       add_local_var(n, PointsToNode::ArgEscape);
 647       delayed_worklist->push(n); // Process it later.
 648       break;
 649     }
 650     case Op_ThreadLocal: {
 651       add_java_object(n, PointsToNode::ArgEscape);
 652       break;
 653     }
 654     default:
 655       ; // Do nothing for nodes not related to EA.
 656   }
 657   return;
 658 }
 659 
 660 // Add final simple edges to graph.
 661 void ConnectionGraph::add_final_edges(Node *n) {
 662   PointsToNode* n_ptn = ptnode_adr(n->_idx);
 663 #ifdef ASSERT
 664   if (_verify && n_ptn->is_JavaObject())
 665     return; // This method does not change graph for JavaObject.
 666 #endif
 667 
 668   if (n->is_Call()) {
 669     process_call_arguments(n->as_Call());
 670     return;
 671   }
 672   assert(n->is_Store() || n->is_LoadStore() ||
 673          (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
 674          "node should be registered already");
 675   int opcode = n->Opcode();
 676   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
 677   if (gc_handled) {
 678     return; // Ignore node if already handled by GC.
 679   }
 680   switch (opcode) {
 681     case Op_AddP: {
 682       Node* base = get_addp_base(n);
 683       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 684       assert(ptn_base != NULL, "field's base should be registered");
 685       add_base(n_ptn->as_Field(), ptn_base);
 686       break;
 687     }
 688     case Op_CastPP:
 689     case Op_CheckCastPP:
 690     case Op_EncodeP:
 691     case Op_DecodeN:
 692     case Op_EncodePKlass:
 693     case Op_DecodeNKlass: {
 694       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
 695       break;
 696     }
 697     case Op_CMoveP: {
 698       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
 699         Node* in = n->in(i);
 700         if (in == NULL) {
 701           continue;  // ignore NULL
 702         }
 703         Node* uncast_in = in->uncast();
 704         if (uncast_in->is_top() || uncast_in == n) {
  705           continue;  // ignore top or inputs which go back to this node
 706         }
 707         PointsToNode* ptn = ptnode_adr(in->_idx);
 708         assert(ptn != NULL, "node should be registered");
 709         add_edge(n_ptn, ptn);
 710       }
 711       break;
 712     }
 713     case Op_LoadP:
 714     case Op_LoadN:
 715     case Op_LoadPLocked: {
 716       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 717       // ThreadLocal has RawPtr type.
 718       assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type");
 719       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
 720       break;
 721     }
 722     case Op_Phi: {
 723       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 724       // ThreadLocal has RawPtr type.
 725       assert(n->as_Phi()->type()->make_ptr() != NULL, "Unexpected node type");
 726       for (uint i = 1; i < n->req(); i++) {
 727         Node* in = n->in(i);
 728         if (in == NULL) {
 729           continue;  // ignore NULL
 730         }
 731         Node* uncast_in = in->uncast();
 732         if (uncast_in->is_top() || uncast_in == n) {
  733         continue;  // ignore top or inputs which go back to this node
 734         }
 735         PointsToNode* ptn = ptnode_adr(in->_idx);
 736         assert(ptn != NULL, "node should be registered");
 737         add_edge(n_ptn, ptn);
 738       }
 739       break;
 740     }
 741     case Op_Proj: {
 742       // we are only interested in the oop result projection from a call
 743       assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 744              n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
 745       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
 746       break;
 747     }
 748     case Op_Rethrow: // Exception object escapes
 749     case Op_Return: {
 750       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
 751              "Unexpected node type");
 752       // Treat Return value as LocalVar with GlobalEscape escape state.
 753       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), NULL);
 754       break;
 755     }
 756     case Op_CompareAndExchangeP:
 757     case Op_CompareAndExchangeN:
 758     case Op_GetAndSetP:
 759     case Op_GetAndSetN:{
 760       assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type");
 761       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
 762       // fall-through
 763     }
 764     case Op_CompareAndSwapP:
 765     case Op_CompareAndSwapN:
 766     case Op_WeakCompareAndSwapP:
 767     case Op_WeakCompareAndSwapN:
 768     case Op_StoreP:
 769     case Op_StoreN:
 770     case Op_StoreNKlass:
 771     case Op_StorePConditional:{
 772       add_final_edges_unsafe_access(n, opcode);
 773       break;
 774     }
 775     case Op_AryEq:
 776     case Op_CountPositives:
 777     case Op_StrComp:
 778     case Op_StrEquals:
 779     case Op_StrIndexOf:
 780     case Op_StrIndexOfChar:
 781     case Op_StrInflatedCopy:
 782     case Op_StrCompressedCopy:
 783     case Op_EncodeISOArray: {
  784       // char[]/byte[] arrays passed to string intrinsics do not escape but
  785       // they are not scalar replaceable. Adjust escape state for them.
  786       // Start from the in(2) edge since in(1) is the memory edge.
 787       for (uint i = 2; i < n->req(); i++) {
 788         Node* adr = n->in(i);
 789         const Type* at = _igvn->type(adr);
 790         if (!adr->is_top() && at->isa_ptr()) {
 791           assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
 792                  at->isa_ptr() != NULL, "expecting a pointer");
 793           if (adr->is_AddP()) {
 794             adr = get_addp_base(adr);
 795           }
 796           PointsToNode* ptn = ptnode_adr(adr->_idx);
 797           assert(ptn != NULL, "node should be registered");
 798           add_edge(n_ptn, ptn);
 799         }
 800       }
 801       break;
 802     }
 803     default: {
  804       // This method should be called only for EA-specific nodes which may
  805       // have missed some edges when they were created.
 806 #ifdef ASSERT
 807       n->dump(1);
 808 #endif
 809       guarantee(false, "unknown node");
 810     }
 811   }
 812   return;
 813 }
 814 
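// First-pass handling of raw/unsafe memory accesses (stores, compare-and-swap
// and get-and-set nodes): push the node on the delayed worklist so that the
// corresponding edges can be added later by add_final_edges_unsafe_access().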
 815 void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
 816   Node* adr = n->in(MemNode::Address);
 817   const Type* adr_type = _igvn->type(adr);
 818   adr_type = adr_type->make_ptr();
 819   if (adr_type == NULL) {
 820     return; // skip dead nodes
 821   }
 822   if (adr_type->isa_oopptr()
 823       || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
 824           && adr_type == TypeRawPtr::NOTNULL
 825           && is_captured_store_address(adr))) {
 826     delayed_worklist->push(n); // Process it later.
 827 #ifdef ASSERT
 828     assert (adr->is_AddP(), "expecting an AddP");
 829     if (adr_type == TypeRawPtr::NOTNULL) {
 830       // Verify a raw address for a store captured by Initialize node.
 831       int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
 832       assert(offs != Type::OffsetBot, "offset must be a constant");
 833     }
 834 #endif
 835   } else {
  836     // Ignore copies of the displaced header to the BoxNode (OSR compilation).
 837     if (adr->is_BoxLock()) {
 838       return;
 839     }
 840     // Stored value escapes in unsafe access.
 841     if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 842       delayed_worklist->push(n); // Process unsafe access later.
 843       return;
 844     }
 845 #ifdef ASSERT
 846     n->dump(1);
 847     assert(false, "not unsafe");
 848 #endif
 849   }
 850 }
 851 
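// Second-pass handling of raw/unsafe memory accesses: add an edge from the
// address field to the stored value, or mark the stored value as GlobalEscape
// when it is stored through a raw (unsafe) address. Returns true if the access
// was recognized.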
 852 bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
 853   Node* adr = n->in(MemNode::Address);
 854   const Type *adr_type = _igvn->type(adr);
 855   adr_type = adr_type->make_ptr();
 856 #ifdef ASSERT
 857   if (adr_type == NULL) {
 858     n->dump(1);
 859     assert(adr_type != NULL, "dead node should not be on list");
 860     return true;
 861   }
 862 #endif
 863 
 864   if (adr_type->isa_oopptr()
 865       || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
 866            && adr_type == TypeRawPtr::NOTNULL
 867            && is_captured_store_address(adr))) {
 868     // Point Address to Value
 869     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 870     assert(adr_ptn != NULL &&
 871            adr_ptn->as_Field()->is_oop(), "node should be registered");
 872     Node* val = n->in(MemNode::ValueIn);
 873     PointsToNode* ptn = ptnode_adr(val->_idx);
 874     assert(ptn != NULL, "node should be registered");
 875     add_edge(adr_ptn, ptn);
 876     return true;
 877   } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
 878     // Stored value escapes in unsafe access.
 879     Node* val = n->in(MemNode::ValueIn);
 880     PointsToNode* ptn = ptnode_adr(val->_idx);
 881     assert(ptn != NULL, "node should be registered");
 882     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
 883     // Add edge to object for unsafe access with offset.
 884     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 885     assert(adr_ptn != NULL, "node should be registered");
 886     if (adr_ptn->is_Field()) {
 887       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
 888       add_edge(adr_ptn, ptn);
 889     }
 890     return true;
 891   }
 892 #ifdef ASSERT
 893   n->dump(1);
 894   assert(false, "not unsafe");
 895 #endif
 896   return false;
 897 }
 898 
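// Create the PointsTo node for a call that returns a pointer and set its
// initial escape state based on the kind of call (Allocate, boxing method,
// static call with bytecode escape information, or unknown call).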
 899 void ConnectionGraph::add_call_node(CallNode* call) {
 900   assert(call->returns_pointer(), "only for call which returns pointer");
 901   uint call_idx = call->_idx;
 902   if (call->is_Allocate()) {
 903     Node* k = call->in(AllocateNode::KlassNode);
 904     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 905     assert(kt != NULL, "TypeKlassPtr  required.");
 906     ciKlass* cik = kt->klass();
 907     PointsToNode::EscapeState es = PointsToNode::NoEscape;
 908     bool scalar_replaceable = true;
 909     NOT_PRODUCT(const char* nsr_reason = "");
 910     if (call->is_AllocateArray()) {
 911       if (!cik->is_array_klass()) { // StressReflectiveCode
 912         es = PointsToNode::GlobalEscape;
 913       } else {
 914         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
 915         if (length < 0) {
 916           // Not scalar replaceable if the length is not constant.
 917           scalar_replaceable = false;
 918           NOT_PRODUCT(nsr_reason = "has a non-constant length");
 919         } else if (length > EliminateAllocationArraySizeLimit) {
 920           // Not scalar replaceable if the length is too big.
 921           scalar_replaceable = false;
 922           NOT_PRODUCT(nsr_reason = "has a length that is too big");
 923         }
 924       }
 925     } else {  // Allocate instance
 926       if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
 927           cik->is_subclass_of(_compile->env()->Reference_klass()) ||
 928          !cik->is_instance_klass() || // StressReflectiveCode
 929          !cik->as_instance_klass()->can_be_instantiated() ||
 930           cik->as_instance_klass()->has_finalizer()) {
 931         es = PointsToNode::GlobalEscape;
 932       } else {
 933         int nfields = cik->as_instance_klass()->nof_nonstatic_fields();
 934         if (nfields > EliminateAllocationFieldsLimit) {
 935           // Not scalar replaceable if there are too many fields.
 936           scalar_replaceable = false;
 937           NOT_PRODUCT(nsr_reason = "has too many fields");
 938         }
 939       }
 940     }
 941     add_java_object(call, es);
 942     PointsToNode* ptn = ptnode_adr(call_idx);
 943     if (!scalar_replaceable && ptn->scalar_replaceable()) {
 944       set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
 945     }
 946   } else if (call->is_CallStaticJava()) {
 947     // Call nodes could be different types:
 948     //
 949     // 1. CallDynamicJavaNode (what happened during call is unknown):
 950     //
 951     //    - mapped to GlobalEscape JavaObject node if oop is returned;
 952     //
 953     //    - all oop arguments are escaping globally;
 954     //
 955     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
 956     //
  957     //    - the same as CallDynamicJavaNode if bytecode analysis can't be done;
 958     //
 959     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
 960     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
 961     //      during call is returned;
  962     //    - mapped to ArgEscape LocalVar node pointing to object arguments
  963     //      which are returned and do not escape during the call;
 964     //
 965     //    - oop arguments escaping status is defined by bytecode analysis;
 966     //
 967     // For a static call, we know exactly what method is being called.
 968     // Use bytecode estimator to record whether the call's return value escapes.
 969     ciMethod* meth = call->as_CallJava()->method();
 970     if (meth == NULL) {
 971       const char* name = call->as_CallStaticJava()->_name;
 972       assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
 973       // Returns a newly allocated non-escaped object.
 974       add_java_object(call, PointsToNode::NoEscape);
 975       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
 976     } else if (meth->is_boxing_method()) {
 977       // Returns boxing object
 978       PointsToNode::EscapeState es;
 979       vmIntrinsics::ID intr = meth->intrinsic_id();
 980       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
 981         // It does not escape if object is always allocated.
 982         es = PointsToNode::NoEscape;
 983       } else {
 984         // It escapes globally if object could be loaded from cache.
 985         es = PointsToNode::GlobalEscape;
 986       }
 987       add_java_object(call, es);
 988     } else {
 989       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
 990       call_analyzer->copy_dependencies(_compile->dependencies());
 991       if (call_analyzer->is_return_allocated()) {
 992         // Returns a newly allocated non-escaped object, simply
 993         // update dependency information.
  994         // Mark it as NoEscape so that objects referenced by
  995         // its fields will be marked as NoEscape at least.
 996         add_java_object(call, PointsToNode::NoEscape);
 997         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
 998       } else {
 999         // Determine whether any arguments are returned.
1000         const TypeTuple* d = call->tf()->domain();
1001         bool ret_arg = false;
1002         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1003           if (d->field_at(i)->isa_ptr() != NULL &&
1004               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1005             ret_arg = true;
1006             break;
1007           }
1008         }
1009         if (ret_arg) {
1010           add_local_var(call, PointsToNode::ArgEscape);
1011         } else {
1012           // Returns unknown object.
1013           map_ideal_node(call, phantom_obj);
1014         }
1015       }
1016     }
1017   } else {
 1018     // Another type of call; assume the worst case:
 1019     // the returned value is unknown and globally escapes.
1020     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
1021     map_ideal_node(call, phantom_obj);
1022   }
1023 }
1024 
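// Set the escape state of the call's outgoing arguments and, where bytecode
// analysis shows that an argument is returned, add an edge from the call's
// result to that argument.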
1025 void ConnectionGraph::process_call_arguments(CallNode *call) {
1026     bool is_arraycopy = false;
1027     switch (call->Opcode()) {
1028 #ifdef ASSERT
1029     case Op_Allocate:
1030     case Op_AllocateArray:
1031     case Op_Lock:
1032     case Op_Unlock:
1033       assert(false, "should be done already");
1034       break;
1035 #endif
1036     case Op_ArrayCopy:
1037     case Op_CallLeafNoFP:
1038       // Most array copies are ArrayCopy nodes at this point but there
1039       // are still a few direct calls to the copy subroutines (See
1040       // PhaseStringOpts::copy_string())
1041       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1042         call->as_CallLeaf()->is_call_to_arraycopystub();
1043       // fall through
1044     case Op_CallLeafVector:
1045     case Op_CallLeaf: {
 1046       // Stub calls: objects do not escape but they are not scalar replaceable.
1047       // Adjust escape state for outgoing arguments.
1048       const TypeTuple * d = call->tf()->domain();
1049       bool src_has_oops = false;
1050       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1051         const Type* at = d->field_at(i);
1052         Node *arg = call->in(i);
1053         if (arg == NULL) {
1054           continue;
1055         }
1056         const Type *aat = _igvn->type(arg);
1057         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
1058           continue;
1059         }
1060         if (arg->is_AddP()) {
1061           //
1062           // The inline_native_clone() case when the arraycopy stub is called
1063           // after the allocation before Initialize and CheckCastPP nodes.
1064           // Or normal arraycopy for object arrays case.
1065           //
1066           // Set AddP's base (Allocate) as not scalar replaceable since
1067           // pointer to the base (with offset) is passed as argument.
1068           //
1069           arg = get_addp_base(arg);
1070         }
1071         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1072         assert(arg_ptn != NULL, "should be registered");
1073         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1074         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
 1075           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
 1076                  aat->isa_ptr() != NULL, "expecting a pointer");
1077           bool arg_has_oops = aat->isa_oopptr() &&
1078                               (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
1079                                (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
1080           if (i == TypeFunc::Parms) {
1081             src_has_oops = arg_has_oops;
1082           }
1083           //
1084           // src or dst could be j.l.Object when other is basic type array:
1085           //
1086           //   arraycopy(char[],0,Object*,0,size);
1087           //   arraycopy(Object*,0,char[],0,size);
1088           //
1089           // Don't add edges in such cases.
1090           //
1091           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1092                                        arg_has_oops && (i > TypeFunc::Parms);
1093 #ifdef ASSERT
1094           if (!(is_arraycopy ||
1095                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1096                 (call->as_CallLeaf()->_name != NULL &&
1097                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1098                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1099                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
1100                   strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
1101                   strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
1102                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
1103                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
1104                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
1105                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
1106                   strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
1107                   strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
1108                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1109                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1110                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
1111                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
1112                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
1113                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1114                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1115                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1116                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1117                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1118                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1119                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
1120                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
1121                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1122                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1123                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1124                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1125                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1126                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
1127                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
1128                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1129                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
1130                  ))) {
1131             call->dump();
1132             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1133           }
1134 #endif
1135           // Always process arraycopy's destination object since
1136           // we need to add all possible edges to references in
1137           // source object.
1138           if (arg_esc >= PointsToNode::ArgEscape &&
1139               !arg_is_arraycopy_dest) {
1140             continue;
1141           }
1142           PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1143           if (call->is_ArrayCopy()) {
1144             ArrayCopyNode* ac = call->as_ArrayCopy();
1145             if (ac->is_clonebasic() ||
1146                 ac->is_arraycopy_validated() ||
1147                 ac->is_copyof_validated() ||
1148                 ac->is_copyofrange_validated()) {
1149               es = PointsToNode::NoEscape;
1150             }
1151           }
1152           set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1153           if (arg_is_arraycopy_dest) {
1154             Node* src = call->in(TypeFunc::Parms);
1155             if (src->is_AddP()) {
1156               src = get_addp_base(src);
1157             }
1158             PointsToNode* src_ptn = ptnode_adr(src->_idx);
1159             assert(src_ptn != NULL, "should be registered");
1160             if (arg_ptn != src_ptn) {
 1161               // Special arraycopy edge:
 1162               // A destination object's field can't have the source object
 1163               // as its base since the objects' escape states are not related.
 1164               // Only the escape state of the destination object's fields affects
 1165               // the escape state of fields in the source object.
1166               add_arraycopy(call, es, src_ptn, arg_ptn);
1167             }
1168           }
1169         }
1170       }
1171       break;
1172     }
1173     case Op_CallStaticJava: {
1174       // For a static call, we know exactly what method is being called.
 1175       // Use bytecode estimator to record the call's escape effects.
1176 #ifdef ASSERT
1177       const char* name = call->as_CallStaticJava()->_name;
1178       assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1179 #endif
1180       ciMethod* meth = call->as_CallJava()->method();
1181       if ((meth != NULL) && meth->is_boxing_method()) {
1182         break; // Boxing methods do not modify any oops.
1183       }
1184       BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
1185       // fall-through if not a Java method or no analyzer information
1186       if (call_analyzer != NULL) {
1187         PointsToNode* call_ptn = ptnode_adr(call->_idx);
1188         const TypeTuple* d = call->tf()->domain();
1189         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1190           const Type* at = d->field_at(i);
1191           int k = i - TypeFunc::Parms;
1192           Node* arg = call->in(i);
1193           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1194           if (at->isa_ptr() != NULL &&
1195               call_analyzer->is_arg_returned(k)) {
1196             // The call returns arguments.
1197             if (call_ptn != NULL) { // Is call's result used?
1198               assert(call_ptn->is_LocalVar(), "node should be registered");
1199               assert(arg_ptn != NULL, "node should be registered");
1200               add_edge(call_ptn, arg_ptn);
1201             }
1202           }
1203           if (at->isa_oopptr() != NULL &&
1204               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1205             if (!call_analyzer->is_arg_stack(k)) {
 1206               // The argument escapes globally.
1207               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1208             } else {
1209               set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1210               if (!call_analyzer->is_arg_local(k)) {
1211                 // The argument itself doesn't escape, but any fields might
1212                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1213               }
1214             }
1215           }
1216         }
1217         if (call_ptn != NULL && call_ptn->is_LocalVar()) {
1218           // The call returns arguments.
1219           assert(call_ptn->edge_count() > 0, "sanity");
1220           if (!call_analyzer->is_return_local()) {
 1221             // The call also returns an unknown object.
1222             add_edge(call_ptn, phantom_obj);
1223           }
1224         }
1225         break;
1226       }
1227     }
1228     default: {
 1229       // Fall through here if this is not a Java method, there is no analyzer
 1230       // information, or it is some other type of call; assume the worst case:
 1231       // all arguments globally escape.
1232       const TypeTuple* d = call->tf()->domain();
1233       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1234         const Type* at = d->field_at(i);
1235         if (at->isa_oopptr() != NULL) {
1236           Node* arg = call->in(i);
1237           if (arg->is_AddP()) {
1238             arg = get_addp_base(arg);
1239           }
1240           assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
1241           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1242         }
1243       }
1244     }
1245   }
1246 }
1247 
1248 
1249 // Finish Graph construction.
1250 bool ConnectionGraph::complete_connection_graph(
1251                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
1252                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
1253                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
1254                          GrowableArray<FieldNode*>&      oop_fields_worklist) {
 1255   // Normally only 1-3 passes are needed to build the Connection Graph, depending
 1256   // on graph complexity. 8 passes were observed in jvm2008 compiler.compiler.
 1257   // Set the limit to 20 to catch the situation when something went wrong and
 1258   // bail out of Escape Analysis.
 1259   // Also limit build time to 20 sec (60 in debug VM); see the EscapeAnalysisTimeout flag.
1260 #define GRAPH_BUILD_ITER_LIMIT 20
1261 
 1262   // Propagate GlobalEscape and ArgEscape escape states and check that
 1263   // we still have non-escaping objects. The method pushes onto _worklist
 1264   // the Field nodes which reference phantom_object.
1265   if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
1266     return false; // Nothing to do.
1267   }
1268   // Now propagate references to all JavaObject nodes.
1269   int java_objects_length = java_objects_worklist.length();
1270   elapsedTimer build_time;
1271   build_time.start();
1272   elapsedTimer time;
1273   bool timeout = false;
1274   int new_edges = 1;
1275   int iterations = 0;
1276   do {
1277     while ((new_edges > 0) &&
1278            (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
1279       double start_time = time.seconds();
1280       time.start();
1281       new_edges = 0;
1282       // Propagate references to phantom_object for nodes pushed on _worklist
1283       // by find_non_escaped_objects() and find_field_value().
1284       new_edges += add_java_object_edges(phantom_obj, false);
1285       for (int next = 0; next < java_objects_length; ++next) {
1286         JavaObjectNode* ptn = java_objects_worklist.at(next);
1287         new_edges += add_java_object_edges(ptn, true);
1288 
1289 #define SAMPLE_SIZE 4
1290         if ((next % SAMPLE_SIZE) == 0) {
 1291           // Every 4 iterations estimate how much time it will take
 1292           // to complete graph construction.
1293           time.stop();
 1294           // Poll for requests from the shutdown mechanism to quiesce the compiler
 1295           // because Connection Graph construction may take a long time.
1296           CompileBroker::maybe_block();
1297           double stop_time = time.seconds();
1298           double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
1299           double time_until_end = time_per_iter * (double)(java_objects_length - next);
1300           if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
1301             timeout = true;
1302             break; // Timeout
1303           }
1304           start_time = stop_time;
1305           time.start();
1306         }
1307 #undef SAMPLE_SIZE
1308 
1309       }
1310       if (timeout) break;
1311       if (new_edges > 0) {
1312         // Update escape states on each iteration if graph was updated.
1313         if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
1314           return false; // Nothing to do.
1315         }
1316       }
1317       time.stop();
1318       if (time.seconds() >= EscapeAnalysisTimeout) {
1319         timeout = true;
1320         break;
1321       }
1322     }
1323     if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
1324       time.start();
1325       // Find fields which have unknown value.
1326       int fields_length = oop_fields_worklist.length();
1327       for (int next = 0; next < fields_length; next++) {
1328         FieldNode* field = oop_fields_worklist.at(next);
1329         if (field->edge_count() == 0) {
1330           new_edges += find_field_value(field);
          // This code may have added new edges to phantom_object.
          // Another iteration is needed to propagate references to phantom_object.
1333         }
1334       }
1335       time.stop();
1336       if (time.seconds() >= EscapeAnalysisTimeout) {
1337         timeout = true;
1338         break;
1339       }
1340     } else {
1341       new_edges = 0; // Bailout
1342     }
1343   } while (new_edges > 0);
1344 
1345   build_time.stop();
1346   _build_time = build_time.seconds();
1347   _build_iterations = iterations;
1348 
1349   // Bailout if passed limits.
1350   if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
1351     Compile* C = _compile;
1352     if (C->log() != NULL) {
1353       C->log()->begin_elem("connectionGraph_bailout reason='reached ");
1354       C->log()->text("%s", timeout ? "time" : "iterations");
1355       C->log()->end_elem(" limit'");
1356     }
1357     assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
1358            _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
1359     // Possible infinite build_connection_graph loop,
1360     // bailout (no changes to ideal graph were made).
1361     return false;
1362   }
1363 
1364 #undef GRAPH_BUILD_ITER_LIMIT
1365 
1366   // Find fields initialized by NULL for non-escaping Allocations.
1367   int non_escaped_length = non_escaped_allocs_worklist.length();
1368   for (int next = 0; next < non_escaped_length; next++) {
1369     JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
1370     PointsToNode::EscapeState es = ptn->escape_state();
1371     assert(es <= PointsToNode::ArgEscape, "sanity");
1372     if (es == PointsToNode::NoEscape) {
1373       if (find_init_values_null(ptn, _igvn) > 0) {
1374         // Adding references to NULL object does not change escape states
1375         // since it does not escape. Also no fields are added to NULL object.
1376         add_java_object_edges(null_obj, false);
1377       }
1378     }
1379     Node* n = ptn->ideal_node();
1380     if (n->is_Allocate()) {
1381       // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
1383       // expanded no MemBarStoreStore is added.
1384       InitializeNode* ini = n->as_Allocate()->initialization();
1385       if (ini != NULL)
1386         ini->set_does_not_escape();
1387     }
1388   }
1389   return true; // Finished graph construction.
1390 }
1391 
1392 // Propagate GlobalEscape and ArgEscape escape states to all nodes
1393 // and check that we still have non-escaping java objects.
1394 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
1395                                                GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
1396   GrowableArray<PointsToNode*> escape_worklist;
1397   // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
1398   int ptnodes_length = ptnodes_worklist.length();
1399   for (int next = 0; next < ptnodes_length; ++next) {
1400     PointsToNode* ptn = ptnodes_worklist.at(next);
1401     if (ptn->escape_state() >= PointsToNode::ArgEscape ||
1402         ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
1403       escape_worklist.push(ptn);
1404     }
1405   }
1406   // Set escape states to referenced nodes (edges list).
1407   while (escape_worklist.length() > 0) {
1408     PointsToNode* ptn = escape_worklist.pop();
1409     PointsToNode::EscapeState es  = ptn->escape_state();
1410     PointsToNode::EscapeState field_es = ptn->fields_escape_state();
1411     if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
1412         es >= PointsToNode::ArgEscape) {
1413       // GlobalEscape or ArgEscape state of field means it has unknown value.
1414       if (add_edge(ptn, phantom_obj)) {
1415         // New edge was added
1416         add_field_uses_to_worklist(ptn->as_Field());
1417       }
1418     }
1419     for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1420       PointsToNode* e = i.get();
1421       if (e->is_Arraycopy()) {
1422         assert(ptn->arraycopy_dst(), "sanity");
1423         // Propagate only fields escape state through arraycopy edge.
1424         if (e->fields_escape_state() < field_es) {
1425           set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
1426           escape_worklist.push(e);
1427         }
1428       } else if (es >= field_es) {
1429         // fields_escape_state is also set to 'es' if it is less than 'es'.
1430         if (e->escape_state() < es) {
1431           set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
1432           escape_worklist.push(e);
1433         }
1434       } else {
1435         // Propagate field escape state.
1436         bool es_changed = false;
1437         if (e->fields_escape_state() < field_es) {
1438           set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
1439           es_changed = true;
1440         }
1441         if ((e->escape_state() < field_es) &&
1442             e->is_Field() && ptn->is_JavaObject() &&
1443             e->as_Field()->is_oop()) {
1444           // Change escape state of referenced fields.
1445           set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
1446           es_changed = true;
1447         } else if (e->escape_state() < es) {
1448           set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
1449           es_changed = true;
1450         }
1451         if (es_changed) {
1452           escape_worklist.push(e);
1453         }
1454       }
1455     }
1456   }
1457   // Remove escaped objects from non_escaped list.
1458   for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) {
1459     JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
1460     if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
1461       non_escaped_allocs_worklist.delete_at(next);
1462     }
1463     if (ptn->escape_state() == PointsToNode::NoEscape) {
1464       // Find fields in non-escaped allocations which have unknown value.
1465       find_init_values_phantom(ptn);
1466     }
1467   }
1468   return (non_escaped_allocs_worklist.length() > 0);
1469 }
1470 
1471 // Add all references to JavaObject node by walking over all uses.
1472 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
1473   int new_edges = 0;
1474   if (populate_worklist) {
    // Populate _worklist with the uses of jobj's uses.
1476     for (UseIterator i(jobj); i.has_next(); i.next()) {
1477       PointsToNode* use = i.get();
1478       if (use->is_Arraycopy()) {
1479         continue;
1480       }
1481       add_uses_to_worklist(use);
1482       if (use->is_Field() && use->as_Field()->is_oop()) {
1483         // Put on worklist all field's uses (loads) and
1484         // related field nodes (same base and offset).
1485         add_field_uses_to_worklist(use->as_Field());
1486       }
1487     }
1488   }
1489   for (int l = 0; l < _worklist.length(); l++) {
1490     PointsToNode* use = _worklist.at(l);
1491     if (PointsToNode::is_base_use(use)) {
1492       // Add reference from jobj to field and from field to jobj (field's base).
1493       use = PointsToNode::get_use_node(use)->as_Field();
1494       if (add_base(use->as_Field(), jobj)) {
1495         new_edges++;
1496       }
1497       continue;
1498     }
1499     assert(!use->is_JavaObject(), "sanity");
1500     if (use->is_Arraycopy()) {
1501       if (jobj == null_obj) { // NULL object does not have field edges
1502         continue;
1503       }
      // Add an edge from the Arraycopy node to the arraycopy's source java object
1505       if (add_edge(use, jobj)) {
1506         jobj->set_arraycopy_src();
1507         new_edges++;
1508       }
1509       // and stop here.
1510       continue;
1511     }
1512     if (!add_edge(use, jobj)) {
1513       continue; // No new edge added, there was such edge already.
1514     }
1515     new_edges++;
1516     if (use->is_LocalVar()) {
1517       add_uses_to_worklist(use);
1518       if (use->arraycopy_dst()) {
1519         for (EdgeIterator i(use); i.has_next(); i.next()) {
1520           PointsToNode* e = i.get();
1521           if (e->is_Arraycopy()) {
1522             if (jobj == null_obj) { // NULL object does not have field edges
1523               continue;
1524             }
1525             // Add edge from arraycopy's destination java object to Arraycopy node.
1526             if (add_edge(jobj, e)) {
1527               new_edges++;
1528               jobj->set_arraycopy_dst();
1529             }
1530           }
1531         }
1532       }
1533     } else {
      // Added a new edge from a field to the value stored in it.
1535       // Put on worklist all field's uses (loads) and
1536       // related field nodes (same base and offset).
1537       add_field_uses_to_worklist(use->as_Field());
1538     }
1539   }
1540   _worklist.clear();
1541   _in_worklist.reset();
1542   return new_edges;
1543 }
1544 
1545 // Put on worklist all related field nodes.
1546 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
1547   assert(field->is_oop(), "sanity");
1548   int offset = field->offset();
1549   add_uses_to_worklist(field);
1550   // Loop over all bases of this field and push on worklist Field nodes
1551   // with the same offset and base (since they may reference the same field).
1552   for (BaseIterator i(field); i.has_next(); i.next()) {
1553     PointsToNode* base = i.get();
1554     add_fields_to_worklist(field, base);
    // Check if the base was the source object of an arraycopy and go over the
    // arraycopy's destination objects since values stored to a field of the
    // source object are accessible by uses (loads) of fields of the
    // destination objects.
1558     if (base->arraycopy_src()) {
1559       for (UseIterator j(base); j.has_next(); j.next()) {
1560         PointsToNode* arycp = j.get();
1561         if (arycp->is_Arraycopy()) {
1562           for (UseIterator k(arycp); k.has_next(); k.next()) {
1563             PointsToNode* abase = k.get();
1564             if (abase->arraycopy_dst() && abase != base) {
1565               // Look for the same arraycopy reference.
1566               add_fields_to_worklist(field, abase);
1567             }
1568           }
1569         }
1570       }
1571     }
1572   }
1573 }
1574 
1575 // Put on worklist all related field nodes.
1576 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
1577   int offset = field->offset();
1578   if (base->is_LocalVar()) {
1579     for (UseIterator j(base); j.has_next(); j.next()) {
1580       PointsToNode* f = j.get();
1581       if (PointsToNode::is_base_use(f)) { // Field
1582         f = PointsToNode::get_use_node(f);
1583         if (f == field || !f->as_Field()->is_oop()) {
1584           continue;
1585         }
1586         int offs = f->as_Field()->offset();
1587         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1588           add_to_worklist(f);
1589         }
1590       }
1591     }
1592   } else {
1593     assert(base->is_JavaObject(), "sanity");
1594     if (// Skip phantom_object since it is only used to indicate that
1595         // this field's content globally escapes.
1596         (base != phantom_obj) &&
1597         // NULL object node does not have fields.
1598         (base != null_obj)) {
1599       for (EdgeIterator i(base); i.has_next(); i.next()) {
1600         PointsToNode* f = i.get();
1601         // Skip arraycopy edge since store to destination object field
1602         // does not update value in source object field.
1603         if (f->is_Arraycopy()) {
1604           assert(base->arraycopy_dst(), "sanity");
1605           continue;
1606         }
1607         if (f == field || !f->as_Field()->is_oop()) {
1608           continue;
1609         }
1610         int offs = f->as_Field()->offset();
1611         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1612           add_to_worklist(f);
1613         }
1614       }
1615     }
1616   }
1617 }
1618 
1619 // Find fields which have unknown value.
1620 int ConnectionGraph::find_field_value(FieldNode* field) {
1621   // Escaped fields should have init value already.
1622   assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
1623   int new_edges = 0;
1624   for (BaseIterator i(field); i.has_next(); i.next()) {
1625     PointsToNode* base = i.get();
1626     if (base->is_JavaObject()) {
1627       // Skip Allocate's fields which will be processed later.
1628       if (base->ideal_node()->is_Allocate()) {
1629         return 0;
1630       }
1631       assert(base == null_obj, "only NULL ptr base expected here");
1632     }
1633   }
1634   if (add_edge(field, phantom_obj)) {
1635     // New edge was added
1636     new_edges++;
1637     add_field_uses_to_worklist(field);
1638   }
1639   return new_edges;
1640 }
1641 
1642 // Find fields initializing values for allocations.
1643 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
1644   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1645   Node* alloc = pta->ideal_node();
1646 
  // Do nothing for Allocate nodes since their field values are
  // "known" unless they are initialized by arraycopy/clone.
1649   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
1650     return 0;
1651   }
1652   assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
1653 #ifdef ASSERT
1654   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
1655     const char* name = alloc->as_CallStaticJava()->_name;
1656     assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
1657   }
1658 #endif
  // Non-escaped allocations returned from Java or runtime calls have unknown values in their fields.
1660   int new_edges = 0;
1661   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1662     PointsToNode* field = i.get();
1663     if (field->is_Field() && field->as_Field()->is_oop()) {
1664       if (add_edge(field, phantom_obj)) {
1665         // New edge was added
1666         new_edges++;
1667         add_field_uses_to_worklist(field->as_Field());
1668       }
1669     }
1670   }
1671   return new_edges;
1672 }
1673 
1674 // Find fields initializing values for allocations.
1675 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* phase) {
1676   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1677   Node* alloc = pta->ideal_node();
  // Do nothing for Call nodes since their field values are unknown.
1679   if (!alloc->is_Allocate()) {
1680     return 0;
1681   }
1682   InitializeNode* ini = alloc->as_Allocate()->initialization();
1683   bool visited_bottom_offset = false;
1684   GrowableArray<int> offsets_worklist;
1685   int new_edges = 0;
1686 
  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL to the field if its value is not recorded.
  // The Connection Graph does not record a default initialization by NULL
  // captured by an Initialize node.
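  //
  // For example (an illustrative sketch; the class and names are made up):
  //
  //    class T { Object f; }
  //    T t = new T();   // no store to t.f is captured by the Initialize node,
  //                     // so the default NULL initialization is represented
  //                     // below by adding an edge from the field to null_obj.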
1691   //
1692   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1693     PointsToNode* field = i.get(); // Field (AddP)
1694     if (!field->is_Field() || !field->as_Field()->is_oop()) {
1695       continue; // Not oop field
1696     }
1697     int offset = field->as_Field()->offset();
1698     if (offset == Type::OffsetBot) {
1699       if (!visited_bottom_offset) {
        // OffsetBot is used to reference an array's element,
        // always add a reference to NULL to all Field nodes since we don't
        // know which element is referenced.
1703         if (add_edge(field, null_obj)) {
1704           // New edge was added
1705           new_edges++;
1706           add_field_uses_to_worklist(field->as_Field());
1707           visited_bottom_offset = true;
1708         }
1709       }
1710     } else {
1711       // Check only oop fields.
1712       const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
1713       if (adr_type->isa_rawptr()) {
1714 #ifdef ASSERT
        // Raw pointers are used for initializing stores, so skip this one
        // since it should be recorded already.
1717         Node* base = get_addp_base(field->ideal_node());
1718         assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
1719 #endif
1720         continue;
1721       }
1722       if (!offsets_worklist.contains(offset)) {
1723         offsets_worklist.append(offset);
1724         Node* value = NULL;
1725         if (ini != NULL) {
1726           // StoreP::memory_type() == T_ADDRESS
1727           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
1728           Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
1729           // Make sure initializing store has the same type as this AddP.
          // This AddP may reference a non-existing field because it is on a
          // dead branch of a bimorphic call which has not been eliminated yet.
1732           if (store != NULL && store->is_Store() &&
1733               store->as_Store()->memory_type() == ft) {
1734             value = store->in(MemNode::ValueIn);
1735 #ifdef ASSERT
1736             if (VerifyConnectionGraph) {
1737               // Verify that AddP already points to all objects the value points to.
1738               PointsToNode* val = ptnode_adr(value->_idx);
1739               assert((val != NULL), "should be processed already");
1740               PointsToNode* missed_obj = NULL;
1741               if (val->is_JavaObject()) {
1742                 if (!field->points_to(val->as_JavaObject())) {
1743                   missed_obj = val;
1744                 }
1745               } else {
1746                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1747                   tty->print_cr("----------init store has invalid value -----");
1748                   store->dump();
1749                   val->dump();
1750                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1751                 }
1752                 for (EdgeIterator j(val); j.has_next(); j.next()) {
1753                   PointsToNode* obj = j.get();
1754                   if (obj->is_JavaObject()) {
1755                     if (!field->points_to(obj->as_JavaObject())) {
1756                       missed_obj = obj;
1757                       break;
1758                     }
1759                   }
1760                 }
1761               }
1762               if (missed_obj != NULL) {
1763                 tty->print_cr("----------field---------------------------------");
1764                 field->dump();
1765                 tty->print_cr("----------missed referernce to object-----------");
1766                 missed_obj->dump();
1767                 tty->print_cr("----------object referernced by init store -----");
1768                 store->dump();
1769                 val->dump();
1770                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1771               }
1772             }
1773 #endif
1774           } else {
            // There could be initializing stores which follow the allocation.
            // For example, a volatile field store is not collected
            // by the Initialize node.
            //
            // Need to check for dependent loads to separate such stores from
            // stores which follow loads. For now, add the initial value NULL
            // so that the pointer compare optimization works correctly.
1782           }
1783         }
1784         if (value == NULL) {
1785           // A field's initializing value was not recorded. Add NULL.
1786           if (add_edge(field, null_obj)) {
1787             // New edge was added
1788             new_edges++;
1789             add_field_uses_to_worklist(field->as_Field());
1790           }
1791         }
1792       }
1793     }
1794   }
1795   return new_edges;
1796 }
1797 
1798 // Adjust scalar_replaceable state after Connection Graph is built.
1799 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
1800   // Search for non-escaping objects which are not scalar replaceable
1801   // and mark them to propagate the state to referenced objects.
1802 
1803   for (UseIterator i(jobj); i.has_next(); i.next()) {
1804     PointsToNode* use = i.get();
1805     if (use->is_Arraycopy()) {
1806       continue;
1807     }
1808     if (use->is_Field()) {
1809       FieldNode* field = use->as_Field();
1810       assert(field->is_oop() && field->scalar_replaceable(), "sanity");
1811       // 1. An object is not scalar replaceable if the field into which it is
      // stored has unknown offset (stored into an unknown element of an array).
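      // For example (an illustrative sketch; 'arr' and 'i' are made-up names):
      //
      //    Object[] arr = ...;
      //    arr[i] = new Point();   // 'i' is not a constant, so the element
      //                            // Field node has offset OffsetBot.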
1813       if (field->offset() == Type::OffsetBot) {
1814         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
1815         return;
1816       }
1817       // 2. An object is not scalar replaceable if the field into which it is
      // stored has multiple bases, one of which is null.
1819       if (field->base_count() > 1) {
1820         for (BaseIterator i(field); i.has_next(); i.next()) {
1821           PointsToNode* base = i.get();
1822           if (base == null_obj) {
1823             set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
1824             return;
1825           }
1826         }
1827       }
1828     }
1829     assert(use->is_Field() || use->is_LocalVar(), "sanity");
1830     // 3. An object is not scalar replaceable if it is merged with other objects.
1831     for (EdgeIterator j(use); j.has_next(); j.next()) {
1832       PointsToNode* ptn = j.get();
1833       if (ptn->is_JavaObject() && ptn != jobj) {
1834         // Mark all objects.
1835         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
1836         set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
1837       }
1838     }
1839     if (!jobj->scalar_replaceable()) {
1840       return;
1841     }
1842   }
1843 
1844   for (EdgeIterator j(jobj); j.has_next(); j.next()) {
1845     if (j.get()->is_Arraycopy()) {
1846       continue;
1847     }
1848 
1849     // Non-escaping object node should point only to field nodes.
1850     FieldNode* field = j.get()->as_Field();
1851     int offset = field->as_Field()->offset();
1852 
1853     // 4. An object is not scalar replaceable if it has a field with unknown
    // offset (an array's element is accessed in a loop).
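    // For example (an illustrative sketch):
    //
    //    Point[] p = new Point[2];
    //    for (int i = 0; i < 2; i++) {
    //      p[i] = ...;             // the element Field of 'p' has OffsetBot
    //    }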
1855     if (offset == Type::OffsetBot) {
1856       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
1857       return;
1858     }
1859     // 5. Currently an object is not scalar replaceable if a LoadStore node
    // accesses its field since the field value is unknown after it.
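    // For example (an illustrative note): a compareAndSet intrinsic on the
    // field is parsed as a CompareAndSwapP/N node, which is a LoadStore node.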
1861     //
1862     Node* n = field->ideal_node();
1863 
1864     // Test for an unsafe access that was parsed as maybe off heap
1865     // (with a CheckCastPP to raw memory).
1866     assert(n->is_AddP(), "expect an address computation");
1867     if (n->in(AddPNode::Base)->is_top() &&
1868         n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
1869       assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
1870       assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
1871       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
1872       return;
1873     }
1874 
1875     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1876       Node* u = n->fast_out(i);
1877       if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
1878         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
1879         return;
1880       }
1881     }
1882 
    // 6. Or the address may point to more than one object. This may produce
    // a false positive result (set not scalar replaceable)
    // since the flow-insensitive escape analysis can't separate
    // the case when stores overwrite the field's value from the case
    // when stores happen on different control branches.
1888     //
    // Note: it will disable scalar replacement in some cases:
    //
    //    Point p[] = new Point[1];
    //    p[0] = new Point(); // Will not be scalar replaced
    //
    // but it will save us from incorrect optimizations in cases like:
    //
    //    Point p[] = new Point[1];
    //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
1898     //
1899     if (field->base_count() > 1) {
1900       for (BaseIterator i(field); i.has_next(); i.next()) {
1901         PointsToNode* base = i.get();
1902         // Don't take into account LocalVar nodes which
        // may point to only one object, which should also be
        // this field's base by now.
1905         if (base->is_JavaObject() && base != jobj) {
1906           // Mark all bases.
1907           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
1908           set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
1909         }
1910       }
1911     }
1912   }
1913 }
1914 
1915 #ifdef ASSERT
1916 void ConnectionGraph::verify_connection_graph(
1917                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
1918                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
1919                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
1920                          GrowableArray<Node*>& addp_worklist) {
1921   // Verify that graph is complete - no new edges could be added.
1922   int java_objects_length = java_objects_worklist.length();
1923   int non_escaped_length  = non_escaped_allocs_worklist.length();
1924   int new_edges = 0;
1925   for (int next = 0; next < java_objects_length; ++next) {
1926     JavaObjectNode* ptn = java_objects_worklist.at(next);
1927     new_edges += add_java_object_edges(ptn, true);
1928   }
1929   assert(new_edges == 0, "graph was not complete");
1930   // Verify that escape state is final.
1931   int length = non_escaped_allocs_worklist.length();
1932   find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist);
1933   assert((non_escaped_length == non_escaped_allocs_worklist.length()) &&
1934          (non_escaped_length == length) &&
1935          (_worklist.length() == 0), "escape state was not final");
1936 
1937   // Verify fields information.
1938   int addp_length = addp_worklist.length();
1939   for (int next = 0; next < addp_length; ++next ) {
1940     Node* n = addp_worklist.at(next);
1941     FieldNode* field = ptnode_adr(n->_idx)->as_Field();
1942     if (field->is_oop()) {
1943       // Verify that field has all bases
1944       Node* base = get_addp_base(n);
1945       PointsToNode* ptn = ptnode_adr(base->_idx);
1946       if (ptn->is_JavaObject()) {
1947         assert(field->has_base(ptn->as_JavaObject()), "sanity");
1948       } else {
1949         assert(ptn->is_LocalVar(), "sanity");
1950         for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1951           PointsToNode* e = i.get();
1952           if (e->is_JavaObject()) {
1953             assert(field->has_base(e->as_JavaObject()), "sanity");
1954           }
1955         }
1956       }
1957       // Verify that all fields have initializing values.
1958       if (field->edge_count() == 0) {
1959         tty->print_cr("----------field does not have references----------");
1960         field->dump();
1961         for (BaseIterator i(field); i.has_next(); i.next()) {
1962           PointsToNode* base = i.get();
1963           tty->print_cr("----------field has next base---------------------");
1964           base->dump();
1965           if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
1966             tty->print_cr("----------base has fields-------------------------");
1967             for (EdgeIterator j(base); j.has_next(); j.next()) {
1968               j.get()->dump();
1969             }
1970             tty->print_cr("----------base has references---------------------");
1971             for (UseIterator j(base); j.has_next(); j.next()) {
1972               j.get()->dump();
1973             }
1974           }
1975         }
1976         for (UseIterator i(field); i.has_next(); i.next()) {
1977           i.get()->dump();
1978         }
1979         assert(field->edge_count() > 0, "sanity");
1980       }
1981     }
1982   }
1983 }
1984 #endif
1985 
1986 // Optimize ideal graph.
1987 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
1988                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
1989   Compile* C = _compile;
1990   PhaseIterGVN* igvn = _igvn;
1991   if (EliminateLocks) {
1992     // Mark locks before changing ideal graph.
1993     int cnt = C->macro_count();
1994     for (int i = 0; i < cnt; i++) {
1995       Node *n = C->macro_node(i);
1996       if (n->is_AbstractLock()) { // Lock and Unlock nodes
1997         AbstractLockNode* alock = n->as_AbstractLock();
1998         if (!alock->is_non_esc_obj()) {
1999           if (not_global_escape(alock->obj_node())) {
2000             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
            // The lock could be marked eliminated by lock coarsening
            // code during the first IGVN pass before EA. Replace the coarsened
            // flag to eliminate all associated locks/unlocks.
2004 #ifdef ASSERT
2005             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
2006 #endif
2007             alock->set_non_esc_obj();
2008           }
2009         }
2010       }
2011     }
2012   }
2013 
2014   if (OptimizePtrCompare) {
2015     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
2016       Node *n = ptr_cmp_worklist.at(i);
2017       const TypeInt* tcmp = optimize_ptr_compare(n);
2018       if (tcmp->singleton()) {
2019         Node* cmp = igvn->makecon(tcmp);
2020 #ifndef PRODUCT
2021         if (PrintOptimizePtrCompare) {
2022           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
2023           if (Verbose) {
2024             n->dump(1);
2025           }
2026         }
2027 #endif
2028         igvn->replace_node(n, cmp);
2029       }
2030     }
2031   }
2032 
2033   // For MemBarStoreStore nodes added in library_call.cpp, check
2034   // escape status of associated AllocateNode and optimize out
2035   // MemBarStoreStore node if the allocated object never escapes.
2036   for (int i = 0; i < storestore_worklist.length(); i++) {
2037     Node* storestore = storestore_worklist.at(i);
2038     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
2039     if (alloc->is_Allocate() && not_global_escape(alloc)) {
2040       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
2041       mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
2042       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
2043       igvn->register_new_node_with_optimizer(mb);
2044       igvn->replace_node(storestore, mb);
2045     }
2046   }
2047 }
2048 
// Optimize object compares.
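// For example (an illustrative sketch):
//
//    Point a = new Point();      // non-escaping, unique allocation
//    if (a == b) { ... }         // if 'b' cannot point to 'a', the CmpP/CmpN
//                                // is folded to NE and the branch is dead.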
2050 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
2051   assert(OptimizePtrCompare, "sanity");
2052   assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
2053   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
2054   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
2055   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0,1]
2056 
2057   PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
2058   PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
2059   JavaObjectNode* jobj1 = unique_java_object(n->in(1));
2060   JavaObjectNode* jobj2 = unique_java_object(n->in(2));
2061   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
2062   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
2063 
2064   // Check simple cases first.
2065   if (jobj1 != NULL) {
2066     if (jobj1->escape_state() == PointsToNode::NoEscape) {
2067       if (jobj1 == jobj2) {
        // Comparing the same non-escaping object.
2069         return EQ;
2070       }
2071       Node* obj = jobj1->ideal_node();
      // Comparing a non-escaping allocation.
2073       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
2074           !ptn2->points_to(jobj1)) {
2075         return NE; // This includes nullness check.
2076       }
2077     }
2078   }
2079   if (jobj2 != NULL) {
2080     if (jobj2->escape_state() == PointsToNode::NoEscape) {
2081       Node* obj = jobj2->ideal_node();
      // Comparing a non-escaping allocation.
2083       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
2084           !ptn1->points_to(jobj2)) {
2085         return NE; // This includes nullness check.
2086       }
2087     }
2088   }
2089   if (jobj1 != NULL && jobj1 != phantom_obj &&
2090       jobj2 != NULL && jobj2 != phantom_obj &&
2091       jobj1->ideal_node()->is_Con() &&
2092       jobj2->ideal_node()->is_Con()) {
    // Comparing Klass or String constants. Need to be careful with
    // compressed pointers - compare the types of ConN and ConP instead of the nodes.
2095     const Type* t1 = jobj1->ideal_node()->get_ptr_type();
2096     const Type* t2 = jobj2->ideal_node()->get_ptr_type();
2097     if (t1->make_ptr() == t2->make_ptr()) {
2098       return EQ;
2099     } else {
2100       return NE;
2101     }
2102   }
2103   if (ptn1->meet(ptn2)) {
2104     return UNKNOWN; // Sets are not disjoint
2105   }
2106 
2107   // Sets are disjoint.
2108   bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
2109   bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
2110   bool set1_has_null_ptr    = ptn1->points_to(null_obj);
2111   bool set2_has_null_ptr    = ptn2->points_to(null_obj);
2112   if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
2113       (set2_has_unknown_ptr && set1_has_null_ptr)) {
2114     // Check nullness of unknown object.
2115     return UNKNOWN;
2116   }
2117 
2118   // Disjointness by itself is not sufficient since
2119   // alias analysis is not complete for escaped objects.
2120   // Disjoint sets are definitely unrelated only when
  // at least one set contains only non-escaping allocations.
2122   if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
2123     if (ptn1->non_escaping_allocation()) {
2124       return NE;
2125     }
2126   }
2127   if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
2128     if (ptn2->non_escaping_allocation()) {
2129       return NE;
2130     }
2131   }
2132   return UNKNOWN;
2133 }
2134 
2135 // Connection Graph construction functions.
2136 
2137 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
2138   PointsToNode* ptadr = _nodes.at(n->_idx);
2139   if (ptadr != NULL) {
2140     assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
2141     return;
2142   }
2143   Compile* C = _compile;
2144   ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
2145   map_ideal_node(n, ptadr);
2146 }
2147 
2148 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
2149   PointsToNode* ptadr = _nodes.at(n->_idx);
2150   if (ptadr != NULL) {
2151     assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
2152     return;
2153   }
2154   Compile* C = _compile;
2155   ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
2156   map_ideal_node(n, ptadr);
2157 }
2158 
2159 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
2160   PointsToNode* ptadr = _nodes.at(n->_idx);
2161   if (ptadr != NULL) {
2162     assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
2163     return;
2164   }
2165   bool unsafe = false;
2166   bool is_oop = is_oop_field(n, offset, &unsafe);
2167   if (unsafe) {
2168     es = PointsToNode::GlobalEscape;
2169   }
2170   Compile* C = _compile;
2171   FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
2172   map_ideal_node(n, field);
2173 }
2174 
2175 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
2176                                     PointsToNode* src, PointsToNode* dst) {
2177   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2178   assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2179   PointsToNode* ptadr = _nodes.at(n->_idx);
2180   if (ptadr != NULL) {
2181     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2182     return;
2183   }
2184   Compile* C = _compile;
2185   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2186   map_ideal_node(n, ptadr);
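  // The resulting edges are (illustration):  dst --> Arraycopy --> src,
  // i.e. the destination object points to the Arraycopy node, which in
  // turn points to the source object.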
2187   // Add edge from arraycopy node to source object.
2188   (void)add_edge(ptadr, src);
2189   src->set_arraycopy_src();
2190   // Add edge from destination object to arraycopy node.
2191   (void)add_edge(dst, ptadr);
2192   dst->set_arraycopy_dst();
2193 }
2194 
2195 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2196   const Type* adr_type = n->as_AddP()->bottom_type();
2197   BasicType bt = T_INT;
2198   if (offset == Type::OffsetBot) {
2199     // Check only oop fields.
2200     if (!adr_type->isa_aryptr() ||
2201         (adr_type->isa_aryptr()->klass() == NULL) ||
2202          adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2203       // OffsetBot is used to reference array's element. Ignore first AddP.
2204       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2205         bt = T_OBJECT;
2206       }
2207     }
2208   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2209     if (adr_type->isa_instptr()) {
2210       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2211       if (field != NULL) {
2212         bt = field->layout_type();
2213       } else {
2214         // Check for unsafe oop field access
2215         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2216             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2217             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2218             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2219           bt = T_OBJECT;
2220           (*unsafe) = true;
2221         }
2222       }
2223     } else if (adr_type->isa_aryptr()) {
2224       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2225         // Ignore array length load.
2226       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2227         // Ignore first AddP.
2228       } else {
2229         const Type* elemtype = adr_type->isa_aryptr()->elem();
2230         bt = elemtype->array_element_basic_type();
2231       }
2232     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2233       // Allocation initialization, ThreadLocal field access, unsafe access
2234       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2235           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2236           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2237           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2238         bt = T_OBJECT;
2239       }
2240     }
2241   }
2242   // Note: T_NARROWOOP is not classed as a real reference type
2243   return (is_reference_type(bt) || bt == T_NARROWOOP);
2244 }
2245 
// Returns the unique java object this node points to, or NULL.
2247 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2248   assert(!_collecting, "should not call when constructed graph");
2249   // If the node was created after the escape computation we can't answer.
2250   uint idx = n->_idx;
2251   if (idx >= nodes_size()) {
2252     return NULL;
2253   }
2254   PointsToNode* ptn = ptnode_adr(idx);
2255   if (ptn == NULL) {
2256     return NULL;
2257   }
2258   if (ptn->is_JavaObject()) {
2259     return ptn->as_JavaObject();
2260   }
2261   assert(ptn->is_LocalVar(), "sanity");
2262   // Check all java objects it points to.
2263   JavaObjectNode* jobj = NULL;
2264   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2265     PointsToNode* e = i.get();
2266     if (e->is_JavaObject()) {
2267       if (jobj == NULL) {
2268         jobj = e->as_JavaObject();
2269       } else if (jobj != e) {
2270         return NULL;
2271       }
2272     }
2273   }
2274   return jobj;
2275 }
2276 
2277 // Return true if this node points only to non-escaping allocations.
2278 bool PointsToNode::non_escaping_allocation() {
2279   if (is_JavaObject()) {
2280     Node* n = ideal_node();
2281     if (n->is_Allocate() || n->is_CallStaticJava()) {
2282       return (escape_state() == PointsToNode::NoEscape);
2283     } else {
2284       return false;
2285     }
2286   }
2287   assert(is_LocalVar(), "sanity");
2288   // Check all java objects it points to.
2289   for (EdgeIterator i(this); i.has_next(); i.next()) {
2290     PointsToNode* e = i.get();
2291     if (e->is_JavaObject()) {
2292       Node* n = e->ideal_node();
2293       if ((e->escape_state() != PointsToNode::NoEscape) ||
2294           !(n->is_Allocate() || n->is_CallStaticJava())) {
2295         return false;
2296       }
2297     }
2298   }
2299   return true;
2300 }
2301 
2302 // Return true if we know the node does not escape globally.
2303 bool ConnectionGraph::not_global_escape(Node *n) {
2304   assert(!_collecting, "should not call during graph construction");
2305   // If the node was created after the escape computation we can't answer.
2306   uint idx = n->_idx;
2307   if (idx >= nodes_size()) {
2308     return false;
2309   }
2310   PointsToNode* ptn = ptnode_adr(idx);
2311   if (ptn == NULL) {
2312     return false; // not in congraph (e.g. ConI)
2313   }
2314   PointsToNode::EscapeState es = ptn->escape_state();
2315   // If we have already computed a value, return it.
2316   if (es >= PointsToNode::GlobalEscape) {
2317     return false;
2318   }
2319   if (ptn->is_JavaObject()) {
2320     return true; // (es < PointsToNode::GlobalEscape);
2321   }
2322   assert(ptn->is_LocalVar(), "sanity");
2323   // Check all java objects it points to.
2324   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2325     if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
2326       return false;
2327     }
2328   }
2329   return true;
2330 }
2331 
2332 
2333 // Helper functions
2334 
// Return true if this node is the specified java object or points to it.
2336 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2337   if (is_JavaObject()) {
2338     return (this == ptn);
2339   }
2340   assert(is_LocalVar() || is_Field(), "sanity");
2341   for (EdgeIterator i(this); i.has_next(); i.next()) {
2342     if (i.get() == ptn) {
2343       return true;
2344     }
2345   }
2346   return false;
2347 }
2348 
// Return true if one node points to the other or their points-to sets intersect.
2350 bool PointsToNode::meet(PointsToNode* ptn) {
2351   if (this == ptn) {
2352     return true;
2353   } else if (ptn->is_JavaObject()) {
2354     return this->points_to(ptn->as_JavaObject());
2355   } else if (this->is_JavaObject()) {
2356     return ptn->points_to(this->as_JavaObject());
2357   }
2358   assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2359   int ptn_count =  ptn->edge_count();
2360   for (EdgeIterator i(this); i.has_next(); i.next()) {
2361     PointsToNode* this_e = i.get();
2362     for (int j = 0; j < ptn_count; j++) {
2363       if (this_e == ptn->edge(j)) {
2364         return true;
2365       }
2366     }
2367   }
2368   return false;
2369 }
2370 
2371 #ifdef ASSERT
// Return true if the given java object is among this field's bases.
2373 bool FieldNode::has_base(JavaObjectNode* jobj) const {
2374   for (BaseIterator i(this); i.has_next(); i.next()) {
2375     if (i.get() == jobj) {
2376       return true;
2377     }
2378   }
2379   return false;
2380 }
2381 #endif
2382 
2383 bool ConnectionGraph::is_captured_store_address(Node* addp) {
2384   // Handle simple case first.
2385   assert(_igvn->type(addp)->isa_oopptr() == NULL, "should be raw access");
2386   if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) {
2387     return true;
2388   } else if (addp->in(AddPNode::Address)->is_Phi()) {
2389     for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2390       Node* addp_use = addp->fast_out(i);
2391       if (addp_use->is_Store()) {
2392         for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) {
2393           if (addp_use->fast_out(j)->is_Initialize()) {
2394             return true;
2395           }
2396         }
2397       }
2398     }
2399   }
2400   return false;
2401 }
2402 
2403 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2404   const Type *adr_type = phase->type(adr);
2405   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && is_captured_store_address(adr)) {
    // We are computing a raw address for a store captured by an Initialize;
    // compute an appropriate address type. AddP cases #3 and #5 (see below).
2408     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2409     assert(offs != Type::OffsetBot ||
2410            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2411            "offset must be a constant or it is initialization of array");
2412     return offs;
2413   }
2414   const TypePtr *t_ptr = adr_type->isa_ptr();
2415   assert(t_ptr != NULL, "must be a pointer type");
2416   return t_ptr->offset();
2417 }
2418 
2419 Node* ConnectionGraph::get_addp_base(Node *addp) {
2420   assert(addp->is_AddP(), "must be AddP");
2421   //
2422   // AddP cases for Base and Address inputs:
2423   // case #1. Direct object's field reference:
2424   //     Allocate
2425   //       |
2426   //     Proj #5 ( oop result )
2427   //       |
2428   //     CheckCastPP (cast to instance type)
2429   //      | |
2430   //     AddP  ( base == address )
2431   //
2432   // case #2. Indirect object's field reference:
2433   //      Phi
2434   //       |
2435   //     CastPP (cast to instance type)
2436   //      | |
2437   //     AddP  ( base == address )
2438   //
2439   // case #3. Raw object's field reference for Initialize node:
2440   //      Allocate
2441   //        |
2442   //      Proj #5 ( oop result )
2443   //  top   |
2444   //     \  |
2445   //     AddP  ( base == top )
2446   //
2447   // case #4. Array's element reference:
2448   //   {CheckCastPP | CastPP}
2449   //     |  | |
2450   //     |  AddP ( array's element offset )
2451   //     |  |
2452   //     AddP ( array's offset )
2453   //
2454   // case #5. Raw object's field reference for arraycopy stub call:
2455   //          The inline_native_clone() case when the arraycopy stub is called
2456   //          after the allocation before Initialize and CheckCastPP nodes.
2457   //      Allocate
2458   //        |
2459   //      Proj #5 ( oop result )
2460   //       | |
2461   //       AddP  ( base == address )
2462   //
2463   // case #6. Constant Pool, ThreadLocal, CastX2P or
2464   //          Raw object's field reference:
2465   //      {ConP, ThreadLocal, CastX2P, raw Load}
2466   //  top   |
2467   //     \  |
2468   //     AddP  ( base == top )
2469   //
2470   // case #7. Klass's field reference.
2471   //      LoadKlass
2472   //       | |
2473   //       AddP  ( base == address )
2474   //
2475   // case #8. narrow Klass's field reference.
2476   //      LoadNKlass
2477   //       |
2478   //      DecodeN
2479   //       | |
2480   //       AddP  ( base == address )
2481   //
2482   // case #9. Mixed unsafe access
2483   //    {instance}
2484   //        |
2485   //      CheckCastPP (raw)
2486   //  top   |
2487   //     \  |
2488   //     AddP  ( base == top )
2489   //
2490   Node *base = addp->in(AddPNode::Base);
2491   if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
2492     base = addp->in(AddPNode::Address);
2493     while (base->is_AddP()) {
2494       // Case #6 (unsafe access) may have several chained AddP nodes.
2495       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2496       base = base->in(AddPNode::Address);
2497     }
2498     if (base->Opcode() == Op_CheckCastPP &&
2499         base->bottom_type()->isa_rawptr() &&
2500         _igvn->type(base->in(1))->isa_oopptr()) {
2501       base = base->in(1); // Case #9
2502     } else {
2503       Node* uncast_base = base->uncast();
2504       int opcode = uncast_base->Opcode();
2505       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2506              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2507              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2508              is_captured_store_address(addp), "sanity");
2509     }
2510   }
2511   return base;
2512 }
2513 
2514 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2515   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2516   Node* addp2 = addp->raw_out(0);
2517   if (addp->outcnt() == 1 && addp2->is_AddP() &&
2518       addp2->in(AddPNode::Base) == n &&
2519       addp2->in(AddPNode::Address) == addp) {
2520     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2521     //
    // Find the array's offset to push it on the worklist first so that,
    // as a result, the array's element offset is processed first (pushed
    // second) to avoid a CastPP for the array's offset.
    // Otherwise the inserted CastPP (LocalVar) will point to what
    // the AddP (Field) points to, which would be wrong since
    // the algorithm expects the CastPP to point to the same things
    // as the AddP's base CheckCastPP (LocalVar).
2529     //
2530     //    ArrayAllocation
2531     //     |
2532     //    CheckCastPP
2533     //     |
2534     //    memProj (from ArrayAllocation CheckCastPP)
2535     //     |  ||
2536     //     |  ||   Int (element index)
2537     //     |  ||    |   ConI (log(element size))
2538     //     |  ||    |   /
2539     //     |  ||   LShift
2540     //     |  ||  /
2541     //     |  AddP (array's element offset)
2542     //     |  |
2543     //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
2544     //     | / /
2545     //     AddP (array's offset)
2546     //      |
2547     //     Load/Store (memory operation on array's element)
2548     //
2549     return addp2;
2550   }
2551   return NULL;
2552 }
2553 
2554 //
2555 // Adjust the type and inputs of an AddP which computes the
2556 // address of a field of an instance
2557 //
2558 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2559   PhaseGVN* igvn = _igvn;
2560   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2561   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2562   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2563   if (t == NULL) {
    // We are computing a raw address for a store captured by an Initialize;
    // compute an appropriate address type (cases #3 and #5).
2566     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2567     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2568     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2569     assert(offs != Type::OffsetBot, "offset must be a constant");
2570     t = base_t->add_offset(offs)->is_oopptr();
2571   }
2572   int inst_id =  base_t->instance_id();
2573   assert(!t->is_known_instance() || t->instance_id() == inst_id,
2574                              "old type must be non-instance or match new type");
2575 
  // The type 't' could be a subclass of 'base_t'.
  // As a result t->offset() could be larger than base_t's size and it will
  // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
  // constructor verifies correctness of the offset.
  //
  // This can happen on a subclass's branch (from type profiling
  // inlining) which was not eliminated during parsing since the exactness
  // of the allocation type was not propagated to the subclass type check.
  //
  // Or the type 't' could be unrelated to 'base_t' at all.
  // This can happen when the CHA type differs from the MDO type on a dead path
  // (for example, from an instanceof check) which is not collapsed during parsing.
  //
  // Do nothing for such an AddP node and don't process its users since
  // this code branch will go away.
2591   //
2592   if (!t->is_known_instance() &&
2593       !base_t->klass()->is_subtype_of(t->klass())) {
2594      return false; // bail out
2595   }
2596   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
2597   // Do NOT remove the next line: ensure a new alias index is allocated
2598   // for the instance type. Note: C++ will not remove it since the call
  // has a side effect.
2600   int alias_idx = _compile->get_alias_index(tinst);
2601   igvn->set_type(addp, tinst);
2602   // record the allocation in the node map
2603   set_map(addp, get_map(base->_idx));
2604   // Set addp's Base and Address to 'base'.
2605   Node *abase = addp->in(AddPNode::Base);
2606   Node *adr   = addp->in(AddPNode::Address);
2607   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2608       adr->in(0)->_idx == (uint)inst_id) {
2609     // Skip AddP cases #3 and #5.
2610   } else {
2611     assert(!abase->is_top(), "sanity"); // AddP case #3
2612     if (abase != base) {
2613       igvn->hash_delete(addp);
2614       addp->set_req(AddPNode::Base, base);
2615       if (abase == adr) {
2616         addp->set_req(AddPNode::Address, base);
2617       } else {
2618         // AddP case #4 (adr is array's element offset AddP node)
2619 #ifdef ASSERT
2620         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
2621         assert(adr->is_AddP() && atype != NULL &&
2622                atype->instance_id() == inst_id, "array's element offset should be processed first");
2623 #endif
2624       }
2625       igvn->hash_insert(addp);
2626     }
2627   }
2628   // Put on IGVN worklist since at least addp's type was changed above.
2629   record_for_optimizer(addp);
2630   return true;
2631 }
2632 
2633 //
2634 // Create a new version of orig_phi if necessary. Returns either the newly
// created phi or an existing phi.  Sets new_created to indicate whether a new
2636 // phi was created.  Cache the last newly created phi in the node map.
2637 //
2638 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created) {
2639   Compile *C = _compile;
2640   PhaseGVN* igvn = _igvn;
2641   new_created = false;
2642   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
2643   // nothing to do if orig_phi is bottom memory or matches alias_idx
2644   if (phi_alias_idx == alias_idx) {
2645     return orig_phi;
2646   }
2647   // Have we recently created a Phi for this alias index?
2648   PhiNode *result = get_map_phi(orig_phi->_idx);
2649   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
2650     return result;
2651   }
2652   // Previous check may fail when the same wide memory Phi was split into Phis
2653   // for different memory slices. Search all Phis for this region.
2654   if (result != NULL) {
2655     Node* region = orig_phi->in(0);
2656     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2657       Node* phi = region->fast_out(i);
2658       if (phi->is_Phi() &&
2659           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
2660         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
2661         return phi->as_Phi();
2662       }
2663     }
2664   }
2665   if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
2666     if (C->do_escape_analysis() == true && !C->failing()) {
2667       // Retry compilation without escape analysis.
2668       // If this is the first failure, the sentinel string will "stick"
2669       // to the Compile object, and the C2Compiler will see it and retry.
2670       C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
2671     }
2672     return NULL;
2673   }
2674   orig_phi_worklist.append_if_missing(orig_phi);
2675   const TypePtr *atype = C->get_adr_type(alias_idx);
2676   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
2677   C->copy_node_notes_to(result, orig_phi);
2678   igvn->set_type(result, result->bottom_type());
2679   record_for_optimizer(result);
2680   set_map(orig_phi, result);
2681   new_created = true;
2682   return result;
2683 }
2684 
2685 //
2686 // Return a new version of Memory Phi "orig_phi" with the inputs having the
2687 // specified alias index.
2688 //
2689 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist) {
2690   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
2691   Compile *C = _compile;
2692   PhaseGVN* igvn = _igvn;
2693   bool new_phi_created;
2694   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
2695   if (!new_phi_created) {
2696     return result;
2697   }
2698   GrowableArray<PhiNode *>  phi_list;
2699   GrowableArray<uint>  cur_input;
2700   PhiNode *phi = orig_phi;
2701   uint idx = 1;
2702   bool finished = false;
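  // Nested Phis are handled iteratively: when an input Phi needs its own
  // split, the current Phi and input index are pushed on phi_list/cur_input
  // and the new Phi is filled in first (an explicit stack instead of recursion).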
2703   while(!finished) {
2704     while (idx < phi->req()) {
2705       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
2706       if (mem != NULL && mem->is_Phi()) {
2707         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
2708         if (new_phi_created) {
2709           // Found a phi for which we created a new split; push the current one
2710           // on the worklist and begin processing the new one.
2711           phi_list.push(phi);
2712           cur_input.push(idx);
2713           phi = mem->as_Phi();
2714           result = newphi;
2715           idx = 1;
2716           continue;
2717         } else {
2718           mem = newphi;
2719         }
2720       }
2721       if (C->failing()) {
2722         return NULL;
2723       }
2724       result->set_req(idx++, mem);
2725     }
2726 #ifdef ASSERT
2727     // verify that the new Phi has an input for each input of the original
2728     assert( phi->req() == result->req(), "must have same number of inputs.");
2729     assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
2730 #endif
2731     // Verify that each input of the new Phi corresponds to an input of the
2732     // original Phi (NULL inputs must match).
2733     for (uint i = 1; i < phi->req(); i++) {
2734       Node* in = result->in(i);
2735       assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
2736     }
2737     // we have finished processing a Phi, see if there are any more to do
2738     finished = (phi_list.length() == 0 );
2739     if (!finished) {
2740       phi = phi_list.pop();
2741       idx = cur_input.pop();
2742       PhiNode *prev_result = get_map_phi(phi->_idx);
2743       prev_result->set_req(idx++, result);
2744       result = prev_result;
2745     }
2746   }
2747   return result;
2748 }
2749 
2750 //
2751 // The next methods are derived from methods in MemNode.
2752 //
2753 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
2754   Node *mem = mmem;
2755   // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
2756   // means an array I have not precisely typed yet.  Do not do any
2757   // alias stuff with it any time soon.
2758   if (toop->base() != Type::AnyPtr &&
2759       !(toop->klass() != NULL &&
2760         toop->klass()->is_java_lang_Object() &&
2761         toop->offset() == Type::OffsetBot)) {
2762     mem = mmem->memory_at(alias_idx);
2763     // Update input if it is progress over what we have now
2764   }
2765   return mem;
2766 }
2767 
2768 //
2769 // Move memory users to their memory slices.
2770 //
2771 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis) {
2772   Compile* C = _compile;
2773   PhaseGVN* igvn = _igvn;
2774   const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
2775   assert(tp != NULL, "ptr type");
2776   int alias_idx = C->get_alias_index(tp);
2777   int general_idx = C->get_general_index(alias_idx);
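  // general_idx is the alias index of the general (non-instance) memory slice
  // corresponding to alias_idx.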
2778 
2779   // Move users first
2780   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2781     Node* use = n->fast_out(i);
2782     if (use->is_MergeMem()) {
2783       MergeMemNode* mmem = use->as_MergeMem();
2784       assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
2785       if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
2786         continue; // Nothing to do
2787       }
2788       // Replace previous general reference to mem node.
2789       uint orig_uniq = C->unique();
2790       Node* m = find_inst_mem(n, general_idx, orig_phis);
2791       assert(orig_uniq == C->unique(), "no new nodes");
2792       mmem->set_memory_at(general_idx, m);
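      // Replacing the general slice input removed a use of 'n'; adjust the
      // out-edge iterator accordingly.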
2793       --imax;
2794       --i;
2795     } else if (use->is_MemBar()) {
2796       assert(!use->is_Initialize(), "initializing stores should not be moved");
2797       if (use->req() > MemBarNode::Precedent &&
2798           use->in(MemBarNode::Precedent) == n) {
2799         // Don't move related membars.
2800         record_for_optimizer(use);
2801         continue;
2802       }
2803       tp = use->as_MemBar()->adr_type()->isa_ptr();
2804       if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
2805           alias_idx == general_idx) {
2806         continue; // Nothing to do
2807       }
2808       // Move to general memory slice.
2809       uint orig_uniq = C->unique();
2810       Node* m = find_inst_mem(n, general_idx, orig_phis);
2811       assert(orig_uniq == C->unique(), "no new nodes");
2812       igvn->hash_delete(use);
2813       imax -= use->replace_edge(n, m, igvn);
2814       igvn->hash_insert(use);
2815       record_for_optimizer(use);
2816       --i;
2817 #ifdef ASSERT
2818     } else if (use->is_Mem()) {
2819       if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
2820         // Don't move related cardmark.
2821         continue;
2822       }
2823       // Memory nodes should have new memory input.
2824       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
2825       assert(tp != NULL, "ptr type");
2826       int idx = C->get_alias_index(tp);
2827       assert(get_map(use->_idx) != NULL || idx == alias_idx,
2828              "Following memory nodes should have new memory input or be on the same memory slice");
2829     } else if (use->is_Phi()) {
2830       // Phi nodes should be split and moved already.
2831       tp = use->as_Phi()->adr_type()->isa_ptr();
2832       assert(tp != NULL, "ptr type");
2833       int idx = C->get_alias_index(tp);
2834       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
2835     } else {
2836       use->dump();
2837       assert(false, "should not be here");
2838 #endif
2839     }
2840   }
2841 }
2842 
2843 //
2844 // Search the memory chain of "orig_mem" to find a MemNode whose address
2845 // type corresponds to the specified alias index.
2846 //
2847 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis) {
2848   if (orig_mem == NULL) {
2849     return orig_mem;
2850   }
2851   Compile* C = _compile;
2852   PhaseGVN* igvn = _igvn;
2853   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
2854   bool is_instance = (toop != NULL) && toop->is_known_instance();
2855   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
2856   Node *prev = NULL;
2857   Node *result = orig_mem;
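  // Walk up the memory chain from orig_mem, skipping nodes that cannot affect
  // the memory slice 'alias_idx', until a store on this slice, the matching
  // allocation/initialization, or another sentinel stops the search.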
2858   while (prev != result) {
2859     prev = result;
2860     if (result == start_mem) {
2861       break;  // hit one of our sentinels
2862     }
2863     if (result->is_Mem()) {
2864       const Type *at = igvn->type(result->in(MemNode::Address));
2865       if (at == Type::TOP) {
2866         break; // Dead
2867       }
2868       assert (at->isa_ptr() != NULL, "pointer type required.");
2869       int idx = C->get_alias_index(at->is_ptr());
2870       if (idx == alias_idx) {
2871         break; // Found
2872       }
2873       if (!is_instance && (at->isa_oopptr() == NULL ||
2874                            !at->is_oopptr()->is_known_instance())) {
2875         break; // Do not skip store to general memory slice.
2876       }
2877       result = result->in(MemNode::Memory);
2878     }
2879     if (!is_instance) {
2880       continue;  // don't search further for non-instance types
2881     }
2882     // skip over a call which does not affect this memory slice
2883     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
2884       Node *proj_in = result->in(0);
2885       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
2886         break;  // hit one of our sentinels
2887       } else if (proj_in->is_Call()) {
2888         // ArrayCopy node processed here as well
2889         CallNode *call = proj_in->as_Call();
2890         if (!call->may_modify(toop, igvn)) {
2891           result = call->in(TypeFunc::Memory);
2892         }
2893       } else if (proj_in->is_Initialize()) {
2894         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
2895         // Stop if this is the initialization for the object instance which
2896         // contains this memory slice, otherwise skip over it.
2897         if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
2898           result = proj_in->in(TypeFunc::Memory);
2899         }
2900       } else if (proj_in->is_MemBar()) {
2901         // Check if there is an array copy for a clone
2902         // Step over GC barrier when ReduceInitialCardMarks is disabled
2903         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2904         Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
2905 
2906         if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
2907           // Stop if it is a clone
2908           ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
2909           if (ac->may_modify(toop, igvn)) {
2910             break;
2911           }
2912         }
2913         result = proj_in->in(TypeFunc::Memory);
2914       }
2915     } else if (result->is_MergeMem()) {
2916       MergeMemNode *mmem = result->as_MergeMem();
2917       result = step_through_mergemem(mmem, alias_idx, toop);
2918       if (result == mmem->base_memory()) {
2919         // Didn't find instance memory, search through general slice recursively.
2920         result = mmem->memory_at(C->get_general_index(alias_idx));
2921         result = find_inst_mem(result, alias_idx, orig_phis);
2922         if (C->failing()) {
2923           return NULL;
2924         }
2925         mmem->set_memory_at(alias_idx, result);
2926       }
2927     } else if (result->is_Phi() &&
2928                C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
2929       Node *un = result->as_Phi()->unique_input(igvn);
2930       if (un != NULL) {
2931         orig_phis.append_if_missing(result->as_Phi());
2932         result = un;
2933       } else {
2934         break;
2935       }
2936     } else if (result->is_ClearArray()) {
2937       if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
2938         // Cannot bypass the initialization of the instance
2939         // we are looking for.
2940         break;
2941       }
2942       // Otherwise skip it (the call updated 'result' value).
2943     } else if (result->Opcode() == Op_SCMemProj) {
2944       Node* mem = result->in(0);
2945       Node* adr = NULL;
2946       if (mem->is_LoadStore()) {
2947         adr = mem->in(MemNode::Address);
2948       } else {
2949         assert(mem->Opcode() == Op_EncodeISOArray ||
2950                mem->Opcode() == Op_StrCompressedCopy, "sanity");
2951         adr = mem->in(3); // Memory edge corresponds to destination array
2952       }
2953       const Type *at = igvn->type(adr);
2954       if (at != Type::TOP) {
2955         assert(at->isa_ptr() != NULL, "pointer type required.");
2956         int idx = C->get_alias_index(at->is_ptr());
2957         if (idx == alias_idx) {
2958           // Assert in debug mode
2959           assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
2960           break; // In product mode return SCMemProj node
2961         }
2962       }
2963       result = mem->in(MemNode::Memory);
2964     } else if (result->Opcode() == Op_StrInflatedCopy) {
2965       Node* adr = result->in(3); // Memory edge corresponds to destination array
2966       const Type *at = igvn->type(adr);
2967       if (at != Type::TOP) {
2968         assert(at->isa_ptr() != NULL, "pointer type required.");
2969         int idx = C->get_alias_index(at->is_ptr());
2970         if (idx == alias_idx) {
2971           // Assert in debug mode
2972           assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
2973           break; // In product mode return SCMemProj node
2974         }
2975       }
2976       result = result->in(MemNode::Memory);
2977     }
2978   }
2979   if (result->is_Phi()) {
2980     PhiNode *mphi = result->as_Phi();
2981     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
2982     const TypePtr *t = mphi->adr_type();
2983     if (!is_instance) {
2984       // Push all non-instance Phis on the orig_phis worklist to update inputs
2985       // during Phase 4 if needed.
2986       orig_phis.append_if_missing(mphi);
2987     } else if (C->get_alias_index(t) != alias_idx) {
2988       // Create a new Phi with the specified alias index type.
2989       result = split_memory_phi(mphi, alias_idx, orig_phis);
2990     }
2991   }
2992   // The result is either a MemNode, a PhiNode, or an InitializeNode.
2993   return result;
2994 }
2995 
2996 //
2997 //  Convert the types of non-escaped objects to instance types where possible,
2998 //  propagate the new type information through the graph, and update memory
2999 //  edges and MergeMem inputs to reflect the new type.
3000 //
3001 //  We start with allocations (and calls which may be allocations) on alloc_worklist.
3002 //  The processing is done in 4 phases:
3003 //
3004 //  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
3005 //            types for the CheckCastPP for allocations where possible.
3006 //            Propagate the new types through users as follows:
3007 //               casts and Phi:  push users on alloc_worklist
3008 //               AddP:  cast Base and Address inputs to the instance type
3009 //                      push any AddP users on alloc_worklist and push any memnode
3010 //                      users onto memnode_worklist.
3011 //  Phase 2:  Process MemNode's from memnode_worklist. Compute the new address type and
3012 //            search the Memory chain for a store with the appropriate
3013 //            address type.  If a Phi is found, create a new version with
3014 //            the appropriate memory slices from each of the Phi inputs.
3015 //            For stores, process the users as follows:
3016 //               MemNode:  push on memnode_worklist
3017 //               MergeMem: push on mergemem_worklist
3018 //  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory
3019 //            slice, moving the first node encountered of each instance type to
3020 //            the input corresponding to its alias index (the appropriate
3021 //            memory slice).
3022 //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
3023 //
3024 // In the following example, the CheckCastPP nodes are the cast of allocation
3025 // results and the allocation of node 29 is non-escaped and eligible to be an
3026 // instance type.
3027 //
3028 // We start with:
3029 //
3030 //     7 Parm #memory
3031 //    10  ConI  "12"
3032 //    19  CheckCastPP   "Foo"
3033 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
3034 //    29  CheckCastPP   "Foo"
3035 //    30  AddP  _ 29 29 10  Foo+12  alias_index=4
3036 //
3037 //    40  StoreP  25   7  20   ... alias_index=4
3038 //    50  StoreP  35  40  30   ... alias_index=4
3039 //    60  StoreP  45  50  20   ... alias_index=4
3040 //    70  LoadP    _  60  30   ... alias_index=4
3041 //    80  Phi     75  50  60   Memory alias_index=4
3042 //    90  LoadP    _  80  30   ... alias_index=4
3043 //   100  LoadP    _  80  20   ... alias_index=4
3044 //
3045 //
3046 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
3047 // and creating a new alias index for node 30.  This gives:
3048 //
3049 //     7 Parm #memory
3050 //    10  ConI  "12"
3051 //    19  CheckCastPP   "Foo"
3052 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
3053 //    29  CheckCastPP   "Foo"  iid=24
3054 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
3055 //
3056 //    40  StoreP  25   7  20   ... alias_index=4
3057 //    50  StoreP  35  40  30   ... alias_index=6
3058 //    60  StoreP  45  50  20   ... alias_index=4
3059 //    70  LoadP    _  60  30   ... alias_index=6
3060 //    80  Phi     75  50  60   Memory alias_index=4
3061 //    90  LoadP    _  80  30   ... alias_index=6
3062 //   100  LoadP    _  80  20   ... alias_index=4
3063 //
3064 // In phase 2, new memory inputs are computed for the loads and stores,
3065 // and a new version of the phi is created.  In phase 4, the inputs to
3066 // node 80 are updated and then the memory nodes are updated with the
3067 // values computed in phase 2.  This results in:
3068 //
3069 //     7 Parm #memory
3070 //    10  ConI  "12"
3071 //    19  CheckCastPP   "Foo"
3072 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
3073 //    29  CheckCastPP   "Foo"  iid=24
3074 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
3075 //
3076 //    40  StoreP  25  7   20   ... alias_index=4
3077 //    50  StoreP  35  7   30   ... alias_index=6
3078 //    60  StoreP  45  40  20   ... alias_index=4
3079 //    70  LoadP    _  50  30   ... alias_index=6
3080 //    80  Phi     75  40  60   Memory alias_index=4
3081 //   120  Phi     75  50  50   Memory alias_index=6
3082 //    90  LoadP    _ 120  30   ... alias_index=6
3083 //   100  LoadP    _  80  20   ... alias_index=4
3084 //
3085 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist,
3086                                          GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
3087                                          GrowableArray<MergeMemNode*> &mergemem_worklist) {
3088   GrowableArray<Node *>  memnode_worklist;
3089   GrowableArray<PhiNode *>  orig_phis;
3090   PhaseIterGVN  *igvn = _igvn;
3091   uint new_index_start = (uint) _compile->num_alias_types();
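  // New instance alias indexes created during Phase 1 (in split_AddP()) will
  // be >= new_index_start.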
3092   VectorSet visited;
3093   ideal_nodes.clear(); // Reset for use with set_map/get_map.
3094   uint unique_old = _compile->unique();
3095 
3096   //  Phase 1:  Process possible allocations from alloc_worklist.
3097   //  Create instance types for the CheckCastPP for allocations where possible.
3098   //
3099   // (Note: don't forget to change the order of the second AddP node on
3100   //  the alloc_worklist if the order of the worklist processing is changed,
3101   //  see the comment in find_second_addp().)
3102   //
3103   while (alloc_worklist.length() != 0) {
3104     Node *n = alloc_worklist.pop();
3105     uint ni = n->_idx;
3106     if (n->is_Call()) {
3107       CallNode *alloc = n->as_Call();
3108       // copy escape information to call node
3109       PointsToNode* ptn = ptnode_adr(alloc->_idx);
3110       PointsToNode::EscapeState es = ptn->escape_state();
3111       // We have an allocation or call which returns a Java object,
3112       // see if it is non-escaped.
3113       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
3114         continue;
3115       }
3116       // Find CheckCastPP for the allocate or for the return value of a call
3117       n = alloc->result_cast();
3118       if (n == NULL) {            // No uses except Initialize node
3119         if (alloc->is_Allocate()) {
3120           // Set the scalar_replaceable flag for allocation
3121           // so it could be eliminated if it has no uses.
3122           alloc->as_Allocate()->_is_scalar_replaceable = true;
3123         }
3124         continue;
3125       }
3126       if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
3127         // We could reach here for the allocate case if one Initialize is associated with many allocations.
3128         if (alloc->is_Allocate()) {
3129           alloc->as_Allocate()->_is_scalar_replaceable = false;
3130         }
3131         continue;
3132       }
3133 
3134       // The inline code for Object.clone() casts the allocation result to
3135       // java.lang.Object and then to the actual type of the allocated
3136       // object. Detect this case and use the second cast.
3137       // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
3138       // the allocation result is cast to java.lang.Object and then
3139       // to the actual Array type.
3140       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
3141           && (alloc->is_AllocateArray() ||
3142               igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) {
3143         Node *cast2 = NULL;
3144         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3145           Node *use = n->fast_out(i);
3146           if (use->is_CheckCastPP()) {
3147             cast2 = use;
3148             break;
3149           }
3150         }
3151         if (cast2 != NULL) {
3152           n = cast2;
3153         } else {
3154           // Not scalar replaceable if the allocation type is unknown statically
3155           // (reflection allocation): the object can't be restored during
3156           // deoptimization without a precise type.
3157           continue;
3158         }
3159       }
3160 
3161       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
3162       if (t == NULL) {
3163         continue;  // not a TypeOopPtr
3164       }
3165       if (!t->klass_is_exact()) {
3166         continue; // not a unique type
3167       }
3168       if (alloc->is_Allocate()) {
3169         // Set the scalar_replaceable flag for allocation
3170         // so it could be eliminated.
3171         alloc->as_Allocate()->_is_scalar_replaceable = true;
3172       }
3173       set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
3174       // in order for an object to be scalar-replaceable, it must be:
3175       //   - a direct allocation (not a call returning an object)
3176       //   - non-escaping
3177       //   - eligible to be a unique type
3178       //   - not determined to be ineligible by escape analysis
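      // Map the allocation to its unique CheckCastPP and back; later phases
      // retrieve the cast via get_map() when splitting AddP and memory nodes.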
3179       set_map(alloc, n);
3180       set_map(n, alloc);
3181       const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
3182       igvn->hash_delete(n);
3183       igvn->set_type(n,  tinst);
3184       n->raise_bottom_type(tinst);
3185       igvn->hash_insert(n);
3186       record_for_optimizer(n);
3187       // Allocate an alias index for the header fields. Accesses to
3188       // the header emitted during macro expansion wouldn't have
3189       // correct memory state otherwise.
3190       _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
3191       _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
3192       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
3193 
3194         // First, put on the worklist all Field edges from the Connection Graph,
3195         // which is more accurate than pushing the immediate users from the Ideal Graph.
3196         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
3197           PointsToNode* tgt = e.get();
3198           if (tgt->is_Arraycopy()) {
3199             continue;
3200           }
3201           Node* use = tgt->ideal_node();
3202           assert(tgt->is_Field() && use->is_AddP(),
3203                  "only AddP nodes are Field edges in CG");
3204           if (use->outcnt() > 0) { // Don't process dead nodes
3205             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
3206             if (addp2 != NULL) {
3207               assert(alloc->is_AllocateArray(),"array allocation was expected");
3208               alloc_worklist.append_if_missing(addp2);
3209             }
3210             alloc_worklist.append_if_missing(use);
3211           }
3212         }
3213 
3214         // An allocation may have an Initialize which has raw stores. Scan
3215         // the users of the raw allocation result and push AddP users
3216         // on alloc_worklist.
3217         Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
3218         assert (raw_result != NULL, "must have an allocation result");
3219         for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
3220           Node *use = raw_result->fast_out(i);
3221           if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
3222             Node* addp2 = find_second_addp(use, raw_result);
3223             if (addp2 != NULL) {
3224               assert(alloc->is_AllocateArray(),"array allocation was expected");
3225               alloc_worklist.append_if_missing(addp2);
3226             }
3227             alloc_worklist.append_if_missing(use);
3228           } else if (use->is_MemBar()) {
3229             memnode_worklist.append_if_missing(use);
3230           }
3231         }
3232       }
3233     } else if (n->is_AddP()) {
3234       JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
3235       if (jobj == NULL || jobj == phantom_obj) {
3236 #ifdef ASSERT
3237         ptnode_adr(get_addp_base(n)->_idx)->dump();
3238         ptnode_adr(n->_idx)->dump();
3239         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3240 #endif
3241         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3242         return;
3243       }
3244       Node *base = get_map(jobj->idx());  // CheckCastPP node
3245       if (!split_AddP(n, base)) continue; // wrong type from dead path
3246     } else if (n->is_Phi() ||
3247                n->is_CheckCastPP() ||
3248                n->is_EncodeP() ||
3249                n->is_DecodeN() ||
3250                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
3251       if (visited.test_set(n->_idx)) {
3252         assert(n->is_Phi(), "loops only through Phi's");
3253         continue;  // already processed
3254       }
3255       JavaObjectNode* jobj = unique_java_object(n);
3256       if (jobj == NULL || jobj == phantom_obj) {
3257 #ifdef ASSERT
3258         ptnode_adr(n->_idx)->dump();
3259         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3260 #endif
3261         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3262         return;
3263       } else {
3264         Node *val = get_map(jobj->idx());   // CheckCastPP node
3265         TypeNode *tn = n->as_Type();
3266         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3267         assert(tinst != NULL && tinst->is_known_instance() &&
3268                tinst->instance_id() == jobj->idx() , "instance type expected.");
3269 
3270         const Type *tn_type = igvn->type(tn);
3271         const TypeOopPtr *tn_t;
3272         if (tn_type->isa_narrowoop()) {
3273           tn_t = tn_type->make_ptr()->isa_oopptr();
3274         } else {
3275           tn_t = tn_type->isa_oopptr();
3276         }
3277         if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
3278           if (tn_type->isa_narrowoop()) {
3279             tn_type = tinst->make_narrowoop();
3280           } else {
3281             tn_type = tinst;
3282           }
3283           igvn->hash_delete(tn);
3284           igvn->set_type(tn, tn_type);
3285           tn->set_type(tn_type);
3286           igvn->hash_insert(tn);
3287           record_for_optimizer(n);
3288         } else {
3289           assert(tn_type == TypePtr::NULL_PTR ||
3290                  tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),
3291                  "unexpected type");
3292           continue; // Skip dead path with different type
3293         }
3294       }
3295     } else {
3296       debug_only(n->dump();)
3297       assert(false, "EA: unexpected node");
3298       continue;
3299     }
3300     // push allocation's users on appropriate worklist
3301     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3302       Node *use = n->fast_out(i);
3303       if(use->is_Mem() && use->in(MemNode::Address) == n) {
3304         // Load/store to instance's field
3305         memnode_worklist.append_if_missing(use);
3306       } else if (use->is_MemBar()) {
3307         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3308           memnode_worklist.append_if_missing(use);
3309         }
3310       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3311         Node* addp2 = find_second_addp(use, n);
3312         if (addp2 != NULL) {
3313           alloc_worklist.append_if_missing(addp2);
3314         }
3315         alloc_worklist.append_if_missing(use);
3316       } else if (use->is_Phi() ||
3317                  use->is_CheckCastPP() ||
3318                  use->is_EncodeNarrowPtr() ||
3319                  use->is_DecodeNarrowPtr() ||
3320                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3321         alloc_worklist.append_if_missing(use);
3322 #ifdef ASSERT
3323       } else if (use->is_Mem()) {
3324         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3325       } else if (use->is_MergeMem()) {
3326         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3327       } else if (use->is_SafePoint()) {
3328         // Look for MergeMem nodes for calls which reference unique allocation
3329         // (through CheckCastPP nodes) even for debug info.
3330         Node* m = use->in(TypeFunc::Memory);
3331         if (m->is_MergeMem()) {
3332           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3333         }
3334       } else if (use->Opcode() == Op_EncodeISOArray) {
3335         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3336           // EncodeISOArray overwrites destination array
3337           memnode_worklist.append_if_missing(use);
3338         }
3339       } else {
3340         uint op = use->Opcode();
3341         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3342             (use->in(MemNode::Memory) == n)) {
3343           // They overwrite the memory edge corresponding to the destination array.
3344           memnode_worklist.append_if_missing(use);
3345         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3346               op == Op_CastP2X || op == Op_StoreCM ||
3347               op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
3348               op == Op_CountPositives ||
3349               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3350               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3351               op == Op_SubTypeCheck ||
3352               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3353           n->dump();
3354           use->dump();
3355           assert(false, "EA: missing allocation reference path");
3356         }
3357 #endif
3358       }
3359     }
3360 
3361   }
3362 
3363   // Go over all ArrayCopy nodes and if one of the inputs has a unique
3364   // type, record it in the ArrayCopy node so we know what memory this
3365   // node uses/modified.
3366   for (int next = 0; next < arraycopy_worklist.length(); next++) {
3367     ArrayCopyNode* ac = arraycopy_worklist.at(next);
3368     Node* dest = ac->in(ArrayCopyNode::Dest);
3369     if (dest->is_AddP()) {
3370       dest = get_addp_base(dest);
3371     }
3372     JavaObjectNode* jobj = unique_java_object(dest);
3373     if (jobj != NULL) {
3374       Node *base = get_map(jobj->idx());
3375       if (base != NULL) {
3376         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
3377         ac->_dest_type = base_t;
3378       }
3379     }
3380     Node* src = ac->in(ArrayCopyNode::Src);
3381     if (src->is_AddP()) {
3382       src = get_addp_base(src);
3383     }
3384     jobj = unique_java_object(src);
3385     if (jobj != NULL) {
3386       Node* base = get_map(jobj->idx());
3387       if (base != NULL) {
3388         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
3389         ac->_src_type = base_t;
3390       }
3391     }
3392   }
3393 
3394   // New alias types were created in split_AddP().
3395   uint new_index_end = (uint) _compile->num_alias_types();
3396   assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");
3397 
3398   //  Phase 2:  Process MemNode's from memnode_worklist. Compute the new address type
3399   //            and new values for the Memory inputs (the Memory inputs are not
3400   //            actually updated until phase 4).
3401   if (memnode_worklist.length() == 0)
3402     return;  // nothing to do
3403   while (memnode_worklist.length() != 0) {
3404     Node *n = memnode_worklist.pop();
3405     if (visited.test_set(n->_idx)) {
3406       continue;
3407     }
3408     if (n->is_Phi() || n->is_ClearArray()) {
3409       // we don't need to do anything, but the users must be pushed
3410     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3411       // we don't need to do anything, but the users must be pushed
3412       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
3413       if (n == NULL) {
3414         continue;
3415       }
3416     } else if (n->Opcode() == Op_StrCompressedCopy ||
3417                n->Opcode() == Op_EncodeISOArray) {
3418       // get the memory projection
3419       n = n->find_out_with(Op_SCMemProj);
3420       assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3421     } else {
3422       assert(n->is_Mem(), "memory node required.");
3423       Node *addr = n->in(MemNode::Address);
3424       const Type *addr_t = igvn->type(addr);
3425       if (addr_t == Type::TOP) {
3426         continue;
3427       }
3428       assert (addr_t->isa_ptr() != NULL, "pointer type required.");
3429       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3430       assert ((uint)alias_idx < new_index_end, "wrong alias index");
3431       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3432       if (_compile->failing()) {
3433         return;
3434       }
3435       if (mem != n->in(MemNode::Memory)) {
3436         // We delay the memory edge update since we need the old one in
3437         // the MergeMem code below when instance memory slices are separated.
3438         set_map(n, mem);
3439       }
3440       if (n->is_Load()) {
3441         continue;  // don't push users
3442       } else if (n->is_LoadStore()) {
3443         // get the memory projection
3444         n = n->find_out_with(Op_SCMemProj);
3445         assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3446       }
3447     }
3448     // push user on appropriate worklist
3449     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3450       Node *use = n->fast_out(i);
3451       if (use->is_Phi() || use->is_ClearArray()) {
3452         memnode_worklist.append_if_missing(use);
3453       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3454         if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
3455           continue;
3456         }
3457         memnode_worklist.append_if_missing(use);
3458       } else if (use->is_MemBar()) {
3459         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3460           memnode_worklist.append_if_missing(use);
3461         }
3462 #ifdef ASSERT
3463       } else if(use->is_Mem()) {
3464         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3465       } else if (use->is_MergeMem()) {
3466         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3467       } else if (use->Opcode() == Op_EncodeISOArray) {
3468         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3469           // EncodeISOArray overwrites destination array
3470           memnode_worklist.append_if_missing(use);
3471         }
3472       } else {
3473         uint op = use->Opcode();
3474         if ((use->in(MemNode::Memory) == n) &&
3475             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
3476           // They overwrite the memory edge corresponding to the destination array.
3477           memnode_worklist.append_if_missing(use);
3478         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
3479               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
3480               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3481               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
3482           n->dump();
3483           use->dump();
3484           assert(false, "EA: missing memory path");
3485         }
3486 #endif
3487       }
3488     }
3489   }
3490 
3491   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
3492   //            Walk each memory slice moving the first node encountered of each
3493   //            instance type to the input corresponding to its alias index.
3494   uint length = mergemem_worklist.length();
3495   for( uint next = 0; next < length; ++next ) {
3496     MergeMemNode* nmm = mergemem_worklist.at(next);
3497     assert(!visited.test_set(nmm->_idx), "should not be visited before");
3498     // Note: we don't want to use MergeMemStream here because we only want to
3499     // scan inputs which exist at the start, not ones we add during processing.
3500     // Note 2: MergeMem may already contain instance memory slices added
3501     // during the find_inst_mem() call when memory nodes were processed above.
3502     igvn->hash_delete(nmm);
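    // Only scan memory slices that existed before escape analysis created
    // new instance alias indexes (those start at new_index_start).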
3503     uint nslices = MIN2(nmm->req(), new_index_start);
3504     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
3505       Node* mem = nmm->in(i);
3506       Node* cur = NULL;
3507       if (mem == NULL || mem->is_top()) {
3508         continue;
3509       }
3510       // First, update mergemem by moving memory nodes to corresponding slices
3511       // if their type became more precise since this mergemem was created.
3512       while (mem->is_Mem()) {
3513         const Type *at = igvn->type(mem->in(MemNode::Address));
3514         if (at != Type::TOP) {
3515           assert (at->isa_ptr() != NULL, "pointer type required.");
3516           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
3517           if (idx == i) {
3518             if (cur == NULL) {
3519               cur = mem;
3520             }
3521           } else {
3522             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
3523               nmm->set_memory_at(idx, mem);
3524             }
3525           }
3526         }
3527         mem = mem->in(MemNode::Memory);
3528       }
3529       nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
3530       // Find any instance of the current type if we haven't already
3531       // encountered a memory slice of the instance along the memory chain.
3532       for (uint ni = new_index_start; ni < new_index_end; ni++) {
3533         if((uint)_compile->get_general_index(ni) == i) {
3534           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
3535           if (nmm->is_empty_memory(m)) {
3536             Node* result = find_inst_mem(mem, ni, orig_phis);
3537             if (_compile->failing()) {
3538               return;
3539             }
3540             nmm->set_memory_at(ni, result);
3541           }
3542         }
3543       }
3544     }
3545     // Find the rest of the instance values
3546     for (uint ni = new_index_start; ni < new_index_end; ni++) {
3547       const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
3548       Node* result = step_through_mergemem(nmm, ni, tinst);
3549       if (result == nmm->base_memory()) {
3550         // Didn't find instance memory, search through general slice recursively.
3551         result = nmm->memory_at(_compile->get_general_index(ni));
3552         result = find_inst_mem(result, ni, orig_phis);
3553         if (_compile->failing()) {
3554           return;
3555         }
3556         nmm->set_memory_at(ni, result);
3557       }
3558     }
3559     igvn->hash_insert(nmm);
3560     record_for_optimizer(nmm);
3561   }
3562 
3563   //  Phase 4:  Update the inputs of non-instance memory Phis and
3564   //            the Memory input of memnodes
3565   // First update the inputs of any non-instance Phi's from
3566   // which we split out an instance Phi.  Note we don't have
3567   // to recursively process Phi's encountered on the input memory
3568   // chains as is done in split_memory_phi() since they will
3569   // also be processed here.
3570   for (int j = 0; j < orig_phis.length(); j++) {
3571     PhiNode *phi = orig_phis.at(j);
3572     int alias_idx = _compile->get_alias_index(phi->adr_type());
3573     igvn->hash_delete(phi);
3574     for (uint i = 1; i < phi->req(); i++) {
3575       Node *mem = phi->in(i);
3576       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
3577       if (_compile->failing()) {
3578         return;
3579       }
3580       if (mem != new_mem) {
3581         phi->set_req(i, new_mem);
3582       }
3583     }
3584     igvn->hash_insert(phi);
3585     record_for_optimizer(phi);
3586   }
3587 
3588   // Update the memory inputs of MemNodes with the value we computed
3589   // in Phase 2 and move stores memory users to corresponding memory slices.
3590   // Disable memory split verification code until the fix for 6984348.
3591   // Currently it produces false negative results since it does not cover all cases.
3592 #if 0 // ifdef ASSERT
3593   visited.Reset();
3594   Node_Stack old_mems(arena, _compile->unique() >> 2);
3595 #endif
3596   for (uint i = 0; i < ideal_nodes.size(); i++) {
3597     Node*    n = ideal_nodes.at(i);
3598     Node* nmem = get_map(n->_idx);
3599     assert(nmem != NULL, "sanity");
3600     if (n->is_Mem()) {
3601 #if 0 // ifdef ASSERT
3602       Node* old_mem = n->in(MemNode::Memory);
3603       if (!visited.test_set(old_mem->_idx)) {
3604         old_mems.push(old_mem, old_mem->outcnt());
3605       }
3606 #endif
3607       assert(n->in(MemNode::Memory) != nmem, "sanity");
3608       if (!n->is_Load()) {
3609         // Move memory users of a store first.
3610         move_inst_mem(n, orig_phis);
3611       }
3612       // Now update memory input
3613       igvn->hash_delete(n);
3614       n->set_req(MemNode::Memory, nmem);
3615       igvn->hash_insert(n);
3616       record_for_optimizer(n);
3617     } else {
3618       assert(n->is_Allocate() || n->is_CheckCastPP() ||
3619              n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
3620     }
3621   }
3622 #if 0 // ifdef ASSERT
3623   // Verify that memory was split correctly
3624   while (old_mems.is_nonempty()) {
3625     Node* old_mem = old_mems.node();
3626     uint  old_cnt = old_mems.index();
3627     old_mems.pop();
3628     assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
3629   }
3630 #endif
3631 }
3632 
3633 #ifndef PRODUCT
3634 static const char *node_type_names[] = {
3635   "UnknownType",
3636   "JavaObject",
3637   "LocalVar",
3638   "Field",
3639   "Arraycopy"
3640 };
3641 
3642 static const char *esc_names[] = {
3643   "UnknownEscape",
3644   "NoEscape",
3645   "ArgEscape",
3646   "GlobalEscape"
3647 };
3648 
3649 void PointsToNode::dump_header(bool print_state, outputStream* out) const {
3650   NodeType nt = node_type();
3651   out->print("%s(%d) ", node_type_names[(int) nt], _pidx);
3652   if (print_state) {
3653     EscapeState es = escape_state();
3654     EscapeState fields_es = fields_escape_state();
3655     out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
3656     if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) {
3657       out->print("NSR ");
3658     }
3659   }
3660 }
3661 
3662 void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const {
3663   dump_header(print_state, out);
3664   if (is_Field()) {
3665     FieldNode* f = (FieldNode*)this;
3666     if (f->is_oop()) {
3667       out->print("oop ");
3668     }
3669     if (f->offset() > 0) {
3670       out->print("+%d ", f->offset());
3671     }
3672     out->print("(");
3673     for (BaseIterator i(f); i.has_next(); i.next()) {
3674       PointsToNode* b = i.get();
3675       out->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));
3676     }
3677     out->print(" )");
3678   }
3679   out->print("[");
3680   for (EdgeIterator i(this); i.has_next(); i.next()) {
3681     PointsToNode* e = i.get();
3682     out->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
3683   }
3684   out->print(" [");
3685   for (UseIterator i(this); i.has_next(); i.next()) {
3686     PointsToNode* u = i.get();
3687     bool is_base = false;
3688     if (PointsToNode::is_base_use(u)) {
3689       is_base = true;
3690       u = PointsToNode::get_use_node(u)->as_Field();
3691     }
3692     out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
3693   }
3694   out->print(" ]]  ");
3695   if (_node == NULL) {
3696     out->print("<null>%s", newline ? "\n" : "");
3697   } else {
3698     _node->dump(newline ? "\n" : "", false, out);
3699   }
3700 }
3701 
3702 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
3703   bool first = true;
3704   int ptnodes_length = ptnodes_worklist.length();
3705   for (int i = 0; i < ptnodes_length; i++) {
3706     PointsToNode *ptn = ptnodes_worklist.at(i);
3707     if (ptn == NULL || !ptn->is_JavaObject()) {
3708       continue;
3709     }
3710     PointsToNode::EscapeState es = ptn->escape_state();
3711     if ((es != PointsToNode::NoEscape) && !Verbose) {
3712       continue;
3713     }
3714     Node* n = ptn->ideal_node();
3715     if (n->is_Allocate() || (n->is_CallStaticJava() &&
3716                              n->as_CallStaticJava()->is_boxing_method())) {
3717       if (first) {
3718         tty->cr();
3719         tty->print("======== Connection graph for ");
3720         _compile->method()->print_short_name();
3721         tty->cr();
3722         tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d",
3723                       _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length());
3724         tty->cr();
3725         first = false;
3726       }
3727       ptn->dump();
3728       // Print all locals and fields which reference this allocation
3729       for (UseIterator j(ptn); j.has_next(); j.next()) {
3730         PointsToNode* use = j.get();
3731         if (use->is_LocalVar()) {
3732           use->dump(Verbose);
3733         } else if (Verbose) {
3734           use->dump();
3735         }
3736       }
3737       tty->cr();
3738     }
3739   }
3740 }
3741 
3742 void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const {
3743   if (_compile->directive()->TraceEscapeAnalysisOption) {
3744     assert(ptn != nullptr, "should not be null");
3745     assert(reason != nullptr, "should not be null");
3746     ptn->dump_header(true);
3747     PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es;
3748     PointsToNode::EscapeState new_fields_es = fields ? es : ptn->fields_escape_state();
3749     tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason);
3750   }
3751 }
3752 
3753 const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const {
3754   if (_compile->directive()->TraceEscapeAnalysisOption) {
3755     stringStream ss;
3756     ss.print("propagated from: ");
3757     from->dump(true, &ss, false);
3758     return ss.as_string();
3759   } else {
3760     return nullptr;
3761   }
3762 }
3763 
3764 const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const {
3765   if (_compile->directive()->TraceEscapeAnalysisOption) {
3766     stringStream ss;
3767     ss.print("escapes as arg to:");
3768     call->dump("", false, &ss);
3769     return ss.as_string();
3770   } else {
3771     return nullptr;
3772   }
3773 }
3774 
3775 const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const {
3776   if (_compile->directive()->TraceEscapeAnalysisOption) {
3777     stringStream ss;
3778     ss.print("is merged with other object: ");
3779     other->dump_header(true, &ss);
3780     return ss.as_string();
3781   } else {
3782     return nullptr;
3783   }
3784 }
3785 
3786 #endif
3787 
3788 void ConnectionGraph::record_for_optimizer(Node *n) {
3789   _igvn->_worklist.push(n);
3790   _igvn->add_users_to_worklist(n);
3791 }