src/hotspot/share/opto/escape.cpp

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.hpp"

  32 #include "memory/resourceArea.hpp"
  33 #include "opto/c2compiler.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/escape.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/movenode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "utilities/macros.hpp"
  43 
  44 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  45   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  46   _in_worklist(C->comp_arena()),
  47   _next_pidx(0),
  48   _collecting(true),
  49   _verify(false),
  50   _compile(C),
  51   _igvn(igvn),

 135   GrowableArray<SafePointNode*>  sfn_worklist;
 136   GrowableArray<MergeMemNode*>   mergemem_worklist;
 137   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 138 
 139   { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
 140 
 141   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 142   ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
 143   // Initialize worklist
 144   if (C->root() != NULL) {
 145     ideal_nodes.push(C->root());
 146   }
 147   // Processed ideal nodes are unique on the ideal_nodes list,
 148   // but several ideal nodes can be mapped to the same phantom_obj.
 149   // To avoid duplicate entries on the following worklists,
 150   // add the phantom_obj to them only once.
 151   ptnodes_worklist.append(phantom_obj);
 152   java_objects_worklist.append(phantom_obj);
 153   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 154     Node* n = ideal_nodes.at(next);










 155     // Create PointsTo nodes and add them to Connection Graph. Called
 156     // only once per ideal node since ideal_nodes is Unique_Node list.
 157     add_node_to_connection_graph(n, &delayed_worklist);
 158     PointsToNode* ptn = ptnode_adr(n->_idx);
 159     if (ptn != NULL && ptn != phantom_obj) {
 160       ptnodes_worklist.append(ptn);
 161       if (ptn->is_JavaObject()) {
 162         java_objects_worklist.append(ptn->as_JavaObject());
 163         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 164             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 165           // Only allocations and Java static call results are interesting.
 166           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 167         }
 168       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 169         oop_fields_worklist.append(ptn->as_Field());
 170       }
 171     }
 172     // Collect some interesting nodes for further use.
 173     switch (n->Opcode()) {
 174       case Op_MergeMem:
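
// [Illustrative sketch, not part of escape.cpp] The loop above relies on a
// grow-while-iterating unique worklist: ideal_nodes may be extended inside
// add_node_to_connection_graph(), yet every node is visited exactly once
// because a second push of the same node is a no-op. A minimal standalone
// model of that pattern (UniqueWorklist and process_to_fixpoint are
// hypothetical names):

#include <cstddef>
#include <unordered_set>
#include <vector>

struct UniqueWorklist {
  std::vector<int> items;              // visit order (like ideal_nodes)
  std::unordered_set<int> seen;        // membership test (like a VectorSet)
  void push(int n) { if (seen.insert(n).second) items.push_back(n); }
};

void process_to_fixpoint(UniqueWorklist& wl,
                         const std::vector<std::vector<int>>& succs) {
  for (std::size_t next = 0; next < wl.items.size(); ++next) { // list may grow
    for (int s : succs[wl.items[next]]) {
      wl.push(s);                      // duplicates are silently rejected
    }
  }
}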

 431   return false;
 432 }
 433 
 434 // Returns true if at least one of the arguments to the call is an object
 435 // that does not escape globally.
 436 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
 437   if (call->method() != NULL) {
 438     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
 439     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
 440       Node* p = call->in(idx);
 441       if (not_global_escape(p)) {
 442         return true;
 443       }
 444     }
 445   } else {
 446     const char* name = call->as_CallStaticJava()->_name;
 447     assert(name != NULL, "no name");
 448     // no arg escapes through uncommon traps
 449     if (strcmp(name, "uncommon_trap") != 0) {
 450       // process_call_arguments() assumes that all arguments escape globally
 451       const TypeTuple* d = call->tf()->domain();
 452       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 453         const Type* at = d->field_at(i);
 454         if (at->isa_oopptr() != NULL) {
 455           return true;
 456         }
 457       }
 458     }
 459   }
 460   return false;
 461 }
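
// [Illustrative sketch, not part of escape.cpp] Comparisons such as
// "escape_state() < PointsToNode::GlobalEscape" above work because the escape
// states form a totally ordered lattice and the analysis only ever moves a
// node upward. A toy model (the enumerators mirror the real names; the merge
// helper is hypothetical):

#include <algorithm>

enum ToyEscapeState { ToyNoEscape = 1, ToyArgEscape = 2, ToyGlobalEscape = 3 };

// Merging two facts about the same object keeps the more pessimistic one,
// which is why an escape state never needs to be lowered once raised.
inline ToyEscapeState merge_escape(ToyEscapeState a, ToyEscapeState b) {
  return std::max(a, b);
}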
 462 
 463 
 464 
 465 // Utility function for nodes that load an object
 466 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
 467   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 468   // ThreadLocal has RawPtr type.
 469   const Type* t = _igvn->type(n);
 470   if (t->make_ptr() != NULL) {
 471     Node* adr = n->in(MemNode::Address);

 505       // first IGVN optimization when escape information is still available.
 506       record_for_optimizer(n);
 507     } else if (n->is_Allocate()) {
 508       add_call_node(n->as_Call());
 509       record_for_optimizer(n);
 510     } else {
 511       if (n->is_CallStaticJava()) {
 512         const char* name = n->as_CallStaticJava()->_name;
 513         if (name != NULL && strcmp(name, "uncommon_trap") == 0) {
 514           return; // Skip uncommon traps
 515         }
 516       }
 517       // Don't mark as processed since call's arguments have to be processed.
 518       delayed_worklist->push(n);
 519       // Check if a call returns an object.
 520       if ((n->as_Call()->returns_pointer() &&
 521            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
 522           (n->is_CallStaticJava() &&
 523            n->as_CallStaticJava()->is_boxing_method())) {
 524         add_call_node(n->as_Call());











 525       }
 526     }
 527     return;
 528   }
 529   // Put this check here to process call arguments since some call nodes
 530   // point to phantom_obj.
 531   if (n_ptn == phantom_obj || n_ptn == null_obj) {
 532     return; // Skip predefined nodes.
 533   }
 534   switch (opcode) {
 535     case Op_AddP: {
 536       Node* base = get_addp_base(n);
 537       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 538       // Field nodes are created for all field types. They are used in
 539       // adjust_scalar_replaceable_state() and split_unique_types().
 540       // Note, non-oop fields will have only base edges in Connection
 541       // Graph because such fields are not used for oop loads and stores.
 542       int offset = address_offset(n, igvn);
 543       add_field(n, PointsToNode::NoEscape, offset);
 544       if (ptn_base == NULL) {
 545         delayed_worklist->push(n); // Process it later.
 546       } else {
 547         n_ptn = ptnode_adr(n_idx);
 548         add_base(n_ptn->as_Field(), ptn_base);
 549       }
 550       break;
 551     }
 552     case Op_CastX2P: {
 553       map_ideal_node(n, phantom_obj);
 554       break;
 555     }

 556     case Op_CastPP:
 557     case Op_CheckCastPP:
 558     case Op_EncodeP:
 559     case Op_DecodeN:
 560     case Op_EncodePKlass:
 561     case Op_DecodeNKlass: {
 562       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
 563       break;
 564     }
 565     case Op_CMoveP: {
 566       add_local_var(n, PointsToNode::NoEscape);
 567       // Do not add edges during the first iteration because some inputs
 568       // may not be defined yet.
 569       delayed_worklist->push(n);
 570       break;
 571     }
 572     case Op_ConP:
 573     case Op_ConN:
 574     case Op_ConNKlass: {
 575       // assume all oop constants globally escape except for null

 606     case Op_PartialSubtypeCheck: {
 607       // Produces Null or notNull and is used only in CmpP, so
 608       // phantom_obj can be used.
 609       map_ideal_node(n, phantom_obj); // Result is unknown
 610       break;
 611     }
 612     case Op_Phi: {
 613       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 614       // ThreadLocal has RawPtr type.
 615       const Type* t = n->as_Phi()->type();
 616       if (t->make_ptr() != NULL) {
 617         add_local_var(n, PointsToNode::NoEscape);
 618         // Do not add edges during the first iteration because some inputs
 619         // may not be defined yet.
 620         delayed_worklist->push(n);
 621       }
 622       break;
 623     }
 624     case Op_Proj: {
 625       // we are only interested in the oop result projection from a call
 626       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 627           n->in(0)->as_Call()->returns_pointer()) {


 628         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
 629       }
 630       break;
 631     }
 632     case Op_Rethrow: // Exception object escapes
 633     case Op_Return: {
 634       if (n->req() > TypeFunc::Parms &&
 635           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 636         // Treat Return value as LocalVar with GlobalEscape escape state.
 637         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
 638       }
 639       break;
 640     }
 641     case Op_CompareAndExchangeP:
 642     case Op_CompareAndExchangeN:
 643     case Op_GetAndSetP:
 644     case Op_GetAndSetN: {
 645       add_objload_to_connection_graph(n, delayed_worklist);
 646       // fall-through
 647     }

 707   if (n->is_Call()) {
 708     process_call_arguments(n->as_Call());
 709     return;
 710   }
 711   assert(n->is_Store() || n->is_LoadStore() ||
 712          ((n_ptn != NULL) && (n_ptn->ideal_node() != NULL)),
 713          "node should be registered already");
 714   int opcode = n->Opcode();
 715   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
 716   if (gc_handled) {
 717     return; // Ignore node if already handled by GC.
 718   }
 719   switch (opcode) {
 720     case Op_AddP: {
 721       Node* base = get_addp_base(n);
 722       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 723       assert(ptn_base != NULL, "field's base should be registered");
 724       add_base(n_ptn->as_Field(), ptn_base);
 725       break;
 726     }

 727     case Op_CastPP:
 728     case Op_CheckCastPP:
 729     case Op_EncodeP:
 730     case Op_DecodeN:
 731     case Op_EncodePKlass:
 732     case Op_DecodeNKlass: {
 733       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
 734       break;
 735     }
 736     case Op_CMoveP: {
 737       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
 738         Node* in = n->in(i);
 739         if (in == NULL) {
 740           continue;  // ignore NULL
 741         }
 742         Node* uncast_in = in->uncast();
 743         if (uncast_in->is_top() || uncast_in == n) {
 744           continue;  // ignore top or inputs which go back to this node
 745         }
 746         PointsToNode* ptn = ptnode_adr(in->_idx);

 761       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 762       // ThreadLocal has RawPtr type.
 763       assert(n->as_Phi()->type()->make_ptr() != NULL, "Unexpected node type");
 764       for (uint i = 1; i < n->req(); i++) {
 765         Node* in = n->in(i);
 766         if (in == NULL) {
 767           continue;  // ignore NULL
 768         }
 769         Node* uncast_in = in->uncast();
 770         if (uncast_in->is_top() || uncast_in == n) {
 771           continue;  // ignore top or inputs which go back to this node
 772         }
 773         PointsToNode* ptn = ptnode_adr(in->_idx);
 774         assert(ptn != NULL, "node should be registered");
 775         add_edge(n_ptn, ptn);
 776       }
 777       break;
 778     }
 779     case Op_Proj: {
 780       // we are only interested in the oop result projection from a call
 781       assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 782              n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
 783       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
 784       break;
 785     }
 786     case Op_Rethrow: // Exception object escapes
 787     case Op_Return: {
 788       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
 789              "Unexpected node type");
 790       // Treat Return value as LocalVar with GlobalEscape escape state.
 791       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), NULL);
 792       break;
 793     }
 794     case Op_CompareAndExchangeP:
 795     case Op_CompareAndExchangeN:
 796     case Op_GetAndSetP:
 797     case Op_GetAndSetN: {
 798       assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type");
 799       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
 800       // fall-through
 801     }
 802     case Op_CompareAndSwapP:

 937     PointsToNode* ptn = ptnode_adr(val->_idx);
 938     assert(ptn != NULL, "node should be registered");
 939     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
 940     // Add edge to object for unsafe access with offset.
 941     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 942     assert(adr_ptn != NULL, "node should be registered");
 943     if (adr_ptn->is_Field()) {
 944       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
 945       add_edge(adr_ptn, ptn);
 946     }
 947     return true;
 948   }
 949 #ifdef ASSERT
 950   n->dump(1);
 951   assert(false, "not unsafe");
 952 #endif
 953   return false;
 954 }
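
// [Illustrative sketch, not part of escape.cpp] Throughout this file,
// add_edge() reports whether the edge is new, letting callers accumulate a
// new_edges count and rerun until the graph stops changing. A toy model of
// that contract (ToyConnectionGraph is a hypothetical name):

#include <set>
#include <utility>

struct ToyConnectionGraph {
  std::set<std::pair<int, int>> edges;
  // Returns true only if (from, to) was not already present, mirroring the
  // "if (add_edge(...)) { new_edges++; }" idiom used elsewhere in this file.
  bool add_edge(int from, int to) { return edges.insert({from, to}).second; }
};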
 955 
 956 void ConnectionGraph::add_call_node(CallNode* call) {
 957   assert(call->returns_pointer(), "only for call which returns pointer");
 958   uint call_idx = call->_idx;
 959   if (call->is_Allocate()) {
 960     Node* k = call->in(AllocateNode::KlassNode);
 961     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 962     assert(kt != NULL, "TypeKlassPtr required.");
 963     PointsToNode::EscapeState es = PointsToNode::NoEscape;
 964     bool scalar_replaceable = true;
 965     NOT_PRODUCT(const char* nsr_reason = "");
 966     if (call->is_AllocateArray()) {
 967       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
 968         es = PointsToNode::GlobalEscape;
 969       } else {
 970         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
 971         if (length < 0) {
 972           // Not scalar replaceable if the length is not constant.
 973           scalar_replaceable = false;
 974           NOT_PRODUCT(nsr_reason = "has a non-constant length");
 975         } else if (length > EliminateAllocationArraySizeLimit) {
 976           // Not scalar replaceable if the length is too big.
 977           scalar_replaceable = false;

1013     //
1014     //    - all oop arguments are escaping globally;
1015     //
1016     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
1017     //
1018     //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
1019     //
1020     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
1021     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
1022     //      during call is returned;
1023     //    - mapped to an ArgEscape LocalVar node pointing to object arguments
1024     //      which are returned and do not escape during the call;
1025     //
1026     //    - the escaping status of oop arguments is defined by bytecode analysis;
1027     //
1028     // For a static call, we know exactly what method is being called.
1029     // Use bytecode estimator to record whether the call's return value escapes.
1030     ciMethod* meth = call->as_CallJava()->method();
1031     if (meth == NULL) {
1032       const char* name = call->as_CallStaticJava()->_name;
1033       assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");

1034       // Returns a newly allocated non-escaped object.
1035       add_java_object(call, PointsToNode::NoEscape);
1036       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multianewarray"));
1037     } else if (meth->is_boxing_method()) {
1038       // Returns a boxing object.
1039       PointsToNode::EscapeState es;
1040       vmIntrinsics::ID intr = meth->intrinsic_id();
1041       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
1042         // It does not escape if the object is always newly allocated.
1043         es = PointsToNode::NoEscape;
1044       } else {
1045         // It escapes globally if the object could be loaded from a cache.
1046         es = PointsToNode::GlobalEscape;
1047       }
1048       add_java_object(call, es);
1049     } else {
1050       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
1051       call_analyzer->copy_dependencies(_compile->dependencies());
1052       if (call_analyzer->is_return_allocated()) {
1053         // Returns a newly allocated non-escaped object, simply
1054         // update dependency information.
1055         // Mark it as NoEscape so that objects referenced by
1056         // its fields will be marked as NoEscape at least.
1057         add_java_object(call, PointsToNode::NoEscape);
1058         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
1059       } else {
1060         // Determine whether any arguments are returned.
1061         const TypeTuple* d = call->tf()->domain();
1062         bool ret_arg = false;
1063         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1064           if (d->field_at(i)->isa_ptr() != NULL &&
1065               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1066             ret_arg = true;
1067             break;
1068           }
1069         }
1070         if (ret_arg) {
1071           add_local_var(call, PointsToNode::ArgEscape);
1072         } else {
1073           // Returns unknown object.
1074           map_ideal_node(call, phantom_obj);
1075         }
1076       }
1077     }
1078   } else {
1079     // Another type of call; assume the worst case:
1080     // returned value is unknown and globally escapes.
1081     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
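
// [Illustrative sketch, not part of escape.cpp] The boxing split above encodes
// a Java library fact: Float.valueOf/Double.valueOf always allocate a fresh
// object, while the other valueOf methods may return a shared cached instance
// (e.g. small Integer values), which must be treated as globally escaping.
// A toy decision (all names hypothetical):

enum ToyBoxEscape { ToyBoxNoEscape, ToyBoxGlobalEscape };

ToyBoxEscape classify_boxing_result(bool always_allocates_fresh) {
  // A possibly cached result may already be reachable from anywhere.
  return always_allocates_fresh ? ToyBoxNoEscape : ToyBoxGlobalEscape;
}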

1089 #ifdef ASSERT
1090     case Op_Allocate:
1091     case Op_AllocateArray:
1092     case Op_Lock:
1093     case Op_Unlock:
1094       assert(false, "should be done already");
1095       break;
1096 #endif
1097     case Op_ArrayCopy:
1098     case Op_CallLeafNoFP:
1099       // Most array copies are ArrayCopy nodes at this point but there
1100       // are still a few direct calls to the copy subroutines (See
1101       // PhaseStringOpts::copy_string())
1102       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1103         call->as_CallLeaf()->is_call_to_arraycopystub();
1104       // fall through
1105     case Op_CallLeafVector:
1106     case Op_CallLeaf: {
1107       // Stub calls: objects do not escape but they are not scalar replaceable.
1108       // Adjust escape state for outgoing arguments.
1109       const TypeTuple * d = call->tf()->domain();
1110       bool src_has_oops = false;
1111       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1112         const Type* at = d->field_at(i);
1113         Node *arg = call->in(i);
1114         if (arg == NULL) {
1115           continue;
1116         }
1117         const Type *aat = _igvn->type(arg);
1118         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
1119           continue;
1120         }
1121         if (arg->is_AddP()) {
1122           //
1123           // The inline_native_clone() case, when the arraycopy stub is called
1124           // after the allocation but before the Initialize and CheckCastPP nodes,
1125           // or the normal arraycopy-for-object-arrays case.
1126           //
1127           // Set the AddP's base (Allocate) as not scalar replaceable since a
1128           // pointer to the base (with offset) is passed as an argument.
1129           //
1130           arg = get_addp_base(arg);
1131         }
1132         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1133         assert(arg_ptn != NULL, "should be registered");
1134         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1135         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
1136           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1137                  aat->isa_ptr() != NULL, "expecting a Ptr");
1138           bool arg_has_oops = aat->isa_oopptr() &&
1139                               (aat->isa_instptr() ||
1140                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != NULL)));



1141           if (i == TypeFunc::Parms) {
1142             src_has_oops = arg_has_oops;
1143           }
1144           //
1145           //   src or dst could be j.l.Object when the other is a basic type array:
1146           //
1147           //   arraycopy(char[],0,Object*,0,size);
1148           //   arraycopy(Object*,0,char[],0,size);
1149           //
1150           // Don't add edges in such cases.
1151           //
1152           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1153                                        arg_has_oops && (i > TypeFunc::Parms);
1154 #ifdef ASSERT
1155           if (!(is_arraycopy ||
1156                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1157                 (call->as_CallLeaf()->_name != NULL &&
1158                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1159                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1160                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

1169                   strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
1170                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1171                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
1172                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1173                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
1174                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
1175                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
1176                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1177                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1178                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1179                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1180                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1181                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1182                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
1183                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
1184                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1185                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1186                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1187                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1188                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||



1189                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
1190                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
1191                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1192                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
1193                  ))) {
1194             call->dump();
1195             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1196           }
1197 #endif
1198           // Always process arraycopy's destination object since
1199           // we need to add all possible edges to references in the
1200           // source object.
1201           if (arg_esc >= PointsToNode::ArgEscape &&
1202               !arg_is_arraycopy_dest) {
1203             continue;
1204           }
1205           PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1206           if (call->is_ArrayCopy()) {
1207             ArrayCopyNode* ac = call->as_ArrayCopy();
1208             if (ac->is_clonebasic() ||

1231           }
1232         }
1233       }
1234       break;
1235     }
1236     case Op_CallStaticJava: {
1237       // For a static call, we know exactly what method is being called.
1238       // Use bytecode estimator to record the call's escape effects
1239 #ifdef ASSERT
1240       const char* name = call->as_CallStaticJava()->_name;
1241       assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1242 #endif
1243       ciMethod* meth = call->as_CallJava()->method();
1244       if ((meth != NULL) && meth->is_boxing_method()) {
1245         break; // Boxing methods do not modify any oops.
1246       }
1247       BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
1248       // fall-through if not a Java method or no analyzer information
1249       if (call_analyzer != NULL) {
1250         PointsToNode* call_ptn = ptnode_adr(call->_idx);
1251         const TypeTuple* d = call->tf()->domain();
1252         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1253           const Type* at = d->field_at(i);
1254           int k = i - TypeFunc::Parms;
1255           Node* arg = call->in(i);
1256           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1257           if (at->isa_ptr() != NULL &&
1258               call_analyzer->is_arg_returned(k)) {
1259             // The call returns arguments.
1260             if (call_ptn != NULL) { // Is call's result used?
1261               assert(call_ptn->is_LocalVar(), "node should be registered");
1262               assert(arg_ptn != NULL, "node should be registered");
1263               add_edge(call_ptn, arg_ptn);
1264             }
1265           }
1266           if (at->isa_oopptr() != NULL &&
1267               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1268             if (!call_analyzer->is_arg_stack(k)) {
1269               // The argument globally escapes
1270               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1271             } else {

1275                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1276               }
1277             }
1278           }
1279         }
1280         if (call_ptn != NULL && call_ptn->is_LocalVar()) {
1281           // The call returns arguments.
1282           assert(call_ptn->edge_count() > 0, "sanity");
1283           if (!call_analyzer->is_return_local()) {
1284             // It also returns an unknown object.
1285             add_edge(call_ptn, phantom_obj);
1286           }
1287         }
1288         break;
1289       }
1290     }
1291     default: {
1292       // Fall through to here if it is not a Java method, there is no
1293       // analyzer information, or it is some other type of call: assume
1294       // the worst case, all arguments globally escape.
1295       const TypeTuple* d = call->tf()->domain();
1296       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1297         const Type* at = d->field_at(i);
1298         if (at->isa_oopptr() != NULL) {
1299           Node* arg = call->in(i);
1300           if (arg->is_AddP()) {
1301             arg = get_addp_base(arg);
1302           }
1303           assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
1304           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1305         }
1306       }
1307     }
1308   }
1309 }
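
// [Illustrative sketch, not part of escape.cpp] The default case above is the
// conservative bottom of the analysis: with no knowledge of the callee, every
// pointer argument must be assumed to leak. A toy version (names
// hypothetical):

#include <vector>

void toy_assume_worst_case(std::vector<int>& arg_escape_states,
                           int global_escape) {
  for (int& es : arg_escape_states) {
    es = global_escape;  // soundness over precision
  }
}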
1310 
1311 
1312 // Finish Graph construction.
1313 bool ConnectionGraph::complete_connection_graph(
1314                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
1315                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

1688     PointsToNode* base = i.get();
1689     if (base->is_JavaObject()) {
1690       // Skip Allocate's fields which will be processed later.
1691       if (base->ideal_node()->is_Allocate()) {
1692         return 0;
1693       }
1694       assert(base == null_obj, "only NULL ptr base expected here");
1695     }
1696   }
1697   if (add_edge(field, phantom_obj)) {
1698     // New edge was added
1699     new_edges++;
1700     add_field_uses_to_worklist(field);
1701   }
1702   return new_edges;
1703 }
1704 
1705 // Find fields' initializing values for allocations.
1706 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
1707   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");

1708   Node* alloc = pta->ideal_node();
1709 
1710   // Do nothing for Allocate nodes since their field values are
1711   // "known" unless they are initialized by arraycopy/clone.
1712   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
1713     return 0;







1714   }
1715   assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");

1716 #ifdef ASSERT
1717   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
1718     const char* name = alloc->as_CallStaticJava()->_name;
1719     assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");

1720   }
1721 #endif
1722   // Non-escaped allocations returned from Java or runtime calls have unknown field values.
1723   int new_edges = 0;
1724   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1725     PointsToNode* field = i.get();
1726     if (field->is_Field() && field->as_Field()->is_oop()) {
1727       if (add_edge(field, phantom_obj)) {
1728         // New edge was added
1729         new_edges++;
1730         add_field_uses_to_worklist(field->as_Field());
1731       }
1732     }
1733   }
1734   return new_edges;
1735 }
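
// [Illustrative sketch, not part of escape.cpp] For allocations whose contents
// the compiler cannot see (results of runtime calls, arraycopy destinations),
// the loop above points every oop field at phantom_obj, the "some unknown
// object" sentinel. A toy model (names hypothetical):

#include <set>
#include <utility>
#include <vector>

const int TOY_PHANTOM = -1;  // stands in for phantom_obj

int toy_point_fields_at_phantom(const std::vector<int>& oop_fields,
                                std::set<std::pair<int, int>>& edges) {
  int new_edges = 0;
  for (int f : oop_fields) {
    if (edges.insert({f, TOY_PHANTOM}).second) {  // count only new edges
      new_edges++;
    }
  }
  return new_edges;
}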
1736 
1737 // Find fields' initializing values for allocations.
1738 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* phase) {
1739   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1740   Node* alloc = pta->ideal_node();
1741   // Do nothing for Call nodes since their field values are unknown.
1742   if (!alloc->is_Allocate()) {
1743     return 0;
1744   }
1745   InitializeNode* ini = alloc->as_Allocate()->initialization();
1746   bool visited_bottom_offset = false;
1747   GrowableArray<int> offsets_worklist;
1748   int new_edges = 0;
1749 
1750   // Check if an oop field's initializing value is recorded and add
1751   // a corresponding NULL value if it is not recorded.
1752   // The Connection Graph does not record a default initialization by NULL
1753   // captured by an Initialize node.
1754   //
1755   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1756     PointsToNode* field = i.get(); // Field (AddP)
1757     if (!field->is_Field() || !field->as_Field()->is_oop()) {
1758       continue; // Not oop field
1759     }
1760     int offset = field->as_Field()->offset();
1761     if (offset == Type::OffsetBot) {
1762       if (!visited_bottom_offset) {

1808               } else {
1809                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1810                   tty->print_cr("----------init store has invalid value -----");
1811                   store->dump();
1812                   val->dump();
1813                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1814                 }
1815                 for (EdgeIterator j(val); j.has_next(); j.next()) {
1816                   PointsToNode* obj = j.get();
1817                   if (obj->is_JavaObject()) {
1818                     if (!field->points_to(obj->as_JavaObject())) {
1819                       missed_obj = obj;
1820                       break;
1821                     }
1822                   }
1823                 }
1824               }
1825               if (missed_obj != NULL) {
1826                 tty->print_cr("----------field---------------------------------");
1827                 field->dump();
1828                 tty->print_cr("----------missed reference to object-----------");
1829                 missed_obj->dump();
1830                 tty->print_cr("----------object referenced by init store -----");
1831                 store->dump();
1832                 val->dump();
1833                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1834               }
1835             }
1836 #endif
1837           } else {
1838             // There could be initializing stores which follow allocation.
1839             // For example, a volatile field store is not collected
1840             // by Initialize node.
1841             //
1842             // Need to check for dependent loads to separate such stores from
1843             // stores which follow loads. For now, add initial value NULL so
1844             // that compare pointers optimization works correctly.
1845           }
1846         }
1847         if (value == NULL) {
1848           // A field's initializing value was not recorded. Add NULL.
1849           if (add_edge(field, null_obj)) {
1850             // New edge was added

2076         assert(field->edge_count() > 0, "sanity");
2077       }
2078     }
2079   }
2080 }
2081 #endif
2082 
2083 // Optimize ideal graph.
2084 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
2085                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
2086   Compile* C = _compile;
2087   PhaseIterGVN* igvn = _igvn;
2088   if (EliminateLocks) {
2089     // Mark locks before changing ideal graph.
2090     int cnt = C->macro_count();
2091     for (int i = 0; i < cnt; i++) {
2092       Node *n = C->macro_node(i);
2093       if (n->is_AbstractLock()) { // Lock and Unlock nodes
2094         AbstractLockNode* alock = n->as_AbstractLock();
2095         if (!alock->is_non_esc_obj()) {
2096           if (not_global_escape(alock->obj_node())) {

2097             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
2098             // The lock could have been marked eliminated by the lock
2099             // coarsening code during the first IGVN before EA. Replace the
2100             // coarsened flag to eliminate all associated locks/unlocks.
2101 #ifdef ASSERT
2102             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
2103 #endif
2104             alock->set_non_esc_obj();
2105           }
2106         }
2107       }
2108     }
2109   }
2110 
2111   if (OptimizePtrCompare) {
2112     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
2113       Node *n = ptr_cmp_worklist.at(i);
2114       const TypeInt* tcmp = optimize_ptr_compare(n);
2115       if (tcmp->singleton()) {
2116         Node* cmp = igvn->makecon(tcmp);
2117 #ifndef PRODUCT
2118         if (PrintOptimizePtrCompare) {
2119           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
2120           if (Verbose) {
2121             n->dump(1);
2122           }
2123         }
2124 #endif
2125         igvn->replace_node(n, cmp);
2126       }
2127     }
2128   }
2129 
2130   // For MemBarStoreStore nodes added in library_call.cpp, check
2131   // escape status of associated AllocateNode and optimize out
2132   // MemBarStoreStore node if the allocated object never escapes.
2133   for (int i = 0; i < storestore_worklist.length(); i++) {
2134     Node* storestore = storestore_worklist.at(i);
2135     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
2136     if (alloc->is_Allocate() && not_global_escape(alloc)) {
2137       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
2138       mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
2139       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
2140       igvn->register_new_node_with_optimizer(mb);
2141       igvn->replace_node(storestore, mb);





2142     }
2143   }
2144 }
2145 
2146 // Optimize object compares.
2147 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
2148   assert(OptimizePtrCompare, "sanity");
2149   assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
2150   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
2151   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
2152   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0,1]
2153 
2154   PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
2155   PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
2156   JavaObjectNode* jobj1 = unique_java_object(n->in(1));
2157   JavaObjectNode* jobj2 = unique_java_object(n->in(2));
2158   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
2159   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
2160 
2161   // Check simple cases first.
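
// [Illustrative sketch, not part of escape.cpp] The shape of the decision made
// here: when each compare input resolves to a single known allocation,
// identical allocations fold to "equal" and distinct non-aliasing allocations
// fold to "not equal"; anything else stays unknown. A toy model (all names
// hypothetical; the real code handles more cases, e.g. NULL and escaped
// objects):

struct ToyUniqueObject { int id; bool may_alias_unknown; };

enum ToyCmpResult { TOY_CMP_EQ, TOY_CMP_NE, TOY_CMP_UNKNOWN };

ToyCmpResult toy_fold_ptr_compare(const ToyUniqueObject* a,
                                  const ToyUniqueObject* b) {
  if (a == nullptr || b == nullptr) return TOY_CMP_UNKNOWN; // no unique object
  if (a->id == b->id)               return TOY_CMP_EQ;      // same allocation
  if (a->may_alias_unknown || b->may_alias_unknown) return TOY_CMP_UNKNOWN;
  return TOY_CMP_NE;  // two distinct allocations can never compare equal
}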

2274   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2275   assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2276   PointsToNode* ptadr = _nodes.at(n->_idx);
2277   if (ptadr != NULL) {
2278     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2279     return;
2280   }
2281   Compile* C = _compile;
2282   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2283   map_ideal_node(n, ptadr);
2284   // Add edge from arraycopy node to source object.
2285   (void)add_edge(ptadr, src);
2286   src->set_arraycopy_src();
2287   // Add edge from destination object to arraycopy node.
2288   (void)add_edge(dst, ptadr);
2289   dst->set_arraycopy_dst();
2290 }
2291 
2292 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2293   const Type* adr_type = n->as_AddP()->bottom_type();

2294   BasicType bt = T_INT;
2295   if (offset == Type::OffsetBot) {
2296     // Check only oop fields.
2297     if (!adr_type->isa_aryptr() ||
2298         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
2299         adr_type->isa_aryptr()->elem()->make_oopptr() != NULL) {
2300       // OffsetBot is used to reference an array's element. Ignore the first AddP.
2301       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2302         bt = T_OBJECT;
2303       }
2304     }
2305   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2306     if (adr_type->isa_instptr()) {
2307       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2308       if (field != NULL) {
2309         bt = field->layout_type();
2310       } else {
2311         // Check for unsafe oop field access
2312         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2313             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2314             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2315             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2316           bt = T_OBJECT;
2317           (*unsafe) = true;
2318         }
2319       }
2320     } else if (adr_type->isa_aryptr()) {
2321       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2322         // Ignore array length load.
2323       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2324         // Ignore first AddP.
2325       } else {
2326         const Type* elemtype = adr_type->isa_aryptr()->elem();
2327         bt = elemtype->array_element_basic_type();






2328       }
2329     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2330       // Allocation initialization, ThreadLocal field access, unsafe access
2331       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2332           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2333           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2334           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2335         bt = T_OBJECT;
2336       }
2337     }
2338   }
2339   // Note: T_NARROWOOP is not classed as a real reference type
2340   return (is_reference_type(bt) || bt == T_NARROWOOP);
2341 }
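
// [Illustrative sketch, not part of escape.cpp] is_oop_field() above boils
// down to "does this address name a reference-typed slot?", with compressed
// oops (T_NARROWOOP) counted in even though is_reference_type() excludes
// them. A toy reduction (names hypothetical):

enum ToyBasicType { TOY_T_INT, TOY_T_OBJECT, TOY_T_ARRAY, TOY_T_NARROWOOP };

bool toy_is_oop_slot(ToyBasicType bt) {
  return bt == TOY_T_OBJECT || bt == TOY_T_ARRAY || bt == TOY_T_NARROWOOP;
}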
2342 
2343 // Returns the unique pointed-to Java object or NULL.
2344 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2345   assert(!_collecting, "should not call when constructed graph");
2346   // If the node was created after the escape computation we can't answer.
2347   uint idx = n->_idx;

2491             return true;
2492           }
2493         }
2494       }
2495     }
2496   }
2497   return false;
2498 }
2499 
2500 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2501   const Type *adr_type = phase->type(adr);
2502   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && is_captured_store_address(adr)) {
2503     // We are computing a raw address for a store captured by an Initialize;
2504     // compute an appropriate address type. AddP cases #3 and #5 (see below).
2505     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2506     assert(offs != Type::OffsetBot ||
2507            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2508            "offset must be a constant or it is initialization of array");
2509     return offs;
2510   }
2511   const TypePtr *t_ptr = adr_type->isa_ptr();
2512   assert(t_ptr != NULL, "must be a pointer type");
2513   return t_ptr->offset();
2514 }
2515 
2516 Node* ConnectionGraph::get_addp_base(Node *addp) {
2517   assert(addp->is_AddP(), "must be AddP");
2518   //
2519   // AddP cases for Base and Address inputs:
2520   // case #1. Direct object's field reference:
2521   //     Allocate
2522   //       |
2523   //     Proj #5 ( oop result )
2524   //       |
2525   //     CheckCastPP (cast to instance type)
2526   //      | |
2527   //     AddP  ( base == address )
2528   //
2529   // case #2. Indirect object's field reference:
2530   //      Phi
2531   //       |
2532   //     CastPP (cast to instance type)
2533   //      | |

2647   }
2648   return NULL;
2649 }
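
// [Illustrative sketch, not part of escape.cpp] get_addp_base() answers
// "which object does this address arithmetic start from?". In C terms an
// AddP is base + offset, and the cases drawn above differ only in what feeds
// the base; for escape analysis only the identity of the base object matters.
// A toy reduction (names hypothetical):

struct ToyAddP { const void* base; long offset; };

const void* toy_get_addp_base(const ToyAddP& addp) {
  return addp.base;  // the offset is irrelevant for object identity
}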
2650 
2651 //
2652 // Adjust the type and inputs of an AddP which computes the
2653 // address of a field of an instance
2654 //
2655 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2656   PhaseGVN* igvn = _igvn;
2657   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2658   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2659   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2660   if (t == NULL) {
2661     // We are computing a raw address for a store captured by an Initialize;
2662     // compute an appropriate address type (cases #3 and #5).
2663     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2664     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2665     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2666     assert(offs != Type::OffsetBot, "offset must be a constant");
2667     t = base_t->add_offset(offs)->is_oopptr();







2668   }
2669   int inst_id = base_t->instance_id();
2670   assert(!t->is_known_instance() || t->instance_id() == inst_id,
2671                              "old type must be non-instance or match new type");
2672 
2673   // The type 't' could be a subclass of 'base_t'.
2674   // As a result t->offset() could be larger than base_t's size and it will
2675   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2676   // constructor verifies the correctness of the offset.
2677   //
2678   // It could happen on a subclass branch (from type profiling
2679   // inlining) which was not eliminated during parsing since the exactness
2680   // of the allocation type was not propagated to the subclass type check.
2681   //
2682   // Or the type 't' might not be related to 'base_t' at all.
2683   // It could happen when the CHA type differs from the MDO type on a dead path
2684   // (for example, from an instanceof check) which is not collapsed during parsing.
2685   //
2686   // Do nothing for such AddP node and don't process its users since
2687   // this code branch will go away.
2688   //
2689   if (!t->is_known_instance() &&
2690       !base_t->maybe_java_subtype_of(t)) {
2691      return false; // bail out
2692   }
2693   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();











2694   // Do NOT remove the next line: it ensures a new alias index is allocated
2695   // for the instance type. Note: C++ will not remove the call since it
2696   // has a side effect.
2697   int alias_idx = _compile->get_alias_index(tinst);
2698   igvn->set_type(addp, tinst);
2699   // record the allocation in the node map
2700   set_map(addp, get_map(base->_idx));
2701   // Set addp's Base and Address to 'base'.
2702   Node *abase = addp->in(AddPNode::Base);
2703   Node *adr   = addp->in(AddPNode::Address);
2704   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2705       adr->in(0)->_idx == (uint)inst_id) {
2706     // Skip AddP cases #3 and #5.
2707   } else {
2708     assert(!abase->is_top(), "sanity"); // AddP case #3
2709     if (abase != base) {
2710       igvn->hash_delete(addp);
2711       addp->set_req(AddPNode::Base, base);
2712       if (abase == adr) {
2713         addp->set_req(AddPNode::Address, base);

3355         ptnode_adr(n->_idx)->dump();
3356         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3357 #endif
3358         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3359         return;
3360       } else {
3361         Node *val = get_map(jobj->idx());   // CheckCastPP node
3362         TypeNode *tn = n->as_Type();
3363         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3364         assert(tinst != NULL && tinst->is_known_instance() &&
3365                tinst->instance_id() == jobj->idx(), "instance type expected.");
3366 
3367         const Type *tn_type = igvn->type(tn);
3368         const TypeOopPtr *tn_t;
3369         if (tn_type->isa_narrowoop()) {
3370           tn_t = tn_type->make_ptr()->isa_oopptr();
3371         } else {
3372           tn_t = tn_type->isa_oopptr();
3373         }
3374         if (tn_t != NULL && tinst->maybe_java_subtype_of(tn_t)) {







3375           if (tn_type->isa_narrowoop()) {
3376             tn_type = tinst->make_narrowoop();
3377           } else {
3378             tn_type = tinst;
3379           }
3380           igvn->hash_delete(tn);
3381           igvn->set_type(tn, tn_type);
3382           tn->set_type(tn_type);
3383           igvn->hash_insert(tn);
3384           record_for_optimizer(n);
3385         } else {
3386           assert(tn_type == TypePtr::NULL_PTR ||
3387                  ((tn_t != NULL) && !tinst->maybe_java_subtype_of(tn_t)),
3388                  "unexpected type");
3389           continue; // Skip dead path with different type
3390         }
3391       }
3392     } else {
3393       debug_only(n->dump();)
3394       assert(false, "EA: unexpected node");
3395       continue;
3396     }
3397     // push allocation's users on appropriate worklist
3398     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3399       Node *use = n->fast_out(i);
3400       if (use->is_Mem() && use->in(MemNode::Address) == n) {
3401         // Load/store to instance's field
3402         memnode_worklist.append_if_missing(use);
3403       } else if (use->is_MemBar()) {
3404         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3405           memnode_worklist.append_if_missing(use);
3406         }
3407       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3408         Node* addp2 = find_second_addp(use, n);
3409         if (addp2 != NULL) {
3410           alloc_worklist.append_if_missing(addp2);
3411         }
3412         alloc_worklist.append_if_missing(use);
3413       } else if (use->is_Phi() ||
3414                  use->is_CheckCastPP() ||
3415                  use->is_EncodeNarrowPtr() ||
3416                  use->is_DecodeNarrowPtr() ||
3417                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3418         alloc_worklist.append_if_missing(use);
3419 #ifdef ASSERT
3420       } else if (use->is_Mem()) {
3421         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3422       } else if (use->is_MergeMem()) {
3423         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3424       } else if (use->is_SafePoint()) {
3425         // Look for MergeMem nodes for calls which reference unique allocation
3426         // (through CheckCastPP nodes) even for debug info.
3427         Node* m = use->in(TypeFunc::Memory);
3428         if (m->is_MergeMem()) {
3429           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3430         }
3431       } else if (use->Opcode() == Op_EncodeISOArray) {
3432         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3433           // EncodeISOArray overwrites destination array
3434           memnode_worklist.append_if_missing(use);
3435         }



3436       } else {
3437         uint op = use->Opcode();
3438         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3439             (use->in(MemNode::Memory) == n)) {
3440           // They overwrite the memory edge corresponding to the destination array.
3441           memnode_worklist.append_if_missing(use);
3442         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3443               op == Op_CastP2X || op == Op_StoreCM ||
3444               op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
3445               op == Op_CountPositives ||
3446               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3447               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3448               op == Op_SubTypeCheck ||
3449               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3450           n->dump();
3451           use->dump();
3452           assert(false, "EA: missing allocation reference path");
3453         }
3454 #endif
3455       }
3456     }
3457 
3458   }
3459 
3460   // Go over all ArrayCopy nodes and if one of the inputs has a unique
3461   // type, record it in the ArrayCopy node so we know what memory this
3462   // node uses/modified.
3463   for (int next = 0; next < arraycopy_worklist.length(); next++) {
3464     ArrayCopyNode* ac = arraycopy_worklist.at(next);
3465     Node* dest = ac->in(ArrayCopyNode::Dest);
3466     if (dest->is_AddP()) {
3467       dest = get_addp_base(dest);
3468     }

3498   if (memnode_worklist.length() == 0)
3499     return;  // nothing to do
3500   while (memnode_worklist.length() != 0) {
3501     Node *n = memnode_worklist.pop();
3502     if (visited.test_set(n->_idx)) {
3503       continue;
3504     }
3505     if (n->is_Phi() || n->is_ClearArray()) {
3506       // we don't need to do anything, but the users must be pushed
3507     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3508       // we don't need to do anything, but the users must be pushed
3509       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
3510       if (n == NULL) {
3511         continue;
3512       }
3513     } else if (n->Opcode() == Op_StrCompressedCopy ||
3514                n->Opcode() == Op_EncodeISOArray) {
3515       // get the memory projection
3516       n = n->find_out_with(Op_SCMemProj);
3517       assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");



3518     } else {
3519       assert(n->is_Mem(), "memory node required.");
3520       Node *addr = n->in(MemNode::Address);
3521       const Type *addr_t = igvn->type(addr);
3522       if (addr_t == Type::TOP) {
3523         continue;
3524       }
3525       assert (addr_t->isa_ptr() != NULL, "pointer type required.");
3526       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3527       assert ((uint)alias_idx < new_index_end, "wrong alias index");
3528       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3529       if (_compile->failing()) {
3530         return;
3531       }
3532       if (mem != n->in(MemNode::Memory)) {
3533         // We delay the memory edge update since we need old one in
3534         // MergeMem code below when instances memory slices are separated.
3535         set_map(n, mem);
3536       }
3537       if (n->is_Load()) {

3540         // get the memory projection
3541         n = n->find_out_with(Op_SCMemProj);
3542         assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3543       }
3544     }
3545     // push user on appropriate worklist
3546     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3547       Node *use = n->fast_out(i);
3548       if (use->is_Phi() || use->is_ClearArray()) {
3549         memnode_worklist.append_if_missing(use);
3550       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3551         if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
3552           continue;
3553         }
3554         memnode_worklist.append_if_missing(use);
3555       } else if (use->is_MemBar()) {
3556         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3557           memnode_worklist.append_if_missing(use);
3558         }
3559 #ifdef ASSERT
3560       } else if (use->is_Mem()) {
3561         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3562       } else if (use->is_MergeMem()) {
3563         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3564       } else if (use->Opcode() == Op_EncodeISOArray) {
3565         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3566           // EncodeISOArray overwrites destination array
3567           memnode_worklist.append_if_missing(use);
3568         }




3569       } else {
3570         uint op = use->Opcode();
3571         if ((use->in(MemNode::Memory) == n) &&
3572             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
3573           // They overwrite the memory edge corresponding to the destination array.
3574           memnode_worklist.append_if_missing(use);
3575         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
3576               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
3577               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3578               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
3579           n->dump();
3580           use->dump();
3581           assert(false, "EA: missing memory path");
3582         }
3583 #endif
3584       }
3585     }
3586   }
3587 
3588   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
3589   //            Walk each memory slice moving the first node encountered of each
3590   //            instance type to the input corresponding to its alias index.
3591   uint length = mergemem_worklist.length();
3592   for( uint next = 0; next < length; ++next ) {
3593     MergeMemNode* nmm = mergemem_worklist.at(next);
3594     assert(!visited.test_set(nmm->_idx), "should not be visited before");
3595     // Note: we don't want to use MergeMemStream here because we only want to
3596     // scan inputs which exist at the start, not ones we add during processing.
3597     // Note 2: MergeMem may already contain instance memory slices added
3598     // during the find_inst_mem() call when memory nodes were processed above.

3645       Node* result = step_through_mergemem(nmm, ni, tinst);
3646       if (result == nmm->base_memory()) {
3647         // Didn't find instance memory, search through general slice recursively.
3648         result = nmm->memory_at(_compile->get_general_index(ni));
3649         result = find_inst_mem(result, ni, orig_phis);
3650         if (_compile->failing()) {
3651           return;
3652         }
3653         nmm->set_memory_at(ni, result);
3654       }
3655     }
3656     igvn->hash_insert(nmm);
3657     record_for_optimizer(nmm);
3658   }
3659 
3660   //  Phase 4:  Update the inputs of non-instance memory Phis and
3661   //            the Memory input of memnodes
3662   // First update the inputs of any non-instance Phi's from
3663   // which we split out an instance Phi.  Note we don't have
3664   // to recursively process Phi's encountered on the input memory
3665   // chains as is done in split_memory_phi() since they will
3666   // also be processed here.
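  // (The memory edges recorded via set_map() during the memnode walk above
  // are applied to the MemNodes below, once the MergeMems have been rebuilt.)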
3667   for (int j = 0; j < orig_phis.length(); j++) {
3668     PhiNode *phi = orig_phis.at(j);
3669     int alias_idx = _compile->get_alias_index(phi->adr_type());
3670     igvn->hash_delete(phi);
3671     for (uint i = 1; i < phi->req(); i++) {
3672       Node *mem = phi->in(i);
3673       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
3674       if (_compile->failing()) {
3675         return;
3676       }
3677       if (mem != new_mem) {
3678         phi->set_req(i, new_mem);
3679       }
3680     }
3681     igvn->hash_insert(phi);
3682     record_for_optimizer(phi);
3683   }
3684 
3685   // Update the memory inputs of MemNodes with the value we computed

  32 #include "memory/metaspace.hpp"

 154   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 155     Node* n = ideal_nodes.at(next);
 156     if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
 157         !n->in(MemNode::Address)->is_AddP() &&
 158         _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
 159       // Load/Store at mark word address is at offset 0 and so has no AddP, which confuses EA
 160       Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
 161       _igvn->register_new_node_with_optimizer(addp);
 162       _igvn->replace_input_of(n, MemNode::Address, addp);
 163       ideal_nodes.push(addp);
 164       _nodes.at_put_grow(addp->_idx, NULL, NULL);
 165     }
 442   return false;
 443 }
 444 
 445 // Returns true if at least one of the arguments to the call is an object
 446 // that does not escape globally.
 447 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
 448   if (call->method() != NULL) {
 449     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
 450     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
 451       Node* p = call->in(idx);
 452       if (not_global_escape(p)) {
 453         return true;
 454       }
 455     }
 456   } else {
 457     const char* name = call->as_CallStaticJava()->_name;
 458     assert(name != NULL, "no name");
 459     // no arg escapes through uncommon traps
 460     if (strcmp(name, "uncommon_trap") != 0) {
 461       // process_call_arguments() assumes that all arguments escape globally
 462       const TypeTuple* d = call->tf()->domain_sig();
 463       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 464         const Type* at = d->field_at(i);
 465         if (at->isa_oopptr() != NULL) {
 466           return true;
 467         }
 468       }
 469     }
 470   }
 471   return false;
 472 }
 473 
 474 
 475 
 476 // Utility function for nodes that load an object
 477 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
 478   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 479   // ThreadLocal has RawPtr type.
 480   const Type* t = _igvn->type(n);
 481   if (t->make_ptr() != NULL) {
 482     Node* adr = n->in(MemNode::Address);

 516       // first IGVN optimization when escape information is still available.
 517       record_for_optimizer(n);
 518     } else if (n->is_Allocate()) {
 519       add_call_node(n->as_Call());
 520       record_for_optimizer(n);
 521     } else {
 522       if (n->is_CallStaticJava()) {
 523         const char* name = n->as_CallStaticJava()->_name;
 524         if (name != NULL && strcmp(name, "uncommon_trap") == 0) {
 525           return; // Skip uncommon traps
 526         }
 527       }
 528       // Don't mark as processed since call's arguments have to be processed.
 529       delayed_worklist->push(n);
 530       // Check if a call returns an object.
 531       if ((n->as_Call()->returns_pointer() &&
 532            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
 533           (n->is_CallStaticJava() &&
 534            n->as_CallStaticJava()->is_boxing_method())) {
 535         add_call_node(n->as_Call());
 536       } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
 537         bool returns_oop = false;
 538         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
 539           ProjNode* pn = n->fast_out(i)->as_Proj();
 540           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
 541             returns_oop = true;
 542           }
 543         }
 544         if (returns_oop) {
 545           add_call_node(n->as_Call());
 546         }
 547       }
 548     }
 549     return;
 550   }
 551   // Put this check here to process call arguments since some call nodes
 552   // point to phantom_obj.
 553   if (n_ptn == phantom_obj || n_ptn == null_obj) {
 554     return; // Skip predefined nodes.
 555   }
 556   switch (opcode) {
 557     case Op_AddP: {
 558       Node* base = get_addp_base(n);
 559       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 560       // Field nodes are created for all field types. They are used in
 561       // adjust_scalar_replaceable_state() and split_unique_types().
 562       // Note, non-oop fields will have only base edges in Connection
 563       // Graph because such fields are not used for oop loads and stores.
 564       int offset = address_offset(n, igvn);
 565       add_field(n, PointsToNode::NoEscape, offset);
 566       if (ptn_base == NULL) {
 567         delayed_worklist->push(n); // Process it later.
 568       } else {
 569         n_ptn = ptnode_adr(n_idx);
 570         add_base(n_ptn->as_Field(), ptn_base);
 571       }
 572       break;
 573     }
 574     case Op_CastX2P: {
 575       map_ideal_node(n, phantom_obj);
 576       break;
 577     }
 578     case Op_InlineType:
 579     case Op_CastPP:
 580     case Op_CheckCastPP:
 581     case Op_EncodeP:
 582     case Op_DecodeN:
 583     case Op_EncodePKlass:
 584     case Op_DecodeNKlass: {
 585       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
 586       break;
 587     }
 588     case Op_CMoveP: {
 589       add_local_var(n, PointsToNode::NoEscape);
 590       // Do not add edges during the first iteration because some inputs
 591       // may not be defined yet.
 592       delayed_worklist->push(n);
 593       break;
 594     }
 595     case Op_ConP:
 596     case Op_ConN:
 597     case Op_ConNKlass: {
 598       // assume all oop constants globally escape except for null

 629     case Op_PartialSubtypeCheck: {
 630       // Produces Null or notNull and is used only in CmpP so
 631       // phantom_obj could be used.
 632       map_ideal_node(n, phantom_obj); // Result is unknown
 633       break;
 634     }
 635     case Op_Phi: {
 636       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 637       // ThreadLocal has RawPtr type.
 638       const Type* t = n->as_Phi()->type();
 639       if (t->make_ptr() != NULL) {
 640         add_local_var(n, PointsToNode::NoEscape);
 641         // Do not add edges during the first iteration because some inputs
 642         // may not be defined yet.
 643         delayed_worklist->push(n);
 644       }
 645       break;
 646     }
 647     case Op_Proj: {
 648       // we are only interested in the oop result projection from a call
 649       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
 650           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
 651         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
 652                n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
 653         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
 654       }
 655       break;
 656     }
 657     case Op_Rethrow: // Exception object escapes
 658     case Op_Return: {
 659       if (n->req() > TypeFunc::Parms &&
 660           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 661         // Treat Return value as LocalVar with GlobalEscape escape state.
 662         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
 663       }
 664       break;
 665     }
 666     case Op_CompareAndExchangeP:
 667     case Op_CompareAndExchangeN:
 668     case Op_GetAndSetP:
 669     case Op_GetAndSetN: {
 670       add_objload_to_connection_graph(n, delayed_worklist);
 671       // fall-through
 672     }

 732   if (n->is_Call()) {
 733     process_call_arguments(n->as_Call());
 734     return;
 735   }
 736   assert(n->is_Store() || n->is_LoadStore() ||
 737          ((n_ptn != NULL) && (n_ptn->ideal_node() != NULL)),
 738          "node should be registered already");
 739   int opcode = n->Opcode();
 740   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
 741   if (gc_handled) {
 742     return; // Ignore node if already handled by GC.
 743   }
 744   switch (opcode) {
 745     case Op_AddP: {
 746       Node* base = get_addp_base(n);
 747       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 748       assert(ptn_base != NULL, "field's base should be registered");
 749       add_base(n_ptn->as_Field(), ptn_base);
 750       break;
 751     }
 752     case Op_InlineType:
 753     case Op_CastPP:
 754     case Op_CheckCastPP:
 755     case Op_EncodeP:
 756     case Op_DecodeN:
 757     case Op_EncodePKlass:
 758     case Op_DecodeNKlass: {
 759       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
 760       break;
 761     }
 762     case Op_CMoveP: {
 763       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
 764         Node* in = n->in(i);
 765         if (in == NULL) {
 766           continue;  // ignore NULL
 767         }
 768         Node* uncast_in = in->uncast();
 769         if (uncast_in->is_top() || uncast_in == n) {
 770           continue;  // ignore top or inputs which go back this node
 771         }
 772         PointsToNode* ptn = ptnode_adr(in->_idx);

 787       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 788       // ThreadLocal has RawPtr type.
 789       assert(n->as_Phi()->type()->make_ptr() != NULL, "Unexpected node type");
 790       for (uint i = 1; i < n->req(); i++) {
 791         Node* in = n->in(i);
 792         if (in == NULL) {
 793           continue;  // ignore NULL
 794         }
 795         Node* uncast_in = in->uncast();
 796         if (uncast_in->is_top() || uncast_in == n) {
 797           continue;  // ignore top or inputs which go back this node
 798         }
 799         PointsToNode* ptn = ptnode_adr(in->_idx);
 800         assert(ptn != NULL, "node should be registered");
 801         add_edge(n_ptn, ptn);
 802       }
 803       break;
 804     }
 805     case Op_Proj: {
 806       // we are only interested in the oop result projection from a call
 807       assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
 808              n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
 809       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
 810       break;
 811     }
 812     case Op_Rethrow: // Exception object escapes
 813     case Op_Return: {
 814       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
 815              "Unexpected node type");
 816       // Treat Return value as LocalVar with GlobalEscape escape state.
 817       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), NULL);
 818       break;
 819     }
 820     case Op_CompareAndExchangeP:
 821     case Op_CompareAndExchangeN:
 822     case Op_GetAndSetP:
 823     case Op_GetAndSetN: {
 824       assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type");
 825       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
 826       // fall-through
 827     }
 828     case Op_CompareAndSwapP:

 963     PointsToNode* ptn = ptnode_adr(val->_idx);
 964     assert(ptn != NULL, "node should be registered");
 965     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
 966     // Add edge to object for unsafe access with offset.
 967     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 968     assert(adr_ptn != NULL, "node should be registered");
 969     if (adr_ptn->is_Field()) {
 970       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
 971       add_edge(adr_ptn, ptn);
 972     }
 973     return true;
 974   }
 975 #ifdef ASSERT
 976   n->dump(1);
 977   assert(false, "not unsafe");
 978 #endif
 979   return false;
 980 }
 981 
 982 void ConnectionGraph::add_call_node(CallNode* call) {
 983   assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
 984   uint call_idx = call->_idx;
 985   if (call->is_Allocate()) {
 986     Node* k = call->in(AllocateNode::KlassNode);
 987     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 988     assert(kt != NULL, "TypeKlassPtr required.");
 989     PointsToNode::EscapeState es = PointsToNode::NoEscape;
 990     bool scalar_replaceable = true;
 991     NOT_PRODUCT(const char* nsr_reason = "");
 992     if (call->is_AllocateArray()) {
 993       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
 994         es = PointsToNode::GlobalEscape;
 995       } else {
 996         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
 997         if (length < 0) {
 998           // Not scalar replaceable if the length is not constant.
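          // (e.g. 'new int[n]' where 'n' is not a compile-time constant).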
 999           scalar_replaceable = false;
1000           NOT_PRODUCT(nsr_reason = "has a non-constant length");
1001         } else if (length > EliminateAllocationArraySizeLimit) {
1002           // Not scalar replaceable if the length is too big.
1003           scalar_replaceable = false;

1039     //
1040     //    - all oop arguments are escaping globally;
1041     //
1042     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
1043     //
1044     //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
1045     //
1046     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
1047     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
1048     //      during call is returned;
1049     //    - mapped to ArgEscape LocalVar node pointing to object arguments
1050     //      which are returned and do not escape during the call;
1051     //
1052     //    - oop arguments escaping status is defined by bytecode analysis;
1053     //
1054     // For a static call, we know exactly what method is being called.
1055     // Use bytecode estimator to record whether the call's return value escapes.
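    // For example (illustrative, not from the analyzed sources):
    //
    //   static Point make()      { return new Point(); } // NoEscape: fresh allocation
    //   static Point id(Point p) { return p; }           // ArgEscape: returns its argument
    //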
1056     ciMethod* meth = call->as_CallJava()->method();
1057     if (meth == NULL) {
1058       const char* name = call->as_CallStaticJava()->_name;
1059       assert(strncmp(name, "_multianewarray", 15) == 0 ||
1060              strncmp(name, "_load_unknown_inline", 20) == 0, "TODO: add failed case check");
1061       // Returns a newly allocated non-escaped object.
1062       add_java_object(call, PointsToNode::NoEscape);
1063       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multianewarray"));
1064     } else if (meth->is_boxing_method()) {
1065       // Returns boxing object
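      // (e.g. Integer.valueOf(x) may return an object from the shared Integer
      // cache, so it must be treated as globally escaping, while Float.valueOf()
      // and Double.valueOf() always allocate a fresh box).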
1066       PointsToNode::EscapeState es;
1067       vmIntrinsics::ID intr = meth->intrinsic_id();
1068       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
1069         // It does not escape if the object is always newly allocated.
1070         es = PointsToNode::NoEscape;
1071       } else {
1072         // It escapes globally if the object could be loaded from the cache.
1073         es = PointsToNode::GlobalEscape;
1074       }
1075       add_java_object(call, es);
1076     } else {
1077       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
1078       call_analyzer->copy_dependencies(_compile->dependencies());
1079       if (call_analyzer->is_return_allocated()) {
1080         // Returns a newly allocated non-escaped object, simply
1081         // update dependency information.
1082         // Mark it as NoEscape so that objects referenced by
1083         // its fields will be marked as NoEscape at least.
1084         add_java_object(call, PointsToNode::NoEscape);
1085         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
1086       } else {
1087         // Determine whether any arguments are returned.
1088         const TypeTuple* d = call->tf()->domain_cc();
1089         bool ret_arg = false;
1090         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1091           if (d->field_at(i)->isa_ptr() != NULL &&
1092               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1093             ret_arg = true;
1094             break;
1095           }
1096         }
1097         if (ret_arg) {
1098           add_local_var(call, PointsToNode::ArgEscape);
1099         } else {
1100           // Returns unknown object.
1101           map_ideal_node(call, phantom_obj);
1102         }
1103       }
1104     }
1105   } else {
1106     // Another type of call, assume the worst case:
1107     // returned value is unknown and globally escapes.
1108     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");

1116 #ifdef ASSERT
1117     case Op_Allocate:
1118     case Op_AllocateArray:
1119     case Op_Lock:
1120     case Op_Unlock:
1121       assert(false, "should be done already");
1122       break;
1123 #endif
1124     case Op_ArrayCopy:
1125     case Op_CallLeafNoFP:
1126       // Most array copies are ArrayCopy nodes at this point but there
1127       // are still a few direct calls to the copy subroutines (See
1128       // PhaseStringOpts::copy_string())
1129       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1130         call->as_CallLeaf()->is_call_to_arraycopystub();
1131       // fall through
1132     case Op_CallLeafVector:
1133     case Op_CallLeaf: {
1134       // Stub calls: objects do not escape but they are not scalar replaceable.
1135       // Adjust escape state for outgoing arguments.
1136       const TypeTuple * d = call->tf()->domain_sig();
1137       bool src_has_oops = false;
1138       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1139         const Type* at = d->field_at(i);
1140         Node *arg = call->in(i);
1141         if (arg == NULL) {
1142           continue;
1143         }
1144         const Type *aat = _igvn->type(arg);
1145         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
1146           continue;
1147         }
1148         if (arg->is_AddP()) {
1149           //
1150           // The inline_native_clone() case when the arraycopy stub is called
1151           // after the allocation before Initialize and CheckCastPP nodes.
1152           // Or normal arraycopy for object arrays case.
1153           //
1154           // Set AddP's base (Allocate) as not scalar replaceable since
1155           // pointer to the base (with offset) is passed as argument.
1156           //
1157           arg = get_addp_base(arg);
1158         }
1159         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1160         assert(arg_ptn != NULL, "should be registered");
1161         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1162         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
1163           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1164                  aat->isa_ptr() != NULL, "expecting a Ptr");
1165           bool arg_has_oops = aat->isa_oopptr() &&
1166                               (aat->isa_instptr() ||
1167                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != NULL)) ||
1168                                (aat->isa_aryptr() && aat->isa_aryptr()->elem() != NULL &&
1169                                                                aat->isa_aryptr()->is_flat() &&
1170                                                                aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
1171           if (i == TypeFunc::Parms) {
1172             src_has_oops = arg_has_oops;
1173           }
1174           //
1175           // src or dst could be j.l.Object when other is basic type array:
1176           //
1177           //   arraycopy(char[],0,Object*,0,size);
1178           //   arraycopy(Object*,0,char[],0,size);
1179           //
1180           // Don't add edges in such cases.
1181           //
1182           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1183                                        arg_has_oops && (i > TypeFunc::Parms);
1184 #ifdef ASSERT
1185           if (!(is_arraycopy ||
1186                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1187                 (call->as_CallLeaf()->_name != NULL &&
1188                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1189                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1190                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

1199                   strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
1200                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1201                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
1202                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1203                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
1204                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
1205                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
1206                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1207                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1208                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1209                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1210                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1211                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1212                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
1213                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
1214                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1215                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1216                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1217                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1218                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1219                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1220                   strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
1221                   strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
1222                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
1223                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
1225                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
1226                  ))) {
1227             call->dump();
1228             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1229           }
1230 #endif
1231           // Always process arraycopy's destination object since
1232           // we need to add all possible edges to references in
1233           // source object.
1234           if (arg_esc >= PointsToNode::ArgEscape &&
1235               !arg_is_arraycopy_dest) {
1236             continue;
1237           }
1238           PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1239           if (call->is_ArrayCopy()) {
1240             ArrayCopyNode* ac = call->as_ArrayCopy();
1241             if (ac->is_clonebasic() ||

1264           }
1265         }
1266       }
1267       break;
1268     }
1269     case Op_CallStaticJava: {
1270       // For a static call, we know exactly what method is being called.
1271       // Use bytecode estimator to record the call's escape effects
1272 #ifdef ASSERT
1273       const char* name = call->as_CallStaticJava()->_name;
1274       assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1275 #endif
1276       ciMethod* meth = call->as_CallJava()->method();
1277       if ((meth != NULL) && meth->is_boxing_method()) {
1278         break; // Boxing methods do not modify any oops.
1279       }
1280       BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
1281       // fall-through if not a Java method or no analyzer information
1282       if (call_analyzer != NULL) {
1283         PointsToNode* call_ptn = ptnode_adr(call->_idx);
1284         const TypeTuple* d = call->tf()->domain_cc();
1285         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1286           const Type* at = d->field_at(i);
1287           int k = i - TypeFunc::Parms;
1288           Node* arg = call->in(i);
1289           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1290           if (at->isa_ptr() != NULL &&
1291               call_analyzer->is_arg_returned(k)) {
1292             // The call returns arguments.
1293             if (call_ptn != NULL) { // Is call's result used?
1294               assert(call_ptn->is_LocalVar(), "node should be registered");
1295               assert(arg_ptn != NULL, "node should be registered");
1296               add_edge(call_ptn, arg_ptn);
1297             }
1298           }
1299           if (at->isa_oopptr() != NULL &&
1300               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1301             if (!call_analyzer->is_arg_stack(k)) {
1302               // The argument globally escapes
1303               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1304             } else {

1308                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1309               }
1310             }
1311           }
1312         }
1313         if (call_ptn != NULL && call_ptn->is_LocalVar()) {
1314           // The call returns arguments.
1315           assert(call_ptn->edge_count() > 0, "sanity");
1316           if (!call_analyzer->is_return_local()) {
1317             // The call also returns an unknown object.
1318             add_edge(call_ptn, phantom_obj);
1319           }
1320         }
1321         break;
1322       }
1323     }
1324     default: {
1325       // Fall-through here if not a Java method or no analyzer information
1326       // or some other type of call, assume the worst case: all arguments
1327       // globally escape.
1328       const TypeTuple* d = call->tf()->domain_cc();
1329       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1330         const Type* at = d->field_at(i);
1331         if (at->isa_oopptr() != NULL) {
1332           Node* arg = call->in(i);
1333           if (arg->is_AddP()) {
1334             arg = get_addp_base(arg);
1335           }
1336           assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
1337           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1338         }
1339       }
1340     }
1341   }
1342 }
1343 
1344 
1345 // Finish Graph construction.
1346 bool ConnectionGraph::complete_connection_graph(
1347                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
1348                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

1721     PointsToNode* base = i.get();
1722     if (base->is_JavaObject()) {
1723       // Skip Allocate's fields which will be processed later.
1724       if (base->ideal_node()->is_Allocate()) {
1725         return 0;
1726       }
1727       assert(base == null_obj, "only NULL ptr base expected here");
1728     }
1729   }
1730   if (add_edge(field, phantom_obj)) {
1731     // New edge was added
1732     new_edges++;
1733     add_field_uses_to_worklist(field);
1734   }
1735   return new_edges;
1736 }
1737 
1738 // Find fields initializing values for allocations.
1739 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
1740   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1741   PointsToNode* init_val = phantom_obj;
1742   Node* alloc = pta->ideal_node();
1743 
1744   // Do nothing for Allocate nodes since their field values are
1745   // "known" unless they are initialized by arraycopy/clone.
1746   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
1747     if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != NULL) {
1748       // Non-flattened inline type arrays are initialized with
1749       // the default value instead of null. Handle them here.
1750       init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
1751       assert(init_val != NULL, "default value should be registered");
1752     } else {
1753       return 0;
1754     }
1755   }
1756   // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
1757   assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
1758 #ifdef ASSERT
1759   if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == NULL) {
1760     const char* name = alloc->as_CallStaticJava()->_name;
1761     assert(strncmp(name, "_multianewarray", 15) == 0 ||
1762            strncmp(name, "_load_unknown_inline", 20) == 0, "sanity");
1763   }
1764 #endif
1766   int new_edges = 0;
1767   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1768     PointsToNode* field = i.get();
1769     if (field->is_Field() && field->as_Field()->is_oop()) {
1770       if (add_edge(field, init_val)) {
1771         // New edge was added
1772         new_edges++;
1773         add_field_uses_to_worklist(field->as_Field());
1774       }
1775     }
1776   }
1777   return new_edges;
1778 }
1779 
1780 // Find fields initializing values for allocations.
1781 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* phase) {
1782   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1783   Node* alloc = pta->ideal_node();
1784   // Do nothing for Call nodes since their field values are unknown.
1785   if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != NULL) {
1786     return 0;
1787   }
1788   InitializeNode* ini = alloc->as_Allocate()->initialization();
1789   bool visited_bottom_offset = false;
1790   GrowableArray<int> offsets_worklist;
1791   int new_edges = 0;
1792 
1793   // Check if an oop field's initializing value is recorded and add
1794   // a corresponding NULL if the field's value is not recorded.
1795   // Connection Graph does not record a default initialization by NULL
1796   // captured by Initialize node.
1797   //
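  // For example (illustrative): after 'p = new Point();' a read of the
  // never-written field 'p.next' must see NULL as a possible value even
  // though no store to it was captured by the Initialize node.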
1798   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1799     PointsToNode* field = i.get(); // Field (AddP)
1800     if (!field->is_Field() || !field->as_Field()->is_oop()) {
1801       continue; // Not oop field
1802     }
1803     int offset = field->as_Field()->offset();
1804     if (offset == Type::OffsetBot) {
1805       if (!visited_bottom_offset) {

1851               } else {
1852                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1853                   tty->print_cr("----------init store has invalid value -----");
1854                   store->dump();
1855                   val->dump();
1856                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1857                 }
1858                 for (EdgeIterator j(val); j.has_next(); j.next()) {
1859                   PointsToNode* obj = j.get();
1860                   if (obj->is_JavaObject()) {
1861                     if (!field->points_to(obj->as_JavaObject())) {
1862                       missed_obj = obj;
1863                       break;
1864                     }
1865                   }
1866                 }
1867               }
1868               if (missed_obj != NULL) {
1869                 tty->print_cr("----------field---------------------------------");
1870                 field->dump();
1871                 tty->print_cr("----------missed reference to object------------");
1872                 missed_obj->dump();
1873                 tty->print_cr("----------object referenced by init store-------");
1874                 store->dump();
1875                 val->dump();
1876                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1877               }
1878             }
1879 #endif
1880           } else {
1881             // There could be initializing stores which follow allocation.
1882             // For example, a volatile field store is not collected
1883             // by Initialize node.
1884             //
1885             // Need to check for dependent loads to separate such stores from
1886             // stores which follow loads. For now, add the initial value NULL so
1887             // that the pointer compare optimization works correctly.
1888           }
1889         }
1890         if (value == NULL) {
1891           // A field's initializing value was not recorded. Add NULL.
1892           if (add_edge(field, null_obj)) {
1893             // New edge was added

2119         assert(field->edge_count() > 0, "sanity");
2120       }
2121     }
2122   }
2123 }
2124 #endif
2125 
2126 // Optimize ideal graph.
2127 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
2128                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
2129   Compile* C = _compile;
2130   PhaseIterGVN* igvn = _igvn;
2131   if (EliminateLocks) {
2132     // Mark locks before changing ideal graph.
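    // (e.g. 'synchronized (new Object()) { ... }' locks an object that never
    // escapes, so the Lock/Unlock nodes can be marked for elimination).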
2133     int cnt = C->macro_count();
2134     for (int i = 0; i < cnt; i++) {
2135       Node *n = C->macro_node(i);
2136       if (n->is_AbstractLock()) { // Lock and Unlock nodes
2137         AbstractLockNode* alock = n->as_AbstractLock();
2138         if (!alock->is_non_esc_obj()) {
2139           const Type* obj_type = igvn->type(alock->obj_node());
2140           if (not_global_escape(alock->obj_node()) && !obj_type->is_inlinetypeptr()) {
2141             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
2142             // The lock could have been marked eliminated by the lock coarsening
2143             // code during the first IGVN before EA. Replace the coarsened flag
2144             // so that all associated locks/unlocks are eliminated.
2145 #ifdef ASSERT
2146             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
2147 #endif
2148             alock->set_non_esc_obj();
2149           }
2150         }
2151       }
2152     }
2153   }
2154 
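  // For example (illustrative): in 'new A() == new A()' both inputs are
  // distinct non-escaping allocations, so the pointer compare below can be
  // constant-folded to not-equal.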
2155   if (OptimizePtrCompare) {
2156     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
2157       Node *n = ptr_cmp_worklist.at(i);
2158       const TypeInt* tcmp = optimize_ptr_compare(n);
2159       if (tcmp->singleton()) {
2160         Node* cmp = igvn->makecon(tcmp);
2161 #ifndef PRODUCT
2162         if (PrintOptimizePtrCompare) {
2163           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
2164           if (Verbose) {
2165             n->dump(1);
2166           }
2167         }
2168 #endif
2169         igvn->replace_node(n, cmp);
2170       }
2171     }
2172   }
2173 
2174   // For MemBarStoreStore nodes added in library_call.cpp, check
2175   // escape status of associated AllocateNode and optimize out
2176   // MemBarStoreStore node if the allocated object never escapes.
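  // (The barrier only exists so another thread cannot observe the object
  // with uninitialized fields; a never-escaping object is invisible to
  // other threads, making the barrier redundant).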
2177   for (int i = 0; i < storestore_worklist.length(); i++) {
2178     Node* storestore = storestore_worklist.at(i);
2179     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
2180     if (alloc->is_Allocate() && not_global_escape(alloc)) {
2181       if (alloc->in(AllocateNode::InlineType) != NULL) {
2182         // Non-escaping inline type buffer allocations don't require a membar
2183         storestore->as_MemBar()->remove(_igvn);
2184       } else {
2185         MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
2186         mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
2187         mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
2188         igvn->register_new_node_with_optimizer(mb);
2189         igvn->replace_node(storestore, mb);
2190       }
2191     }
2192   }
2193 }
2194 
2195 // Optimize object compares.
2196 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
2197   assert(OptimizePtrCompare, "sanity");
2198   assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
2199   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
2200   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
2201   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0,1]
2202 
2203   PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
2204   PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
2205   JavaObjectNode* jobj1 = unique_java_object(n->in(1));
2206   JavaObjectNode* jobj2 = unique_java_object(n->in(2));
2207   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
2208   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
2209 
2210   // Check simple cases first.

2323   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2324   assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2325   PointsToNode* ptadr = _nodes.at(n->_idx);
2326   if (ptadr != NULL) {
2327     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2328     return;
2329   }
2330   Compile* C = _compile;
2331   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2332   map_ideal_node(n, ptadr);
2333   // Add edge from arraycopy node to source object.
2334   (void)add_edge(ptadr, src);
2335   src->set_arraycopy_src();
2336   // Add edge from destination object to arraycopy node.
2337   (void)add_edge(dst, ptadr);
2338   dst->set_arraycopy_dst();
2339 }
2340 
2341 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2342   const Type* adr_type = n->as_AddP()->bottom_type();
2343   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
2344   BasicType bt = T_INT;
2345   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
2346     // Check only oop fields.
2347     if (!adr_type->isa_aryptr() ||
2348         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
2349         adr_type->isa_aryptr()->elem()->make_oopptr() != NULL) {
2350       // OffsetBot is used to reference array's element. Ignore first AddP.
2351       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2352         bt = T_OBJECT;
2353       }
2354     }
2355   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2356     if (adr_type->isa_instptr()) {
2357       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
2358       if (field != NULL) {
2359         bt = field->layout_type();
2360       } else {
2361         // Check for unsafe oop field access
2362         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2363             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2364             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2365             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2366           bt = T_OBJECT;
2367           (*unsafe) = true;
2368         }
2369       }
2370     } else if (adr_type->isa_aryptr()) {
2371       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2372         // Ignore array length load.
2373       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2374         // Ignore first AddP.
2375       } else {
2376         const Type* elemtype = adr_type->isa_aryptr()->elem();
2377         if (elemtype->isa_inlinetype() && field_offset != Type::OffsetBot) {
2378           ciInlineKlass* vk = elemtype->inline_klass();
2379           field_offset += vk->first_field_offset();
2380           bt = vk->get_field_by_offset(field_offset, false)->layout_type();
2381         } else {
2382           bt = elemtype->array_element_basic_type();
2383         }
2384       }
2385     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2386       // Allocation initialization, ThreadLocal field access, unsafe access
2387       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2388           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2389           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2390           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2391         bt = T_OBJECT;
2392       }
2393     }
2394   }
2395   // Note: T_NARROWOOP is not classed as a real reference type
2396   return (is_reference_type(bt) || bt == T_NARROWOOP);
2397 }
2398 
2399 // Returns unique pointed java object or NULL.
2400 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2401   assert(!_collecting, "should not call when constructed graph");
2402   // If the node was created after the escape computation we can't answer.
2403   uint idx = n->_idx;

2547             return true;
2548           }
2549         }
2550       }
2551     }
2552   }
2553   return false;
2554 }
2555 
2556 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2557   const Type *adr_type = phase->type(adr);
2558   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && is_captured_store_address(adr)) {
2559     // We are computing a raw address for a store captured by an Initialize
2560     // node; compute an appropriate address type. AddP cases #3 and #5 (see below).
2561     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2562     assert(offs != Type::OffsetBot ||
2563            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2564            "offset must be a constant or it is initialization of array");
2565     return offs;
2566   }
2567   return adr_type->is_ptr()->flattened_offset();


2568 }
2569 
2570 Node* ConnectionGraph::get_addp_base(Node *addp) {
2571   assert(addp->is_AddP(), "must be AddP");
2572   //
2573   // AddP cases for Base and Address inputs:
2574   // case #1. Direct object's field reference:
2575   //     Allocate
2576   //       |
2577   //     Proj #5 ( oop result )
2578   //       |
2579   //     CheckCastPP (cast to instance type)
2580   //      | |
2581   //     AddP  ( base == address )
2582   //
2583   // case #2. Indirect object's field reference:
2584   //      Phi
2585   //       |
2586   //     CastPP (cast to instance type)
2587   //      | |

2701   }
2702   return NULL;
2703 }
2704 
2705 //
2706 // Adjust the type and inputs of an AddP which computes the
2707 // address of a field of an instance
2708 //
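// For example (illustrative): for a NoEscape allocation with instance id 24,
// the AddP computing 'obj.f' is retyped from the generic 'Klass+12' to the
// instance-specific 'Klass:NotNull:24+12', which receives its own alias
// index and therefore its own memory slice.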
2709 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2710   PhaseGVN* igvn = _igvn;
2711   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2712   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2713   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2714   if (t == NULL) {
2715     // We are computing a raw address for a store captured by an Initialize
2716     // node; compute an appropriate address type (cases #3 and #5).
2717     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2718     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2719     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2720     assert(offs != Type::OffsetBot, "offset must be a constant");
2721     if (base_t->isa_aryptr() != NULL) {
2722       // In the case of a flattened inline type array, each field has its
2723       // own slice so we need to extract the field being accessed from
2724       // the address computation
2725       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
2726     } else {
2727       t = base_t->add_offset(offs)->is_oopptr();
2728     }
2729   }
2730   int inst_id = base_t->instance_id();
2731   assert(!t->is_known_instance() || t->instance_id() == inst_id,
2732                              "old type must be non-instance or match new type");
2733 
2734   // The type 't' could be a subclass of 'base_t'.
2735   // As a result t->offset() could be larger than base_t's size, which would
2736   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2737   // constructor verifies correctness of the offset.
2738   //
2739   // It could happen on a subclass branch (from the type profiling
2740   // inlining) which was not eliminated during parsing since the exactness
2741   // of the allocation type was not propagated to the subclass type check.
2742   //
2743   // Or the type 't' might not be related to 'base_t' at all.
2744   // It could happen when CHA type is different from MDO type on a dead path
2745   // (for example, from instanceof check) which is not collapsed during parsing.
2746   //
2747   // Do nothing for such AddP node and don't process its users since
2748   // this code branch will go away.
2749   //
2750   if (!t->is_known_instance() &&
2751       !base_t->maybe_java_subtype_of(t)) {
2752      return false; // bail out
2753   }
2754   const TypePtr* tinst = base_t->add_offset(t->offset());
2755   if (tinst->isa_aryptr() && t->isa_aryptr()) {
2756     // In the case of a flattened inline type array, each field has its
2757     // own slice so we need to keep track of the field being accessed.
2758     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
2759     // Keep array properties (not flat/null-free)
2760     tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
2761     if (tinst == NULL) {
2762       return false; // Skip dead path with inconsistent properties
2763     }
2764   }
2765 
2766   // Do NOT remove the next line: ensure a new alias index is allocated
2767   // for the instance type. Note: C++ will not remove it since the call
2768   // has a side effect.
2769   int alias_idx = _compile->get_alias_index(tinst);
2770   igvn->set_type(addp, tinst);
2771   // record the allocation in the node map
2772   set_map(addp, get_map(base->_idx));
2773   // Set addp's Base and Address to 'base'.
2774   Node *abase = addp->in(AddPNode::Base);
2775   Node *adr   = addp->in(AddPNode::Address);
2776   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2777       adr->in(0)->_idx == (uint)inst_id) {
2778     // Skip AddP cases #3 and #5.
2779   } else {
2780     assert(!abase->is_top(), "sanity"); // AddP case #3
2781     if (abase != base) {
2782       igvn->hash_delete(addp);
2783       addp->set_req(AddPNode::Base, base);
2784       if (abase == adr) {
2785         addp->set_req(AddPNode::Address, base);

3427         ptnode_adr(n->_idx)->dump();
3428         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3429 #endif
3430         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3431         return;
3432       } else {
3433         Node *val = get_map(jobj->idx());   // CheckCastPP node
3434         TypeNode *tn = n->as_Type();
3435         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3436         assert(tinst != NULL && tinst->is_known_instance() &&
3437                tinst->instance_id() == jobj->idx(), "instance type expected.");
3438 
3439         const Type *tn_type = igvn->type(tn);
3440         const TypeOopPtr *tn_t;
3441         if (tn_type->isa_narrowoop()) {
3442           tn_t = tn_type->make_ptr()->isa_oopptr();
3443         } else {
3444           tn_t = tn_type->isa_oopptr();
3445         }
3446         if (tn_t != NULL && tinst->maybe_java_subtype_of(tn_t)) {
3447           if (tn_t->isa_aryptr()) {
3448             // Keep array properties (not flat/null-free)
3449             tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
3450             if (tinst == NULL) {
3451               continue; // Skip dead path with inconsistent properties
3452             }
3453           }
3454           if (tn_type->isa_narrowoop()) {
3455             tn_type = tinst->make_narrowoop();
3456           } else {
3457             tn_type = tinst;
3458           }
3459           igvn->hash_delete(tn);
3460           igvn->set_type(tn, tn_type);
3461           tn->set_type(tn_type);
3462           igvn->hash_insert(tn);
3463           record_for_optimizer(n);
3464         } else {
3465           assert(tn_type == TypePtr::NULL_PTR ||
3466                  (tn_t != NULL && !tinst->maybe_java_subtype_of(tn_t)),
3467                  "unexpected type");
3468           continue; // Skip dead path with different type
3469         }
3470       }
3471     } else {
3472       debug_only(n->dump();)
3473       assert(false, "EA: unexpected node");
3474       continue;
3475     }
3476     // push allocation's users on appropriate worklist
3477     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3478       Node *use = n->fast_out(i);
3479       if (use->is_Mem() && use->in(MemNode::Address) == n) {
3480         // Load/store to instance's field
3481         memnode_worklist.append_if_missing(use);
3482       } else if (use->is_MemBar()) {
3483         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3484           memnode_worklist.append_if_missing(use);
3485         }
3486       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3487         Node* addp2 = find_second_addp(use, n);
3488         if (addp2 != NULL) {
3489           alloc_worklist.append_if_missing(addp2);
3490         }
3491         alloc_worklist.append_if_missing(use);
3492       } else if (use->is_Phi() ||
3493                  use->is_CheckCastPP() ||
3494                  use->is_EncodeNarrowPtr() ||
3495                  use->is_DecodeNarrowPtr() ||
3496                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3497         alloc_worklist.append_if_missing(use);
3498 #ifdef ASSERT
3499       } else if (use->is_Mem()) {
3500         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3501       } else if (use->is_MergeMem()) {
3502         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3503       } else if (use->is_SafePoint()) {
3504         // Look for MergeMem nodes for calls which reference unique allocation
3505         // (through CheckCastPP nodes) even for debug info.
3506         Node* m = use->in(TypeFunc::Memory);
3507         if (m->is_MergeMem()) {
3508           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3509         }
3510       } else if (use->Opcode() == Op_EncodeISOArray) {
3511         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3512           // EncodeISOArray overwrites destination array
3513           memnode_worklist.append_if_missing(use);
3514         }
3515       } else if (use->Opcode() == Op_Return) {
3516         // Allocation is referenced by field of returned inline type
3517         assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
3518       } else {
3519         uint op = use->Opcode();
3520         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3521             (use->in(MemNode::Memory) == n)) {
3522           // They overwrite the memory edge corresponding to the destination array.
3523           memnode_worklist.append_if_missing(use);
3524         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3525               op == Op_CastP2X || op == Op_StoreCM ||
3526               op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
3527               op == Op_CountPositives ||
3528               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3529               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3530               op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
3531               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3532           n->dump();
3533           use->dump();
3534           assert(false, "EA: missing allocation reference path");
3535         }
3536 #endif
3537       }
3538     }
3539 
3540   }
3541 
3542   // Go over all ArrayCopy nodes and if one of the inputs has a unique
3543   // type, record it in the ArrayCopy node so we know what memory this
3544   // node uses/modifies.
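       // For example, an arraycopy whose destination is a unique non-escaping
       // array then only modifies that instance's memory slice, which lets
       // later passes disambiguate it from accesses to the general array memory.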
3545   for (int next = 0; next < arraycopy_worklist.length(); next++) {
3546     ArrayCopyNode* ac = arraycopy_worklist.at(next);
3547     Node* dest = ac->in(ArrayCopyNode::Dest);
3548     if (dest->is_AddP()) {
3549       dest = get_addp_base(dest);
3550     }

3580   if (memnode_worklist.length() == 0) {
3581     return;  // nothing to do
       }
3582   while (memnode_worklist.length() != 0) {
3583     Node *n = memnode_worklist.pop();
3584     if (visited.test_set(n->_idx)) {
3585       continue;
3586     }
3587     if (n->is_Phi() || n->is_ClearArray()) {
3588       // we don't need to do anything, but the users must be pushed
3589     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3590       // we don't need to do anything, but the users must be pushed
3591       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
3592       if (n == NULL) {
3593         continue;
3594       }
3595     } else if (n->Opcode() == Op_StrCompressedCopy ||
3596                n->Opcode() == Op_EncodeISOArray) {
3597       // get the memory projection
3598       n = n->find_out_with(Op_SCMemProj);
3599       assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3600     } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != NULL &&
3601                strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
3602       n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
3603     } else {
3604       assert(n->is_Mem(), "memory node required.");
3605       Node *addr = n->in(MemNode::Address);
3606       const Type *addr_t = igvn->type(addr);
3607       if (addr_t == Type::TOP) {
3608         continue;
3609       }
3610       assert(addr_t->isa_ptr() != NULL, "pointer type required.");
3611       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3612       assert((uint)alias_idx < new_index_end, "wrong alias index");
3613       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3614       if (_compile->failing()) {
3615         return;
3616       }
3617       if (mem != n->in(MemNode::Memory)) {
3618         // We delay the memory edge update since we need the old one in
3619         // the MergeMem code below when instance memory slices are separated.
3620         set_map(n, mem);
3621       }
3622       if (n->is_Load()) {

3625         // get the memory projection
3626         n = n->find_out_with(Op_SCMemProj);
3627         assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3628       }
3629     }
3630     // push users on the appropriate worklist
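         // As in the allocation walk above, only users that extend a memory
         // chain (Phi, ClearArray, memory nodes, MemBars, and intrinsics that
         // overwrite the destination) are pushed; other users are merely
         // sanity-checked under ASSERT.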
3631     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3632       Node *use = n->fast_out(i);
3633       if (use->is_Phi() || use->is_ClearArray()) {
3634         memnode_worklist.append_if_missing(use);
3635       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3636         if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
3637           continue;
3638         }
3639         memnode_worklist.append_if_missing(use);
3640       } else if (use->is_MemBar()) {
3641         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3642           memnode_worklist.append_if_missing(use);
3643         }
3644 #ifdef ASSERT
3645       } else if (use->is_Mem()) {
3646         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3647       } else if (use->is_MergeMem()) {
3648         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3649       } else if (use->Opcode() == Op_EncodeISOArray) {
3650         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3651           // EncodeISOArray overwrites destination array
3652           memnode_worklist.append_if_missing(use);
3653         }
3654       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != NULL &&
3655                  strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
3656         // store_unknown_inline overwrites destination array
3657         memnode_worklist.append_if_missing(use);
3658       } else {
3659         uint op = use->Opcode();
3660         if ((use->in(MemNode::Memory) == n) &&
3661             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
3662           // They overwrite the memory edge corresponding to the destination array.
3663           memnode_worklist.append_if_missing(use);
3664         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
3665               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
3666               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3667               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
3668           n->dump();
3669           use->dump();
3670           assert(false, "EA: missing memory path");
3671         }
3672 #endif
3673       }
3674     }
3675   }
3676 
3677   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
3678   //            Walk each memory slice, moving the first node encountered of each
3679   //            instance type to the input corresponding to its alias index.
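       // E.g. a MergeMem that previously routed all instance loads/stores
       // through the general memory slice gets, for each unique instance, a
       // separate input at that instance's alias index, so those accesses no
       // longer interfere with unrelated memory.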
3680   uint length = mergemem_worklist.length();
3681   for (uint next = 0; next < length; ++next) {
3682     MergeMemNode* nmm = mergemem_worklist.at(next);
3683     assert(!visited.test_set(nmm->_idx), "should not be visited before");
3684     // Note: we don't want to use MergeMemStream here because we only want to
3685     // scan inputs which exist at the start, not ones we add during processing.
3686     // Note 2: MergeMem may already contain instance memory slices added
3687     // during the find_inst_mem() call when memory nodes were processed above.

3734       Node* result = step_through_mergemem(nmm, ni, tinst);
3735       if (result == nmm->base_memory()) {
3736         // Didn't find instance memory, search through general slice recursively.
3737         result = nmm->memory_at(_compile->get_general_index(ni));
3738         result = find_inst_mem(result, ni, orig_phis);
3739         if (_compile->failing()) {
3740           return;
3741         }
3742         nmm->set_memory_at(ni, result);
3743       }
3744     }
3745     igvn->hash_insert(nmm);
3746     record_for_optimizer(nmm);
3747   }
3748 
3749   //  Phase 4:  Update the inputs of non-instance memory Phis and
3750   //            the Memory input of memnodes.
3751   // First update the inputs of any non-instance Phi from
3752   // which we split out an instance Phi.  Note we don't have
3753   // to recursively process Phis encountered on the input memory
3754   // chains, as is done in split_memory_phi(), since they will
3755   // also be processed here.
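       // Each Phi in orig_phis had an instance clone split off earlier;
       // re-resolving its inputs through find_inst_mem() here keeps the
       // remaining non-instance Phi consistent with the new memory slices.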
3756   for (int j = 0; j < orig_phis.length(); j++) {
3757     PhiNode *phi = orig_phis.at(j);
3758     int alias_idx = _compile->get_alias_index(phi->adr_type());
3759     igvn->hash_delete(phi);
3760     for (uint i = 1; i < phi->req(); i++) {
3761       Node *mem = phi->in(i);
3762       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
3763       if (_compile->failing()) {
3764         return;
3765       }
3766       if (mem != new_mem) {
3767         phi->set_req(i, new_mem);
3768       }
3769     }
3770     igvn->hash_insert(phi);
3771     record_for_optimizer(phi);
3772   }
3773 
3774   // Update the memory inputs of MemNodes with the value we computed