
src/hotspot/share/opto/escape.cpp


  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.hpp"

  32 #include "memory/resourceArea.hpp"
  33 #include "opto/c2compiler.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/escape.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/movenode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "utilities/macros.hpp"
  43 
  44 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  45   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  46   _in_worklist(C->comp_arena()),
  47   _next_pidx(0),
  48   _collecting(true),
  49   _verify(false),
  50   _compile(C),
  51   _igvn(igvn),

 135   GrowableArray<SafePointNode*>  sfn_worklist;
 136   GrowableArray<MergeMemNode*>   mergemem_worklist;
 137   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 138 
 139   { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
 140 
 141   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 142   ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
 143   // Initialize worklist
 144   if (C->root() != NULL) {
 145     ideal_nodes.push(C->root());
 146   }
 147   // Processed ideal nodes are unique on the ideal_nodes list,
 148   // but several ideal nodes are mapped to the phantom_obj.
 149   // To avoid duplicate entries on the following worklists,
 150   // add the phantom_obj to them only once.
 151   ptnodes_worklist.append(phantom_obj);
 152   java_objects_worklist.append(phantom_obj);
 153   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 154     Node* n = ideal_nodes.at(next);










 155     // Create PointsTo nodes and add them to the Connection Graph. Called
 156     // only once per ideal node since ideal_nodes is a Unique_Node list.
 157     add_node_to_connection_graph(n, &delayed_worklist);
 158     PointsToNode* ptn = ptnode_adr(n->_idx);
 159     if (ptn != NULL && ptn != phantom_obj) {
 160       ptnodes_worklist.append(ptn);
 161       if (ptn->is_JavaObject()) {
 162         java_objects_worklist.append(ptn->as_JavaObject());
 163         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 164             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 165           // Only allocation and java static call results are interesting.
 166           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 167         }
 168       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 169         oop_fields_worklist.append(ptn->as_Field());
 170       }
 171     }
 172     // Collect some interesting nodes for further use.
 173     switch (n->Opcode()) {
 174       case Op_MergeMem:

 417   return false;
 418 }
 419 
 420 // Returns true if at least one of the arguments to the call is an object
 421 // that does not escape globally.
 422 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
 423   if (call->method() != NULL) {
 424     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
 425     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
 426       Node* p = call->in(idx);
 427       if (not_global_escape(p)) {
 428         return true;
 429       }
 430     }
 431   } else {
 432     const char* name = call->as_CallStaticJava()->_name;
 433     assert(name != NULL, "no name");
 434     // no arg escapes through uncommon traps
 435     if (strcmp(name, "uncommon_trap") != 0) {
 436       // process_call_arguments() assumes that all arguments escape globally
 437       const TypeTuple* d = call->tf()->domain();
 438       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 439         const Type* at = d->field_at(i);
 440         if (at->isa_oopptr() != NULL) {
 441           return true;
 442         }
 443       }
 444     }
 445   }
 446   return false;
 447 }
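Both the not_global_escape(p) check above and the escape_state() < PointsToNode::GlobalEscape comparisons used elsewhere in this file rely on the ordering of the EscapeState lattice declared in opto/escape.hpp. A paraphrased sketch of that declaration (see the header for the authoritative definition and comments):

    typedef enum {
      UnknownEscape = 0,  // escape state is not computed yet
      NoEscape      = 1,  // object does not escape the method and is not passed to a call
      ArgEscape     = 2,  // object does not escape the method but is passed to, or
                          // referenced by, a call argument; it does not escape during the call
      GlobalEscape  = 3   // object escapes the method or the thread
    } EscapeState;

Because the states are ordered from least to most escaping, simple comparisons against ArgEscape or GlobalEscape are enough to test an object's escape level.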
 448 
 449 
 450 
 451 // Utility function for nodes that load an object
 452 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
 453   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 454   // ThreadLocal has RawPtr type.
 455   const Type* t = _igvn->type(n);
 456   if (t->make_ptr() != NULL) {
 457     Node* adr = n->in(MemNode::Address);

 491       // first IGVN optimization when escape information is still available.
 492       record_for_optimizer(n);
 493     } else if (n->is_Allocate()) {
 494       add_call_node(n->as_Call());
 495       record_for_optimizer(n);
 496     } else {
 497       if (n->is_CallStaticJava()) {
 498         const char* name = n->as_CallStaticJava()->_name;
 499         if (name != NULL && strcmp(name, "uncommon_trap") == 0) {
 500           return; // Skip uncommon traps
 501         }
 502       }
 503       // Don't mark as processed since call's arguments have to be processed.
 504       delayed_worklist->push(n);
 505       // Check if a call returns an object.
 506       if ((n->as_Call()->returns_pointer() &&
 507            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
 508           (n->is_CallStaticJava() &&
 509            n->as_CallStaticJava()->is_boxing_method())) {
 510         add_call_node(n->as_Call());











 511       }
 512     }
 513     return;
 514   }
 515   // Put this check here to process call arguments since some call nodes
 516   // point to phantom_obj.
 517   if (n_ptn == phantom_obj || n_ptn == null_obj) {
 518     return; // Skip predefined nodes.
 519   }
 520   switch (opcode) {
 521     case Op_AddP: {
 522       Node* base = get_addp_base(n);
 523       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 524       // Field nodes are created for all field types. They are used in
 525       // adjust_scalar_replaceable_state() and split_unique_types().
 526       // Note, non-oop fields will have only base edges in Connection
 527       // Graph because such fields are not used for oop loads and stores.
 528       int offset = address_offset(n, igvn);
 529       add_field(n, PointsToNode::NoEscape, offset);
 530       if (ptn_base == NULL) {
 531         delayed_worklist->push(n); // Process it later.
 532       } else {
 533         n_ptn = ptnode_adr(n_idx);
 534         add_base(n_ptn->as_Field(), ptn_base);
 535       }
 536       break;
 537     }
 538     case Op_CastX2P: {
 539       map_ideal_node(n, phantom_obj);
 540       break;
 541     }

 542     case Op_CastPP:
 543     case Op_CheckCastPP:
 544     case Op_EncodeP:
 545     case Op_DecodeN:
 546     case Op_EncodePKlass:
 547     case Op_DecodeNKlass: {
 548       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
 549       break;
 550     }
 551     case Op_CMoveP: {
 552       add_local_var(n, PointsToNode::NoEscape);
 553       // Do not add edges during the first iteration because some inputs
 554       // may not be defined yet.
 555       delayed_worklist->push(n);
 556       break;
 557     }
 558     case Op_ConP:
 559     case Op_ConN:
 560     case Op_ConNKlass: {
 561       // assume all oop constants globally escape except for null

 592     case Op_PartialSubtypeCheck: {
 593       // Produces Null or notNull and is used only in CmpP so
 594       // phantom_obj could be used.
 595       map_ideal_node(n, phantom_obj); // Result is unknown
 596       break;
 597     }
 598     case Op_Phi: {
 599       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 600       // ThreadLocal has RawPtr type.
 601       const Type* t = n->as_Phi()->type();
 602       if (t->make_ptr() != NULL) {
 603         add_local_var(n, PointsToNode::NoEscape);
 604         // Do not add edges during the first iteration because some inputs
 605         // may not be defined yet.
 606         delayed_worklist->push(n);
 607       }
 608       break;
 609     }
 610     case Op_Proj: {
 611       // we are only interested in the oop result projection from a call
 612       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 613           n->in(0)->as_Call()->returns_pointer()) {


 614         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
 615       }
 616       break;
 617     }
 618     case Op_Rethrow: // Exception object escapes
 619     case Op_Return: {
 620       if (n->req() > TypeFunc::Parms &&
 621           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 622         // Treat Return value as LocalVar with GlobalEscape escape state.
 623         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
 624       }
 625       break;
 626     }
 627     case Op_CompareAndExchangeP:
 628     case Op_CompareAndExchangeN:
 629     case Op_GetAndSetP:
 630     case Op_GetAndSetN: {
 631       add_objload_to_connection_graph(n, delayed_worklist);
 632       // fall-through
 633     }
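      // Note on the fall-through above: CompareAndExchange* and GetAndSet* nodes
      // both produce the previous value at the address (a load, handled by
      // add_objload_to_connection_graph()) and store a new value, so the missing
      // break is deliberate: control continues into the CompareAndSwap*/Store*
      // handling in the elided lines that follow. The same fall-through pattern
      // appears again in the add_final_edges() switch below.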

 693   if (n->is_Call()) {
 694     process_call_arguments(n->as_Call());
 695     return;
 696   }
 697   assert(n->is_Store() || n->is_LoadStore() ||
 698          (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
 699          "node should be registered already");
 700   int opcode = n->Opcode();
 701   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
 702   if (gc_handled) {
 703     return; // Ignore node if already handled by GC.
 704   }
 705   switch (opcode) {
 706     case Op_AddP: {
 707       Node* base = get_addp_base(n);
 708       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 709       assert(ptn_base != NULL, "field's base should be registered");
 710       add_base(n_ptn->as_Field(), ptn_base);
 711       break;
 712     }

 713     case Op_CastPP:
 714     case Op_CheckCastPP:
 715     case Op_EncodeP:
 716     case Op_DecodeN:
 717     case Op_EncodePKlass:
 718     case Op_DecodeNKlass: {
 719       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
 720       break;
 721     }
 722     case Op_CMoveP: {
 723       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
 724         Node* in = n->in(i);
 725         if (in == NULL) {
 726           continue;  // ignore NULL
 727         }
 728         Node* uncast_in = in->uncast();
 729         if (uncast_in->is_top() || uncast_in == n) {
 730           continue;  // ignore top or inputs which go back this node
 731         }
 732         PointsToNode* ptn = ptnode_adr(in->_idx);

 747       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 748       // ThreadLocal has RawPtr type.
 749       assert(n->as_Phi()->type()->make_ptr() != NULL, "Unexpected node type");
 750       for (uint i = 1; i < n->req(); i++) {
 751         Node* in = n->in(i);
 752         if (in == NULL) {
 753           continue;  // ignore NULL
 754         }
 755         Node* uncast_in = in->uncast();
 756         if (uncast_in->is_top() || uncast_in == n) {
 757           continue;  // ignore top or inputs which go back this node
 758         }
 759         PointsToNode* ptn = ptnode_adr(in->_idx);
 760         assert(ptn != NULL, "node should be registered");
 761         add_edge(n_ptn, ptn);
 762       }
 763       break;
 764     }
 765     case Op_Proj: {
 766       // we are only interested in the oop result projection from a call
 767       assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 768              n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
 769       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
 770       break;
 771     }
 772     case Op_Rethrow: // Exception object escapes
 773     case Op_Return: {
 774       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
 775              "Unexpected node type");
 776       // Treat Return value as LocalVar with GlobalEscape escape state.
 777       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), NULL);
 778       break;
 779     }
 780     case Op_CompareAndExchangeP:
 781     case Op_CompareAndExchangeN:
 782     case Op_GetAndSetP:
 783     case Op_GetAndSetN:{
 784       assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type");
 785       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
 786       // fall-through
 787     }
 788     case Op_CompareAndSwapP:

 923     PointsToNode* ptn = ptnode_adr(val->_idx);
 924     assert(ptn != NULL, "node should be registered");
 925     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
 926     // Add edge to object for unsafe access with offset.
 927     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 928     assert(adr_ptn != NULL, "node should be registered");
 929     if (adr_ptn->is_Field()) {
 930       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
 931       add_edge(adr_ptn, ptn);
 932     }
 933     return true;
 934   }
 935 #ifdef ASSERT
 936   n->dump(1);
 937   assert(false, "not unsafe");
 938 #endif
 939   return false;
 940 }
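The NOT_PRODUCT(COMMA "...") arguments used above and throughout this file follow the usual HotSpot idiom from utilities/macros.hpp: NOT_PRODUCT(x) expands to x in non-product builds and to nothing in product builds, and COMMA expands to a literal ','. The trace-reason parameter of set_escape_state() therefore exists only in non-product builds. An illustrative expansion of the call at line 925 (a sketch, not compiler output):

    // non-product (debug) build:
    set_escape_state(ptn, PointsToNode::GlobalEscape , "stored at raw address");
    // product build:
    set_escape_state(ptn, PointsToNode::GlobalEscape);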
 941 
 942 void ConnectionGraph::add_call_node(CallNode* call) {
 943   assert(call->returns_pointer(), "only for call which returns pointer");
 944   uint call_idx = call->_idx;
 945   if (call->is_Allocate()) {
 946     Node* k = call->in(AllocateNode::KlassNode);
 947     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 948     assert(kt != NULL, "TypeKlassPtr  required.");
 949     PointsToNode::EscapeState es = PointsToNode::NoEscape;
 950     bool scalar_replaceable = true;
 951     NOT_PRODUCT(const char* nsr_reason = "");
 952     if (call->is_AllocateArray()) {
 953       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
 954         es = PointsToNode::GlobalEscape;
 955       } else {
 956         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
 957         if (length < 0) {
 958           // Not scalar replaceable if the length is not constant.
 959           scalar_replaceable = false;
 960           NOT_PRODUCT(nsr_reason = "has a non-constant length");
 961         } else if (length > EliminateAllocationArraySizeLimit) {
 962           // Not scalar replaceable if the length is too big.
 963           scalar_replaceable = false;

 999     //
1000     //    - all oop arguments are escaping globally;
1001     //
1002     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
1003     //
1004     //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
1005     //
1006     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
1007     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
1008     //      during call is returned;
1009     //    - mapped to ArgEscape LocalVar node pointing to object arguments
1010     //      which are returned and do not escape during the call;
1011     //
1012     //    - oop arguments' escaping status is defined by bytecode analysis;
1013     //
1014     // For a static call, we know exactly what method is being called.
1015     // Use bytecode estimator to record whether the call's return value escapes.
1016     ciMethod* meth = call->as_CallJava()->method();
1017     if (meth == NULL) {
1018       const char* name = call->as_CallStaticJava()->_name;
1019       assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");

1020       // Returns a newly allocated non-escaped object.
1021       add_java_object(call, PointsToNode::NoEscape);
1022       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
1023     } else if (meth->is_boxing_method()) {
1024       // Returns boxing object
1025       PointsToNode::EscapeState es;
1026       vmIntrinsics::ID intr = meth->intrinsic_id();
1027       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
1028         // It does not escape if the boxed object is always freshly allocated.
1029         es = PointsToNode::NoEscape;
1030       } else {
1031         // It escapes globally if the object could be loaded from a cache (e.g. Integer.valueOf()'s shared cache).
1032         es = PointsToNode::GlobalEscape;
1033       }
1034       add_java_object(call, es);
1035     } else {
1036       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
1037       call_analyzer->copy_dependencies(_compile->dependencies());
1038       if (call_analyzer->is_return_allocated()) {
1039         // Returns a newly allocated non-escaped object, simply
1040         // update dependency information.
1041         // Mark it as NoEscape so that objects referenced by
1042         // its fields will be marked as NoEscape at least.
1043         add_java_object(call, PointsToNode::NoEscape);
1044         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
1045       } else {
1046         // Determine whether any arguments are returned.
1047         const TypeTuple* d = call->tf()->domain();
1048         bool ret_arg = false;
1049         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1050           if (d->field_at(i)->isa_ptr() != NULL &&
1051               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1052             ret_arg = true;
1053             break;
1054           }
1055         }
1056         if (ret_arg) {
1057           add_local_var(call, PointsToNode::ArgEscape);
1058         } else {
1059           // Returns unknown object.
1060           map_ideal_node(call, phantom_obj);
1061         }
1062       }
1063     }
1064   } else {
1065     // Another type of call, assume the worst case:
1066     // returned value is unknown and globally escapes.
1067     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");

1075 #ifdef ASSERT
1076     case Op_Allocate:
1077     case Op_AllocateArray:
1078     case Op_Lock:
1079     case Op_Unlock:
1080       assert(false, "should be done already");
1081       break;
1082 #endif
1083     case Op_ArrayCopy:
1084     case Op_CallLeafNoFP:
1085       // Most array copies are ArrayCopy nodes at this point but there
1086       // are still a few direct calls to the copy subroutines (See
1087       // PhaseStringOpts::copy_string())
1088       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1089         call->as_CallLeaf()->is_call_to_arraycopystub();
1090       // fall through
1091     case Op_CallLeafVector:
1092     case Op_CallLeaf: {
1093       // Stub calls, objects do not escape but they are not scalar replaceable.
1094       // Adjust escape state for outgoing arguments.
1095       const TypeTuple * d = call->tf()->domain();
1096       bool src_has_oops = false;
1097       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1098         const Type* at = d->field_at(i);
1099         Node *arg = call->in(i);
1100         if (arg == NULL) {
1101           continue;
1102         }
1103         const Type *aat = _igvn->type(arg);
1104         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
1105           continue;
1106         }
1107         if (arg->is_AddP()) {
1108           //
1109           // The inline_native_clone() case when the arraycopy stub is called
1110           // after the allocation before Initialize and CheckCastPP nodes.
1111           // Or normal arraycopy for object arrays case.
1112           //
1113           // Set AddP's base (Allocate) as not scalar replaceable since
1114           // pointer to the base (with offset) is passed as argument.
1115           //
1116           arg = get_addp_base(arg);
1117         }
1118         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1119         assert(arg_ptn != NULL, "should be registered");
1120         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1121         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
1122           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1123                  aat->isa_ptr() != NULL, "expecting a Ptr");
1124           bool arg_has_oops = aat->isa_oopptr() &&
1125                               (aat->isa_instptr() ||
1126                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != NULL)));



1127           if (i == TypeFunc::Parms) {
1128             src_has_oops = arg_has_oops;
1129           }
1130           //
1131           // src or dst could be j.l.Object when other is basic type array:
1132           //
1133           //   arraycopy(char[],0,Object*,0,size);
1134           //   arraycopy(Object*,0,char[],0,size);
1135           //
1136           // Don't add edges in such cases.
1137           //
1138           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1139                                        arg_has_oops && (i > TypeFunc::Parms);
1140 #ifdef ASSERT
1141           if (!(is_arraycopy ||
1142                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1143                 (call->as_CallLeaf()->_name != NULL &&
1144                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1145                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1146                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

1153                   strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
1154                   strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
1155                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1156                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1157                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
1158                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
1159                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
1160                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1161                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1162                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1163                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1164                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1165                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1166                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
1167                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
1168                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1169                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1170                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1171                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1172                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||



1173                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
1174                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
1175                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1176                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
1177                  ))) {
1178             call->dump();
1179             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1180           }
1181 #endif
1182           // Always process arraycopy's destination object since
1183           // we need to add all possible edges to references in
1184           // source object.
1185           if (arg_esc >= PointsToNode::ArgEscape &&
1186               !arg_is_arraycopy_dest) {
1187             continue;
1188           }
1189           PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1190           if (call->is_ArrayCopy()) {
1191             ArrayCopyNode* ac = call->as_ArrayCopy();
1192             if (ac->is_clonebasic() ||

1215           }
1216         }
1217       }
1218       break;
1219     }
1220     case Op_CallStaticJava: {
1221       // For a static call, we know exactly what method is being called.
1222       // Use bytecode estimator to record the call's escape effects
1223 #ifdef ASSERT
1224       const char* name = call->as_CallStaticJava()->_name;
1225       assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1226 #endif
1227       ciMethod* meth = call->as_CallJava()->method();
1228       if ((meth != NULL) && meth->is_boxing_method()) {
1229         break; // Boxing methods do not modify any oops.
1230       }
1231       BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
1232       // fall-through if not a Java method or no analyzer information
1233       if (call_analyzer != NULL) {
1234         PointsToNode* call_ptn = ptnode_adr(call->_idx);
1235         const TypeTuple* d = call->tf()->domain();
1236         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1237           const Type* at = d->field_at(i);
1238           int k = i - TypeFunc::Parms;
1239           Node* arg = call->in(i);
1240           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1241           if (at->isa_ptr() != NULL &&
1242               call_analyzer->is_arg_returned(k)) {
1243             // The call returns arguments.
1244             if (call_ptn != NULL) { // Is call's result used?
1245               assert(call_ptn->is_LocalVar(), "node should be registered");
1246               assert(arg_ptn != NULL, "node should be registered");
1247               add_edge(call_ptn, arg_ptn);
1248             }
1249           }
1250           if (at->isa_oopptr() != NULL &&
1251               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1252             if (!call_analyzer->is_arg_stack(k)) {
1253               // The argument globally escapes
1254               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1255             } else {

1259                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1260               }
1261             }
1262           }
1263         }
1264         if (call_ptn != NULL && call_ptn->is_LocalVar()) {
1265           // The call returns arguments.
1266           assert(call_ptn->edge_count() > 0, "sanity");
1267           if (!call_analyzer->is_return_local()) {
1268             // Returns also unknown object.
1269             add_edge(call_ptn, phantom_obj);
1270           }
1271         }
1272         break;
1273       }
1274     }
1275     default: {
1276       // Fall-through here if this is not a Java method, there is no analyzer
1277       // information, or it is some other type of call; assume the worst case:
1278       // all arguments globally escape.
1279       const TypeTuple* d = call->tf()->domain();
1280       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1281         const Type* at = d->field_at(i);
1282         if (at->isa_oopptr() != NULL) {
1283           Node* arg = call->in(i);
1284           if (arg->is_AddP()) {
1285             arg = get_addp_base(arg);
1286           }
1287           assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
1288           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1289         }
1290       }
1291     }
1292   }
1293 }
1294 
1295 
1296 // Finish Graph construction.
1297 bool ConnectionGraph::complete_connection_graph(
1298                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
1299                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

1672     PointsToNode* base = i.get();
1673     if (base->is_JavaObject()) {
1674       // Skip Allocate's fields which will be processed later.
1675       if (base->ideal_node()->is_Allocate()) {
1676         return 0;
1677       }
1678       assert(base == null_obj, "only NULL ptr base expected here");
1679     }
1680   }
1681   if (add_edge(field, phantom_obj)) {
1682     // New edge was added
1683     new_edges++;
1684     add_field_uses_to_worklist(field);
1685   }
1686   return new_edges;
1687 }
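A note on phantom_obj, which appears both in the worklist setup near the top of compute_escape() and in edges such as add_edge(field, phantom_obj) above: it is the predefined JavaObject node representing "some object the analysis cannot track". It is created once in the ConnectionGraph constructor with GlobalEscape state, roughly along the lines of add_java_object(C->top(), PointsToNode::GlobalEscape) (a paraphrase; see the constructor for the exact form). An edge from a field to phantom_obj therefore records that the field may hold an untracked reference.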
1688 
1689 // Find fields initializing values for allocations.
1690 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
1691   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");

1692   Node* alloc = pta->ideal_node();
1693 
1694   // Do nothing for Allocate nodes since their field values are
1695   // "known" unless they are initialized by arraycopy/clone.
1696   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
1697     return 0;







1698   }
1699   assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");

1700 #ifdef ASSERT
1701   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
1702     const char* name = alloc->as_CallStaticJava()->_name;
1703     assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");

1704   }
1705 #endif
1706   // A non-escaped allocation returned from a Java or runtime call has unknown values in its fields.
1707   int new_edges = 0;
1708   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1709     PointsToNode* field = i.get();
1710     if (field->is_Field() && field->as_Field()->is_oop()) {
1711       if (add_edge(field, phantom_obj)) {
1712         // New edge was added
1713         new_edges++;
1714         add_field_uses_to_worklist(field->as_Field());
1715       }
1716     }
1717   }
1718   return new_edges;
1719 }
1720 
1721 // Find fields initializing values for allocations.
1722 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* phase) {
1723   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1724   Node* alloc = pta->ideal_node();
1725   // Do nothing for Call nodes since their field values are unknown.
1726   if (!alloc->is_Allocate()) {
1727     return 0;
1728   }
1729   InitializeNode* ini = alloc->as_Allocate()->initialization();
1730   bool visited_bottom_offset = false;
1731   GrowableArray<int> offsets_worklist;
1732   int new_edges = 0;
1733 
1734   // Check if an oop field's initializing value is recorded and add
1735   // a corresponding NULL as the field's value if it is not recorded.
1736   // Connection Graph does not record a default initialization by NULL
1737   // captured by Initialize node.
1738   //
1739   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1740     PointsToNode* field = i.get(); // Field (AddP)
1741     if (!field->is_Field() || !field->as_Field()->is_oop()) {
1742       continue; // Not oop field
1743     }
1744     int offset = field->as_Field()->offset();
1745     if (offset == Type::OffsetBot) {
1746       if (!visited_bottom_offset) {

1792               } else {
1793                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1794                   tty->print_cr("----------init store has invalid value -----");
1795                   store->dump();
1796                   val->dump();
1797                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1798                 }
1799                 for (EdgeIterator j(val); j.has_next(); j.next()) {
1800                   PointsToNode* obj = j.get();
1801                   if (obj->is_JavaObject()) {
1802                     if (!field->points_to(obj->as_JavaObject())) {
1803                       missed_obj = obj;
1804                       break;
1805                     }
1806                   }
1807                 }
1808               }
1809               if (missed_obj != NULL) {
1810                 tty->print_cr("----------field---------------------------------");
1811                 field->dump();
1812                 tty->print_cr("----------missed reference to object-----------");
1813                 missed_obj->dump();
1814                 tty->print_cr("----------object referenced by init store -----");
1815                 store->dump();
1816                 val->dump();
1817                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1818               }
1819             }
1820 #endif
1821           } else {
1822             // There could be initializing stores which follow allocation.
1823             // For example, a volatile field store is not collected
1824             // by Initialize node.
1825             //
1826             // Need to check for dependent loads to separate such stores from
1827             // stores which follow loads. For now, add initial value NULL so
1828             // that compare pointers optimization works correctly.
1829           }
1830         }
1831         if (value == NULL) {
1832           // A field's initializing value was not recorded. Add NULL.
1833           if (add_edge(field, null_obj)) {
1834             // New edge was added

2026         assert(field->edge_count() > 0, "sanity");
2027       }
2028     }
2029   }
2030 }
2031 #endif
2032 
2033 // Optimize ideal graph.
2034 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
2035                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
2036   Compile* C = _compile;
2037   PhaseIterGVN* igvn = _igvn;
2038   if (EliminateLocks) {
2039     // Mark locks before changing ideal graph.
2040     int cnt = C->macro_count();
2041     for (int i = 0; i < cnt; i++) {
2042       Node *n = C->macro_node(i);
2043       if (n->is_AbstractLock()) { // Lock and Unlock nodes
2044         AbstractLockNode* alock = n->as_AbstractLock();
2045         if (!alock->is_non_esc_obj()) {
2046           if (not_global_escape(alock->obj_node())) {


2047             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
2048             // The lock could be marked eliminated by lock coarsening
2049             // code during first IGVN before EA. Replace coarsened flag
2050             // to eliminate all associated locks/unlocks.
2051 #ifdef ASSERT
2052             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
2053 #endif
2054             alock->set_non_esc_obj();
2055           }
2056         }
2057       }
2058     }
2059   }
2060 
2061   if (OptimizePtrCompare) {
2062     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
2063       Node *n = ptr_cmp_worklist.at(i);
2064       const TypeInt* tcmp = optimize_ptr_compare(n);
2065       if (tcmp->singleton()) {
2066         Node* cmp = igvn->makecon(tcmp);
2067 #ifndef PRODUCT
2068         if (PrintOptimizePtrCompare) {
2069           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
2070           if (Verbose) {
2071             n->dump(1);
2072           }
2073         }
2074 #endif
2075         igvn->replace_node(n, cmp);
2076       }
2077     }
2078   }
2079 
2080   // For MemBarStoreStore nodes added in library_call.cpp, check
2081   // escape status of associated AllocateNode and optimize out
2082   // MemBarStoreStore node if the allocated object never escapes.
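  // (The barrier only orders the initializing stores of the freshly allocated
  // object before a reference to it can be published to another thread; if the
  // object never escapes, no other thread can observe it, so the compiler-only
  // MemBarCPUOrder substituted below is sufficient.)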
2083   for (int i = 0; i < storestore_worklist.length(); i++) {
2084     Node* storestore = storestore_worklist.at(i);
2085     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
2086     if (alloc->is_Allocate() && not_global_escape(alloc)) {
2087       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
2088       mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
2089       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
2090       igvn->register_new_node_with_optimizer(mb);
2091       igvn->replace_node(storestore, mb);





2092     }
2093   }
2094 }
2095 
2096 // Optimize object compares.
2097 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
2098   assert(OptimizePtrCompare, "sanity");
2099   assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
2100   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
2101   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
2102   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0,1]
2103 
2104   PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
2105   PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
2106   JavaObjectNode* jobj1 = unique_java_object(n->in(1));
2107   JavaObjectNode* jobj2 = unique_java_object(n->in(2));
2108   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
2109   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
2110 
2111   // Check simple cases first.

2224   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2225   assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2226   PointsToNode* ptadr = _nodes.at(n->_idx);
2227   if (ptadr != NULL) {
2228     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2229     return;
2230   }
2231   Compile* C = _compile;
2232   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2233   map_ideal_node(n, ptadr);
2234   // Add edge from arraycopy node to source object.
2235   (void)add_edge(ptadr, src);
2236   src->set_arraycopy_src();
2237   // Add edge from destination object to arraycopy node.
2238   (void)add_edge(dst, ptadr);
2239   dst->set_arraycopy_dst();
2240 }
2241 
2242 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2243   const Type* adr_type = n->as_AddP()->bottom_type();

2244   BasicType bt = T_INT;
2245   if (offset == Type::OffsetBot) {
2246     // Check only oop fields.
2247     if (!adr_type->isa_aryptr() ||
2248         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
2249         adr_type->isa_aryptr()->elem()->make_oopptr() != NULL) {
2250       // OffsetBot is used to reference array's element. Ignore first AddP.
2251       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2252         bt = T_OBJECT;
2253       }
2254     }
2255   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2256     if (adr_type->isa_instptr()) {
2257       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2258       if (field != NULL) {
2259         bt = field->layout_type();
2260       } else {
2261         // Check for unsafe oop field access
2262         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2263             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2264             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2265             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2266           bt = T_OBJECT;
2267           (*unsafe) = true;
2268         }
2269       }
2270     } else if (adr_type->isa_aryptr()) {
2271       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2272         // Ignore array length load.
2273       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2274         // Ignore first AddP.
2275       } else {
2276         const Type* elemtype = adr_type->isa_aryptr()->elem();
2277         bt = elemtype->array_element_basic_type();






2278       }
2279     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2280       // Allocation initialization, ThreadLocal field access, unsafe access
2281       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2282           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2283           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2284           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2285         bt = T_OBJECT;
2286       }
2287     }
2288   }
2289   // Note: T_NARROWOOP is not classed as a real reference type
2290   return (is_reference_type(bt) || bt == T_NARROWOOP);
2291 }
2292 
2293 // Returns unique pointed java object or NULL.
2294 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2295   assert(!_collecting, "should not call when constructed graph");
2296   // If the node was created after the escape computation we can't answer.
2297   uint idx = n->_idx;

2441             return true;
2442           }
2443         }
2444       }
2445     }
2446   }
2447   return false;
2448 }
2449 
2450 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2451   const Type *adr_type = phase->type(adr);
2452   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && is_captured_store_address(adr)) {
2453     // We are computing a raw address for a store captured by an Initialize;
2454     // compute an appropriate address type. AddP cases #3 and #5 (see below).
2455     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2456     assert(offs != Type::OffsetBot ||
2457            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2458            "offset must be a constant or it is initialization of array");
2459     return offs;
2460   }
2461   const TypePtr *t_ptr = adr_type->isa_ptr();
2462   assert(t_ptr != NULL, "must be a pointer type");
2463   return t_ptr->offset();
2464 }
2465 
2466 Node* ConnectionGraph::get_addp_base(Node *addp) {
2467   assert(addp->is_AddP(), "must be AddP");
2468   //
2469   // AddP cases for Base and Address inputs:
2470   // case #1. Direct object's field reference:
2471   //     Allocate
2472   //       |
2473   //     Proj #5 ( oop result )
2474   //       |
2475   //     CheckCastPP (cast to instance type)
2476   //      | |
2477   //     AddP  ( base == address )
2478   //
2479   // case #2. Indirect object's field reference:
2480   //      Phi
2481   //       |
2482   //     CastPP (cast to instance type)
2483   //      | |

2597   }
2598   return NULL;
2599 }
2600 
2601 //
2602 // Adjust the type and inputs of an AddP which computes the
2603 // address of a field of an instance
2604 //
2605 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2606   PhaseGVN* igvn = _igvn;
2607   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2608   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2609   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2610   if (t == NULL) {
2611     // We are computing a raw address for a store captured by an Initialize;
2612     // compute an appropriate address type (cases #3 and #5).
2613     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2614     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2615     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2616     assert(offs != Type::OffsetBot, "offset must be a constant");
2617     t = base_t->add_offset(offs)->is_oopptr();







2618   }
2619   int inst_id =  base_t->instance_id();
2620   assert(!t->is_known_instance() || t->instance_id() == inst_id,
2621                              "old type must be non-instance or match new type");
2622 
2623   // The type 't' could be a subclass of 'base_t'.
2624   // As a result t->offset() could be larger than base_t's size, which will
2625   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2626   // constructor verifies correctness of the offset.
2627   //
2628   // This can happen on a subclass branch (from type profiling
2629   // inlining) which was not eliminated during parsing since the exactness
2630   // of the allocation type was not propagated to the subclass type check.
2631   //
2632   // Or the type 't' might not be related to 'base_t' at all.
2633   // This can happen when the CHA type differs from the MDO type on a dead path
2634   // (for example, from an instanceof check) which is not collapsed during parsing.
2635   //
2636   // Do nothing for such an AddP node and don't process its users since
2637   // this code branch will go away.
2638   //
2639   if (!t->is_known_instance() &&
2640       !base_t->maybe_java_subtype_of(t)) {
2641      return false; // bail out
2642   }
2643   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();











2644   // Do NOT remove the next line: ensure a new alias index is allocated
2645   // for the instance type. Note: C++ will not remove it since the call
2646   // has side effect.
2647   int alias_idx = _compile->get_alias_index(tinst);
2648   igvn->set_type(addp, tinst);
2649   // record the allocation in the node map
2650   set_map(addp, get_map(base->_idx));
2651   // Set addp's Base and Address to 'base'.
2652   Node *abase = addp->in(AddPNode::Base);
2653   Node *adr   = addp->in(AddPNode::Address);
2654   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2655       adr->in(0)->_idx == (uint)inst_id) {
2656     // Skip AddP cases #3 and #5.
2657   } else {
2658     assert(!abase->is_top(), "sanity"); // AddP case #3
2659     if (abase != base) {
2660       igvn->hash_delete(addp);
2661       addp->set_req(AddPNode::Base, base);
2662       if (abase == adr) {
2663         addp->set_req(AddPNode::Address, base);

3305         ptnode_adr(n->_idx)->dump();
3306         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3307 #endif
3308         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3309         return;
3310       } else {
3311         Node *val = get_map(jobj->idx());   // CheckCastPP node
3312         TypeNode *tn = n->as_Type();
3313         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3314         assert(tinst != NULL && tinst->is_known_instance() &&
3315                tinst->instance_id() == jobj->idx() , "instance type expected.");
3316 
3317         const Type *tn_type = igvn->type(tn);
3318         const TypeOopPtr *tn_t;
3319         if (tn_type->isa_narrowoop()) {
3320           tn_t = tn_type->make_ptr()->isa_oopptr();
3321         } else {
3322           tn_t = tn_type->isa_oopptr();
3323         }
3324         if (tn_t != NULL && tinst->maybe_java_subtype_of(tn_t)) {







3325           if (tn_type->isa_narrowoop()) {
3326             tn_type = tinst->make_narrowoop();
3327           } else {
3328             tn_type = tinst;
3329           }
3330           igvn->hash_delete(tn);
3331           igvn->set_type(tn, tn_type);
3332           tn->set_type(tn_type);
3333           igvn->hash_insert(tn);
3334           record_for_optimizer(n);
3335         } else {
3336           assert(tn_type == TypePtr::NULL_PTR ||
3337                  tn_t != NULL && !tinst->maybe_java_subtype_of(tn_t),
3338                  "unexpected type");
3339           continue; // Skip dead path with different type
3340         }
3341       }
3342     } else {
3343       debug_only(n->dump();)
3344       assert(false, "EA: unexpected node");
3345       continue;
3346     }
3347     // push allocation's users on appropriate worklist
3348     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3349       Node *use = n->fast_out(i);
3350       if(use->is_Mem() && use->in(MemNode::Address) == n) {
3351         // Load/store to instance's field
3352         memnode_worklist.append_if_missing(use);
3353       } else if (use->is_MemBar()) {
3354         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3355           memnode_worklist.append_if_missing(use);
3356         }
3357       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3358         Node* addp2 = find_second_addp(use, n);
3359         if (addp2 != NULL) {
3360           alloc_worklist.append_if_missing(addp2);
3361         }
3362         alloc_worklist.append_if_missing(use);
3363       } else if (use->is_Phi() ||
3364                  use->is_CheckCastPP() ||
3365                  use->is_EncodeNarrowPtr() ||
3366                  use->is_DecodeNarrowPtr() ||
3367                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3368         alloc_worklist.append_if_missing(use);
3369 #ifdef ASSERT
3370       } else if (use->is_Mem()) {
3371         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3372       } else if (use->is_MergeMem()) {
3373         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3374       } else if (use->is_SafePoint()) {
3375         // Look for MergeMem nodes for calls which reference unique allocation
3376         // (through CheckCastPP nodes) even for debug info.
3377         Node* m = use->in(TypeFunc::Memory);
3378         if (m->is_MergeMem()) {
3379           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3380         }
3381       } else if (use->Opcode() == Op_EncodeISOArray) {
3382         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3383           // EncodeISOArray overwrites destination array
3384           memnode_worklist.append_if_missing(use);
3385         }



3386       } else {
3387         uint op = use->Opcode();
3388         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3389             (use->in(MemNode::Memory) == n)) {
3390           // They overwrite memory edge corresponding to destination array,
3391           memnode_worklist.append_if_missing(use);
3392         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3393               op == Op_CastP2X || op == Op_StoreCM ||
3394               op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
3395               op == Op_CountPositives ||
3396               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3397               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3398               op == Op_SubTypeCheck ||
3399               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3400           n->dump();
3401           use->dump();
3402           assert(false, "EA: missing allocation reference path");
3403         }
3404 #endif
3405       }
3406     }
3407 
3408   }
3409 
3410   // Go over all ArrayCopy nodes and if one of the inputs has a unique
3411   // type, record it in the ArrayCopy node so we know what memory this
3412   // node uses/modifies.
3413   for (int next = 0; next < arraycopy_worklist.length(); next++) {
3414     ArrayCopyNode* ac = arraycopy_worklist.at(next);
3415     Node* dest = ac->in(ArrayCopyNode::Dest);
3416     if (dest->is_AddP()) {
3417       dest = get_addp_base(dest);
3418     }

3448   if (memnode_worklist.length() == 0)
3449     return;  // nothing to do
3450   while (memnode_worklist.length() != 0) {
3451     Node *n = memnode_worklist.pop();
3452     if (visited.test_set(n->_idx)) {
3453       continue;
3454     }
3455     if (n->is_Phi() || n->is_ClearArray()) {
3456       // we don't need to do anything, but the users must be pushed
3457     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3458       // we don't need to do anything, but the users must be pushed
3459       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
3460       if (n == NULL) {
3461         continue;
3462       }
3463     } else if (n->Opcode() == Op_StrCompressedCopy ||
3464                n->Opcode() == Op_EncodeISOArray) {
3465       // get the memory projection
3466       n = n->find_out_with(Op_SCMemProj);
3467       assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");



3468     } else {
3469       assert(n->is_Mem(), "memory node required.");
3470       Node *addr = n->in(MemNode::Address);
3471       const Type *addr_t = igvn->type(addr);
3472       if (addr_t == Type::TOP) {
3473         continue;
3474       }
3475       assert (addr_t->isa_ptr() != NULL, "pointer type required.");
3476       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3477       assert ((uint)alias_idx < new_index_end, "wrong alias index");
3478       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3479       if (_compile->failing()) {
3480         return;
3481       }
3482       if (mem != n->in(MemNode::Memory)) {
3483         // We delay the memory edge update since we need the old one in
3484         // the MergeMem code below when instance memory slices are separated.
3485         set_map(n, mem);
3486       }
3487       if (n->is_Load()) {

3490         // get the memory projection
3491         n = n->find_out_with(Op_SCMemProj);
3492         assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3493       }
3494     }
3495     // push user on appropriate worklist
3496     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3497       Node *use = n->fast_out(i);
3498       if (use->is_Phi() || use->is_ClearArray()) {
3499         memnode_worklist.append_if_missing(use);
3500       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3501         if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
3502           continue;
3503         }
3504         memnode_worklist.append_if_missing(use);
3505       } else if (use->is_MemBar()) {
3506         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3507           memnode_worklist.append_if_missing(use);
3508         }
3509 #ifdef ASSERT
3510       } else if(use->is_Mem()) {
3511         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3512       } else if (use->is_MergeMem()) {
3513         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3514       } else if (use->Opcode() == Op_EncodeISOArray) {
3515         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3516           // EncodeISOArray overwrites destination array
3517           memnode_worklist.append_if_missing(use);
3518         }




3519       } else {
3520         uint op = use->Opcode();
3521         if ((use->in(MemNode::Memory) == n) &&
3522             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
3523           // They overwrite memory edge corresponding to destination array,
3524           memnode_worklist.append_if_missing(use);
3525         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
3526               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
3527               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3528               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
3529           n->dump();
3530           use->dump();
3531           assert(false, "EA: missing memory path");
3532         }
3533 #endif
3534       }
3535     }
3536   }
3537 
3538   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
3539   //            Walk each memory slice moving the first node encountered of each
3540   //            instance type to the input corresponding to its alias index.
3541   uint length = mergemem_worklist.length();
3542   for( uint next = 0; next < length; ++next ) {
3543     MergeMemNode* nmm = mergemem_worklist.at(next);
3544     assert(!visited.test_set(nmm->_idx), "should not be visited before");
3545     // Note: we don't want to use MergeMemStream here because we only want to
3546     // scan inputs which exist at the start, not ones we add during processing.
3547     // Note 2: MergeMem may already contain instance memory slices added
3548     // during find_inst_mem() call when memory nodes were processed above.

3595       Node* result = step_through_mergemem(nmm, ni, tinst);
3596       if (result == nmm->base_memory()) {
3597         // Didn't find instance memory, search through general slice recursively.
3598         result = nmm->memory_at(_compile->get_general_index(ni));
3599         result = find_inst_mem(result, ni, orig_phis);
3600         if (_compile->failing()) {
3601           return;
3602         }
3603         nmm->set_memory_at(ni, result);
3604       }
3605     }
3606     igvn->hash_insert(nmm);
3607     record_for_optimizer(nmm);
3608   }
3609 
3610   //  Phase 4:  Update the inputs of non-instance memory Phis and
3611   //            the Memory input of memnodes
3612   // First update the inputs of any non-instance Phi's from
3613   // which we split out an instance Phi.  Note we don't have
3614   // to recursively process Phi's encountered on the input memory
3615   // chains as is done in split_memory_phi() since they  will
3616   // also be processed here.
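      // Each Phi input is rewritten below via find_inst_mem() to the memory
      // state matching the Phi's own alias index.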
3617   for (int j = 0; j < orig_phis.length(); j++) {
3618     PhiNode *phi = orig_phis.at(j);
3619     int alias_idx = _compile->get_alias_index(phi->adr_type());
3620     igvn->hash_delete(phi);
3621     for (uint i = 1; i < phi->req(); i++) {
3622       Node *mem = phi->in(i);
3623       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
3624       if (_compile->failing()) {
3625         return;
3626       }
3627       if (mem != new_mem) {
3628         phi->set_req(i, new_mem);
3629       }
3630     }
3631     igvn->hash_insert(phi);
3632     record_for_optimizer(phi);
3633   }
3634 
3635   // Update the memory inputs of MemNodes with the value we computed

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/metaspace.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "opto/c2compiler.hpp"
  35 #include "opto/arraycopynode.hpp"
  36 #include "opto/callnode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/compile.hpp"
  39 #include "opto/escape.hpp"
  40 #include "opto/phaseX.hpp"
  41 #include "opto/movenode.hpp"
  42 #include "opto/rootnode.hpp"
  43 #include "utilities/macros.hpp"
  44 
  45 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  46   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  47   _in_worklist(C->comp_arena()),
  48   _next_pidx(0),
  49   _collecting(true),
  50   _verify(false),
  51   _compile(C),
  52   _igvn(igvn),

 136   GrowableArray<SafePointNode*>  sfn_worklist;
 137   GrowableArray<MergeMemNode*>   mergemem_worklist;
 138   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 139 
 140   { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
 141 
 142   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 143   ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
 144   // Initialize worklist
 145   if (C->root() != NULL) {
 146     ideal_nodes.push(C->root());
 147   }
 148   // Processed ideal nodes are unique on ideal_nodes list
 149   // but several ideal nodes are mapped to the phantom_obj.
 150   // To avoid duplicated entries on the following worklists
 151   // add the phantom_obj only once to them.
 152   ptnodes_worklist.append(phantom_obj);
 153   java_objects_worklist.append(phantom_obj);
 154   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 155     Node* n = ideal_nodes.at(next);
 156     if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
 157         !n->in(MemNode::Address)->is_AddP() &&
 158         _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
 159       // Load/Store at mark word address is at offset 0 so has no AddP which confuses EA
 160       Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
 161       _igvn->register_new_node_with_optimizer(addp);
 162       _igvn->replace_input_of(n, MemNode::Address, addp);
 163       ideal_nodes.push(addp);
 164       _nodes.at_put_grow(addp->_idx, NULL, NULL);
 165     }
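          // The synthesized AddP gives such a raw mark word access an explicit
          // (base, base, offset 0) address expression, and pushing it on
          // ideal_nodes ensures it is visited and registered in the Connection
          // Graph as well.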
 166     // Create PointsTo nodes and add them to Connection Graph. Called
 167     // only once per ideal node since ideal_nodes is Unique_Node list.
 168     add_node_to_connection_graph(n, &delayed_worklist);
 169     PointsToNode* ptn = ptnode_adr(n->_idx);
 170     if (ptn != NULL && ptn != phantom_obj) {
 171       ptnodes_worklist.append(ptn);
 172       if (ptn->is_JavaObject()) {
 173         java_objects_worklist.append(ptn->as_JavaObject());
 174         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 175             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 176           // Only allocations and java static calls results are interesting.
 177           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 178         }
 179       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 180         oop_fields_worklist.append(ptn->as_Field());
 181       }
 182     }
 183     // Collect some interesting nodes for further use.
 184     switch (n->Opcode()) {
 185       case Op_MergeMem:

 428   return false;
 429 }
 430 
 431 // Returns true if at least one of the arguments to the call is an object
 432 // that does not escape globally.
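// For a resolved Java method the actual argument nodes are checked with
// not_global_escape(); otherwise (a named runtime entry other than
// uncommon_trap) any oop-typed parameter in the signature makes the call
// count as having such an argument, matching the conservative treatment
// in process_call_arguments().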
 433 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
 434   if (call->method() != NULL) {
 435     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
 436     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
 437       Node* p = call->in(idx);
 438       if (not_global_escape(p)) {
 439         return true;
 440       }
 441     }
 442   } else {
 443     const char* name = call->as_CallStaticJava()->_name;
 444     assert(name != NULL, "no name");
 445     // no arg escapes through uncommon traps
 446     if (strcmp(name, "uncommon_trap") != 0) {
 447       // process_call_arguments() assumes that all arguments escape globally
 448       const TypeTuple* d = call->tf()->domain_sig();
 449       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 450         const Type* at = d->field_at(i);
 451         if (at->isa_oopptr() != NULL) {
 452           return true;
 453         }
 454       }
 455     }
 456   }
 457   return false;
 458 }
 459 
 460 
 461 
 462 // Utility function for nodes that load an object
 463 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
 464   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 465   // ThreadLocal has RawPtr type.
 466   const Type* t = _igvn->type(n);
 467   if (t->make_ptr() != NULL) {
 468     Node* adr = n->in(MemNode::Address);

 502       // first IGVN optimization when escape information is still available.
 503       record_for_optimizer(n);
 504     } else if (n->is_Allocate()) {
 505       add_call_node(n->as_Call());
 506       record_for_optimizer(n);
 507     } else {
 508       if (n->is_CallStaticJava()) {
 509         const char* name = n->as_CallStaticJava()->_name;
 510         if (name != NULL && strcmp(name, "uncommon_trap") == 0) {
 511           return; // Skip uncommon traps
 512         }
 513       }
 514       // Don't mark as processed since call's arguments have to be processed.
 515       delayed_worklist->push(n);
 516       // Check if a call returns an object.
 517       if ((n->as_Call()->returns_pointer() &&
 518            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
 519           (n->is_CallStaticJava() &&
 520            n->as_CallStaticJava()->is_boxing_method())) {
 521         add_call_node(n->as_Call());
 522       } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
 523         bool returns_oop = false;
 524         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
 525           ProjNode* pn = n->fast_out(i)->as_Proj();
 526           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
 527             returns_oop = true;
 528           }
 529         }
 530         if (returns_oop) {
 531           add_call_node(n->as_Call());
 532         }
 533       }
 534     }
 535     return;
 536   }
 537   // Put this check here to process call arguments since some call nodes
 538   // point to phantom_obj.
 539   if (n_ptn == phantom_obj || n_ptn == null_obj) {
 540     return; // Skip predefined nodes.
 541   }
 542   switch (opcode) {
 543     case Op_AddP: {
 544       Node* base = get_addp_base(n);
 545       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 546       // Field nodes are created for all field types. They are used in
 547       // adjust_scalar_replaceable_state() and split_unique_types().
 548       // Note, non-oop fields will have only base edges in Connection
 549       // Graph because such fields are not used for oop loads and stores.
 550       int offset = address_offset(n, igvn);
 551       add_field(n, PointsToNode::NoEscape, offset);
 552       if (ptn_base == NULL) {
 553         delayed_worklist->push(n); // Process it later.
 554       } else {
 555         n_ptn = ptnode_adr(n_idx);
 556         add_base(n_ptn->as_Field(), ptn_base);
 557       }
 558       break;
 559     }
 560     case Op_CastX2P: {
 561       map_ideal_node(n, phantom_obj);
 562       break;
 563     }
 564     case Op_InlineTypePtr:
 565     case Op_CastPP:
 566     case Op_CheckCastPP:
 567     case Op_EncodeP:
 568     case Op_DecodeN:
 569     case Op_EncodePKlass:
 570     case Op_DecodeNKlass: {
 571       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
 572       break;
 573     }
 574     case Op_CMoveP: {
 575       add_local_var(n, PointsToNode::NoEscape);
 576       // Do not add edges during the first iteration because some inputs
 577       // may not be defined yet.
 578       delayed_worklist->push(n);
 579       break;
 580     }
 581     case Op_ConP:
 582     case Op_ConN:
 583     case Op_ConNKlass: {
 584       // assume all oop constants globally escape except for null

 615     case Op_PartialSubtypeCheck: {
 616     // Produces Null or notNull and is used only in CmpP so
 617       // phantom_obj could be used.
 618       map_ideal_node(n, phantom_obj); // Result is unknown
 619       break;
 620     }
 621     case Op_Phi: {
 622       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 623       // ThreadLocal has RawPtr type.
 624       const Type* t = n->as_Phi()->type();
 625       if (t->make_ptr() != NULL) {
 626         add_local_var(n, PointsToNode::NoEscape);
 627         // Do not add edges during the first iteration because some inputs
 628         // may not be defined yet.
 629         delayed_worklist->push(n);
 630       }
 631       break;
 632     }
 633     case Op_Proj: {
 634       // we are only interested in the oop result projection from a call
 635       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
 636           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
 637         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
 638                n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
 639         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
 640       }
 641       break;
 642     }
 643     case Op_Rethrow: // Exception object escapes
 644     case Op_Return: {
 645       if (n->req() > TypeFunc::Parms &&
 646           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 647         // Treat Return value as LocalVar with GlobalEscape escape state.
 648         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
 649       }
 650       break;
 651     }
 652     case Op_CompareAndExchangeP:
 653     case Op_CompareAndExchangeN:
 654     case Op_GetAndSetP:
 655     case Op_GetAndSetN: {
 656       add_objload_to_connection_graph(n, delayed_worklist);
 657       // fall-through
 658     }

 718   if (n->is_Call()) {
 719     process_call_arguments(n->as_Call());
 720     return;
 721   }
 722   assert(n->is_Store() || n->is_LoadStore() ||
 723          (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
 724          "node should be registered already");
 725   int opcode = n->Opcode();
 726   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
 727   if (gc_handled) {
 728     return; // Ignore node if already handled by GC.
 729   }
 730   switch (opcode) {
 731     case Op_AddP: {
 732       Node* base = get_addp_base(n);
 733       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 734       assert(ptn_base != NULL, "field's base should be registered");
 735       add_base(n_ptn->as_Field(), ptn_base);
 736       break;
 737     }
 738     case Op_InlineTypePtr:
 739     case Op_CastPP:
 740     case Op_CheckCastPP:
 741     case Op_EncodeP:
 742     case Op_DecodeN:
 743     case Op_EncodePKlass:
 744     case Op_DecodeNKlass: {
 745       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
 746       break;
 747     }
 748     case Op_CMoveP: {
 749       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
 750         Node* in = n->in(i);
 751         if (in == NULL) {
 752           continue;  // ignore NULL
 753         }
 754         Node* uncast_in = in->uncast();
 755         if (uncast_in->is_top() || uncast_in == n) {
 756           continue;  // ignore top or inputs which loop back to this node
 757         }
 758         PointsToNode* ptn = ptnode_adr(in->_idx);

 773       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 774       // ThreadLocal has RawPtr type.
 775       assert(n->as_Phi()->type()->make_ptr() != NULL, "Unexpected node type");
 776       for (uint i = 1; i < n->req(); i++) {
 777         Node* in = n->in(i);
 778         if (in == NULL) {
 779           continue;  // ignore NULL
 780         }
 781         Node* uncast_in = in->uncast();
 782         if (uncast_in->is_top() || uncast_in == n) {
 783           continue;  // ignore top or inputs which loop back to this node
 784         }
 785         PointsToNode* ptn = ptnode_adr(in->_idx);
 786         assert(ptn != NULL, "node should be registered");
 787         add_edge(n_ptn, ptn);
 788       }
 789       break;
 790     }
 791     case Op_Proj: {
 792       // we are only interested in the oop result projection from a call
 793       assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
 794              n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
 795       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
 796       break;
 797     }
 798     case Op_Rethrow: // Exception object escapes
 799     case Op_Return: {
 800       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
 801              "Unexpected node type");
 802       // Treat Return value as LocalVar with GlobalEscape escape state.
 803       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), NULL);
 804       break;
 805     }
 806     case Op_CompareAndExchangeP:
 807     case Op_CompareAndExchangeN:
 808     case Op_GetAndSetP:
 809     case Op_GetAndSetN:{
 810       assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type");
 811       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
 812       // fall-through
 813     }
 814     case Op_CompareAndSwapP:

 949     PointsToNode* ptn = ptnode_adr(val->_idx);
 950     assert(ptn != NULL, "node should be registered");
 951     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
 952     // Add edge to object for unsafe access with offset.
 953     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 954     assert(adr_ptn != NULL, "node should be registered");
 955     if (adr_ptn->is_Field()) {
 956       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
 957       add_edge(adr_ptn, ptn);
 958     }
 959     return true;
 960   }
 961 #ifdef ASSERT
 962   n->dump(1);
 963   assert(false, "not unsafe");
 964 #endif
 965   return false;
 966 }
 967 
 968 void ConnectionGraph::add_call_node(CallNode* call) {
 969   assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
 970   uint call_idx = call->_idx;
 971   if (call->is_Allocate()) {
 972     Node* k = call->in(AllocateNode::KlassNode);
 973     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 974     assert(kt != NULL, "TypeKlassPtr  required.");
 975     PointsToNode::EscapeState es = PointsToNode::NoEscape;
 976     bool scalar_replaceable = true;
 977     NOT_PRODUCT(const char* nsr_reason = "");
 978     if (call->is_AllocateArray()) {
 979       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
 980         es = PointsToNode::GlobalEscape;
 981       } else {
 982         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
 983         if (length < 0) {
 984           // Not scalar replaceable if the length is not constant.
 985           scalar_replaceable = false;
 986           NOT_PRODUCT(nsr_reason = "has a non-constant length");
 987         } else if (length > EliminateAllocationArraySizeLimit) {
 988           // Not scalar replaceable if the length is too big.
 989           scalar_replaceable = false;

1025     //
1026     //    - all oop arguments are escaping globally;
1027     //
1028     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
1029     //
1030     //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
1031     //
1032     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
1033     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
1034     //      during call is returned;
1035     //    - mapped to ArgEscape LocalVar node pointing to object arguments
1036     //      which are returned and do not escape during the call;
1037     //
1038     //    - oop arguments escaping status is defined by bytecode analysis;
1039     //
1040     // For a static call, we know exactly what method is being called.
1041     // Use bytecode estimator to record whether the call's return value escapes.
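          // The cases below implement this mapping: a call without a ciMethod
          // (runtime entries such as _multianewarray) becomes a non-escaping,
          // non-scalar-replaceable JavaObject; a boxing method becomes a
          // JavaObject whose escape state depends on whether the result may
          // come from a cache; otherwise the BCEscapeAnalyzer result selects
          // between a NoEscape JavaObject, an ArgEscape LocalVar, or the
          // phantom (unknown) object.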
1042     ciMethod* meth = call->as_CallJava()->method();
1043     if (meth == NULL) {
1044       const char* name = call->as_CallStaticJava()->_name;
1045       assert(strncmp(name, "_multianewarray", 15) == 0 ||
1046              strncmp(name, "_load_unknown_inline", 20) == 0, "TODO: add failed case check");
1047       // Returns a newly allocated non-escaped object.
1048       add_java_object(call, PointsToNode::NoEscape);
1049       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
1050     } else if (meth->is_boxing_method()) {
1051       // Returns boxing object
1052       PointsToNode::EscapeState es;
1053       vmIntrinsics::ID intr = meth->intrinsic_id();
1054       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
1055         // It does not escape if object is always allocated.
1056         es = PointsToNode::NoEscape;
1057       } else {
1058         // It escapes globally if object could be loaded from cache.
1059         es = PointsToNode::GlobalEscape;
1060       }
1061       add_java_object(call, es);
1062     } else {
1063       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
1064       call_analyzer->copy_dependencies(_compile->dependencies());
1065       if (call_analyzer->is_return_allocated()) {
1066         // Returns a newly allocated non-escaped object, simply
1067         // update dependency information.
1068         // Mark it as NoEscape so that objects referenced by
1069       // its fields will be marked as NoEscape at least.
1070         add_java_object(call, PointsToNode::NoEscape);
1071         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
1072       } else {
1073         // Determine whether any arguments are returned.
1074         const TypeTuple* d = call->tf()->domain_cc();
1075         bool ret_arg = false;
1076         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1077           if (d->field_at(i)->isa_ptr() != NULL &&
1078               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1079             ret_arg = true;
1080             break;
1081           }
1082         }
1083         if (ret_arg) {
1084           add_local_var(call, PointsToNode::ArgEscape);
1085         } else {
1086           // Returns unknown object.
1087           map_ideal_node(call, phantom_obj);
1088         }
1089       }
1090     }
1091   } else {
1092     // Another type of call, assume the worst case:
1093     // returned value is unknown and globally escapes.
1094     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");

1102 #ifdef ASSERT
1103     case Op_Allocate:
1104     case Op_AllocateArray:
1105     case Op_Lock:
1106     case Op_Unlock:
1107       assert(false, "should be done already");
1108       break;
1109 #endif
1110     case Op_ArrayCopy:
1111     case Op_CallLeafNoFP:
1112       // Most array copies are ArrayCopy nodes at this point but there
1113       // are still a few direct calls to the copy subroutines (See
1114       // PhaseStringOpts::copy_string())
1115       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1116         call->as_CallLeaf()->is_call_to_arraycopystub();
1117       // fall through
1118     case Op_CallLeafVector:
1119     case Op_CallLeaf: {
1120       // Stub calls: objects do not escape but they are not scalar replaceable.
1121       // Adjust escape state for outgoing arguments.
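            // Arguments are walked below; an AddP argument is replaced by its
            // base so the escape state is applied to the underlying allocation
            // rather than to the derived address.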
1122       const TypeTuple * d = call->tf()->domain_sig();
1123       bool src_has_oops = false;
1124       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1125         const Type* at = d->field_at(i);
1126         Node *arg = call->in(i);
1127         if (arg == NULL) {
1128           continue;
1129         }
1130         const Type *aat = _igvn->type(arg);
1131         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
1132           continue;
1133         }
1134         if (arg->is_AddP()) {
1135           //
1136           // The inline_native_clone() case when the arraycopy stub is called
1137           // after the allocation before Initialize and CheckCastPP nodes.
1138           // Or normal arraycopy for object arrays case.
1139           //
1140           // Set AddP's base (Allocate) as not scalar replaceable since
1141           // pointer to the base (with offset) is passed as argument.
1142           //
1143           arg = get_addp_base(arg);
1144         }
1145         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1146         assert(arg_ptn != NULL, "should be registered");
1147         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1148         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
1149           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1150                  aat->isa_ptr() != NULL, "expecting a Ptr");
1151           bool arg_has_oops = aat->isa_oopptr() &&
1152                               (aat->isa_instptr() ||
1153                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != NULL)) ||
1154                                (aat->isa_aryptr() && aat->isa_aryptr()->elem() != NULL &&
1155                                                                aat->isa_aryptr()->is_flat() &&
1156                                                                aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
1157           if (i == TypeFunc::Parms) {
1158             src_has_oops = arg_has_oops;
1159           }
1160           //
1161           // src or dst could be j.l.Object when other is basic type array:
1162           //
1163           //   arraycopy(char[],0,Object*,0,size);
1164           //   arraycopy(Object*,0,char[],0,size);
1165           //
1166           // Don't add edges in such cases.
1167           //
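                // arg_is_arraycopy_dest is therefore only set for a destination
                // argument (i > TypeFunc::Parms) when both the source and the
                // argument itself contain oops.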
1168           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1169                                        arg_has_oops && (i > TypeFunc::Parms);
1170 #ifdef ASSERT
1171           if (!(is_arraycopy ||
1172                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1173                 (call->as_CallLeaf()->_name != NULL &&
1174                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1175                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1176                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

1183                   strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
1184                   strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
1185                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1186                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1187                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
1188                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
1189                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
1190                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1191                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1192                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1193                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1194                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1195                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1196                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
1197                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
1198                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1199                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1200                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1201                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1202                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1203                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1204                   strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
1205                   strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
1206                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
1207                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
1208                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1209                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
1210                  ))) {
1211             call->dump();
1212             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1213           }
1214 #endif
1215           // Always process arraycopy's destination object since
1216           // we need to add all possible edges to references in
1217           // source object.
1218           if (arg_esc >= PointsToNode::ArgEscape &&
1219               !arg_is_arraycopy_dest) {
1220             continue;
1221           }
1222           PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1223           if (call->is_ArrayCopy()) {
1224             ArrayCopyNode* ac = call->as_ArrayCopy();
1225             if (ac->is_clonebasic() ||

1248           }
1249         }
1250       }
1251       break;
1252     }
1253     case Op_CallStaticJava: {
1254       // For a static call, we know exactly what method is being called.
1255       // Use bytecode estimator to record the call's escape effects
1256 #ifdef ASSERT
1257       const char* name = call->as_CallStaticJava()->_name;
1258       assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1259 #endif
1260       ciMethod* meth = call->as_CallJava()->method();
1261       if ((meth != NULL) && meth->is_boxing_method()) {
1262         break; // Boxing methods do not modify any oops.
1263       }
1264       BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
1265       // fall-through if not a Java method or no analyzer information
1266       if (call_analyzer != NULL) {
1267         PointsToNode* call_ptn = ptnode_adr(call->_idx);
1268         const TypeTuple* d = call->tf()->domain_cc();
1269         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1270           const Type* at = d->field_at(i);
1271           int k = i - TypeFunc::Parms;
1272           Node* arg = call->in(i);
1273           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1274           if (at->isa_ptr() != NULL &&
1275               call_analyzer->is_arg_returned(k)) {
1276             // The call returns arguments.
1277             if (call_ptn != NULL) { // Is call's result used?
1278               assert(call_ptn->is_LocalVar(), "node should be registered");
1279               assert(arg_ptn != NULL, "node should be registered");
1280               add_edge(call_ptn, arg_ptn);
1281             }
1282           }
1283           if (at->isa_oopptr() != NULL &&
1284               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1285             if (!call_analyzer->is_arg_stack(k)) {
1286               // The argument global escapes
1287               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1288             } else {

1292                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1293               }
1294             }
1295           }
1296         }
1297         if (call_ptn != NULL && call_ptn->is_LocalVar()) {
1298           // The call returns arguments.
1299           assert(call_ptn->edge_count() > 0, "sanity");
1300           if (!call_analyzer->is_return_local()) {
1301             // The call also returns an unknown object.
1302             add_edge(call_ptn, phantom_obj);
1303           }
1304         }
1305         break;
1306       }
1307     }
1308     default: {
1309       // Fall-through here if not a Java method or no analyzer information
1310       // or some other type of call, assume the worst case: all arguments
1311       // globally escape.
1312       const TypeTuple* d = call->tf()->domain_cc();
1313       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1314         const Type* at = d->field_at(i);
1315         if (at->isa_oopptr() != NULL) {
1316           Node* arg = call->in(i);
1317           if (arg->is_AddP()) {
1318             arg = get_addp_base(arg);
1319           }
1320           assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
1321           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1322         }
1323       }
1324     }
1325   }
1326 }
1327 
1328 
1329 // Finish Graph construction.
1330 bool ConnectionGraph::complete_connection_graph(
1331                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
1332                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

1705     PointsToNode* base = i.get();
1706     if (base->is_JavaObject()) {
1707       // Skip Allocate's fields which will be processed later.
1708       if (base->ideal_node()->is_Allocate()) {
1709         return 0;
1710       }
1711       assert(base == null_obj, "only NULL ptr base expected here");
1712     }
1713   }
1714   if (add_edge(field, phantom_obj)) {
1715     // New edge was added
1716     new_edges++;
1717     add_field_uses_to_worklist(field);
1718   }
1719   return new_edges;
1720 }
1721 
1722 // Find fields initializing values for allocations.
1723 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
1724   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1725   PointsToNode* init_val = phantom_obj;
1726   Node* alloc = pta->ideal_node();
1727 
1728   // Do nothing for Allocate nodes since their field values are
1729   // "known" unless they are initialized by arraycopy/clone.
1730   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
1731     if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != NULL) {
1732       // Non-flattened inline type arrays are initialized with
1733       // the default value instead of null. Handle them here.
1734       init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
1735       assert(init_val != NULL, "default value should be registered");
1736     } else {
1737       return 0;
1738     }
1739   }
1740   // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
1741   assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
1742 #ifdef ASSERT
1743   if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == NULL) {
1744     const char* name = alloc->as_CallStaticJava()->_name;
1745     assert(strncmp(name, "_multianewarray", 15) == 0 ||
1746            strncmp(name, "_load_unknown_inline", 20) == 0, "sanity");
1747   }
1748 #endif
1749   // Non-escaped allocations returned from Java or runtime calls have unknown values in fields.
1750   int new_edges = 0;
1751   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1752     PointsToNode* field = i.get();
1753     if (field->is_Field() && field->as_Field()->is_oop()) {
1754       if (add_edge(field, init_val)) {
1755         // New edge was added
1756         new_edges++;
1757         add_field_uses_to_worklist(field->as_Field());
1758       }
1759     }
1760   }
1761   return new_edges;
1762 }
1763 
1764 // Find fields initializing values for allocations.
1765 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* phase) {
1766   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1767   Node* alloc = pta->ideal_node();
1768   // Do nothing for Call nodes since their field values are unknown.
1769   if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != NULL) {
1770     return 0;
1771   }
1772   InitializeNode* ini = alloc->as_Allocate()->initialization();
1773   bool visited_bottom_offset = false;
1774   GrowableArray<int> offsets_worklist;
1775   int new_edges = 0;
1776 
1777   // Check if an oop field's initializing value is recorded and add
1778   // a corresponding NULL if the field's value is not recorded.
1779   // Connection Graph does not record a default initialization by NULL
1780   // captured by Initialize node.
1781   //
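        // As a result, an oop field whose initializing store was not captured
        // gets an explicit edge to null_obj below, which keeps the pointer
        // compare optimization correct for default-initialized fields.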
1782   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1783     PointsToNode* field = i.get(); // Field (AddP)
1784     if (!field->is_Field() || !field->as_Field()->is_oop()) {
1785       continue; // Not oop field
1786     }
1787     int offset = field->as_Field()->offset();
1788     if (offset == Type::OffsetBot) {
1789       if (!visited_bottom_offset) {

1835               } else {
1836                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1837                   tty->print_cr("----------init store has invalid value -----");
1838                   store->dump();
1839                   val->dump();
1840                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1841                 }
1842                 for (EdgeIterator j(val); j.has_next(); j.next()) {
1843                   PointsToNode* obj = j.get();
1844                   if (obj->is_JavaObject()) {
1845                     if (!field->points_to(obj->as_JavaObject())) {
1846                       missed_obj = obj;
1847                       break;
1848                     }
1849                   }
1850                 }
1851               }
1852               if (missed_obj != NULL) {
1853                 tty->print_cr("----------field---------------------------------");
1854                 field->dump();
1855                 tty->print_cr("----------missed reference to object------------");
1856                 missed_obj->dump();
1857                 tty->print_cr("----------object referenced by init store-------");
1858                 store->dump();
1859                 val->dump();
1860                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1861               }
1862             }
1863 #endif
1864           } else {
1865             // There could be initializing stores which follow allocation.
1866             // For example, a volatile field store is not collected
1867             // by Initialize node.
1868             //
1869             // Need to check for dependent loads to separate such stores from
1870             // stores which follow loads. For now, add initial value NULL so
1871             // that compare pointers optimization works correctly.
1872           }
1873         }
1874         if (value == NULL) {
1875           // A field's initializing value was not recorded. Add NULL.
1876           if (add_edge(field, null_obj)) {
1877             // New edge was added

2069         assert(field->edge_count() > 0, "sanity");
2070       }
2071     }
2072   }
2073 }
2074 #endif
2075 
2076 // Optimize ideal graph.
2077 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
2078                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
2079   Compile* C = _compile;
2080   PhaseIterGVN* igvn = _igvn;
2081   if (EliminateLocks) {
2082     // Mark locks before changing ideal graph.
2083     int cnt = C->macro_count();
2084     for (int i = 0; i < cnt; i++) {
2085       Node *n = C->macro_node(i);
2086       if (n->is_AbstractLock()) { // Lock and Unlock nodes
2087         AbstractLockNode* alock = n->as_AbstractLock();
2088         if (!alock->is_non_esc_obj()) {
2089           const Type* obj_type = igvn->type(alock->obj_node());
2090           if (not_global_escape(alock->obj_node()) &&
2091               !obj_type->isa_inlinetype() && !obj_type->is_inlinetypeptr()) {
2092             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
2093             // The lock could have been marked eliminated by the lock coarsening
2094             // code during the first IGVN pass before EA. Replace the coarsened flag
2095             // so that all associated locks/unlocks are eliminated.
2096 #ifdef ASSERT
2097             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
2098 #endif
2099             alock->set_non_esc_obj();
2100           }
2101         }
2102       }
2103     }
2104   }
2105 
2106   if (OptimizePtrCompare) {
2107     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
2108       Node *n = ptr_cmp_worklist.at(i);
2109       const TypeInt* tcmp = optimize_ptr_compare(n);
2110       if (tcmp->singleton()) {
2111         Node* cmp = igvn->makecon(tcmp);
2112 #ifndef PRODUCT
2113         if (PrintOptimizePtrCompare) {
2114           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
2115           if (Verbose) {
2116             n->dump(1);
2117           }
2118         }
2119 #endif
2120         igvn->replace_node(n, cmp);
2121       }
2122     }
2123   }
2124 
2125   // For MemBarStoreStore nodes added in library_call.cpp, check
2126   // escape status of associated AllocateNode and optimize out
2127   // MemBarStoreStore node if the allocated object never escapes.
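        // The barrier is removed entirely for non-escaping inline type buffer
        // allocations; otherwise it is replaced with a cheaper MemBarCPUOrder
        // that keeps the memory and control edges intact.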
2128   for (int i = 0; i < storestore_worklist.length(); i++) {
2129     Node* storestore = storestore_worklist.at(i);
2130     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
2131     if (alloc->is_Allocate() && not_global_escape(alloc)) {
2132       if (alloc->in(AllocateNode::InlineTypeNode) != NULL) {
2133         // Non-escaping inline type buffer allocations don't require a membar
2134         storestore->as_MemBar()->remove(_igvn);
2135       } else {
2136         MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
2137         mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
2138         mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
2139         igvn->register_new_node_with_optimizer(mb);
2140         igvn->replace_node(storestore, mb);
2141       }
2142     }
2143   }
2144 }
2145 
2146 // Optimize object pointer compares.
2147 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
2148   assert(OptimizePtrCompare, "sanity");
2149   assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
2150   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
2151   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
2152   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0,1]
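        // A singleton result (EQ or NE) allows the caller in
        // optimize_ideal_graph() to replace the CmpP/CmpN with a constant;
        // UNKNOWN leaves the compare untouched.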
2153 
2154   PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
2155   PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
2156   JavaObjectNode* jobj1 = unique_java_object(n->in(1));
2157   JavaObjectNode* jobj2 = unique_java_object(n->in(2));
2158   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
2159   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
2160 
2161   // Check simple cases first.

2274   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2275   assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
2276   PointsToNode* ptadr = _nodes.at(n->_idx);
2277   if (ptadr != NULL) {
2278     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2279     return;
2280   }
2281   Compile* C = _compile;
2282   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2283   map_ideal_node(n, ptadr);
2284   // Add edge from arraycopy node to source object.
2285   (void)add_edge(ptadr, src);
2286   src->set_arraycopy_src();
2287   // Add edge from destination object to arraycopy node.
2288   (void)add_edge(dst, ptadr);
2289   dst->set_arraycopy_dst();
2290 }
2291 
2292 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2293   const Type* adr_type = n->as_AddP()->bottom_type();
2294   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
2295   BasicType bt = T_INT;
2296   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
2297     // Check only oop fields.
2298     if (!adr_type->isa_aryptr() ||
2299         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
2300         adr_type->isa_aryptr()->elem()->make_oopptr() != NULL) {
2301       // OffsetBot is used to reference array's element. Ignore first AddP.
2302       if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2303         bt = T_OBJECT;
2304       }
2305     }
2306   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2307     if (adr_type->isa_instptr()) {
2308       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
2309       if (field != NULL) {
2310         bt = field->layout_type();
2311       } else {
2312         // Check for unsafe oop field access
2313         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2314             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2315             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2316             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2317           bt = T_OBJECT;
2318           (*unsafe) = true;
2319         }
2320       }
2321     } else if (adr_type->isa_aryptr()) {
2322       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2323         // Ignore array length load.
2324       } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2325         // Ignore first AddP.
2326       } else {
2327         const Type* elemtype = adr_type->isa_aryptr()->elem();
2328         if (elemtype->isa_inlinetype() && field_offset != Type::OffsetBot) {
2329           ciInlineKlass* vk = elemtype->inline_klass();
2330           field_offset += vk->first_field_offset();
2331           bt = vk->get_field_by_offset(field_offset, false)->layout_type();
2332         } else {
2333           bt = elemtype->array_element_basic_type();
2334         }
2335       }
2336     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2337       // Allocation initialization, ThreadLocal field access, unsafe access
2338       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2339           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2340           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2341           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2342         bt = T_OBJECT;
2343       }
2344     }
2345   }
2346   // Note: T_NARROWOOP is not classed as a real reference type
2347   return (is_reference_type(bt) || bt == T_NARROWOOP);
2348 }
2349 
2350 // Returns the unique Java object pointed to, or NULL.
2351 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2352   assert(!_collecting, "should not call when constructed graph");
2353   // If the node was created after the escape computation we can't answer.
2354   uint idx = n->_idx;

2498             return true;
2499           }
2500         }
2501       }
2502     }
2503   }
2504   return false;
2505 }
2506 
2507 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2508   const Type *adr_type = phase->type(adr);
2509   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && is_captured_store_address(adr)) {
2510     // We are computing a raw address for a store captured by an Initialize node;
2511     // compute an appropriate address type. AddP cases #3 and #5 (see below).
2512     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2513     assert(offs != Type::OffsetBot ||
2514            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2515            "offset must be a constant or it is initialization of array");
2516     return offs;
2517   }
2518   return adr_type->is_ptr()->flattened_offset();


2519 }
2520 
2521 Node* ConnectionGraph::get_addp_base(Node *addp) {
2522   assert(addp->is_AddP(), "must be AddP");
2523   //
2524   // AddP cases for Base and Address inputs:
2525   // case #1. Direct object's field reference:
2526   //     Allocate
2527   //       |
2528   //     Proj #5 ( oop result )
2529   //       |
2530   //     CheckCastPP (cast to instance type)
2531   //      | |
2532   //     AddP  ( base == address )
2533   //
2534   // case #2. Indirect object's field reference:
2535   //      Phi
2536   //       |
2537   //     CastPP (cast to instance type)
2538   //      | |

2652   }
2653   return NULL;
2654 }
2655 
2656 //
2657 // Adjust the type and inputs of an AddP which computes the
2658 // address of a field of an instance
2659 //
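// The AddP is retyped with the instance-specific oop type (known instance id),
// which forces a separate alias index to be allocated for this field of this
// instance, and its Base/Address inputs are redirected to the given base.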
2660 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2661   PhaseGVN* igvn = _igvn;
2662   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2663   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2664   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2665   if (t == NULL) {
2666     // We are computing a raw address for a store captured by an Initialize node;
2667     // compute an appropriate address type (cases #3 and #5).
2668     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2669     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2670     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2671     assert(offs != Type::OffsetBot, "offset must be a constant");
2672     if (base_t->isa_aryptr() != NULL) {
2673       // In the case of a flattened inline type array, each field has its
2674       // own slice so we need to extract the field being accessed from
2675       // the address computation
2676       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
2677     } else {
2678       t = base_t->add_offset(offs)->is_oopptr();
2679     }
2680   }
2681   int inst_id = base_t->instance_id();
2682   assert(!t->is_known_instance() || t->instance_id() == inst_id,
2683                              "old type must be non-instance or match new type");
2684 
2685   // The type 't' could be a subclass of 'base_t'.
2686   // As a result t->offset() could be larger than base_t's size and it would
2687   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2688   // constructor verifies correctness of the offset.
2689   //
2690   // It could happen on a subclass's branch (from the type profiling
2691   // inlining) which was not eliminated during parsing since the exactness
2692   // of the allocation type was not propagated to the subclass type check.
2693   //
2694   // Or the type 't' might not be related to 'base_t' at all.
2695   // It could happen when CHA type is different from MDO type on a dead path
2696   // (for example, from instanceof check) which is not collapsed during parsing.
2697   //
2698   // Do nothing for such AddP node and don't process its users since
2699   // this code branch will go away.
2700   //
2701   if (!t->is_known_instance() &&
2702       !base_t->maybe_java_subtype_of(t)) {
2703      return false; // bail out
2704   }
2705   const TypePtr* tinst = base_t->add_offset(t->offset());
2706   if (tinst->isa_aryptr() && t->isa_aryptr()) {
2707     // In the case of a flattened inline type array, each field has its
2708     // own slice so we need to keep track of the field being accessed.
2709     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
2710     // Keep array properties (not flat/null-free)
2711     tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
2712     if (tinst == NULL) {
2713       return false; // Skip dead path with inconsistent properties
2714     }
2715   }
2716 
2717   // Do NOT remove the next line: ensure a new alias index is allocated
2718   // for the instance type. Note: C++ will not remove it since the call
2719   // has a side effect.
2720   int alias_idx = _compile->get_alias_index(tinst);
2721   igvn->set_type(addp, tinst);
2722   // record the allocation in the node map
2723   set_map(addp, get_map(base->_idx));
2724   // Set addp's Base and Address to 'base'.
2725   Node *abase = addp->in(AddPNode::Base);
2726   Node *adr   = addp->in(AddPNode::Address);
2727   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2728       adr->in(0)->_idx == (uint)inst_id) {
2729     // Skip AddP cases #3 and #5.
2730   } else {
2731     assert(!abase->is_top(), "sanity"); // AddP case #3
2732     if (abase != base) {
2733       igvn->hash_delete(addp);
2734       addp->set_req(AddPNode::Base, base);
2735       if (abase == adr) {
2736         addp->set_req(AddPNode::Address, base);

3378         ptnode_adr(n->_idx)->dump();
3379         assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3380 #endif
3381         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3382         return;
3383       } else {
3384         Node *val = get_map(jobj->idx());   // CheckCastPP node
3385         TypeNode *tn = n->as_Type();
3386         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3387         assert(tinst != NULL && tinst->is_known_instance() &&
3388                tinst->instance_id() == jobj->idx() , "instance type expected.");
3389 
3390         const Type *tn_type = igvn->type(tn);
3391         const TypeOopPtr *tn_t;
3392         if (tn_type->isa_narrowoop()) {
3393           tn_t = tn_type->make_ptr()->isa_oopptr();
3394         } else {
3395           tn_t = tn_type->isa_oopptr();
3396         }
3397         if (tn_t != NULL && tinst->maybe_java_subtype_of(tn_t)) {
3398           if (tn_t->isa_aryptr()) {
3399             // Keep array properties (not flat/null-free)
3400             tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
3401             if (tinst == NULL) {
3402               continue; // Skip dead path with inconsistent properties
3403             }
3404           }
3405           if (tn_type->isa_narrowoop()) {
3406             tn_type = tinst->make_narrowoop();
3407           } else {
3408             tn_type = tinst;
3409           }
3410           igvn->hash_delete(tn);
3411           igvn->set_type(tn, tn_type);
3412           tn->set_type(tn_type);
3413           igvn->hash_insert(tn);
3414           record_for_optimizer(n);
3415         } else {
3416           assert(tn_type == TypePtr::NULL_PTR ||
3417                  tn_t != NULL && !tinst->maybe_java_subtype_of(tn_t),
3418                  "unexpected type");
3419           continue; // Skip dead path with different type
3420         }
3421       }
3422     } else {
3423       debug_only(n->dump();)
3424       assert(false, "EA: unexpected node");
3425       continue;
3426     }
3427     // push allocation's users on appropriate worklist
3428     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3429       Node *use = n->fast_out(i);
3430       if (use->is_Mem() && use->in(MemNode::Address) == n) {
3431         // Load/store to instance's field
3432         memnode_worklist.append_if_missing(use);
3433       } else if (use->is_MemBar()) {
3434         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3435           memnode_worklist.append_if_missing(use);
3436         }
3437       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3438         Node* addp2 = find_second_addp(use, n);
3439         if (addp2 != NULL) {
3440           alloc_worklist.append_if_missing(addp2);
3441         }
3442         alloc_worklist.append_if_missing(use);
3443       } else if (use->is_Phi() ||
3444                  use->is_CheckCastPP() ||
3445                  use->is_EncodeNarrowPtr() ||
3446                  use->is_DecodeNarrowPtr() ||
3447                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3448         alloc_worklist.append_if_missing(use);
3449 #ifdef ASSERT
3450       } else if (use->is_Mem()) {
3451         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3452       } else if (use->is_MergeMem()) {
3453         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3454       } else if (use->is_SafePoint()) {
3455         // Look for MergeMem nodes for calls which reference unique allocation
3456         // (through CheckCastPP nodes) even for debug info.
3457         Node* m = use->in(TypeFunc::Memory);
3458         if (m->is_MergeMem()) {
3459           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3460         }
3461       } else if (use->Opcode() == Op_EncodeISOArray) {
3462         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3463           // EncodeISOArray overwrites destination array
3464           memnode_worklist.append_if_missing(use);
3465         }
3466       } else if (use->Opcode() == Op_Return) {
3467         // Allocation is referenced by field of returned inline type
3468         assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
3469       } else {
3470         uint op = use->Opcode();
3471         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3472             (use->in(MemNode::Memory) == n)) {
3473           // These nodes overwrite the memory edge corresponding to the destination array.
3474           memnode_worklist.append_if_missing(use);
3475         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3476               op == Op_CastP2X || op == Op_StoreCM ||
3477               op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
3478               op == Op_CountPositives ||
3479               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3480               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3481               op == Op_SubTypeCheck || op == Op_InlineType || op == Op_InlineTypePtr || op == Op_FlatArrayCheck ||
3482               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3483           n->dump();
3484           use->dump();
3485           assert(false, "EA: missing allocation reference path");
3486         }
3487 #endif
3488       }
3489     }
3490 
3491   }
3492 
3493   // Go over all ArrayCopy nodes and, if one of the inputs has a unique
3494   // type, record it in the ArrayCopy node so we know what memory this
3495   // node uses/modifies.
3496   for (int next = 0; next < arraycopy_worklist.length(); next++) {
3497     ArrayCopyNode* ac = arraycopy_worklist.at(next);
3498     Node* dest = ac->in(ArrayCopyNode::Dest);
3499     if (dest->is_AddP()) {
3500       dest = get_addp_base(dest);
3501     }

3531   if (memnode_worklist.length() == 0)
3532     return;  // nothing to do
3533   while (memnode_worklist.length() != 0) {
3534     Node *n = memnode_worklist.pop();
3535     if (visited.test_set(n->_idx)) {
3536       continue;
3537     }
3538     if (n->is_Phi() || n->is_ClearArray()) {
3539       // we don't need to do anything, but the users must be pushed
3540     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3541       // we don't need to do anything, but the users must be pushed
3542       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
3543       if (n == NULL) {
3544         continue;
3545       }
3546     } else if (n->Opcode() == Op_StrCompressedCopy ||
3547                n->Opcode() == Op_EncodeISOArray) {
3548       // get the memory projection
3549       n = n->find_out_with(Op_SCMemProj);
3550       assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3551     } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != NULL &&
3552                strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
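           // As with the intrinsics above, continue from the call's memory
           // projection so that users of the memory it modifies are pushed below.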
3553       n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
3554     } else {
3555       assert(n->is_Mem(), "memory node required.");
3556       Node *addr = n->in(MemNode::Address);
3557       const Type *addr_t = igvn->type(addr);
3558       if (addr_t == Type::TOP) {
3559         continue;
3560       }
3561       assert (addr_t->isa_ptr() != NULL, "pointer type required.");
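           // The address type of an instance field carries its instance id (set
           // when the corresponding AddP was split earlier in this pass), so it
           // resolves to an instance-specific alias index; hence the bound check
           // against new_index_end below.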
3562       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3563       assert ((uint)alias_idx < new_index_end, "wrong alias index");
3564       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3565       if (_compile->failing()) {
3566         return;
3567       }
3568       if (mem != n->in(MemNode::Memory)) {
3569       // We delay the memory edge update since we need the old one in the
3570       // MergeMem code below when instance memory slices are separated.
3571         set_map(n, mem);
3572       }
3573       if (n->is_Load()) {

3576         // get the memory projection
3577         n = n->find_out_with(Op_SCMemProj);
3578         assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
3579       }
3580     }
3581     // push user on appropriate worklist
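         // Only users on the memory chain of n need to be visited; the debug-only
         // branches below verify that no memory path has been missed.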
3582     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3583       Node *use = n->fast_out(i);
3584       if (use->is_Phi() || use->is_ClearArray()) {
3585         memnode_worklist.append_if_missing(use);
3586       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3587         if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
3588           continue;
3589         }
3590         memnode_worklist.append_if_missing(use);
3591       } else if (use->is_MemBar()) {
3592         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3593           memnode_worklist.append_if_missing(use);
3594         }
3595 #ifdef ASSERT
3596       } else if (use->is_Mem()) {
3597         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3598       } else if (use->is_MergeMem()) {
3599         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3600       } else if (use->Opcode() == Op_EncodeISOArray) {
3601         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3602           // EncodeISOArray overwrites destination array
3603           memnode_worklist.append_if_missing(use);
3604         }
3605       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != NULL &&
3606                  strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
3607         // store_unknown_inline overwrites destination array
3608         memnode_worklist.append_if_missing(use);
3609       } else {
3610         uint op = use->Opcode();
3611         if ((use->in(MemNode::Memory) == n) &&
3612             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
3613           // These nodes overwrite the memory edge corresponding to the destination array.
3614           memnode_worklist.append_if_missing(use);
3615         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
3616               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
3617               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3618               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
3619           n->dump();
3620           use->dump();
3621           assert(false, "EA: missing memory path");
3622         }
3623 #endif
3624       }
3625     }
3626   }
3627 
3628   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
3629   //            Walk each memory slice moving the first node encountered of each
3630   //            instance type to the input corresponding to its alias index.
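       //            If step_through_mergemem() finds no instance memory on a slice,
       //            the per-instance input is computed with find_inst_mem() from the
       //            general slice, as the loop body below does.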
3631   uint length = mergemem_worklist.length();
3632   for( uint next = 0; next < length; ++next ) {
3633     MergeMemNode* nmm = mergemem_worklist.at(next);
3634     assert(!visited.test_set(nmm->_idx), "should not be visited before");
3635     // Note: we don't want to use MergeMemStream here because we only want to
3636     // scan inputs which exist at the start, not ones we add during processing.
3637   // Note 2: MergeMem may already contain instance memory slices added
3638     // during find_inst_mem() call when memory nodes were processed above.

3685       Node* result = step_through_mergemem(nmm, ni, tinst);
3686       if (result == nmm->base_memory()) {
3687         // Didn't find instance memory, search through general slice recursively.
3688         result = nmm->memory_at(_compile->get_general_index(ni));
3689         result = find_inst_mem(result, ni, orig_phis);
3690         if (_compile->failing()) {
3691           return;
3692         }
3693         nmm->set_memory_at(ni, result);
3694       }
3695     }
3696     igvn->hash_insert(nmm);
3697     record_for_optimizer(nmm);
3698   }
3699 
3700   //  Phase 4:  Update the inputs of non-instance memory Phis and
3701   //            the Memory input of memnodes
3702   // First update the inputs of any non-instance Phi's from
3703   // which we split out an instance Phi.  Note we don't have
3704   // to recursively process Phi's encountered on the input memory
3705   // chains as is done in split_memory_phi() since they will
3706   // also be processed here.
3707   for (int j = 0; j < orig_phis.length(); j++) {
3708     PhiNode *phi = orig_phis.at(j);
3709     int alias_idx = _compile->get_alias_index(phi->adr_type());
3710     igvn->hash_delete(phi);
3711     for (uint i = 1; i < phi->req(); i++) {
3712       Node *mem = phi->in(i);
3713       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
3714       if (_compile->failing()) {
3715         return;
3716       }
3717       if (mem != new_mem) {
3718         phi->set_req(i, new_mem);
3719       }
3720     }
3721     igvn->hash_insert(phi);
3722     record_for_optimizer(phi);
3723   }
3724 
3725   // Update the memory inputs of MemNodes with the value we computed
< prev index next >