src/hotspot/share/opto/escape.cpp

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.hpp"
     #include "memory/metaspace.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "opto/c2compiler.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/escape.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/movenode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "utilities/macros.hpp"
  43 
  44 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  45   _nodes(C->comp_arena(), C->unique(), C->unique(), nullptr),
  46   _in_worklist(C->comp_arena()),
  47   _next_pidx(0),
  48   _collecting(true),
  49   _verify(false),
  50   _compile(C),
  51   _igvn(igvn),

 135   GrowableArray<SafePointNode*>  sfn_worklist;
 136   GrowableArray<MergeMemNode*>   mergemem_worklist;
 137   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 138 
 139   { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
 140 
 141   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 142   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
 143   // Initialize worklist
 144   if (C->root() != nullptr) {
 145     ideal_nodes.push(C->root());
 146   }
  147   // Processed ideal nodes are unique on the ideal_nodes list,
  148   // but several ideal nodes are mapped to the phantom_obj.
  149   // To avoid duplicate entries on the following worklists,
  150   // add the phantom_obj only once to them.
 151   ptnodes_worklist.append(phantom_obj);
 152   java_objects_worklist.append(phantom_obj);
  153   for (uint next = 0; next < ideal_nodes.size(); ++next) {
 154     Node* n = ideal_nodes.at(next);
 155     // Create PointsTo nodes and add them to Connection Graph. Called
  156     // only once per ideal node since ideal_nodes is a Unique_Node_List.
 157     add_node_to_connection_graph(n, &delayed_worklist);
 158     PointsToNode* ptn = ptnode_adr(n->_idx);
 159     if (ptn != nullptr && ptn != phantom_obj) {
 160       ptnodes_worklist.append(ptn);
 161       if (ptn->is_JavaObject()) {
 162         java_objects_worklist.append(ptn->as_JavaObject());
 163         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 164             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
  165           // Only the results of allocations and java static calls are interesting.
 166           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 167         }
 168       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 169         oop_fields_worklist.append(ptn->as_Field());
 170       }
 171     }
 172     // Collect some interesting nodes for further use.
 173     switch (n->Opcode()) {
 174       case Op_MergeMem:

 431   return false;
 432 }
 433 
 434 // Returns true if at least one of the arguments to the call is an object
 435 // that does not escape globally.
 436 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
 437   if (call->method() != nullptr) {
 438     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
 439     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
 440       Node* p = call->in(idx);
 441       if (not_global_escape(p)) {
 442         return true;
 443       }
 444     }
 445   } else {
 446     const char* name = call->as_CallStaticJava()->_name;
 447     assert(name != nullptr, "no name");
 448     // no arg escapes through uncommon traps
 449     if (strcmp(name, "uncommon_trap") != 0) {
 450       // process_call_arguments() assumes that all arguments escape globally
 451       const TypeTuple* d = call->tf()->domain();
 452       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 453         const Type* at = d->field_at(i);
 454         if (at->isa_oopptr() != nullptr) {
 455           return true;
 456         }
 457       }
 458     }
 459   }
 460   return false;
 461 }
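
A note on the argument indexing used above: in C2's ideal graph a call's
fixed inputs (control, I/O, memory, frame pointer, return address) occupy
the slots below TypeFunc::Parms, and the Java-level arguments start at
TypeFunc::Parms. A minimal sketch (not part of this file) of walking only
the oop arguments of a call, using the same accessors as the code above:

    const TypeTuple* d = call->tf()->domain();           // signature tuple
    for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {  // skip fixed inputs
      if (d->field_at(i)->isa_oopptr() != nullptr) {     // oop-typed slot?
        Node* arg = call->in(i);                         // the argument node
        // ... inspect arg's PointsToNode, escape state, etc. ...
      }
    }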
 462 
 463 
 464 
 465 // Utility function for nodes that load an object
 466 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
 467   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 468   // ThreadLocal has RawPtr type.
 469   const Type* t = _igvn->type(n);
 470   if (t->make_ptr() != nullptr) {
 471     Node* adr = n->in(MemNode::Address);

 505       // first IGVN optimization when escape information is still available.
 506       record_for_optimizer(n);
 507     } else if (n->is_Allocate()) {
 508       add_call_node(n->as_Call());
 509       record_for_optimizer(n);
 510     } else {
 511       if (n->is_CallStaticJava()) {
 512         const char* name = n->as_CallStaticJava()->_name;
 513         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
 514           return; // Skip uncommon traps
 515         }
 516       }
 517       // Don't mark as processed since call's arguments have to be processed.
 518       delayed_worklist->push(n);
 519       // Check if a call returns an object.
 520       if ((n->as_Call()->returns_pointer() &&
 521            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
 522           (n->is_CallStaticJava() &&
 523            n->as_CallStaticJava()->is_boxing_method())) {
 524         add_call_node(n->as_Call());
 525       }
 526     }
 527     return;
 528   }
 529   // Put this check here to process call arguments since some call nodes
 530   // point to phantom_obj.
 531   if (n_ptn == phantom_obj || n_ptn == null_obj) {
 532     return; // Skip predefined nodes.
 533   }
 534   switch (opcode) {
 535     case Op_AddP: {
 536       Node* base = get_addp_base(n);
 537       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 538       // Field nodes are created for all field types. They are used in
 539       // adjust_scalar_replaceable_state() and split_unique_types().
 540       // Note, non-oop fields will have only base edges in Connection
 541       // Graph because such fields are not used for oop loads and stores.
 542       int offset = address_offset(n, igvn);
 543       add_field(n, PointsToNode::NoEscape, offset);
 544       if (ptn_base == nullptr) {
 545         delayed_worklist->push(n); // Process it later.
 546       } else {
 547         n_ptn = ptnode_adr(n_idx);
 548         add_base(n_ptn->as_Field(), ptn_base);
 549       }
 550       break;
 551     }
 552     case Op_CastX2P: {
 553       map_ideal_node(n, phantom_obj);
 554       break;
 555     }
 556     case Op_CastPP:
 557     case Op_CheckCastPP:
 558     case Op_EncodeP:
 559     case Op_DecodeN:
 560     case Op_EncodePKlass:
 561     case Op_DecodeNKlass: {
 562       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
 563       break;
 564     }
 565     case Op_CMoveP: {
 566       add_local_var(n, PointsToNode::NoEscape);
  567       // Do not add edges during the first iteration because some inputs
  568       // may not be defined yet.
 569       delayed_worklist->push(n);
 570       break;
 571     }
 572     case Op_ConP:
 573     case Op_ConN:
 574     case Op_ConNKlass: {
 575       // assume all oop constants globally escape except for null

 606     case Op_PartialSubtypeCheck: {
  607       // Produces Null or notNull and is used only in CmpP so
 608       // phantom_obj could be used.
 609       map_ideal_node(n, phantom_obj); // Result is unknown
 610       break;
 611     }
 612     case Op_Phi: {
 613       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 614       // ThreadLocal has RawPtr type.
 615       const Type* t = n->as_Phi()->type();
 616       if (t->make_ptr() != nullptr) {
 617         add_local_var(n, PointsToNode::NoEscape);
  618         // Do not add edges during the first iteration because some inputs
  619         // may not be defined yet.
 620         delayed_worklist->push(n);
 621       }
 622       break;
 623     }
 624     case Op_Proj: {
 625       // we are only interested in the oop result projection from a call
 626       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 627           n->in(0)->as_Call()->returns_pointer()) {
 628         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
 629       }
 630       break;
 631     }
 632     case Op_Rethrow: // Exception object escapes
 633     case Op_Return: {
 634       if (n->req() > TypeFunc::Parms &&
 635           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 636         // Treat Return value as LocalVar with GlobalEscape escape state.
 637         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
 638       }
 639       break;
 640     }
 641     case Op_CompareAndExchangeP:
 642     case Op_CompareAndExchangeN:
 643     case Op_GetAndSetP:
 644     case Op_GetAndSetN: {
 645       add_objload_to_connection_graph(n, delayed_worklist);
 646       // fall-through
 647     }

 707   if (n->is_Call()) {
 708     process_call_arguments(n->as_Call());
 709     return;
 710   }
 711   assert(n->is_Store() || n->is_LoadStore() ||
  712          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
 713          "node should be registered already");
 714   int opcode = n->Opcode();
 715   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
 716   if (gc_handled) {
 717     return; // Ignore node if already handled by GC.
 718   }
 719   switch (opcode) {
 720     case Op_AddP: {
 721       Node* base = get_addp_base(n);
 722       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 723       assert(ptn_base != nullptr, "field's base should be registered");
 724       add_base(n_ptn->as_Field(), ptn_base);
 725       break;
 726     }
 727     case Op_CastPP:
 728     case Op_CheckCastPP:
 729     case Op_EncodeP:
 730     case Op_DecodeN:
 731     case Op_EncodePKlass:
 732     case Op_DecodeNKlass: {
 733       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
 734       break;
 735     }
 736     case Op_CMoveP: {
 737       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
 738         Node* in = n->in(i);
 739         if (in == nullptr) {
 740           continue;  // ignore null
 741         }
 742         Node* uncast_in = in->uncast();
 743         if (uncast_in->is_top() || uncast_in == n) {
  744           continue;  // ignore top or inputs which go back to this node
 745         }
 746         PointsToNode* ptn = ptnode_adr(in->_idx);

 761       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 762       // ThreadLocal has RawPtr type.
 763       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
 764       for (uint i = 1; i < n->req(); i++) {
 765         Node* in = n->in(i);
 766         if (in == nullptr) {
 767           continue;  // ignore null
 768         }
 769         Node* uncast_in = in->uncast();
 770         if (uncast_in->is_top() || uncast_in == n) {
  771           continue;  // ignore top or inputs which go back to this node
 772         }
 773         PointsToNode* ptn = ptnode_adr(in->_idx);
 774         assert(ptn != nullptr, "node should be registered");
 775         add_edge(n_ptn, ptn);
 776       }
 777       break;
 778     }
 779     case Op_Proj: {
 780       // we are only interested in the oop result projection from a call
 781       assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 782              n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
 783       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
 784       break;
 785     }
 786     case Op_Rethrow: // Exception object escapes
 787     case Op_Return: {
 788       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
 789              "Unexpected node type");
 790       // Treat Return value as LocalVar with GlobalEscape escape state.
 791       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
 792       break;
 793     }
 794     case Op_CompareAndExchangeP:
 795     case Op_CompareAndExchangeN:
 796     case Op_GetAndSetP:
  797     case Op_GetAndSetN: {
 798       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
 799       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
 800       // fall-through
 801     }
 802     case Op_CompareAndSwapP:

 938     PointsToNode* ptn = ptnode_adr(val->_idx);
 939     assert(ptn != nullptr, "node should be registered");
 940     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
 941     // Add edge to object for unsafe access with offset.
 942     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 943     assert(adr_ptn != nullptr, "node should be registered");
 944     if (adr_ptn->is_Field()) {
 945       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
 946       add_edge(adr_ptn, ptn);
 947     }
 948     return true;
 949   }
 950 #ifdef ASSERT
 951   n->dump(1);
 952   assert(false, "not unsafe");
 953 #endif
 954   return false;
 955 }
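
A note on the NOT_PRODUCT(COMMA "...") idiom used above: COMMA and
NOT_PRODUCT come from utilities/macros.hpp, so the trace-reason string
argument exists only in non-product builds. Roughly:

    // Non-product build: the reason string is passed through.
    set_escape_state(ptn, PointsToNode::GlobalEscape, "stored at raw address");
    // Product build: NOT_PRODUCT(...) expands to nothing.
    set_escape_state(ptn, PointsToNode::GlobalEscape);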
 956 
 957 void ConnectionGraph::add_call_node(CallNode* call) {
  958   assert(call->returns_pointer(), "only for calls which return a pointer");
 959   uint call_idx = call->_idx;
 960   if (call->is_Allocate()) {
 961     Node* k = call->in(AllocateNode::KlassNode);
 962     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
  963     assert(kt != nullptr, "TypeKlassPtr required.");
 964     PointsToNode::EscapeState es = PointsToNode::NoEscape;
 965     bool scalar_replaceable = true;
 966     NOT_PRODUCT(const char* nsr_reason = "");
 967     if (call->is_AllocateArray()) {
 968       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
 969         es = PointsToNode::GlobalEscape;
 970       } else {
 971         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
 972         if (length < 0) {
 973           // Not scalar replaceable if the length is not constant.
 974           scalar_replaceable = false;
 975           NOT_PRODUCT(nsr_reason = "has a non-constant length");
 976         } else if (length > EliminateAllocationArraySizeLimit) {
 977           // Not scalar replaceable if the length is too big.
 978           scalar_replaceable = false;

1014     //
1015     //    - all oop arguments are escaping globally;
1016     //
1017     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
1018     //
 1019     //    - the same as CallDynamicJavaNode if we can't do bytecode analysis;
 1020     //
 1021     //    - mapped to GlobalEscape JavaObject node if an unknown oop is returned;
 1022     //    - mapped to NoEscape JavaObject node if a non-escaping object allocated
 1023     //      during the call is returned;
 1024     //    - mapped to ArgEscape LocalVar node pointing to object arguments
 1025     //      which are returned and do not escape during the call;
 1026     //
 1027     //    - oop arguments' escaping status is defined by bytecode analysis;
1028     //
1029     // For a static call, we know exactly what method is being called.
1030     // Use bytecode estimator to record whether the call's return value escapes.
1031     ciMethod* meth = call->as_CallJava()->method();
1032     if (meth == nullptr) {
1033       const char* name = call->as_CallStaticJava()->_name;
1034       assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
1035       // Returns a newly allocated non-escaped object.
1036       add_java_object(call, PointsToNode::NoEscape);
 1037       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multianewarray"));
1038     } else if (meth->is_boxing_method()) {
1039       // Returns boxing object
1040       PointsToNode::EscapeState es;
1041       vmIntrinsics::ID intr = meth->intrinsic_id();
1042       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
 1043         // It does not escape if the object is always allocated.
 1044         es = PointsToNode::NoEscape;
 1045       } else {
 1046         // It escapes globally if the object could be loaded from the cache.
1047         es = PointsToNode::GlobalEscape;
1048       }
1049       add_java_object(call, es);
1050     } else {
1051       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
1052       call_analyzer->copy_dependencies(_compile->dependencies());
1053       if (call_analyzer->is_return_allocated()) {
1054         // Returns a newly allocated non-escaped object, simply
1055         // update dependency information.
1056         // Mark it as NoEscape so that objects referenced by
 1057         // its fields will be marked as NoEscape at least.
1058         add_java_object(call, PointsToNode::NoEscape);
1059         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
1060       } else {
1061         // Determine whether any arguments are returned.
1062         const TypeTuple* d = call->tf()->domain();
1063         bool ret_arg = false;
1064         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1065           if (d->field_at(i)->isa_ptr() != nullptr &&
1066               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1067             ret_arg = true;
1068             break;
1069           }
1070         }
1071         if (ret_arg) {
1072           add_local_var(call, PointsToNode::ArgEscape);
1073         } else {
1074           // Returns unknown object.
1075           map_ideal_node(call, phantom_obj);
1076         }
1077       }
1078     }
1079   } else {
 1080     // Another type of call; assume the worst case:
1081     // returned value is unknown and globally escapes.
1082     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");

1090 #ifdef ASSERT
1091     case Op_Allocate:
1092     case Op_AllocateArray:
1093     case Op_Lock:
1094     case Op_Unlock:
1095       assert(false, "should be done already");
1096       break;
1097 #endif
1098     case Op_ArrayCopy:
1099     case Op_CallLeafNoFP:
1100       // Most array copies are ArrayCopy nodes at this point but there
1101       // are still a few direct calls to the copy subroutines (See
1102       // PhaseStringOpts::copy_string())
1103       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1104         call->as_CallLeaf()->is_call_to_arraycopystub();
1105       // fall through
1106     case Op_CallLeafVector:
1107     case Op_CallLeaf: {
 1108       // Stub calls: objects do not escape but they are not scalar replaceable.
1109       // Adjust escape state for outgoing arguments.
1110       const TypeTuple * d = call->tf()->domain();
1111       bool src_has_oops = false;
1112       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1113         const Type* at = d->field_at(i);
1114         Node *arg = call->in(i);
1115         if (arg == nullptr) {
1116           continue;
1117         }
1118         const Type *aat = _igvn->type(arg);
1119         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
1120           continue;
1121         }
1122         if (arg->is_AddP()) {
1123           //
1124           // The inline_native_clone() case when the arraycopy stub is called
1125           // after the allocation before Initialize and CheckCastPP nodes.
 1126           // Or the normal arraycopy case for object arrays.
1127           //
1128           // Set AddP's base (Allocate) as not scalar replaceable since
 1129           // a pointer to the base (with offset) is passed as an argument.
1130           //
1131           arg = get_addp_base(arg);
1132         }
1133         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1134         assert(arg_ptn != nullptr, "should be registered");
1135         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1136         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
1137           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
 1138                  aat->isa_ptr() != nullptr, "expecting a Ptr");
1139           bool arg_has_oops = aat->isa_oopptr() &&
1140                               (aat->isa_instptr() ||
1141                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
1142           if (i == TypeFunc::Parms) {
1143             src_has_oops = arg_has_oops;
1144           }
1145           //
 1146           // src or dst could be j.l.Object when the other is a basic type array:
1147           //
1148           //   arraycopy(char[],0,Object*,0,size);
1149           //   arraycopy(Object*,0,char[],0,size);
1150           //
1151           // Don't add edges in such cases.
1152           //
1153           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1154                                        arg_has_oops && (i > TypeFunc::Parms);
1155 #ifdef ASSERT
1156           if (!(is_arraycopy ||
1157                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1158                 (call->as_CallLeaf()->_name != nullptr &&
1159                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1160                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1161                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

1170                   strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
1171                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1172                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
1173                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1174                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
1175                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
1176                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
1177                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1178                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1179                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1180                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1181                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1182                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1183                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
1184                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
1185                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1186                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1187                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1188                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1189                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1190                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
1191                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
1192                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1193                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
1194                  ))) {
1195             call->dump();
1196             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1197           }
1198 #endif
1199           // Always process arraycopy's destination object since
1200           // we need to add all possible edges to references in
 1201           // the source object.
1202           if (arg_esc >= PointsToNode::ArgEscape &&
1203               !arg_is_arraycopy_dest) {
1204             continue;
1205           }
1206           PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1207           if (call->is_ArrayCopy()) {
1208             ArrayCopyNode* ac = call->as_ArrayCopy();
1209             if (ac->is_clonebasic() ||

1232           }
1233         }
1234       }
1235       break;
1236     }
1237     case Op_CallStaticJava: {
1238       // For a static call, we know exactly what method is being called.
 1239       // Use bytecode estimator to record the call's escape effects
1240 #ifdef ASSERT
1241       const char* name = call->as_CallStaticJava()->_name;
1242       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1243 #endif
1244       ciMethod* meth = call->as_CallJava()->method();
1245       if ((meth != nullptr) && meth->is_boxing_method()) {
1246         break; // Boxing methods do not modify any oops.
1247       }
 1248       BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
1249       // fall-through if not a Java method or no analyzer information
1250       if (call_analyzer != nullptr) {
1251         PointsToNode* call_ptn = ptnode_adr(call->_idx);
1252         const TypeTuple* d = call->tf()->domain();
1253         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1254           const Type* at = d->field_at(i);
1255           int k = i - TypeFunc::Parms;
1256           Node* arg = call->in(i);
1257           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1258           if (at->isa_ptr() != nullptr &&
1259               call_analyzer->is_arg_returned(k)) {
1260             // The call returns arguments.
1261             if (call_ptn != nullptr) { // Is call's result used?
1262               assert(call_ptn->is_LocalVar(), "node should be registered");
1263               assert(arg_ptn != nullptr, "node should be registered");
1264               add_edge(call_ptn, arg_ptn);
1265             }
1266           }
1267           if (at->isa_oopptr() != nullptr &&
1268               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1269             if (!call_analyzer->is_arg_stack(k)) {
 1270               // The argument globally escapes.
1271               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1272             } else {

1276                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1277               }
1278             }
1279           }
1280         }
1281         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
1282           // The call returns arguments.
1283           assert(call_ptn->edge_count() > 0, "sanity");
1284           if (!call_analyzer->is_return_local()) {
 1285             // Also returns an unknown object.
1286             add_edge(call_ptn, phantom_obj);
1287           }
1288         }
1289         break;
1290       }
1291     }
1292     default: {
 1293       // Fall through here if it is not a Java method, there is no analyzer
 1294       // information, or it is some other type of call: assume the worst case,
 1295       // all arguments globally escape.
1296       const TypeTuple* d = call->tf()->domain();
1297       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1298         const Type* at = d->field_at(i);
1299         if (at->isa_oopptr() != nullptr) {
1300           Node* arg = call->in(i);
1301           if (arg->is_AddP()) {
1302             arg = get_addp_base(arg);
1303           }
1304           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
1305           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1306         }
1307       }
1308     }
1309   }
1310 }
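
A note on the escape-state comparisons in process_call_arguments() above:
PointsToNode's EscapeState values form an ordered lattice, NoEscape <
ArgEscape < GlobalEscape, so `<` and `>=` compare how far a node has
escaped. For example (sketch only, not part of this file):

    if (arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
      // The argument is not (yet) known to escape globally; later
      // fixpoint iterations may still promote it up the lattice.
    }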
1311 
1312 
1313 // Finish Graph construction.
1314 bool ConnectionGraph::complete_connection_graph(
1315                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
1316                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

1689     PointsToNode* base = i.get();
1690     if (base->is_JavaObject()) {
1691       // Skip Allocate's fields which will be processed later.
1692       if (base->ideal_node()->is_Allocate()) {
1693         return 0;
1694       }
1695       assert(base == null_obj, "only null ptr base expected here");
1696     }
1697   }
1698   if (add_edge(field, phantom_obj)) {
1699     // New edge was added
1700     new_edges++;
1701     add_field_uses_to_worklist(field);
1702   }
1703   return new_edges;
1704 }
1705 
 1706 // Find fields' initializing values for allocations.
1707 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
1708   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1709   Node* alloc = pta->ideal_node();
1710 
 1711   // Do nothing for Allocate nodes since their field values are
1712   // "known" unless they are initialized by arraycopy/clone.
1713   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
1714     return 0;
1715   }
1716   assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
1717 #ifdef ASSERT
1718   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
1719     const char* name = alloc->as_CallStaticJava()->_name;
1720     assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
1721   }
1722 #endif
 1723   // Non-escaped allocations returned from Java or runtime calls have unknown field values.
1724   int new_edges = 0;
1725   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1726     PointsToNode* field = i.get();
1727     if (field->is_Field() && field->as_Field()->is_oop()) {
1728       if (add_edge(field, phantom_obj)) {
1729         // New edge was added
1730         new_edges++;
1731         add_field_uses_to_worklist(field->as_Field());
1732       }
1733     }
1734   }
1735   return new_edges;
1736 }
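
The EdgeIterator loop above is the standard pattern this file uses to walk
a PointsToNode's out-edges. A minimal sketch, counting the oop fields
reachable from an allocation's PointsTo node `pta`:

    int oop_fields = 0;
    for (EdgeIterator i(pta); i.has_next(); i.next()) {
      PointsToNode* e = i.get();                       // current edge target
      if (e->is_Field() && e->as_Field()->is_oop()) {  // oop fields only
        oop_fields++;
      }
    }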
1737 
 1738 // Find fields' initializing values for allocations.
1739 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* phase) {
1740   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1741   Node* alloc = pta->ideal_node();
 1742   // Do nothing for Call nodes since their field values are unknown.
1743   if (!alloc->is_Allocate()) {
1744     return 0;
1745   }
1746   InitializeNode* ini = alloc->as_Allocate()->initialization();
1747   bool visited_bottom_offset = false;
1748   GrowableArray<int> offsets_worklist;
1749   int new_edges = 0;
1750 
1751   // Check if an oop field's initializing value is recorded and add
 1752   // a corresponding null value if it is not recorded.
1753   // Connection Graph does not record a default initialization by null
1754   // captured by Initialize node.
1755   //
1756   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1757     PointsToNode* field = i.get(); // Field (AddP)
1758     if (!field->is_Field() || !field->as_Field()->is_oop()) {
1759       continue; // Not oop field
1760     }
1761     int offset = field->as_Field()->offset();
1762     if (offset == Type::OffsetBot) {
1763       if (!visited_bottom_offset) {

1809               } else {
1810                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1811                   tty->print_cr("----------init store has invalid value -----");
1812                   store->dump();
1813                   val->dump();
1814                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1815                 }
1816                 for (EdgeIterator j(val); j.has_next(); j.next()) {
1817                   PointsToNode* obj = j.get();
1818                   if (obj->is_JavaObject()) {
1819                     if (!field->points_to(obj->as_JavaObject())) {
1820                       missed_obj = obj;
1821                       break;
1822                     }
1823                   }
1824                 }
1825               }
1826               if (missed_obj != nullptr) {
1827                 tty->print_cr("----------field---------------------------------");
1828                 field->dump();
 1829                 tty->print_cr("----------missed reference to object-----------");
1830                 missed_obj->dump();
 1831                 tty->print_cr("----------object referenced by init store -----");
1832                 store->dump();
1833                 val->dump();
1834                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1835               }
1836             }
1837 #endif
1838           } else {
1839             // There could be initializing stores which follow allocation.
1840             // For example, a volatile field store is not collected
1841             // by Initialize node.
1842             //
1843             // Need to check for dependent loads to separate such stores from
 1844             // stores which follow loads. For now, add the initial value null so
 1845             // that the compare-pointers optimization works correctly.
1846           }
1847         }
1848         if (value == nullptr) {
1849           // A field's initializing value was not recorded. Add null.
1850           if (add_edge(field, null_obj)) {
1851             // New edge was added

2077         assert(field->edge_count() > 0, "sanity");
2078       }
2079     }
2080   }
2081 }
2082 #endif
2083 
2084 // Optimize ideal graph.
2085 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
2086                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
2087   Compile* C = _compile;
2088   PhaseIterGVN* igvn = _igvn;
2089   if (EliminateLocks) {
2090     // Mark locks before changing ideal graph.
2091     int cnt = C->macro_count();
2092     for (int i = 0; i < cnt; i++) {
2093       Node *n = C->macro_node(i);
2094       if (n->is_AbstractLock()) { // Lock and Unlock nodes
2095         AbstractLockNode* alock = n->as_AbstractLock();
2096         if (!alock->is_non_esc_obj()) {
2097           if (not_global_escape(alock->obj_node())) {
2098             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
 2099             // The lock could be marked eliminated by the lock coarsening
 2100             // code during the first IGVN before EA. Replace the coarsened flag
 2101             // to eliminate all associated locks/unlocks.
2102 #ifdef ASSERT
2103             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
2104 #endif
2105             alock->set_non_esc_obj();
2106           }
2107         }
2108       }
2109     }
2110   }
2111 
2112   if (OptimizePtrCompare) {
2113     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
2114       Node *n = ptr_cmp_worklist.at(i);
2115       const TypeInt* tcmp = optimize_ptr_compare(n);
2116       if (tcmp->singleton()) {
2117         Node* cmp = igvn->makecon(tcmp);
2118 #ifndef PRODUCT
2119         if (PrintOptimizePtrCompare) {
2120           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
2121           if (Verbose) {
2122             n->dump(1);
2123           }
2124         }
2125 #endif
2126         igvn->replace_node(n, cmp);
2127       }
2128     }
2129   }
2130 
 2131   // For MemBarStoreStore nodes added in library_call.cpp, check the
 2132   // escape status of the associated AllocateNode and optimize out the
 2133   // MemBarStoreStore node if the allocated object never escapes.
2134   for (int i = 0; i < storestore_worklist.length(); i++) {
2135     Node* storestore = storestore_worklist.at(i);
2136     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
2137     if (alloc->is_Allocate() && not_global_escape(alloc)) {
2138       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
2139       mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
2140       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
2141       igvn->register_new_node_with_optimizer(mb);
2142       igvn->replace_node(storestore, mb);
2143     }
2144   }
2145 }
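
For reference, the pointer-compare folding performed above has this shape:
when the connection graph proves that the two inputs of a CmpP/CmpN must
always, or can never, be equal, the compare collapses to a constant
condition code. A sketch of the loop body above:

    const TypeInt* tcmp = optimize_ptr_compare(n);  // CC_EQ, CC_GT or CC
    if (tcmp->singleton()) {                        // outcome proven constant
      igvn->replace_node(n, igvn->makecon(tcmp));   // CmpP/CmpN -> constant
    }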
2146 
 2147 // Optimize object compares.
2148 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
2149   assert(OptimizePtrCompare, "sanity");
2150   assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
2151   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
2152   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
 2153   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0, 1]
2154 
2155   PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
2156   PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
2157   JavaObjectNode* jobj1 = unique_java_object(n->in(1));
2158   JavaObjectNode* jobj2 = unique_java_object(n->in(2));
2159   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
2160   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
2161 
2162   // Check simple cases first.

2275   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2276   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
2277   PointsToNode* ptadr = _nodes.at(n->_idx);
2278   if (ptadr != nullptr) {
2279     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2280     return;
2281   }
2282   Compile* C = _compile;
2283   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2284   map_ideal_node(n, ptadr);
2285   // Add edge from arraycopy node to source object.
2286   (void)add_edge(ptadr, src);
2287   src->set_arraycopy_src();
2288   // Add edge from destination object to arraycopy node.
2289   (void)add_edge(dst, ptadr);
2290   dst->set_arraycopy_dst();
2291 }
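
The resulting connection-graph shape for an arraycopy is worth spelling
out: the destination object points to the Arraycopy node, which in turn
points to the source object, so anything reachable from the source becomes
reachable through the destination:

    dst JavaObject  -->  Arraycopy node  -->  src JavaObject
    (arraycopy_dst)                           (arraycopy_src)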
2292 
2293 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2294   const Type* adr_type = n->as_AddP()->bottom_type();
2295   BasicType bt = T_INT;
2296   if (offset == Type::OffsetBot) {
2297     // Check only oop fields.
2298     if (!adr_type->isa_aryptr() ||
2299         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
2300         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
 2301       // OffsetBot is used to reference an array's element. Ignore the first AddP.
2302       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
2303         bt = T_OBJECT;
2304       }
2305     }
2306   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2307     if (adr_type->isa_instptr()) {
2308       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2309       if (field != nullptr) {
2310         bt = field->layout_type();
2311       } else {
2312         // Check for unsafe oop field access
2313         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2314             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2315             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2316             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2317           bt = T_OBJECT;
2318           (*unsafe) = true;
2319         }
2320       }
2321     } else if (adr_type->isa_aryptr()) {
2322       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2323         // Ignore array length load.
2324       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
2325         // Ignore first AddP.
2326       } else {
2327         const Type* elemtype = adr_type->isa_aryptr()->elem();
2328         bt = elemtype->array_element_basic_type();
2329       }
2330     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2331       // Allocation initialization, ThreadLocal field access, unsafe access
2332       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2333           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2334           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2335           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2336         bt = T_OBJECT;
2337       }
2338     }
2339   }
2340   // Note: T_NARROWOOP is not classed as a real reference type
2341   return (is_reference_type(bt) || bt == T_NARROWOOP);
2342 }
2343 
 2344 // Returns the unique pointed-to java object or null.
2345 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2346   assert(!_collecting, "should not call when constructed graph");
2347   // If the node was created after the escape computation we can't answer.
2348   uint idx = n->_idx;

2492             return true;
2493           }
2494         }
2495       }
2496     }
2497   }
2498   return false;
2499 }
2500 
2501 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2502   const Type *adr_type = phase->type(adr);
2503   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
 2504     // We are computing a raw address for a store captured by an Initialize;
2505     // compute an appropriate address type. AddP cases #3 and #5 (see below).
2506     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2507     assert(offs != Type::OffsetBot ||
2508            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2509            "offset must be a constant or it is initialization of array");
2510     return offs;
2511   }
2512   const TypePtr *t_ptr = adr_type->isa_ptr();
2513   assert(t_ptr != nullptr, "must be a pointer type");
2514   return t_ptr->offset();
2515 }
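
Both address_offset() above and get_addp_base() below read the standard
AddP input slots. A minimal sketch of that convention as used throughout
this file (illustrative only):

    Node* base = addp->in(AddPNode::Base);     // object the field belongs to
    Node* adr  = addp->in(AddPNode::Address);  // pointer being offset
    Node* off  = addp->in(AddPNode::Offset);   // byte offset, often constant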
2516 
2517 Node* ConnectionGraph::get_addp_base(Node *addp) {
2518   assert(addp->is_AddP(), "must be AddP");
2519   //
2520   // AddP cases for Base and Address inputs:
2521   // case #1. Direct object's field reference:
2522   //     Allocate
2523   //       |
2524   //     Proj #5 ( oop result )
2525   //       |
2526   //     CheckCastPP (cast to instance type)
2527   //      | |
2528   //     AddP  ( base == address )
2529   //
2530   // case #2. Indirect object's field reference:
2531   //      Phi
2532   //       |
2533   //     CastPP (cast to instance type)
2534   //      | |

2648   }
2649   return nullptr;
2650 }
2651 
2652 //
2653 // Adjust the type and inputs of an AddP which computes the
2654 // address of a field of an instance
2655 //
2656 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2657   PhaseGVN* igvn = _igvn;
2658   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2659   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
2660   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2661   if (t == nullptr) {
 2662     // We are computing a raw address for a store captured by an Initialize;
2663     // compute an appropriate address type (cases #3 and #5).
2664     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2665     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2666     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2667     assert(offs != Type::OffsetBot, "offset must be a constant");
2668     t = base_t->add_offset(offs)->is_oopptr();
2669   }
 2670   int inst_id = base_t->instance_id();
2671   assert(!t->is_known_instance() || t->instance_id() == inst_id,
2672                              "old type must be non-instance or match new type");
2673 
 2674   // The type 't' could be a subclass of 'base_t'.
 2675   // As a result t->offset() could be larger than base_t's size and it will
 2676   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
 2677   // constructor verifies correctness of the offset.
 2678   //
 2679   // It could happen on a subclass branch (from type profiling
 2680   // inlining) which was not eliminated during parsing since the exactness
 2681   // of the allocation type was not propagated to the subclass type check.
 2682   //
 2683   // Or the type 't' could be unrelated to 'base_t' at all.
 2684   // That can happen when the CHA type differs from the MDO type on a dead path
 2685   // (for example, from an instanceof check) which is not collapsed during parsing.
 2686   //
 2687   // Do nothing for such an AddP node and don't process its users since
 2688   // this code branch will go away.
2689   //
2690   if (!t->is_known_instance() &&
2691       !base_t->maybe_java_subtype_of(t)) {
2692      return false; // bail out
2693   }
2694   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
2695   // Do NOT remove the next line: ensure a new alias index is allocated
2696   // for the instance type. Note: C++ will not remove it since the call
 2697   // has a side effect.
2698   int alias_idx = _compile->get_alias_index(tinst);
2699   igvn->set_type(addp, tinst);
2700   // record the allocation in the node map
2701   set_map(addp, get_map(base->_idx));
2702   // Set addp's Base and Address to 'base'.
2703   Node *abase = addp->in(AddPNode::Base);
2704   Node *adr   = addp->in(AddPNode::Address);
2705   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2706       adr->in(0)->_idx == (uint)inst_id) {
2707     // Skip AddP cases #3 and #5.
2708   } else {
2709     assert(!abase->is_top(), "sanity"); // AddP case #3
2710     if (abase != base) {
2711       igvn->hash_delete(addp);
2712       addp->set_req(AddPNode::Base, base);
2713       if (abase == adr) {
2714         addp->set_req(AddPNode::Address, base);

3356         ptnode_adr(n->_idx)->dump();
3357         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
3358 #endif
3359         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3360         return;
3361       } else {
3362         Node *val = get_map(jobj->idx());   // CheckCastPP node
3363         TypeNode *tn = n->as_Type();
3364         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3365         assert(tinst != nullptr && tinst->is_known_instance() &&
 3366                tinst->instance_id() == jobj->idx(), "instance type expected.");
3367 
3368         const Type *tn_type = igvn->type(tn);
3369         const TypeOopPtr *tn_t;
3370         if (tn_type->isa_narrowoop()) {
3371           tn_t = tn_type->make_ptr()->isa_oopptr();
3372         } else {
3373           tn_t = tn_type->isa_oopptr();
3374         }
3375         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
3376           if (tn_type->isa_narrowoop()) {
3377             tn_type = tinst->make_narrowoop();
3378           } else {
3379             tn_type = tinst;
3380           }
3381           igvn->hash_delete(tn);
3382           igvn->set_type(tn, tn_type);
3383           tn->set_type(tn_type);
3384           igvn->hash_insert(tn);
3385           record_for_optimizer(n);
3386         } else {
3387           assert(tn_type == TypePtr::NULL_PTR ||
 3388                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
3389                  "unexpected type");
3390           continue; // Skip dead path with different type
3391         }
3392       }
3393     } else {
3394       debug_only(n->dump();)
3395       assert(false, "EA: unexpected node");
3396       continue;
3397     }
3398     // push allocation's users on appropriate worklist
3399     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3400       Node *use = n->fast_out(i);
 3401       if (use->is_Mem() && use->in(MemNode::Address) == n) {
3402         // Load/store to instance's field
3403         memnode_worklist.append_if_missing(use);
3404       } else if (use->is_MemBar()) {
3405         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3406           memnode_worklist.append_if_missing(use);
3407         }
3408       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3409         Node* addp2 = find_second_addp(use, n);
3410         if (addp2 != nullptr) {
3411           alloc_worklist.append_if_missing(addp2);
3412         }
3413         alloc_worklist.append_if_missing(use);
3414       } else if (use->is_Phi() ||
3415                  use->is_CheckCastPP() ||
3416                  use->is_EncodeNarrowPtr() ||
3417                  use->is_DecodeNarrowPtr() ||
3418                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3419         alloc_worklist.append_if_missing(use);
3420 #ifdef ASSERT
3421       } else if (use->is_Mem()) {
3422         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3423       } else if (use->is_MergeMem()) {
3424         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3425       } else if (use->is_SafePoint()) {
3426         // Look for MergeMem nodes for calls which reference unique allocation
3427         // (through CheckCastPP nodes) even for debug info.
3428         Node* m = use->in(TypeFunc::Memory);
3429         if (m->is_MergeMem()) {
3430           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3431         }
3432       } else if (use->Opcode() == Op_EncodeISOArray) {
3433         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3434           // EncodeISOArray overwrites destination array
3435           memnode_worklist.append_if_missing(use);
3436         }
3437       } else {
3438         uint op = use->Opcode();
3439         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3440             (use->in(MemNode::Memory) == n)) {
 3441           // They overwrite the memory edge corresponding to the destination array.
3442           memnode_worklist.append_if_missing(use);
3443         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3444               op == Op_CastP2X || op == Op_StoreCM ||
3445               op == Op_FastLock || op == Op_AryEq ||
3446               op == Op_StrComp || op == Op_CountPositives ||
3447               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3448               op == Op_StrEquals || op == Op_VectorizedHashCode ||
3449               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3450               op == Op_SubTypeCheck ||
3451               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3452           n->dump();
3453           use->dump();
3454           assert(false, "EA: missing allocation reference path");
3455         }
3456 #endif
3457       }
3458     }
3459 
3460   }
3461 
3462   // Go over all ArrayCopy nodes and if one of the inputs has a unique
3463   // type, record it in the ArrayCopy node so we know what memory this
 3464 // node uses/modifies.
3465   for (int next = 0; next < arraycopy_worklist.length(); next++) {
3466     ArrayCopyNode* ac = arraycopy_worklist.at(next);
3467     Node* dest = ac->in(ArrayCopyNode::Dest);
3468     if (dest->is_AddP()) {
3469       dest = get_addp_base(dest);
3470     }

3500   if (memnode_worklist.length() == 0)
3501     return;  // nothing to do
3502   while (memnode_worklist.length() != 0) {
3503     Node *n = memnode_worklist.pop();
3504     if (visited.test_set(n->_idx)) {
3505       continue;
3506     }
3507     if (n->is_Phi() || n->is_ClearArray()) {
3508       // we don't need to do anything, but the users must be pushed
3509     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3510       // we don't need to do anything, but the users must be pushed
3511       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
3512       if (n == nullptr) {
3513         continue;
3514       }
3515     } else if (n->Opcode() == Op_StrCompressedCopy ||
3516                n->Opcode() == Op_EncodeISOArray) {
3517       // get the memory projection
3518       n = n->find_out_with(Op_SCMemProj);
3519       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
3520     } else {
3521       assert(n->is_Mem(), "memory node required.");
3522       Node *addr = n->in(MemNode::Address);
3523       const Type *addr_t = igvn->type(addr);
3524       if (addr_t == Type::TOP) {
3525         continue;
3526       }
 3527       assert(addr_t->isa_ptr() != nullptr, "pointer type required.");
 3528       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
 3529       assert((uint)alias_idx < new_index_end, "wrong alias index");
3530       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3531       if (_compile->failing()) {
3532         return;
3533       }
3534       if (mem != n->in(MemNode::Memory)) {
 3535         // We delay the memory edge update since we need the old one in
 3536         // the MergeMem code below when instance memory slices are separated.
3537         set_map(n, mem);
3538       }
3539       if (n->is_Load()) {

3542         // get the memory projection
3543         n = n->find_out_with(Op_SCMemProj);
3544         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
3545       }
3546     }
3547     // push user on appropriate worklist
3548     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3549       Node *use = n->fast_out(i);
3550       if (use->is_Phi() || use->is_ClearArray()) {
3551         memnode_worklist.append_if_missing(use);
3552       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3553         if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
3554           continue;
3555         }
3556         memnode_worklist.append_if_missing(use);
3557       } else if (use->is_MemBar()) {
3558         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3559           memnode_worklist.append_if_missing(use);
3560         }
3561 #ifdef ASSERT
 3562       } else if (use->is_Mem()) {
3563         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3564       } else if (use->is_MergeMem()) {
3565         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3566       } else if (use->Opcode() == Op_EncodeISOArray) {
3567         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3568           // EncodeISOArray overwrites destination array
3569           memnode_worklist.append_if_missing(use);
3570         }
3571       } else {
3572         uint op = use->Opcode();
3573         if ((use->in(MemNode::Memory) == n) &&
3574             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
 3575           // They overwrite the memory edge corresponding to the destination array.
3576           memnode_worklist.append_if_missing(use);
3577         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
3578               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
3579               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3580               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
3581           n->dump();
3582           use->dump();
3583           assert(false, "EA: missing memory path");
3584         }
3585 #endif
3586       }
3587     }
3588   }
3589 
3590   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
3591   //            Walk each memory slice moving the first node encountered of each
3592   //            instance type to the input corresponding to its alias index.
3593   uint length = mergemem_worklist.length();
 3594   for (uint next = 0; next < length; ++next) {
3595     MergeMemNode* nmm = mergemem_worklist.at(next);
3596     assert(!visited.test_set(nmm->_idx), "should not be visited before");
3597     // Note: we don't want to use MergeMemStream here because we only want to
3598     // scan inputs which exist at the start, not ones we add during processing.
 3599   // Note 2: MergeMem may already contain instance memory slices added
 3600   // during the find_inst_mem() call when memory nodes were processed above.

3647       Node* result = step_through_mergemem(nmm, ni, tinst);
3648       if (result == nmm->base_memory()) {
3649         // Didn't find instance memory, search through general slice recursively.
3650         result = nmm->memory_at(_compile->get_general_index(ni));
3651         result = find_inst_mem(result, ni, orig_phis);
3652         if (_compile->failing()) {
3653           return;
3654         }
3655         nmm->set_memory_at(ni, result);
3656       }
3657     }
3658     igvn->hash_insert(nmm);
3659     record_for_optimizer(nmm);
3660   }
3661 
3662   //  Phase 4:  Update the inputs of non-instance memory Phis and
3663   //            the Memory input of memnodes
3664   // First update the inputs of any non-instance Phi's from
3665   // which we split out an instance Phi.  Note we don't have
3666   // to recursively process Phi's encountered on the input memory
3667   // chains as is done in split_memory_phi() since they will
3668   // also be processed here.
3669   for (int j = 0; j < orig_phis.length(); j++) {
3670     PhiNode *phi = orig_phis.at(j);
3671     int alias_idx = _compile->get_alias_index(phi->adr_type());
3672     igvn->hash_delete(phi);
3673     for (uint i = 1; i < phi->req(); i++) {
3674       Node *mem = phi->in(i);
3675       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
3676       if (_compile->failing()) {
3677         return;
3678       }
3679       if (mem != new_mem) {
3680         phi->set_req(i, new_mem);
3681       }
3682     }
3683     igvn->hash_insert(phi);
3684     record_for_optimizer(phi);
3685   }
3686 
3687   // Update the memory inputs of MemNodes with the value we computed

  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/metaspace.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "opto/c2compiler.hpp"
  35 #include "opto/arraycopynode.hpp"
  36 #include "opto/callnode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/compile.hpp"
  39 #include "opto/escape.hpp"
  40 #include "opto/phaseX.hpp"
  41 #include "opto/movenode.hpp"
  42 #include "opto/rootnode.hpp"
  43 #include "utilities/macros.hpp"
  44 
  45 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  46   _nodes(C->comp_arena(), C->unique(), C->unique(), nullptr),
  47   _in_worklist(C->comp_arena()),
  48   _next_pidx(0),
  49   _collecting(true),
  50   _verify(false),
  51   _compile(C),
  52   _igvn(igvn),

 136   GrowableArray<SafePointNode*>  sfn_worklist;
 137   GrowableArray<MergeMemNode*>   mergemem_worklist;
 138   DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
 139 
 140   { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);
 141 
 142   // 1. Populate Connection Graph (CG) with PointsTo nodes.
 143   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
 144   // Initialize worklist
 145   if (C->root() != nullptr) {
 146     ideal_nodes.push(C->root());
 147   }
 148   // Processed ideal nodes are unique on ideal_nodes list
 149   // but several ideal nodes are mapped to the phantom_obj.
 150   // To avoid duplicated entries on the following worklists
 151   // add the phantom_obj only once to them.
 152   ptnodes_worklist.append(phantom_obj);
 153   java_objects_worklist.append(phantom_obj);
 154   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
 155     Node* n = ideal_nodes.at(next);
 156     if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
 157         !n->in(MemNode::Address)->is_AddP() &&
 158         _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
 159       // A Load/Store at the mark word address has offset 0 and therefore no AddP, which confuses EA
 160       Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
 161       _igvn->register_new_node_with_optimizer(addp);
 162       _igvn->replace_input_of(n, MemNode::Address, addp);
 163       ideal_nodes.push(addp);
 164       _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
 165     }
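    // Sketch of the rewrite performed above (illustration only):
    //
    //   before:  LoadX(..., oop)                  // no AddP, offset 0
    //   after:   LoadX(..., AddP(oop, oop, 0))
    //
    // EA models a field access through its AddP node, so the synthetic AddP
    // with constant offset 0 gives the mark word access a Field node to hang on.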
 166     // Create PointsTo nodes and add them to Connection Graph. Called
 167     // only once per ideal node since ideal_nodes is Unique_Node list.
 168     add_node_to_connection_graph(n, &delayed_worklist);
 169     PointsToNode* ptn = ptnode_adr(n->_idx);
 170     if (ptn != nullptr && ptn != phantom_obj) {
 171       ptnodes_worklist.append(ptn);
 172       if (ptn->is_JavaObject()) {
 173         java_objects_worklist.append(ptn->as_JavaObject());
 174         if ((n->is_Allocate() || n->is_CallStaticJava()) &&
 175             (ptn->escape_state() < PointsToNode::GlobalEscape)) {
 176           // Only the results of allocations and Java static calls are interesting.
 177           non_escaped_allocs_worklist.append(ptn->as_JavaObject());
 178         }
 179       } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
 180         oop_fields_worklist.append(ptn->as_Field());
 181       }
 182     }
 183     // Collect some interesting nodes for further use.
 184     switch (n->Opcode()) {
 185       case Op_MergeMem:

 442   return false;
 443 }
 444 
 445 // Returns true if at least one of the arguments to the call is an object
 446 // that does not escape globally.
 447 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
 448   if (call->method() != nullptr) {
 449     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
 450     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
 451       Node* p = call->in(idx);
 452       if (not_global_escape(p)) {
 453         return true;
 454       }
 455     }
 456   } else {
 457     const char* name = call->as_CallStaticJava()->_name;
 458     assert(name != nullptr, "no name");
 459     // no arg escapes through uncommon traps
 460     if (strcmp(name, "uncommon_trap") != 0) {
 461       // process_call_arguments() assumes that all arguments escape globally
 462       const TypeTuple* d = call->tf()->domain_sig();
 463       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 464         const Type* at = d->field_at(i);
 465         if (at->isa_oopptr() != nullptr) {
 466           return true;
 467         }
 468       }
 469     }
 470   }
 471   return false;
 472 }
 473 
 474 
 475 
 476 // Utility function for nodes that load an object
 477 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
 478   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 479   // ThreadLocal has RawPtr type.
 480   const Type* t = _igvn->type(n);
 481   if (t->make_ptr() != nullptr) {
 482     Node* adr = n->in(MemNode::Address);

 516       // first IGVN optimization when escape information is still available.
 517       record_for_optimizer(n);
 518     } else if (n->is_Allocate()) {
 519       add_call_node(n->as_Call());
 520       record_for_optimizer(n);
 521     } else {
 522       if (n->is_CallStaticJava()) {
 523         const char* name = n->as_CallStaticJava()->_name;
 524         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
 525           return; // Skip uncommon traps
 526         }
 527       }
 528       // Don't mark it as processed since the call's arguments still have to be processed.
 529       delayed_worklist->push(n);
 530       // Check if a call returns an object.
 531       if ((n->as_Call()->returns_pointer() &&
 532            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
 533           (n->is_CallStaticJava() &&
 534            n->as_CallStaticJava()->is_boxing_method())) {
 535         add_call_node(n->as_Call());
 536       } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
 537         bool returns_oop = false;
 538         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
 539           ProjNode* pn = n->fast_out(i)->as_Proj();
 540           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
 541             returns_oop = true;
 542           }
 543         }
 544         if (returns_oop) {
 545           add_call_node(n->as_Call());
 546         }
 547       }
 548     }
 549     return;
 550   }
 551   // Put this check here to process call arguments since some call nodes
 552   // point to phantom_obj.
 553   if (n_ptn == phantom_obj || n_ptn == null_obj) {
 554     return; // Skip predefined nodes.
 555   }
 556   switch (opcode) {
 557     case Op_AddP: {
 558       Node* base = get_addp_base(n);
 559       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 560       // Field nodes are created for all field types. They are used in
 561       // adjust_scalar_replaceable_state() and split_unique_types().
 562       // Note, non-oop fields will have only base edges in Connection
 563       // Graph because such fields are not used for oop loads and stores.
 564       int offset = address_offset(n, igvn);
 565       add_field(n, PointsToNode::NoEscape, offset);
 566       if (ptn_base == nullptr) {
 567         delayed_worklist->push(n); // Process it later.
 568       } else {
 569         n_ptn = ptnode_adr(n_idx);
 570         add_base(n_ptn->as_Field(), ptn_base);
 571       }
 572       break;
 573     }
 574     case Op_CastX2P: {
 575       map_ideal_node(n, phantom_obj);
 576       break;
 577     }
 578     case Op_InlineType:
 579     case Op_CastPP:
 580     case Op_CheckCastPP:
 581     case Op_EncodeP:
 582     case Op_DecodeN:
 583     case Op_EncodePKlass:
 584     case Op_DecodeNKlass: {
 585       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
 586       break;
 587     }
 588     case Op_CMoveP: {
 589       add_local_var(n, PointsToNode::NoEscape);
 590       // Do not add edges during the first iteration because some of the
 591       // inputs may not be defined yet.
 592       delayed_worklist->push(n);
 593       break;
 594     }
 595     case Op_ConP:
 596     case Op_ConN:
 597     case Op_ConNKlass: {
 598       // assume all oop constants globally escape except for null

 629     case Op_PartialSubtypeCheck: {
 630       // Produces Null or notNull and is used only in CmpP, so
 631       // phantom_obj could be used.
 632       map_ideal_node(n, phantom_obj); // Result is unknown
 633       break;
 634     }
 635     case Op_Phi: {
 636       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 637       // ThreadLocal has RawPtr type.
 638       const Type* t = n->as_Phi()->type();
 639       if (t->make_ptr() != nullptr) {
 640         add_local_var(n, PointsToNode::NoEscape);
 641         // Do not add edges during the first iteration because some of the
 642         // inputs may not be defined yet.
 643         delayed_worklist->push(n);
 644       }
 645       break;
 646     }
 647     case Op_Proj: {
 648       // we are only interested in the oop result projection from a call
 649       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
 650           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
 651         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
 652                n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
 653         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
 654       }
 655       break;
 656     }
 657     case Op_Rethrow: // Exception object escapes
 658     case Op_Return: {
 659       if (n->req() > TypeFunc::Parms &&
 660           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 661         // Treat Return value as LocalVar with GlobalEscape escape state.
 662         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
 663       }
 664       break;
 665     }
 666     case Op_CompareAndExchangeP:
 667     case Op_CompareAndExchangeN:
 668     case Op_GetAndSetP:
 669     case Op_GetAndSetN: {
 670       add_objload_to_connection_graph(n, delayed_worklist);
 671       // fall-through
 672     }

 732   if (n->is_Call()) {
 733     process_call_arguments(n->as_Call());
 734     return;
 735   }
 736   assert(n->is_Store() || n->is_LoadStore() ||
 737          (n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr),
 738          "node should be registered already");
 739   int opcode = n->Opcode();
 740   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
 741   if (gc_handled) {
 742     return; // Ignore node if already handled by GC.
 743   }
 744   switch (opcode) {
 745     case Op_AddP: {
 746       Node* base = get_addp_base(n);
 747       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 748       assert(ptn_base != nullptr, "field's base should be registered");
 749       add_base(n_ptn->as_Field(), ptn_base);
 750       break;
 751     }
 752     case Op_InlineType:
 753     case Op_CastPP:
 754     case Op_CheckCastPP:
 755     case Op_EncodeP:
 756     case Op_DecodeN:
 757     case Op_EncodePKlass:
 758     case Op_DecodeNKlass: {
 759       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
 760       break;
 761     }
 762     case Op_CMoveP: {
 763       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
 764         Node* in = n->in(i);
 765         if (in == nullptr) {
 766           continue;  // ignore null
 767         }
 768         Node* uncast_in = in->uncast();
 769         if (uncast_in->is_top() || uncast_in == n) {
 770           continue;  // ignore top or inputs which go back to this node
 771         }
 772         PointsToNode* ptn = ptnode_adr(in->_idx);

 787       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 788       // ThreadLocal has RawPtr type.
 789       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
 790       for (uint i = 1; i < n->req(); i++) {
 791         Node* in = n->in(i);
 792         if (in == nullptr) {
 793           continue;  // ignore null
 794         }
 795         Node* uncast_in = in->uncast();
 796         if (uncast_in->is_top() || uncast_in == n) {
 797           continue;  // ignore top or inputs which go back to this node
 798         }
 799         PointsToNode* ptn = ptnode_adr(in->_idx);
 800         assert(ptn != nullptr, "node should be registered");
 801         add_edge(n_ptn, ptn);
 802       }
 803       break;
 804     }
 805     case Op_Proj: {
 806       // we are only interested in the oop result projection from a call
 807       assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
 808              n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
 809       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
 810       break;
 811     }
 812     case Op_Rethrow: // Exception object escapes
 813     case Op_Return: {
 814       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
 815              "Unexpected node type");
 816       // Treat Return value as LocalVar with GlobalEscape escape state.
 817       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
 818       break;
 819     }
 820     case Op_CompareAndExchangeP:
 821     case Op_CompareAndExchangeN:
 822     case Op_GetAndSetP:
 823     case Op_GetAndSetN:{
 824       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
 825       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
 826       // fall-through
 827     }
 828     case Op_CompareAndSwapP:

 964     PointsToNode* ptn = ptnode_adr(val->_idx);
 965     assert(ptn != nullptr, "node should be registered");
 966     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
 967     // Add edge to object for unsafe access with offset.
 968     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
 969     assert(adr_ptn != nullptr, "node should be registered");
 970     if (adr_ptn->is_Field()) {
 971       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
 972       add_edge(adr_ptn, ptn);
 973     }
 974     return true;
 975   }
 976 #ifdef ASSERT
 977   n->dump(1);
 978   assert(false, "not unsafe");
 979 #endif
 980   return false;
 981 }
 982 
 983 void ConnectionGraph::add_call_node(CallNode* call) {
 984   assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
 985   uint call_idx = call->_idx;
 986   if (call->is_Allocate()) {
 987     Node* k = call->in(AllocateNode::KlassNode);
 988     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 989     assert(kt != nullptr, "TypeKlassPtr required.");
 990     PointsToNode::EscapeState es = PointsToNode::NoEscape;
 991     bool scalar_replaceable = true;
 992     NOT_PRODUCT(const char* nsr_reason = "");
 993     if (call->is_AllocateArray()) {
 994       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
 995         es = PointsToNode::GlobalEscape;
 996       } else {
 997         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
 998         if (length < 0) {
 999           // Not scalar replaceable if the length is not constant.
1000           scalar_replaceable = false;
1001           NOT_PRODUCT(nsr_reason = "has a non-constant length");
1002         } else if (length > EliminateAllocationArraySizeLimit) {
1003           // Not scalar replaceable if the length is too big.
1004           scalar_replaceable = false;

1040     //
1041     //    - all oop arguments escape globally;
1042     //
1043     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
1044     //
1045     //    - the same as CallDynamicJavaNode if bytecode analysis cannot be done;
1046     //
1047     //    - mapped to GlobalEscape JavaObject node if an unknown oop is returned;
1048     //    - mapped to NoEscape JavaObject node if a non-escaping object allocated
1049     //      during the call is returned;
1050     //    - mapped to ArgEscape LocalVar node pointing to object arguments
1051     //      which are returned and do not escape during the call;
1052     //
1053     //    - oop arguments' escaping status is defined by bytecode analysis;
1054     //
1055     // For a static call, we know exactly what method is being called.
1056     // Use bytecode estimator to record whether the call's return value escapes.
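    // Illustrative Java shapes for the mapping above (hypothetical methods,
    // not from this source):
    //   static Object fresh()      { return new Object(); }   // NoEscape JavaObject
    //   static Object id(Object a) { return a; }              // ArgEscape LocalVar
    //   static Object lookup()     { return someGlobalMap.get("k"); } // GlobalEscape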
1057     ciMethod* meth = call->as_CallJava()->method();
1058     if (meth == nullptr) {
1059       const char* name = call->as_CallStaticJava()->_name;
1060       assert(strncmp(name, "_multianewarray", 15) == 0 ||
1061              strncmp(name, "_load_unknown_inline", 20) == 0, "TODO: add failed case check");
1062       // Returns a newly allocated non-escaped object.
1063       add_java_object(call, PointsToNode::NoEscape);
1064       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multianewarray"));
1065     } else if (meth->is_boxing_method()) {
1066       // Returns boxing object
1067       PointsToNode::EscapeState es;
1068       vmIntrinsics::ID intr = meth->intrinsic_id();
1069       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
1070         // It does not escape if object is always allocated.
1071         es = PointsToNode::NoEscape;
1072       } else {
1073         // It escapes globally if object could be loaded from cache.
1074         es = PointsToNode::GlobalEscape;
1075       }
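      // For example, Integer.valueOf(42) may return a shared IntegerCache
      // entry that is reachable globally, while Float.valueOf()/Double.valueOf()
      // have no such cache and always allocate, hence the NoEscape case above.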
1076       add_java_object(call, es);
1077     } else {
1078       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
1079       call_analyzer->copy_dependencies(_compile->dependencies());
1080       if (call_analyzer->is_return_allocated()) {
1081         // Returns a newly allocated non-escaped object, simply
1082         // update dependency information.
1083         // Mark it as NoEscape so that objects referenced by
1084         // its fields will be marked as NoEscape at least.
1085         add_java_object(call, PointsToNode::NoEscape);
1086         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
1087       } else {
1088         // Determine whether any arguments are returned.
1089         const TypeTuple* d = call->tf()->domain_cc();
1090         bool ret_arg = false;
1091         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1092           if (d->field_at(i)->isa_ptr() != nullptr &&
1093               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1094             ret_arg = true;
1095             break;
1096           }
1097         }
1098         if (ret_arg) {
1099           add_local_var(call, PointsToNode::ArgEscape);
1100         } else {
1101           // Returns unknown object.
1102           map_ideal_node(call, phantom_obj);
1103         }
1104       }
1105     }
1106   } else {
1107     // Another type of call; assume the worst case:
1108     // returned value is unknown and globally escapes.
1109     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");

1117 #ifdef ASSERT
1118     case Op_Allocate:
1119     case Op_AllocateArray:
1120     case Op_Lock:
1121     case Op_Unlock:
1122       assert(false, "should be done already");
1123       break;
1124 #endif
1125     case Op_ArrayCopy:
1126     case Op_CallLeafNoFP:
1127       // Most array copies are ArrayCopy nodes at this point but there
1128       // are still a few direct calls to the copy subroutines (See
1129       // PhaseStringOpts::copy_string())
1130       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1131         call->as_CallLeaf()->is_call_to_arraycopystub();
1132       // fall through
1133     case Op_CallLeafVector:
1134     case Op_CallLeaf: {
1135       // Stub calls: objects do not escape but they are not scalar replaceable.
1136       // Adjust escape state for outgoing arguments.
1137       const TypeTuple * d = call->tf()->domain_sig();
1138       bool src_has_oops = false;
1139       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1140         const Type* at = d->field_at(i);
1141         Node *arg = call->in(i);
1142         if (arg == nullptr) {
1143           continue;
1144         }
1145         const Type *aat = _igvn->type(arg);
1146         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
1147           continue;
1148         }
1149         if (arg->is_AddP()) {
1150           //
1151           // The inline_native_clone() case when the arraycopy stub is called
1152           // after the allocation before Initialize and CheckCastPP nodes.
1153           // Or normal arraycopy for object arrays case.
1154           //
1155           // Set AddP's base (Allocate) as not scalar replaceable since
1156           // pointer to the base (with offset) is passed as argument.
1157           //
1158           arg = get_addp_base(arg);
1159         }
1160         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1161         assert(arg_ptn != nullptr, "should be registered");
1162         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1163         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
1164           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1165                  aat->isa_ptr() != nullptr, "expecting a Ptr");
1166           bool arg_has_oops = aat->isa_oopptr() &&
1167                               (aat->isa_instptr() ||
1168                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
1169                                (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
1170                                                                aat->isa_aryptr()->is_flat() &&
1171                                                                aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
1172           if (i == TypeFunc::Parms) {
1173             src_has_oops = arg_has_oops;
1174           }
1175           //
1176           // src or dst could be j.l.Object when the other is a basic type array:
1177           //
1178           //   arraycopy(char[],0,Object*,0,size);
1179           //   arraycopy(Object*,0,char[],0,size);
1180           //
1181           // Don't add edges in such cases.
1182           //
1183           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1184                                        arg_has_oops && (i > TypeFunc::Parms);
1185 #ifdef ASSERT
1186           if (!(is_arraycopy ||
1187                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1188                 (call->as_CallLeaf()->_name != nullptr &&
1189                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1190                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1191                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||

1200                   strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
1201                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1202                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
1203                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1204                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
1205                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
1206                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
1207                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1208                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1209                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1210                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1211                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1212                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1213                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
1214                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
1215                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1216                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1217                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1218                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1219                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1220                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1221                   strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
1222                   strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
1223                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
1224                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
1226                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
1227                  ))) {
1228             call->dump();
1229             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1230           }
1231 #endif
1232           // Always process arraycopy's destination object since
1233           // we need to add all possible edges to references in
1234           // source object.
1235           if (arg_esc >= PointsToNode::ArgEscape &&
1236               !arg_is_arraycopy_dest) {
1237             continue;
1238           }
1239           PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1240           if (call->is_ArrayCopy()) {
1241             ArrayCopyNode* ac = call->as_ArrayCopy();
1242             if (ac->is_clonebasic() ||

1265           }
1266         }
1267       }
1268       break;
1269     }
1270     case Op_CallStaticJava: {
1271       // For a static call, we know exactly what method is being called.
1272       // Use bytecode estimator to record the call's escape effects.
1273 #ifdef ASSERT
1274       const char* name = call->as_CallStaticJava()->_name;
1275       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1276 #endif
1277       ciMethod* meth = call->as_CallJava()->method();
1278       if ((meth != nullptr) && meth->is_boxing_method()) {
1279         break; // Boxing methods do not modify any oops.
1280       }
1281       BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
1282       // fall-through if not a Java method or no analyzer information
1283       if (call_analyzer != nullptr) {
1284         PointsToNode* call_ptn = ptnode_adr(call->_idx);
1285         const TypeTuple* d = call->tf()->domain_cc();
1286         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1287           const Type* at = d->field_at(i);
1288           int k = i - TypeFunc::Parms;
1289           Node* arg = call->in(i);
1290           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1291           if (at->isa_ptr() != nullptr &&
1292               call_analyzer->is_arg_returned(k)) {
1293             // The call returns arguments.
1294             if (call_ptn != nullptr) { // Is call's result used?
1295               assert(call_ptn->is_LocalVar(), "node should be registered");
1296               assert(arg_ptn != nullptr, "node should be registered");
1297               add_edge(call_ptn, arg_ptn);
1298             }
1299           }
1300           if (at->isa_oopptr() != nullptr &&
1301               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1302             if (!call_analyzer->is_arg_stack(k)) {
1303               // The argument globally escapes.
1304               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1305             } else {

1309                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1310               }
1311             }
1312           }
1313         }
1314         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
1315           // The call returns arguments.
1316           assert(call_ptn->edge_count() > 0, "sanity");
1317           if (!call_analyzer->is_return_local()) {
1318             // It also returns an unknown object.
1319             add_edge(call_ptn, phantom_obj);
1320           }
1321         }
1322         break;
1323       }
1324     }
1325     default: {
1326       // We fall through to here if this is not a Java method or there is
1327       // no analyzer information, or for some other type of call; assume the
1328       // worst case: all arguments globally escape.
1329       const TypeTuple* d = call->tf()->domain_cc();
1330       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1331         const Type* at = d->field_at(i);
1332         if (at->isa_oopptr() != nullptr) {
1333           Node* arg = call->in(i);
1334           if (arg->is_AddP()) {
1335             arg = get_addp_base(arg);
1336           }
1337           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
1338           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1339         }
1340       }
1341     }
1342   }
1343 }
1344 
1345 
1346 // Finish Graph construction.
1347 bool ConnectionGraph::complete_connection_graph(
1348                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
1349                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,

1722     PointsToNode* base = i.get();
1723     if (base->is_JavaObject()) {
1724       // Skip Allocate's fields which will be processed later.
1725       if (base->ideal_node()->is_Allocate()) {
1726         return 0;
1727       }
1728       assert(base == null_obj, "only null ptr base expected here");
1729     }
1730   }
1731   if (add_edge(field, phantom_obj)) {
1732     // New edge was added
1733     new_edges++;
1734     add_field_uses_to_worklist(field);
1735   }
1736   return new_edges;
1737 }
1738 
1739 // Find fields initializing values for allocations.
1740 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
1741   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1742   PointsToNode* init_val = phantom_obj;
1743   Node* alloc = pta->ideal_node();
1744 
1745   // Do nothing for Allocate nodes since their field values are
1746   // "known" unless they are initialized by arraycopy/clone.
1747   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
1748     if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
1749       // Non-flattened inline type arrays are initialized with
1750       // the default value instead of null. Handle them here.
1751       init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
1752       assert(init_val != nullptr, "default value should be registered");
1753     } else {
1754       return 0;
1755     }
1756   }
1757   // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
1758   assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
1759 #ifdef ASSERT
1760   if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
1761     const char* name = alloc->as_CallStaticJava()->_name;
1762     assert(strncmp(name, "_multianewarray", 15) == 0 ||
1763            strncmp(name, "_load_unknown_inline", 20) == 0, "sanity");
1764   }
1765 #endif
1767   int new_edges = 0;
1768   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1769     PointsToNode* field = i.get();
1770     if (field->is_Field() && field->as_Field()->is_oop()) {
1771       if (add_edge(field, init_val)) {
1772         // New edge was added
1773         new_edges++;
1774         add_field_uses_to_worklist(field->as_Field());
1775       }
1776     }
1777   }
1778   return new_edges;
1779 }
1780 
1781 // Find fields initializing values for allocations.
1782 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* phase) {
1783   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1784   Node* alloc = pta->ideal_node();
1785   // Do nothing for Call nodes since their field values are unknown.
1786   if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
1787     return 0;
1788   }
1789   InitializeNode* ini = alloc->as_Allocate()->initialization();
1790   bool visited_bottom_offset = false;
1791   GrowableArray<int> offsets_worklist;
1792   int new_edges = 0;
1793 
1794   // Check if an oop field's initializing value is recorded and add
1795   // a corresponding null if the field's value is not recorded.
1796   // The Connection Graph does not record a default initialization by null
1797   // captured by an Initialize node.
1798   //
1799   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1800     PointsToNode* field = i.get(); // Field (AddP)
1801     if (!field->is_Field() || !field->as_Field()->is_oop()) {
1802       continue; // Not oop field
1803     }
1804     int offset = field->as_Field()->offset();
1805     if (offset == Type::OffsetBot) {
1806       if (!visited_bottom_offset) {

1852               } else {
1853                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1854                   tty->print_cr("----------init store has invalid value -----");
1855                   store->dump();
1856                   val->dump();
1857                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1858                 }
1859                 for (EdgeIterator j(val); j.has_next(); j.next()) {
1860                   PointsToNode* obj = j.get();
1861                   if (obj->is_JavaObject()) {
1862                     if (!field->points_to(obj->as_JavaObject())) {
1863                       missed_obj = obj;
1864                       break;
1865                     }
1866                   }
1867                 }
1868               }
1869               if (missed_obj != nullptr) {
1870                 tty->print_cr("----------field---------------------------------");
1871                 field->dump();
1872                 tty->print_cr("----------missed reference to object------------");
1873                 missed_obj->dump();
1874                 tty->print_cr("----------object referenced by init store-------");
1875                 store->dump();
1876                 val->dump();
1877                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1878               }
1879             }
1880 #endif
1881           } else {
1882             // There could be initializing stores which follow the allocation.
1883             // For example, a volatile field store is not collected
1884             // by the Initialize node.
1885             //
1886             // We would need to check for dependent loads to separate such stores
1887             // from stores which follow loads. For now, add the initial value null
1888             // so that the compare-pointers optimization works correctly.
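            // Hypothetical example (illustration, not from this source):
            //
            //   class A { volatile Object f; }
            //   A a = new A();
            //   a.f = x;   // volatile store follows the allocation and is
            //              // not captured by its Initialize node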
1889           }
1890         }
1891         if (value == nullptr) {
1892           // A field's initializing value was not recorded. Add null.
1893           if (add_edge(field, null_obj)) {
1894             // New edge was added

2120         assert(field->edge_count() > 0, "sanity");
2121       }
2122     }
2123   }
2124 }
2125 #endif
2126 
2127 // Optimize ideal graph.
2128 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
2129                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
2130   Compile* C = _compile;
2131   PhaseIterGVN* igvn = _igvn;
2132   if (EliminateLocks) {
2133     // Mark locks before changing ideal graph.
2134     int cnt = C->macro_count();
2135     for (int i = 0; i < cnt; i++) {
2136       Node *n = C->macro_node(i);
2137       if (n->is_AbstractLock()) { // Lock and Unlock nodes
2138         AbstractLockNode* alock = n->as_AbstractLock();
2139         if (!alock->is_non_esc_obj()) {
2140           const Type* obj_type = igvn->type(alock->obj_node());
2141           if (not_global_escape(alock->obj_node()) && !obj_type->is_inlinetypeptr()) {
2142             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
2143             // The lock could have been marked eliminated by the lock coarsening
2144             // code during the first IGVN pass before EA. Replace the coarsened flag
2145             // to eliminate all associated locks/unlocks.
2146 #ifdef ASSERT
2147             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
2148 #endif
2149             alock->set_non_esc_obj();
2150           }
2151         }
2152       }
2153     }
2154   }
2155 
2156   if (OptimizePtrCompare) {
2157     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
2158       Node *n = ptr_cmp_worklist.at(i);
2159       const TypeInt* tcmp = optimize_ptr_compare(n);
2160       if (tcmp->singleton()) {
2161         Node* cmp = igvn->makecon(tcmp);
2162 #ifndef PRODUCT
2163         if (PrintOptimizePtrCompare) {
2164           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
2165           if (Verbose) {
2166             n->dump(1);
2167           }
2168         }
2169 #endif
2170         igvn->replace_node(n, cmp);
2171       }
2172     }
2173   }
2174 
2175   // For MemBarStoreStore nodes added in library_call.cpp, check the
2176   // escape status of the associated AllocateNode and optimize out the
2177   // MemBarStoreStore node if the allocated object never escapes.
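  // Intuition (illustration only): the StoreStore barrier orders an object's
  // initializing stores before the object is published to other threads.
  // If EA proves the allocation never escapes its thread, no other thread
  // can observe the object, so the barrier is removed (inline type buffers)
  // or weakened to a MemBarCPUOrder below.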
2178   for (int i = 0; i < storestore_worklist.length(); i++) {
2179     Node* storestore = storestore_worklist.at(i);
2180     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
2181     if (alloc->is_Allocate() && not_global_escape(alloc)) {
2182       if (alloc->in(AllocateNode::InlineType) != nullptr) {
2183         // Non-escaping inline type buffer allocations don't require a membar
2184         storestore->as_MemBar()->remove(_igvn);
2185       } else {
2186         MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
2187         mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
2188         mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
2189         igvn->register_new_node_with_optimizer(mb);
2190         igvn->replace_node(storestore, mb);
2191       }
2192     }
2193   }
2194 }
2195 
2196 // Optimize object pointer compares.
2197 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
2198   assert(OptimizePtrCompare, "sanity");
2199   assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
2200   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
2201   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
2202   const TypeInt* UNKNOWN = TypeInt::CC;   // [-1, 0, 1]
2203 
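  // Illustration (hypothetical Java, not from this source):
  //
  //   Object a = new Object();   // non-escaping allocation A
  //   Object b = new Object();   // non-escaping allocation B
  //   if (a == b) ...            // CmpP folds to NE: A and B are distinct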
2204   PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
2205   PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
2206   JavaObjectNode* jobj1 = unique_java_object(n->in(1));
2207   JavaObjectNode* jobj2 = unique_java_object(n->in(2));
2208   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
2209   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
2210 
2211   // Check simple cases first.

2324   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
2325   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
2326   PointsToNode* ptadr = _nodes.at(n->_idx);
2327   if (ptadr != nullptr) {
2328     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
2329     return;
2330   }
2331   Compile* C = _compile;
2332   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
2333   map_ideal_node(n, ptadr);
2334   // Add edge from arraycopy node to source object.
2335   (void)add_edge(ptadr, src);
2336   src->set_arraycopy_src();
2337   // Add edge from destination object to arraycopy node.
2338   (void)add_edge(dst, ptadr);
2339   dst->set_arraycopy_dst();
2340 }
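// Resulting points-to shape (sketch):
//
//   dst JavaObject --> ArraycopyNode --> src JavaObject
//
// so that, during edge propagation, values reachable from src's fields
// also become reachable from dst's fields.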
2341 
2342 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2343   const Type* adr_type = n->as_AddP()->bottom_type();
2344   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
2345   BasicType bt = T_INT;
2346   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
2347     // Check only oop fields.
2348     if (!adr_type->isa_aryptr() ||
2349         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
2350         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
2351       // OffsetBot is used to reference an array's element. Ignore the first AddP.
2352       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
2353         bt = T_OBJECT;
2354       }
2355     }
2356   } else if (offset != oopDesc::klass_offset_in_bytes()) {
2357     if (adr_type->isa_instptr()) {
2358       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
2359       if (field != nullptr) {
2360         bt = field->layout_type();
2361       } else {
2362         // Check for unsafe oop field access
2363         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2364             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2365             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2366             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2367           bt = T_OBJECT;
2368           (*unsafe) = true;
2369         }
2370       }
2371     } else if (adr_type->isa_aryptr()) {
2372       if (offset == arrayOopDesc::length_offset_in_bytes()) {
2373         // Ignore array length load.
2374       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
2375         // Ignore first AddP.
2376       } else {
2377         const Type* elemtype = adr_type->is_aryptr()->elem();
2378         if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
2379           ciInlineKlass* vk = elemtype->inline_klass();
2380           field_offset += vk->first_field_offset();
2381           bt = vk->get_field_by_offset(field_offset, false)->layout_type();
2382         } else {
2383           bt = elemtype->array_element_basic_type();
2384         }
2385       }
2386     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2387       // Allocation initialization, ThreadLocal field access, unsafe access
2388       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
2389           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
2390           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
2391           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
2392         bt = T_OBJECT;
2393       }
2394     }
2395   }
2396   // Note: T_NARROWOOP is not classed as a real reference type
2397   return (is_reference_type(bt) || bt == T_NARROWOOP);
2398 }
2399 
2400 // Returns the unique pointed-to Java object or null.
2401 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2402   assert(!_collecting, "should not call when constructed graph");
2403   // If the node was created after the escape computation we can't answer.
2404   uint idx = n->_idx;

2548             return true;
2549           }
2550         }
2551       }
2552     }
2553   }
2554   return false;
2555 }
2556 
2557 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2558   const Type *adr_type = phase->type(adr);
2559   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
2560     // We are computing a raw address for a store captured by an Initialize
2561     // node; compute an appropriate address type. AddP cases #3 and #5 (see below).
2562     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2563     assert(offs != Type::OffsetBot ||
2564            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2565            "offset must be a constant or it is initialization of array");
2566     return offs;
2567   }
2568   return adr_type->is_ptr()->flattened_offset();


2569 }
2570 
2571 Node* ConnectionGraph::get_addp_base(Node *addp) {
2572   assert(addp->is_AddP(), "must be AddP");
2573   //
2574   // AddP cases for Base and Address inputs:
2575   // case #1. Direct object's field reference:
2576   //     Allocate
2577   //       |
2578   //     Proj #5 ( oop result )
2579   //       |
2580   //     CheckCastPP (cast to instance type)
2581   //      | |
2582   //     AddP  ( base == address )
2583   //
2584   // case #2. Indirect object's field reference:
2585   //      Phi
2586   //       |
2587   //     CastPP (cast to instance type)
2588   //      | |

2702   }
2703   return nullptr;
2704 }
2705 
2706 //
2707 // Adjust the type and inputs of an AddP which computes the
2708 // address of a field of an instance
2709 //
2710 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2711   PhaseGVN* igvn = _igvn;
2712   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2713   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
2714   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2715   if (t == nullptr) {
2716     // We are computing a raw address for a store captured by an Initialize
2717     // node; compute an appropriate address type (cases #3 and #5).
2718     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2719     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2720     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2721     assert(offs != Type::OffsetBot, "offset must be a constant");
2722     if (base_t->isa_aryptr() != nullptr) {
2723       // In the case of a flattened inline type array, each field has its
2724       // own slice so we need to extract the field being accessed from
2725       // the address computation
2726       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
2727     } else {
2728       t = base_t->add_offset(offs)->is_oopptr();
2729     }
2730   }
2731   int inst_id = base_t->instance_id();
2732   assert(!t->is_known_instance() || t->instance_id() == inst_id,
2733                              "old type must be non-instance or match new type");
2734 
2735   // The type 't' could be a subclass of 'base_t'.
2736   // As a result, t->offset() could be larger than base_t's size and it will
2737   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2738   // constructor verifies the correctness of the offset.
2739   //
2740   // It could happen on a subclass's branch (from type profiling
2741   // inlining) which was not eliminated during parsing since the exactness
2742   // of the allocation type was not propagated to the subclass type check.
2743   //
2744   // Or the type 't' might not be related to 'base_t' at all.
2745   // That can happen when the CHA type differs from the MDO type on a dead path
2746   // (for example, from an instanceof check) which is not collapsed during parsing.
2747   //
2748   // Do nothing for such an AddP node and don't process its users since
2749   // this code branch will go away.
2750   //
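  // Hypothetical example of such a dead branch (not from this source):
  //
  //   A a = new A();            // allocation type is exactly A
  //   if (a instanceof B) {     // B unrelated to A; branch is dead but
  //     ... ((B)a).field ...    // survives parsing, leaving an AddP whose
  //   }                         // type 't' is unrelated to 'base_t'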
2751   if (!t->is_known_instance() &&
2752       !base_t->maybe_java_subtype_of(t)) {
2753      return false; // bail out
2754   }
2755   const TypePtr* tinst = base_t->add_offset(t->offset());
2756   if (tinst->isa_aryptr() && t->isa_aryptr()) {
2757     // In the case of a flattened inline type array, each field has its
2758     // own slice so we need to keep track of the field being accessed.
2759     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
2760     // Keep array properties (not flat/null-free)
2761     tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
2762     if (tinst == nullptr) {
2763       return false; // Skip dead path with inconsistent properties
2764     }
2765   }
2766 
2767   // Do NOT remove the next line: it ensures a new alias index is allocated
2768   // for the instance type. Note: C++ will not remove it since the call
2769   // has a side effect.
2770   int alias_idx = _compile->get_alias_index(tinst);
2771   igvn->set_type(addp, tinst);
2772   // record the allocation in the node map
2773   set_map(addp, get_map(base->_idx));
2774   // Set addp's Base and Address to 'base'.
2775   Node *abase = addp->in(AddPNode::Base);
2776   Node *adr   = addp->in(AddPNode::Address);
2777   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2778       adr->in(0)->_idx == (uint)inst_id) {
2779     // Skip AddP cases #3 and #5.
2780   } else {
2781     assert(!abase->is_top(), "sanity"); // AddP case #3
2782     if (abase != base) {
2783       igvn->hash_delete(addp);
2784       addp->set_req(AddPNode::Base, base);
2785       if (abase == adr) {
2786         addp->set_req(AddPNode::Address, base);

3428         ptnode_adr(n->_idx)->dump();
3429         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
3430 #endif
3431         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3432         return;
3433       } else {
3434         Node *val = get_map(jobj->idx());   // CheckCastPP node
3435         TypeNode *tn = n->as_Type();
3436         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3437         assert(tinst != nullptr && tinst->is_known_instance() &&
3438                tinst->instance_id() == jobj->idx(), "instance type expected.");
3439 
3440         const Type *tn_type = igvn->type(tn);
3441         const TypeOopPtr *tn_t;
3442         if (tn_type->isa_narrowoop()) {
3443           tn_t = tn_type->make_ptr()->isa_oopptr();
3444         } else {
3445           tn_t = tn_type->isa_oopptr();
3446         }
3447         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
3448           if (tn_t->isa_aryptr()) {
3449             // Keep array properties (not flat/null-free)
3450             tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
3451             if (tinst == nullptr) {
3452               continue; // Skip dead path with inconsistent properties
3453             }
3454           }
3455           if (tn_type->isa_narrowoop()) {
3456             tn_type = tinst->make_narrowoop();
3457           } else {
3458             tn_type = tinst;
3459           }
3460           igvn->hash_delete(tn);
3461           igvn->set_type(tn, tn_type);
3462           tn->set_type(tn_type);
3463           igvn->hash_insert(tn);
3464           record_for_optimizer(n);
3465         } else {
3466           assert(tn_type == TypePtr::NULL_PTR ||
3467                  tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t),
3468                  "unexpected type");
3469           continue; // Skip dead path with different type
3470         }
3471       }
3472     } else {
3473       debug_only(n->dump();)
3474       assert(false, "EA: unexpected node");
3475       continue;
3476     }
3477     // push allocation's users on appropriate worklist
3478     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3479       Node *use = n->fast_out(i);
3480       if (use->is_Mem() && use->in(MemNode::Address) == n) {
3481         // Load/store to instance's field
3482         memnode_worklist.append_if_missing(use);
3483       } else if (use->is_MemBar()) {
3484         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3485           memnode_worklist.append_if_missing(use);
3486         }
3487       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3488         Node* addp2 = find_second_addp(use, n);
3489         if (addp2 != nullptr) {
3490           alloc_worklist.append_if_missing(addp2);
3491         }
3492         alloc_worklist.append_if_missing(use);
3493       } else if (use->is_Phi() ||
3494                  use->is_CheckCastPP() ||
3495                  use->is_EncodeNarrowPtr() ||
3496                  use->is_DecodeNarrowPtr() ||
3497                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3498         alloc_worklist.append_if_missing(use);
3499 #ifdef ASSERT
3500       } else if (use->is_Mem()) {
3501         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3502       } else if (use->is_MergeMem()) {
3503         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3504       } else if (use->is_SafePoint()) {
3505         // Look for MergeMem nodes for calls which reference unique allocation
3506         // (through CheckCastPP nodes) even for debug info.
3507         Node* m = use->in(TypeFunc::Memory);
3508         if (m->is_MergeMem()) {
3509           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3510         }
3511       } else if (use->Opcode() == Op_EncodeISOArray) {
3512         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3513           // EncodeISOArray overwrites destination array
3514           memnode_worklist.append_if_missing(use);
3515         }
3516       } else if (use->Opcode() == Op_Return) {
3517         // Allocation is referenced by field of returned inline type
3518         assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
3519       } else {
3520         uint op = use->Opcode();
3521         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3522             (use->in(MemNode::Memory) == n)) {
3523           // They overwrite the memory edge corresponding to the destination array.
3524           memnode_worklist.append_if_missing(use);
3525         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3526               op == Op_CastP2X || op == Op_StoreCM ||
3527               op == Op_FastLock || op == Op_AryEq ||
3528               op == Op_StrComp || op == Op_CountPositives ||
3529               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3530               op == Op_StrEquals || op == Op_VectorizedHashCode ||
3531               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3532               op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
3533               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3534           n->dump();
3535           use->dump();
3536           assert(false, "EA: missing allocation reference path");
3537         }
3538 #endif
3539       }
3540     }
3541 
3542   }
3543 
3544   // Go over all ArrayCopy nodes and if one of the inputs has a unique
3545   // type, record it in the ArrayCopy node so we know what memory this
3546   // node uses/modifies.
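       // For example (illustrative): if the destination of an ArrayCopy is a
       // unique, non-escaping allocation, recording its instance type lets
       // later passes tie the copy to that instance's memory slice instead of
       // treating it as touching every array.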
3547   for (int next = 0; next < arraycopy_worklist.length(); next++) {
3548     ArrayCopyNode* ac = arraycopy_worklist.at(next);
3549     Node* dest = ac->in(ArrayCopyNode::Dest);
3550     if (dest->is_AddP()) {
3551       dest = get_addp_base(dest);
3552     }

3582   if (memnode_worklist.length() == 0) {
3583     return;  // nothing to do
       }
3584   while (memnode_worklist.length() != 0) {
3585     Node *n = memnode_worklist.pop();
3586     if (visited.test_set(n->_idx)) {
3587       continue;
3588     }
3589     if (n->is_Phi() || n->is_ClearArray()) {
3590       // we don't need to do anything, but the users must be pushed
3591     } else if (n->is_MemBar()) { // Initialize and MemBar nodes
3592       // we don't need to do anything, but the users must be pushed
3593       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
3594       if (n == nullptr) {
3595         continue;
3596       }
3597     } else if (n->Opcode() == Op_StrCompressedCopy ||
3598                n->Opcode() == Op_EncodeISOArray) {
3599       // get the memory projection
3600       n = n->find_out_with(Op_SCMemProj);
3601       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
3602     } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
3603                strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
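           // store_unknown_inline is a runtime leaf call used by Valhalla to
           // store a value object into an array whose layout is not known
           // statically; continue from its memory projection as for the
           // intrinsics above.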
3604       n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
3605     } else {
3606       assert(n->is_Mem(), "memory node required.");
3607       Node *addr = n->in(MemNode::Address);
3608       const Type *addr_t = igvn->type(addr);
3609       if (addr_t == Type::TOP) {
3610         continue;
3611       }
3612       assert(addr_t->isa_ptr() != nullptr, "pointer type required.");
3613       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3614       assert((uint)alias_idx < new_index_end, "wrong alias index");
3615       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3616       if (_compile->failing()) {
3617         return;
3618       }
3619       if (mem != n->in(MemNode::Memory)) {
3620         // We delay the memory edge update since we need the old one in
3621         // the MergeMem code below when instance memory slices are separated.
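             // (set_map() only records the new memory state here; the actual
             // memory edge is rewritten in Phase 4 below.)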
3622         set_map(n, mem);
3623       }
3624       if (n->is_Load()) {

3627         // get the memory projection
3628         n = n->find_out_with(Op_SCMemProj);
3629         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
3630       }
3631     }
3632     // Push users on the appropriate worklist.
3633     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3634       Node *use = n->fast_out(i);
3635       if (use->is_Phi() || use->is_ClearArray()) {
3636         memnode_worklist.append_if_missing(use);
3637       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3638         if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
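               // (Card marks write to the card table, not to the instance,
               // so they never need an instance-specific memory slice.)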
3639           continue;
3640         }
3641         memnode_worklist.append_if_missing(use);
3642       } else if (use->is_MemBar()) {
3643         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3644           memnode_worklist.append_if_missing(use);
3645         }
3646 #ifdef ASSERT
3647       } else if (use->is_Mem()) {
3648         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3649       } else if (use->is_MergeMem()) {
3650         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3651       } else if (use->Opcode() == Op_EncodeISOArray) {
3652         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3653           // EncodeISOArray overwrites destination array
3654           memnode_worklist.append_if_missing(use);
3655         }
3656       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
3657                  strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
3658         // store_unknown_inline overwrites destination array
3659         memnode_worklist.append_if_missing(use);
3660       } else {
3661         uint op = use->Opcode();
3662         if ((use->in(MemNode::Memory) == n) &&
3663             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
3664           // They overwrite the memory edge corresponding to the destination array.
3665           memnode_worklist.append_if_missing(use);
3666         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
3667               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
3668               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3669               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
3670           n->dump();
3671           use->dump();
3672           assert(false, "EA: missing memory path");
3673         }
3674 #endif
3675       }
3676     }
3677   }
3678 
3679   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
3680   //            Walk each memory slice moving the first node encountered of each
3681   //            instance type to the input corresponding to its alias index.
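       //  Illustrative example (alias numbers invented): if alias index 7 is
       //  instance #5's field slice and nmm->memory_at(7) still returns the
       //  base memory, the code below searches the memory chain for the first
       //  node typed for instance #5 and installs it at input 7.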
3682   uint length = mergemem_worklist.length();
3683   for (uint next = 0; next < length; ++next) {
3684     MergeMemNode* nmm = mergemem_worklist.at(next);
3685     assert(!visited.test_set(nmm->_idx), "should not be visited before");
3686     // Note: we don't want to use MergeMemStream here because we only want to
3687     // scan inputs which exist at the start, not ones we add during processing.
3688     // Note 2: MergeMem may already contain instance memory slices added
3689     // during find_inst_mem() call when memory nodes were processed above.

3736       Node* result = step_through_mergemem(nmm, ni, tinst);
3737       if (result == nmm->base_memory()) {
3738         // Didn't find instance memory; search through the general slice recursively.
3739         result = nmm->memory_at(_compile->get_general_index(ni));
3740         result = find_inst_mem(result, ni, orig_phis);
3741         if (_compile->failing()) {
3742           return;
3743         }
3744         nmm->set_memory_at(ni, result);
3745       }
3746     }
3747     igvn->hash_insert(nmm);
3748     record_for_optimizer(nmm);
3749   }
3750 
3751   //  Phase 4:  Update the inputs of non-instance memory Phis and
3752   //            the Memory input of memnodes
3753   // First update the inputs of any non-instance Phi's from
3754   // which we split out an instance Phi.  Note we don't have
3755   // to recursively process Phi's encountered on the input memory
3756   // chains as is done in split_memory_phi() since they will
3757   // also be processed here.
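       // E.g. an input of an original Phi may still point into an instance's
       // private chain after splitting; find_inst_mem() walks past such nodes
       // to the memory state that belongs in the Phi's own alias slice.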
3758   for (int j = 0; j < orig_phis.length(); j++) {
3759     PhiNode *phi = orig_phis.at(j);
3760     int alias_idx = _compile->get_alias_index(phi->adr_type());
3761     igvn->hash_delete(phi);
3762     for (uint i = 1; i < phi->req(); i++) {
3763       Node *mem = phi->in(i);
3764       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
3765       if (_compile->failing()) {
3766         return;
3767       }
3768       if (mem != new_mem) {
3769         phi->set_req(i, new_mem);
3770       }
3771     }
3772     igvn->hash_insert(phi);
3773     record_for_optimizer(phi);
3774   }
3775 
3776   // Update the memory inputs of MemNodes with the value we computed