src/hotspot/share/opto/callnode.cpp (old version)

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "ci/bcEscapeAnalyzer.hpp"
  28 #include "compiler/oopMap.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/c2/barrierSetC2.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "opto/callGenerator.hpp"
  33 #include "opto/callnode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/escape.hpp"
  37 #include "opto/locknode.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/matcher.hpp"
  40 #include "opto/parse.hpp"
  41 #include "opto/regalloc.hpp"
  42 #include "opto/regmask.hpp"
  43 #include "opto/rootnode.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "utilities/powerOfTwo.hpp"
  47 #include "code/vmreg.hpp"
  48 
  49 // Portions of code courtesy of Clifford Click
  50 
  51 // Optimization - Graph Style
  52 
  53 //=============================================================================
  54 uint StartNode::size_of() const { return sizeof(*this); }
  55 bool StartNode::cmp( const Node &n ) const
  56 { return _domain == ((StartNode&)n)._domain; }
  57 const Type *StartNode::bottom_type() const { return _domain; }
  58 const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
  59 #ifndef PRODUCT
  60 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
  61 void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
  62 #endif
  63 
  64 //------------------------------Ideal------------------------------------------
  65 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  66   return remove_dead_region(phase, can_reshape) ? this : NULL;
  67 }
  68 
  69 //------------------------------calling_convention-----------------------------
  70 void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  71   SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
  72 }
  73 
  74 //------------------------------Registers--------------------------------------
  75 const RegMask &StartNode::in_RegMask(uint) const {
  76   return RegMask::Empty;
  77 }
  78 
  79 //------------------------------match------------------------------------------
  80 // Construct projections for incoming parameters, and their RegMask info
  81 Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  82   switch (proj->_con) {
  83   case TypeFunc::Control:
  84   case TypeFunc::I_O:
  85   case TypeFunc::Memory:
  86     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  87   case TypeFunc::FramePtr:
  88     return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  89   case TypeFunc::ReturnAdr:
  90     return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  91   case TypeFunc::Parms:
  92   default: {
  93       uint parm_num = proj->_con - TypeFunc::Parms;
  94       const Type *t = _domain->field_at(proj->_con);
  95       if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
  96         return new ConNode(Type::TOP);
  97       uint ideal_reg = t->ideal_reg();
  98       RegMask &rm = match->_calling_convention_mask[parm_num];
  99       return new MachProjNode(this,proj->_con,rm,ideal_reg);
 100     }
 101   }
 102   return NULL;
 103 }
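// Note on the cases above (a sketch, not normative): Control/I_O/Memory get
// "unmatched" projections, FramePtr and ReturnAdr get fixed register masks,
// and each incoming Java parameter becomes a MachProj over the register(s)
// assigned by the calling convention. The second half of a long/double
// parameter carries no independent value, so it is matched to a TOP constant.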
 104 
 105 //------------------------------StartOSRNode----------------------------------
 106 // The method start node for an on-stack replacement adapter
 107 
 108 //------------------------------osr_domain-----------------------------
 109 const TypeTuple *StartOSRNode::osr_domain() {
 110   const Type **fields = TypeTuple::fields(2);
 111   fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer
 112 
 113   return TypeTuple::make(TypeFunc::Parms+1, fields);
 114 }
 115 
 116 //=============================================================================
 117 const char * const ParmNode::names[TypeFunc::Parms+1] = {
 118   "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
 119 };
 120 
 121 #ifndef PRODUCT
 122 void ParmNode::dump_spec(outputStream *st) const {
 123   if( _con < TypeFunc::Parms ) {
 124     st->print("%s", names[_con]);
 125   } else {
 126     st->print("Parm%d: ",_con-TypeFunc::Parms);
 127     // Verbose and WizardMode dump bottom_type for all nodes
 128     if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
 129   }
 130 }
 131 
 132 void ParmNode::dump_compact_spec(outputStream *st) const {
 133   if (_con < TypeFunc::Parms) {
 134     st->print("%s", names[_con]);
 135   } else {

 478       if (cik->is_instance_klass()) {
 479         cik->print_name_on(st);
 480         iklass = cik->as_instance_klass();
 481       } else if (cik->is_type_array_klass()) {
 482         cik->as_array_klass()->base_element_type()->print_name_on(st);
 483         st->print("[%d]", spobj->n_fields());
 484       } else if (cik->is_obj_array_klass()) {
 485         ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
 486         if (cie->is_instance_klass()) {
 487           cie->print_name_on(st);
 488         } else if (cie->is_type_array_klass()) {
 489           cie->as_array_klass()->base_element_type()->print_name_on(st);
 490         } else {
 491           ShouldNotReachHere();
 492         }
 493         st->print("[%d]", spobj->n_fields());
 494         int ndim = cik->as_array_klass()->dimension() - 1;
 495         while (ndim-- > 0) {
 496           st->print("[]");
 497         }
 498       }
 499       st->print("={");
 500       uint nf = spobj->n_fields();
 501       if (nf > 0) {
 502         uint first_ind = spobj->first_index(mcall->jvms());
 503         Node* fld_node = mcall->in(first_ind);
 504         ciField* cifield;
 505         if (iklass != NULL) {
 506           st->print(" [");
 507           cifield = iklass->nonstatic_field_at(0);
 508           cifield->print_name_on(st);
 509           format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
 510         } else {
 511           format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
 512         }
 513         for (uint j = 1; j < nf; j++) {
 514           fld_node = mcall->in(first_ind+j);
 515           if (iklass != NULL) {
 516             st->print(", [");
 517             cifield = iklass->nonstatic_field_at(j);
 518             cifield->print_name_on(st);
 519             format_helper(regalloc, st, fld_node, ":", j, &scobjs);
 520           } else {
 521             format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
 522           }

 691 #ifndef PRODUCT
 692 void CallNode::dump_req(outputStream *st) const {
 693   // Dump the required inputs, enclosed in '(' and ')'
 694   uint i;                       // Exit value of loop
 695   for (i = 0; i < req(); i++) {    // For all required inputs
 696     if (i == TypeFunc::Parms) st->print("(");
 697     if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
 698     else st->print("_ ");
 699   }
 700   st->print(")");
 701 }
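// Example output (hypothetical node indices): " 1  5 (o12  7 _ )" where 'o'
// marks an input that lives outside the current compilation's node arena,
// '_' marks a NULL input, and '(' opens at the first real argument
// (TypeFunc::Parms).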
 702 
 703 void CallNode::dump_spec(outputStream *st) const {
 704   st->print(" ");
 705   if (tf() != NULL)  tf()->dump_on(st);
 706   if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
 707   if (jvms() != NULL)  jvms()->dump_spec(st);
 708 }
 709 #endif
 710 
 711 const Type *CallNode::bottom_type() const { return tf()->range(); }
 712 const Type* CallNode::Value(PhaseGVN* phase) const {
 713   if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
 714   return tf()->range();

 715 }
 716 
 717 //------------------------------calling_convention-----------------------------
 718 void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
 719   // Use the standard compiler calling convention
 720   SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
 721 }
 722 
 723 
 724 //------------------------------match------------------------------------------
 725 // Construct projections for control, I/O, memory-fields, ..., and
 726 // return result(s) along with their RegMask info
 727 Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
 728   switch (proj->_con) {
 729   case TypeFunc::Control:
 730   case TypeFunc::I_O:
 731   case TypeFunc::Memory:
 732     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
 733 
 734   case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
 735     assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
 736     // 2nd half of doubles and longs
 737     return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
 738 
 739   case TypeFunc::Parms: {       // Normal returns
 740     uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
 741     OptoRegPair regs = Opcode() == Op_CallLeafVector
 742       ? match->vector_return_value(ideal_reg)      // Calls into assembly vector routine
 743       : is_CallRuntime()
 744         ? match->c_return_value(ideal_reg)  // Calls into C runtime
 745         : match->  return_value(ideal_reg); // Calls into compiled Java code
 746     RegMask rm = RegMask(regs.first());
 747 
 748     if (Opcode() == Op_CallLeafVector) {
 749       // If the return is in vector, compute appropriate regmask taking into account the whole range
 750       if(ideal_reg >= Op_VecS && ideal_reg <= Op_VecZ) {
 751         if(OptoReg::is_valid(regs.second())) {
 752           for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
 753             rm.Insert(r);
 754           }
 755         }
 756       }
 757     }
 758 
 759     if( OptoReg::is_valid(regs.second()) )
 760       rm.Insert( regs.second() );
 761     return new MachProjNode(this,proj->_con,rm,ideal_reg);
 762   }
 763 
 764   case TypeFunc::ReturnAdr:
 765   case TypeFunc::FramePtr:
 766   default:
 767     ShouldNotReachHere();
 768   }
 769   return NULL;
 770 }
 771 
 772 // Do we Match on this edge index or not?  Match no edges
 773 uint CallNode::match_edge(uint idx) const {
 774   return 0;
 775 }
 776 
 777 //
 778 // Determine whether the call could modify the field of the specified
 779 // instance at the specified offset.
 780 //
 781 bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
 782   assert((t_oop != NULL), "sanity");
 783   if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
 784     const TypeTuple* args = _tf->domain();
 785     Node* dest = NULL;
 786     // Stubs that can be called once an ArrayCopyNode is expanded have
 787     // different signatures. Look for the second pointer argument,
 788     // which is the destination of the copy.
 789     for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 790       if (args->field_at(i)->isa_ptr()) {
 791         j++;
 792         if (j == 2) {
 793           dest = in(i);
 794           break;
 795         }
 796       }
 797     }
 798     guarantee(dest != NULL, "Call had only one ptr in, broken IR!");
 799     if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
 800       return true;
 801     }
 802     return false;
 803   }
 804   if (t_oop->is_known_instance()) {

 813       Node* proj = proj_out_or_null(TypeFunc::Parms);
 814       if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
 815         return false;
 816       }
 817     }
 818     if (is_CallJava() && as_CallJava()->method() != NULL) {
 819       ciMethod* meth = as_CallJava()->method();
 820       if (meth->is_getter()) {
 821         return false;
 822       }
 823       // May modify (by reflection) if a boxing object is passed
 824       // as an argument or returned.
 825       Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL;
 826       if (proj != NULL) {
 827         const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
 828         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 829                                  (inst_t->klass() == boxing_klass))) {
 830           return true;
 831         }
 832       }
 833       const TypeTuple* d = tf()->domain();
 834       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 835         const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
 836         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 837                                  (inst_t->klass() == boxing_klass))) {
 838           return true;
 839         }
 840       }
 841       return false;
 842     }
 843   }
 844   return true;
 845 }
 846 
 847 // Does this call have a direct reference to n other than debug information?
 848 bool CallNode::has_non_debug_use(Node *n) {
 849   const TypeTuple * d = tf()->domain();
 850   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 851     Node *arg = in(i);
 852     if (arg == n) {
 853       return true;
 854     }
 855   }
 856   return false;
 857 }
 858 
 859 // Returns the unique CheckCastPP of a call,
 860 // 'this' if there are several CheckCastPPs or unexpected uses,
 861 // or NULL if there is none.
 862 Node *CallNode::result_cast() {
 863   Node *cast = NULL;
 864 
 865   Node *p = proj_out_or_null(TypeFunc::Parms);
 866   if (p == NULL)
 867     return NULL;
 868 
 869   for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
 870     Node *use = p->fast_out(i);
 871     if (use->is_CheckCastPP()) {
 872       if (cast != NULL) {
 873         return this;  // more than 1 CheckCastPP
 874       }
 875       cast = use;
 876     } else if (!use->is_Initialize() &&
 877                !use->is_AddP() &&
 878                use->Opcode() != Op_MemBarStoreStore) {
 879       // Expected uses are restricted to a CheckCastPP, an Initialize
 880       // node, a MemBarStoreStore (clone) and AddP nodes. If we
 881       // encounter any other use (a Phi node can be seen in rare
 882       // cases) return this to prevent incorrect optimizations.
 883       return this;
 884     }
 885   }
 886   return cast;
 887 }
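// Shape of the uses accepted above (a sketch):
//
//   Call -> Proj(TypeFunc::Parms) -> CheckCastPP              (the unique cast)
//                                 -> Initialize/AddP/MemBarStoreStore (ignored)
//
// Any other user of the result projection (e.g. a Phi) makes the cast
// ambiguous, so 'this' is returned to block optimizations that would rely
// on a unique CheckCastPP.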
 888 
 889 
 890 void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
 891   projs->fallthrough_proj      = NULL;
 892   projs->fallthrough_catchproj = NULL;
 893   projs->fallthrough_ioproj    = NULL;
 894   projs->catchall_ioproj       = NULL;
 895   projs->catchall_catchproj    = NULL;
 896   projs->fallthrough_memproj   = NULL;
 897   projs->catchall_memproj      = NULL;
 898   projs->resproj               = NULL;
 899   projs->exobj                 = NULL;
 900 
 901   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 902     ProjNode *pn = fast_out(i)->as_Proj();
 903     if (pn->outcnt() == 0) continue;
 904     switch (pn->_con) {
 905     case TypeFunc::Control:
 906       {
 907         // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
 908         projs->fallthrough_proj = pn;
 909         const Node *cn = pn->unique_ctrl_out();
 910         if (cn != NULL && cn->is_Catch()) {
 911           ProjNode *cpn = NULL;
 912           for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
 913             cpn = cn->fast_out(k)->as_Proj();
 914             assert(cpn->is_CatchProj(), "must be a CatchProjNode");
 915             if (cpn->_con == CatchProjNode::fall_through_index)
 916               projs->fallthrough_catchproj = cpn;
 917             else {
 918               assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
 919               projs->catchall_catchproj = cpn;

 925     case TypeFunc::I_O:
 926       if (pn->_is_io_use)
 927         projs->catchall_ioproj = pn;
 928       else
 929         projs->fallthrough_ioproj = pn;
 930       for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
 931         Node* e = pn->out(j);
 932         if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
 933           assert(projs->exobj == NULL, "only one");
 934           projs->exobj = e;
 935         }
 936       }
 937       break;
 938     case TypeFunc::Memory:
 939       if (pn->_is_io_use)
 940         projs->catchall_memproj = pn;
 941       else
 942         projs->fallthrough_memproj = pn;
 943       break;
 944     case TypeFunc::Parms:
 945       projs->resproj = pn;
 946       break;
 947     default:
 948       assert(false, "unexpected projection from allocation node.");
 949     }
 950   }
 951 
 952   // The resproj may not exist because the result could be ignored
 953   // and the exception object may not exist if an exception handler
 954   // swallows the exception, but all the others must exist and be found.
 955   assert(projs->fallthrough_proj      != NULL, "must be found");
 956   do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
 957   assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
 958   assert(!do_asserts || projs->fallthrough_memproj   != NULL, "must be found");
 959   assert(!do_asserts || projs->fallthrough_ioproj    != NULL, "must be found");
 960   assert(!do_asserts || projs->catchall_catchproj    != NULL, "must be found");
 961   if (separate_io_proj) {
 962     assert(!do_asserts || projs->catchall_memproj    != NULL, "must be found");
 963     assert(!do_asserts || projs->catchall_ioproj     != NULL, "must be found");
 964   }
 965 }
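// Projection structure walked above (a sketch; the catch-all side exists
// only when the call has an exception edge):
//
//   Call -> Proj(Control) -> Catch -> CatchProj(fall_through_index)
//                                  -> CatchProj(catch_all_index)
//   Call -> Proj(I_O)     -> CreateEx (exception oop, at most one)
//   Call -> Proj(Memory),  Proj(TypeFunc::Parms) (result, may be absent)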
 966 
 967 Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
 968 #ifdef ASSERT
 969   // Validate attached generator
 970   CallGenerator* cg = generator();
 971   if (cg != NULL) {
 972     assert(is_CallStaticJava()  && cg->is_mh_late_inline() ||
 973            is_CallDynamicJava() && cg->is_virtual_late_inline(), "mismatch");
 974   }
 975 #endif // ASSERT
 976   return SafePointNode::Ideal(phase, can_reshape);
 977 }
 978 
 979 bool CallNode::is_call_to_arraycopystub() const {
 980   if (_name != NULL && strstr(_name, "arraycopy") != 0) {
 981     return true;
 982   }
 983   return false;
 984 }
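// A substring test is enough here (a heuristic, not an exact match): stub
// names such as "jint_disjoint_arraycopy", "checkcast_arraycopy" and
// "unsafe_arraycopy" all contain "arraycopy".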
 985 
 986 //=============================================================================
 987 uint CallJavaNode::size_of() const { return sizeof(*this); }
 988 bool CallJavaNode::cmp( const Node &n ) const {
 989   CallJavaNode &call = (CallJavaNode&)n;
 990   return CallNode::cmp(call) && _method == call._method &&
 991          _override_symbolic_info == call._override_symbolic_info;
 992 }
 993 
 994 void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
 995   // Copy debug information and adjust JVMState information
 996   uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain()->cnt() : (uint)TypeFunc::Parms+1;
 997   uint new_dbg_start = tf()->domain()->cnt();
 998   int jvms_adj  = new_dbg_start - old_dbg_start;
 999   assert (new_dbg_start == req(), "argument count mismatch");
1000   Compile* C = phase->C;
1001 
1002   // SafePointScalarObject node could be referenced several times in debug info.
1003   // Use Dict to record cloned nodes.
1004   Dict* sosn_map = new Dict(cmpkey,hashkey);
1005   for (uint i = old_dbg_start; i < sfpt->req(); i++) {
1006     Node* old_in = sfpt->in(i);
1007     // Clone old SafePointScalarObjectNodes, adjusting their field contents.
1008     if (old_in != NULL && old_in->is_SafePointScalarObject()) {
1009       SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
1010       bool new_node;
1011       Node* new_in = old_sosn->clone(sosn_map, new_node);
1012       if (new_node) { // New node?
1013         new_in->set_req(0, C->root()); // reset control edge
1014         new_in = phase->transform(new_in); // Register new node.
1015       }
1016       old_in = new_in;
1017     }
1018     add_req(old_in);
1019   }
1020 
1021   // JVMS may be shared so clone it before we modify it
1022   set_jvms(sfpt->jvms() != NULL ? sfpt->jvms()->clone_deep(C) : NULL);
1023   for (JVMState *jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
1024     jvms->set_map(this);
1025     jvms->set_locoff(jvms->locoff()+jvms_adj);
1026     jvms->set_stkoff(jvms->stkoff()+jvms_adj);
1027     jvms->set_monoff(jvms->monoff()+jvms_adj);
1028     jvms->set_scloff(jvms->scloff()+jvms_adj);
1029     jvms->set_endoff(jvms->endoff()+jvms_adj);
1030   }
1031 }
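// Worked example for the adjustment above (hypothetical counts): if the old
// debug info started at index 8 and the new call's domain has cnt() == 10,
// jvms_adj is +2, so every JVMState offset (locoff, stkoff, monoff, scloff,
// endoff) shifts right by 2 to point past the two extra argument edges.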
1032 
1033 #ifdef ASSERT
1034 bool CallJavaNode::validate_symbolic_info() const {
1035   if (method() == NULL) {
1036     return true; // call into runtime or uncommon trap
1037   }
1038   ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
1039   ciMethod* callee = method();
1040   if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
1041     assert(override_symbolic_info(), "should be set");
1042   }
1043   assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
1044   return true;
1045 }
1046 #endif
1047 
1048 #ifndef PRODUCT
1049 void CallJavaNode::dump_spec(outputStream* st) const {
1050   if( _method ) _method->print_short_name(st);
1051   CallNode::dump_spec(st);
1052 }
1053 
1054 void CallJavaNode::dump_compact_spec(outputStream* st) const {
1055   if (_method) {
1056     _method->print_short_name(st);
1057   } else {
1058     st->print("<?>");
1059   }
1060 }
1061 #endif
1062 
1063 //=============================================================================
1064 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
1065 bool CallStaticJavaNode::cmp( const Node &n ) const {
1066   CallStaticJavaNode &call = (CallStaticJavaNode&)n;
1067   return CallJavaNode::cmp(call);
1068 }
1069 
1070 Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1071   CallGenerator* cg = generator();
1072   if (can_reshape && cg != NULL) {
1073     assert(IncrementalInlineMH, "required");
1074     assert(cg->call_node() == this, "mismatch");
1075     assert(cg->is_mh_late_inline(), "not virtual");
1076 
1077     // Check whether this MH handle call becomes a candidate for inlining.
1078     ciMethod* callee = cg->method();
1079     vmIntrinsics::ID iid = callee->intrinsic_id();
1080     if (iid == vmIntrinsics::_invokeBasic) {
1081       if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
1082         phase->C->prepend_late_inline(cg);
1083         set_generator(NULL);
1084       }
1085     } else if (iid == vmIntrinsics::_linkToNative) {
1086       if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP /* NEP */
1087           && in(TypeFunc::Parms + 1)->Opcode() == Op_ConL /* address */) {
1088         phase->C->prepend_late_inline(cg);
1089         set_generator(NULL);
1090       }

1104 int CallStaticJavaNode::uncommon_trap_request() const {
1105   if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
1106     return extract_uncommon_trap_request(this);
1107   }
1108   return 0;
1109 }
1110 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
1111 #ifndef PRODUCT
1112   if (!(call->req() > TypeFunc::Parms &&
1113         call->in(TypeFunc::Parms) != NULL &&
1114         call->in(TypeFunc::Parms)->is_Con() &&
1115         call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
1116     assert(in_dump() != 0, "OK if dumping");
1117     tty->print("[bad uncommon trap]");
1118     return 0;
1119   }
1120 #endif
1121   return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
1122 }
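// The constant read above is a packed trap request that combines a
// Deoptimization reason and action (built by Deoptimization::make_trap_request);
// dump_spec() below decodes it with Deoptimization::format_trap_request.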
1123 
1124 #ifndef PRODUCT
1125 void CallStaticJavaNode::dump_spec(outputStream *st) const {
1126   st->print("# Static ");
1127   if (_name != NULL) {
1128     st->print("%s", _name);
1129     int trap_req = uncommon_trap_request();
1130     if (trap_req != 0) {
1131       char buf[100];
1132       st->print("(%s)",
1133                  Deoptimization::format_trap_request(buf, sizeof(buf),
1134                                                      trap_req));
1135     }
1136     st->print(" ");
1137   }
1138   CallJavaNode::dump_spec(st);
1139 }
1140 
1141 void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
1142   if (_method) {
1143     _method->print_short_name(st);

1213 #ifndef PRODUCT
1214 void CallRuntimeNode::dump_spec(outputStream *st) const {
1215   st->print("# ");
1216   st->print("%s", _name);
1217   CallNode::dump_spec(st);
1218 }
1219 #endif
1220 uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
1221 bool CallLeafVectorNode::cmp( const Node &n ) const {
1222   CallLeafVectorNode &call = (CallLeafVectorNode&)n;
1223   return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
1224 }
1225 
1226 //=============================================================================
1227 uint CallNativeNode::size_of() const { return sizeof(*this); }
1228 bool CallNativeNode::cmp( const Node &n ) const {
1229   CallNativeNode &call = (CallNativeNode&)n;
1230   return CallNode::cmp(call) && !strcmp(_name,call._name)
1231     && _arg_regs == call._arg_regs && _ret_regs == call._ret_regs;
1232 }
1233 Node* CallNativeNode::match(const ProjNode *proj, const Matcher *matcher) {
1234   switch (proj->_con) {
1235     case TypeFunc::Control:
1236     case TypeFunc::I_O:
1237     case TypeFunc::Memory:
1238       return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
1239     case TypeFunc::ReturnAdr:
1240     case TypeFunc::FramePtr:
1241       ShouldNotReachHere();
1242     case TypeFunc::Parms: {
1243       const Type* field_at_con = tf()->range()->field_at(proj->_con);
1244       const BasicType bt = field_at_con->basic_type();
1245       OptoReg::Name optoreg = OptoReg::as_OptoReg(_ret_regs.at(proj->_con - TypeFunc::Parms));
1246       OptoRegPair regs;
1247       if (bt == T_DOUBLE || bt == T_LONG) {
1248         regs.set2(optoreg);
1249       } else {
1250         regs.set1(optoreg);
1251       }
1252       RegMask rm = RegMask(regs.first());
1253       if(OptoReg::is_valid(regs.second()))
1254         rm.Insert(regs.second());
1255       return new MachProjNode(this, proj->_con, rm, field_at_con->ideal_reg());
1256     }
1257     case TypeFunc::Parms + 1: {
1258       assert(tf()->range()->field_at(proj->_con) == Type::HALF, "Expected HALF");
1259       assert(_ret_regs.at(proj->_con - TypeFunc::Parms) == VMRegImpl::Bad(), "Unexpected register for Type::HALF");
1260       // 2nd half of doubles and longs
1261       return new MachProjNode(this, proj->_con, RegMask::Empty, (uint) OptoReg::Bad);
1262     }
1263     default:
1264       ShouldNotReachHere();
1265   }
1266   return NULL;
1267 }
1268 #ifndef PRODUCT
1269 void CallNativeNode::print_regs(const GrowableArray<VMReg>& regs, outputStream* st) {
1270   st->print("{ ");
1271   for (int i = 0; i < regs.length(); i++) {
1272     regs.at(i)->print_on(st);
1273     if (i < regs.length() - 1) {
1274       st->print(", ");
1275     }
1276   }
1277   st->print(" } ");
1278 }
1279 
1280 void CallNativeNode::dump_spec(outputStream *st) const {
1281   st->print("# ");
1282   st->print("%s ", _name);
1283   st->print("_arg_regs: ");
1284   print_regs(_arg_regs, st);
1285   st->print("_ret_regs: ");
1286   print_regs(_ret_regs, st);
1287   CallNode::dump_spec(st);
1288 }
1289 #endif
1290 
1291 //------------------------------calling_convention-----------------------------
1292 void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
1293   SharedRuntime::c_calling_convention(sig_bt, parm_regs, /*regs2=*/nullptr, argcnt);
1294 }
1295 
1296 void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1297 #ifdef ASSERT
1298   assert(tf()->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1299          "return vector size must match");
1300   const TypeTuple* d = tf()->domain();
1301   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1302     Node* arg = in(i);
1303     assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1304            "vector argument size must match");
1305   }
1306 #endif
1307 
1308   SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
1309 }
1310 
1311 void CallNativeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1312   assert((tf()->domain()->cnt() - TypeFunc::Parms) == argcnt, "arg counts must match!");
1313 #ifdef ASSERT
1314   for (uint i = 0; i < argcnt; i++) {
1315     assert(tf()->domain()->field_at(TypeFunc::Parms + i)->basic_type() == sig_bt[i], "types must match!");
1316   }
1317 #endif
1318   for (uint i = 0; i < argcnt; i++) {
1319     switch (sig_bt[i]) {
1320       case T_BOOLEAN:
1321       case T_CHAR:
1322       case T_BYTE:
1323       case T_SHORT:
1324       case T_INT:
1325       case T_FLOAT:
1326         parm_regs[i].set1(_arg_regs.at(i));
1327         break;
1328       case T_LONG:
1329       case T_DOUBLE:
1330         assert((i + 1) < argcnt && sig_bt[i + 1] == T_VOID, "expecting half");
1331         parm_regs[i].set2(_arg_regs.at(i));
1332         break;
1333       case T_VOID: // Halves of longs and doubles
1334         assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
1335         assert(_arg_regs.at(i) == VMRegImpl::Bad(), "expecting bad reg");

1338       default:
1339         ShouldNotReachHere();
1340         break;
1341     }
1342   }
1343 }
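// Example for the loop above (hypothetical signature): for sig_bt =
// {T_LONG, T_VOID, T_INT}, slot 0 gets a register pair via set2(), slot 1 is
// the long's second half and is expected to carry VMRegImpl::Bad(), and
// slot 2 gets a single register via set1().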
1344 
1345 //=============================================================================
1346 //------------------------------calling_convention-----------------------------
1347 
1348 
1349 //=============================================================================
1350 #ifndef PRODUCT
1351 void CallLeafNode::dump_spec(outputStream *st) const {
1352   st->print("# ");
1353   st->print("%s", _name);
1354   CallNode::dump_spec(st);
1355 }
1356 #endif
1357 
1358 //=============================================================================
1359 
1360 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
1361   assert(verify_jvms(jvms), "jvms must match");
1362   int loc = jvms->locoff() + idx;
1363   if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
1364   // If the current local at idx is top, then local idx - 1 could
1365   // be a long/double that needs to be killed, since top could
1366   // represent the 2nd half of the long/double.
1367     uint ideal = in(loc -1)->ideal_reg();
1368     if (ideal == Op_RegD || ideal == Op_RegL) {
1369       // set other (low index) half to top
1370       set_req(loc - 1, in(loc));
1371     }
1372   }
1373   set_req(loc, c);
1374 }
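// Example for the kill above (hypothetical locals): a long in local 2
// occupies locals 2..3, with local 3 held as top. Storing an int into
// local 3 clobbers the long's second half, so local 2 is also set to top;
// a half without its mate would describe garbage in the debug info.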
1375 
1376 uint SafePointNode::size_of() const { return sizeof(*this); }
1377 bool SafePointNode::cmp( const Node &n ) const {

1561 }
1562 
1563 //==============  SafePointScalarObjectNode  ==============
1564 
1565 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
1566 #ifdef ASSERT
1567                                                      Node* alloc,
1568 #endif
1569                                                      uint first_index,
1570                                                      uint n_fields,
1571                                                      bool is_auto_box) :
1572   TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
1573   _first_index(first_index),
1574   _n_fields(n_fields),
1575   _is_auto_box(is_auto_box)
1576 #ifdef ASSERT
1577   , _alloc(alloc)
1578 #endif
1579 {
1580 #ifdef ASSERT
1581   if (!alloc->is_Allocate()
1582       && !(alloc->Opcode() == Op_VectorBox)
1583       && (!alloc->is_CallStaticJava() || !alloc->as_CallStaticJava()->is_boxing_method())) {
1584     alloc->dump();
1585     assert(false, "unexpected call node");
1586   }
1587 #endif
1588   init_class_id(Class_SafePointScalarObject);
1589 }
1590 
1591 // Do not allow value-numbering for SafePointScalarObject node.
1592 uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1593 bool SafePointScalarObjectNode::cmp( const Node &n ) const {
1594   return (&n == this); // Always fail except on self
1595 }
1596 
1597 uint SafePointScalarObjectNode::ideal_reg() const {
1598   return 0; // No matching to machine instruction
1599 }
1600 
1601 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {

1620   new_node = true;
1621   SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
1622   sosn_map->Insert((void*)this, (void*)res);
1623   return res;
1624 }
1625 
1626 
1627 #ifndef PRODUCT
1628 void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
1629   st->print(" # fields@[%d..%d]", first_index(),
1630              first_index() + n_fields() - 1);
1631 }
1632 
1633 #endif
1634 
1635 //=============================================================================
1636 uint AllocateNode::size_of() const { return sizeof(*this); }
1637 
1638 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1639                            Node *ctrl, Node *mem, Node *abio,
1640                            Node *size, Node *klass_node, Node *initial_test)
1641   : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
1642 {
1643   init_class_id(Class_Allocate);
1644   init_flags(Flag_is_macro);
1645   _is_scalar_replaceable = false;
1646   _is_non_escaping = false;
1647   _is_allocation_MemBar_redundant = false;
1648   Node *topnode = C->top();
1649 
1650   init_req( TypeFunc::Control  , ctrl );
1651   init_req( TypeFunc::I_O      , abio );
1652   init_req( TypeFunc::Memory   , mem );
1653   init_req( TypeFunc::ReturnAdr, topnode );
1654   init_req( TypeFunc::FramePtr , topnode );
1655   init_req( AllocSize          , size);
1656   init_req( KlassNode          , klass_node);
1657   init_req( InitialTest        , initial_test);
1658   init_req( ALength            , topnode);
1659   C->add_macro_node(this);
1660 }
1661 
1662 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
1663 {
1664   assert(initializer != NULL &&
1665          initializer->is_initializer() &&
1666          !initializer->is_static(),
1667              "unexpected initializer method");
1668   BCEscapeAnalyzer* analyzer = initializer->get_bcea();
1669   if (analyzer == NULL) {
1670     return;
1671   }
1672 
1673   // The allocation node is the first parameter of its initializer
1674   if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
1675     _is_allocation_MemBar_redundant = true;
1676   }
1677 }
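// Rationale for the check above: if escape analysis of the constructor shows
// the receiver (argument 0) stays arg-local or arg-stack, no other thread
// can observe the object while its fields are being written, so the memory
// barrier emitted after allocation is redundant.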
1678 Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
1679   Node* mark_node = NULL;
1680   // For now only enable fast locking for non-array types
1681   mark_node = phase->MakeConX(markWord::prototype().value());
1682   return mark_node;
1683 }
1684 
1685 //=============================================================================
1686 Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1687   if (remove_dead_region(phase, can_reshape))  return this;
1688   // Don't bother trying to transform a dead node
1689   if (in(0) && in(0)->is_top())  return NULL;
1690 
1691   const Type* type = phase->type(Ideal_length());
1692   if (type->isa_int() && type->is_int()->_hi < 0) {
1693     if (can_reshape) {
1694       PhaseIterGVN *igvn = phase->is_IterGVN();
1695       // Unreachable fall-through path (negative array length);
1696       // the allocation can only throw, so disconnect it.
1697       Node* proj = proj_out_or_null(TypeFunc::Control);
1698       Node* catchproj = NULL;
1699       if (proj != NULL) {
1700         for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
1701           Node *cn = proj->fast_out(i);
1702           if (cn->is_Catch()) {
1703             catchproj = cn->as_Multi()->proj_out_or_null(CatchProjNode::fall_through_index);
1704             break;
1705           }
1706         }
1707       }

2096       this->collect_nodes_in_all_data(in_rel, true);
2097     }
2098     this->collect_nodes(out_rel, -2, false, false);
2099 }
2100 #endif
2101 
2102 //=============================================================================
2103 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2104 
2105   // perform any generic optimizations first (returns 'this' or NULL)
2106   Node *result = SafePointNode::Ideal(phase, can_reshape);
2107   if (result != NULL)  return result;
2108   // Don't bother trying to transform a dead node
2109   if (in(0) && in(0)->is_top())  return NULL;
2110 
2111   // Now see if we can optimize away this lock.  We don't actually
2112   // remove the locking here, we simply set the _eliminate flag which
2113   // prevents macro expansion from expanding the lock.  Since we don't
2114   // modify the graph, the value returned from this function is the
2115   // one computed above.
2116   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
2117     //
2118     // If we are locking a non-escaped object, the lock/unlock is unnecessary
2119     //
2120     ConnectionGraph *cgr = phase->C->congraph();
2121     if (cgr != NULL && cgr->not_global_escape(obj_node())) {
2122       assert(!is_eliminated() || is_coarsened(), "sanity");
2123       // The lock could be marked eliminated by lock coarsening
2124       // code during the first IGVN pass, before EA. Replace the coarsened
2125       // flag so that all associated locks/unlocks are eliminated.
2126 #ifdef ASSERT
2127       this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
2128 #endif
2129       this->set_non_esc_obj();
2130       return result;
2131     }
2132 
2133     if (!phase->C->do_locks_coarsening()) {
2134       return result; // Compiling without locks coarsening
2135     }
2136     //

2292 }
2293 
2294 //=============================================================================
2295 uint UnlockNode::size_of() const { return sizeof(*this); }
2296 
2297 //=============================================================================
2298 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2299 
2300   // perform any generic optimizations first (returns 'this' or NULL)
2301   Node *result = SafePointNode::Ideal(phase, can_reshape);
2302   if (result != NULL)  return result;
2303   // Don't bother trying to transform a dead node
2304   if (in(0) && in(0)->is_top())  return NULL;
2305 
2306   // Now see if we can optimize away this unlock.  We don't actually
2307   // remove the unlocking here, we simply set the _eliminate flag which
2308   // prevents macro expansion from expanding the unlock.  Since we don't
2309   // modify the graph, the value returned from this function is the
2310   // one computed above.
2311   // Escape state is defined after Parse phase.
2312   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
2313     //
2314     // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
2315     //
2316     ConnectionGraph *cgr = phase->C->congraph();
2317     if (cgr != NULL && cgr->not_global_escape(obj_node())) {
2318       assert(!is_eliminated() || is_coarsened(), "sanity");
2319       // The lock could be marked eliminated by lock coarsening
2320       // code during the first IGVN pass, before EA. Replace the coarsened
2321       // flag so that all associated locks/unlocks are eliminated.
2322 #ifdef ASSERT
2323       this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
2324 #endif
2325       this->set_non_esc_obj();
2326     }
2327   }
2328   return result;
2329 }
2330 
2331 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock)  const {
2332   if (C == NULL) {

2372     }
2373     // unrelated
2374     return false;
2375   }
2376 
2377   if (dest_t->isa_aryptr()) {
2378     // arraycopy or array clone
2379     if (t_oop->isa_instptr()) {
2380       return false;
2381     }
2382     if (!t_oop->isa_aryptr()) {
2383       return true;
2384     }
2385 
2386     const Type* elem = dest_t->is_aryptr()->elem();
2387     if (elem == Type::BOTTOM) {
2388       // An array, but we don't know what the elements are
2389       return true;
2390     }
2391 
2392     dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
2393     uint dest_alias = phase->C->get_alias_index(dest_t);
2394     uint t_oop_alias = phase->C->get_alias_index(t_oop);
2395 
2396     return dest_alias == t_oop_alias;
2397   }
2398 
2399   return true;
2400 }

src/hotspot/share/opto/callnode.cpp (new version)

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "ci/ciFlatArrayKlass.hpp"
  28 #include "ci/bcEscapeAnalyzer.hpp"
  29 #include "compiler/oopMap.hpp"
  30 #include "gc/shared/barrierSet.hpp"
  31 #include "gc/shared/c2/barrierSetC2.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "opto/callGenerator.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/escape.hpp"
  38 #include "opto/inlinetypenode.hpp"
  39 #include "opto/locknode.hpp"
  40 #include "opto/machnode.hpp"
  41 #include "opto/matcher.hpp"
  42 #include "opto/parse.hpp"
  43 #include "opto/regalloc.hpp"
  44 #include "opto/regmask.hpp"
  45 #include "opto/rootnode.hpp"
  46 #include "opto/runtime.hpp"
  47 #include "runtime/sharedRuntime.hpp"
  48 #include "runtime/stubRoutines.hpp"
  49 #include "utilities/powerOfTwo.hpp"
  50 #include "code/vmreg.hpp"
  51 
  52 // Portions of code courtesy of Clifford Click
  53 
  54 // Optimization - Graph Style
  55 
  56 //=============================================================================
  57 uint StartNode::size_of() const { return sizeof(*this); }
  58 bool StartNode::cmp( const Node &n ) const
  59 { return _domain == ((StartNode&)n)._domain; }
  60 const Type *StartNode::bottom_type() const { return _domain; }
  61 const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
  62 #ifndef PRODUCT
  63 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
  64 void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
  65 #endif
  66 
  67 //------------------------------Ideal------------------------------------------
  68 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  69   return remove_dead_region(phase, can_reshape) ? this : NULL;
  70 }
  71 
  72 //------------------------------calling_convention-----------------------------
  73 void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  74   SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
  75 }
  76 
  77 //------------------------------Registers--------------------------------------
  78 const RegMask &StartNode::in_RegMask(uint) const {
  79   return RegMask::Empty;
  80 }
  81 
  82 //------------------------------match------------------------------------------
  83 // Construct projections for incoming parameters, and their RegMask info
  84 Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  85   switch (proj->_con) {
  86   case TypeFunc::Control:
  87   case TypeFunc::I_O:
  88   case TypeFunc::Memory:
  89     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  90   case TypeFunc::FramePtr:
  91     return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  92   case TypeFunc::ReturnAdr:
  93     return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  94   case TypeFunc::Parms:
  95   default: {
  96       uint parm_num = proj->_con - TypeFunc::Parms;
  97       const Type *t = _domain->field_at(proj->_con);
  98       if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
  99         return new ConNode(Type::TOP);
 100       uint ideal_reg = t->ideal_reg();
 101       RegMask &rm = match->_calling_convention_mask[parm_num];
 102       return new MachProjNode(this,proj->_con,rm,ideal_reg);
 103     }
 104   }
 105   return NULL;
 106 }
 107 
 108 //=============================================================================
 109 const char * const ParmNode::names[TypeFunc::Parms+1] = {
 110   "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
 111 };
 112 
 113 #ifndef PRODUCT
 114 void ParmNode::dump_spec(outputStream *st) const {
 115   if( _con < TypeFunc::Parms ) {
 116     st->print("%s", names[_con]);
 117   } else {
 118     st->print("Parm%d: ",_con-TypeFunc::Parms);
 119     // Verbose and WizardMode dump bottom_type for all nodes
 120     if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
 121   }
 122 }
 123 
 124 void ParmNode::dump_compact_spec(outputStream *st) const {
 125   if (_con < TypeFunc::Parms) {
 126     st->print("%s", names[_con]);
 127   } else {

 470       if (cik->is_instance_klass()) {
 471         cik->print_name_on(st);
 472         iklass = cik->as_instance_klass();
 473       } else if (cik->is_type_array_klass()) {
 474         cik->as_array_klass()->base_element_type()->print_name_on(st);
 475         st->print("[%d]", spobj->n_fields());
 476       } else if (cik->is_obj_array_klass()) {
 477         ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
 478         if (cie->is_instance_klass()) {
 479           cie->print_name_on(st);
 480         } else if (cie->is_type_array_klass()) {
 481           cie->as_array_klass()->base_element_type()->print_name_on(st);
 482         } else {
 483           ShouldNotReachHere();
 484         }
 485         st->print("[%d]", spobj->n_fields());
 486         int ndim = cik->as_array_klass()->dimension() - 1;
 487         while (ndim-- > 0) {
 488           st->print("[]");
 489         }
 490       } else if (cik->is_flat_array_klass()) {
 491         ciKlass* cie = cik->as_flat_array_klass()->base_element_klass();
 492         cie->print_name_on(st);
 493         st->print("[%d]", spobj->n_fields());
 494         int ndim = cik->as_array_klass()->dimension() - 1;
 495         while (ndim-- > 0) {
 496           st->print("[]");
 497         }
 498       }
 499       st->print("={");
 500       uint nf = spobj->n_fields();
 501       if (nf > 0) {
 502         uint first_ind = spobj->first_index(mcall->jvms());
 503         if (iklass != NULL && iklass->is_inlinetype()) {
 504           Node* init_node = mcall->in(first_ind++);
 505           if (!init_node->is_top()) {
 506             st->print(" [is_init");
 507             format_helper(regalloc, st, init_node, ":", -1, NULL);
 508           }
 509         }
 510         Node* fld_node = mcall->in(first_ind);
 511         ciField* cifield;
 512         if (iklass != NULL) {
 513           st->print(" [");
 514           cifield = iklass->nonstatic_field_at(0);
 515           cifield->print_name_on(st);
 516           format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
 517         } else {
 518           format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
 519         }
 520         for (uint j = 1; j < nf; j++) {
 521           fld_node = mcall->in(first_ind+j);
 522           if (iklass != NULL) {
 523             st->print(", [");
 524             cifield = iklass->nonstatic_field_at(j);
 525             cifield->print_name_on(st);
 526             format_helper(regalloc, st, fld_node, ":", j, &scobjs);
 527           } else {
 528             format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
 529           }

 698 #ifndef PRODUCT
 699 void CallNode::dump_req(outputStream *st) const {
 700   // Dump the required inputs, enclosed in '(' and ')'
 701   uint i;                       // Exit value of loop
 702   for (i = 0; i < req(); i++) {    // For all required inputs
 703     if (i == TypeFunc::Parms) st->print("(");
 704     if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
 705     else st->print("_ ");
 706   }
 707   st->print(")");
 708 }
 709 
 710 void CallNode::dump_spec(outputStream *st) const {
 711   st->print(" ");
 712   if (tf() != NULL)  tf()->dump_on(st);
 713   if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
 714   if (jvms() != NULL)  jvms()->dump_spec(st);
 715 }
 716 #endif
 717 
 718 const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
 719 const Type* CallNode::Value(PhaseGVN* phase) const {
 720   if (!in(0) || phase->type(in(0)) == Type::TOP) {
 721     return Type::TOP;
 722   }
 723   return tf()->range_cc();
 724 }
 725 
 726 //------------------------------calling_convention-----------------------------
 727 void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
 728   if (_entry_point == StubRoutines::store_inline_type_fields_to_buf()) {
 729     // The call to that stub is a special case: its inputs are
 730     // multiple values returned from a call and so it should follow
 731     // the return convention.
 732     SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
 733     return;
 734   }
 735   // Use the standard compiler calling convention
 736   SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
 737 }
 738 
 739 
 740 //------------------------------match------------------------------------------
 741 // Construct projections for control, I/O, memory-fields, ..., and
 742 // return result(s) along with their RegMask info
 743 Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
 744   uint con = proj->_con;
 745   const TypeTuple* range_cc = tf()->range_cc();
 746   if (con >= TypeFunc::Parms) {
 747     if (tf()->returns_inline_type_as_fields()) {
 748       // The call returns multiple values (inline type fields): we
 749       // create one projection per returned value.
 750       assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return");
 751       uint ideal_reg = range_cc->field_at(con)->ideal_reg();
 752       return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
 753     } else {
 754       if (con == TypeFunc::Parms) {
 755         uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
 756         OptoRegPair regs = Opcode() == Op_CallLeafVector
 757           ? match->vector_return_value(ideal_reg)      // Calls into assembly vector routine
 758           : match->c_return_value(ideal_reg);
 759         RegMask rm = RegMask(regs.first());
 760 
 761         if (Opcode() == Op_CallLeafVector) {
 762           // If the return is in vector, compute appropriate regmask taking into account the whole range
 763           if(ideal_reg >= Op_VecS && ideal_reg <= Op_VecZ) {
 764             if(OptoReg::is_valid(regs.second())) {
 765               for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
 766                 rm.Insert(r);
 767               }
 768             }
 769           }
 770         }
 771 
 772         if (OptoReg::is_valid(regs.second())) {
 773           rm.Insert(regs.second());
 774         }
 775         return new MachProjNode(this,con,rm,ideal_reg);
 776       } else {
 777         assert(con == TypeFunc::Parms+1, "only one return value");
 778         assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
 779         return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
 780       }
 781     }
 782   }
 783 
 784   switch (con) {
 785   case TypeFunc::Control:
 786   case TypeFunc::I_O:
 787   case TypeFunc::Memory:
 788     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
 789 
 790   case TypeFunc::ReturnAdr:
 791   case TypeFunc::FramePtr:
 792   default:
 793     ShouldNotReachHere();
 794   }
 795   return NULL;
 796 }
 797 
 798 // Do we Match on this edge index or not?  Match no edges
 799 uint CallNode::match_edge(uint idx) const {
 800   return 0;
 801 }
 802 
 803 //
 804 // Determine whether the call could modify the field of the specified
 805 // instance at the specified offset.
 806 //
 807 bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
 808   assert((t_oop != NULL), "sanity");
 809   if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
 810     const TypeTuple* args = _tf->domain_sig();
 811     Node* dest = NULL;
 812     // Stubs that can be called once an ArrayCopyNode is expanded have
 813     // different signatures. Look for the second pointer argument,
 814     // which is the destination of the copy.
 815     for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 816       if (args->field_at(i)->isa_ptr()) {
 817         j++;
 818         if (j == 2) {
 819           dest = in(i);
 820           break;
 821         }
 822       }
 823     }
 824     guarantee(dest != NULL, "Call had only one ptr in, broken IR!");
 825     if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
 826       return true;
 827     }
 828     return false;
 829   }
 830   if (t_oop->is_known_instance()) {

 839       Node* proj = proj_out_or_null(TypeFunc::Parms);
 840       if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
 841         return false;
 842       }
 843     }
 844     if (is_CallJava() && as_CallJava()->method() != NULL) {
 845       ciMethod* meth = as_CallJava()->method();
 846       if (meth->is_getter()) {
 847         return false;
 848       }
 849       // May modify (by reflection) if a boxing object is passed
 850       // as an argument or returned.
 851       Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL;
 852       if (proj != NULL) {
 853         const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
 854         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 855                                  (inst_t->klass() == boxing_klass))) {
 856           return true;
 857         }
 858       }
 859       const TypeTuple* d = tf()->domain_cc();
 860       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 861         const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
 862         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 863                                  (inst_t->klass() == boxing_klass))) {
 864           return true;
 865         }
 866       }
 867       return false;
 868     }
 869   }
 870   return true;
 871 }
 872 
 873 // Does this call have a direct reference to n other than debug information?
 874 bool CallNode::has_non_debug_use(Node* n) {
 875   const TypeTuple* d = tf()->domain_cc();
 876   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 877     if (in(i) == n) {
 878       return true;
 879     }
 880   }
 881   return false;
 882 }
 883 
 884 bool CallNode::has_debug_use(Node* n) {
 885   if (jvms() != NULL) {
 886     for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
 887       if (in(i) == n) {
 888         return true;
 889       }
 890     }
 891   }
 892   return false;
 893 }
 894 
 895 // Returns the unique CheckCastPP of a call
 896 // or 'this' if there are several CheckCastPPs or unexpected uses
 897 // or NULL if there is none.
 898 Node *CallNode::result_cast() {
 899   Node *cast = NULL;
 900 
 901   Node *p = proj_out_or_null(TypeFunc::Parms);
 902   if (p == NULL)
 903     return NULL;
 904 
 905   for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
 906     Node *use = p->fast_out(i);
 907     if (use->is_CheckCastPP()) {
 908       if (cast != NULL) {
 909         return this;  // more than 1 CheckCastPP
 910       }
 911       cast = use;
 912     } else if (!use->is_Initialize() &&
 913                !use->is_AddP() &&
 914                use->Opcode() != Op_MemBarStoreStore) {
 915       // Expected uses are restricted to a CheckCastPP, an Initialize
 916       // node, a MemBarStoreStore (clone) and AddP nodes. If we
 917       // encounter any other use (a Phi node can be seen in rare
 918       // cases) return this to prevent incorrect optimizations.
 919       return this;
 920     }
 921   }
 922   return cast;
 923 }
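// Hedged usage sketch ('call' is a hypothetical CallNode*): the three
// possible results are usually handled separately:
//
//   Node* cast = call->result_cast();
//   if (cast == NULL) {
//     // no CheckCastPP: the result is not used through a cast
//   } else if (cast == call) {
//     // several CheckCastPPs or an unexpected use: stay conservative
//   } else {
//     // 'cast' is the unique CheckCastPP of the call's result
//   }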
 924 
 925 
 926 CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) {
 927   uint max_res = TypeFunc::Parms-1;
 928   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 929     ProjNode *pn = fast_out(i)->as_Proj();
 930     max_res = MAX2(max_res, pn->_con);
 931   }
 932 
 933   assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");
 934 
 935   uint projs_size = sizeof(CallProjections);
 936   if (max_res > TypeFunc::Parms) {
 937     projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
 938   }
 939   char* projs_storage = resource_allocate_bytes(projs_size);
 940   CallProjections* projs = new (projs_storage) CallProjections(max_res - TypeFunc::Parms + 1);
 941 
 942   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 943     ProjNode *pn = fast_out(i)->as_Proj();
 944     if (pn->outcnt() == 0) continue;
 945     switch (pn->_con) {
 946     case TypeFunc::Control:
 947       {
 948         // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
 949         projs->fallthrough_proj = pn;
 950         const Node *cn = pn->unique_ctrl_out();
 951         if (cn != NULL && cn->is_Catch()) {
 952           ProjNode *cpn = NULL;
 953           for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
 954             cpn = cn->fast_out(k)->as_Proj();
 955             assert(cpn->is_CatchProj(), "must be a CatchProjNode");
 956             if (cpn->_con == CatchProjNode::fall_through_index)
 957               projs->fallthrough_catchproj = cpn;
 958             else {
 959               assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
 960               projs->catchall_catchproj = cpn;

 966     case TypeFunc::I_O:
 967       if (pn->_is_io_use)
 968         projs->catchall_ioproj = pn;
 969       else
 970         projs->fallthrough_ioproj = pn;
 971       for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
 972         Node* e = pn->out(j);
 973         if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
 974           assert(projs->exobj == NULL, "only one");
 975           projs->exobj = e;
 976         }
 977       }
 978       break;
 979     case TypeFunc::Memory:
 980       if (pn->_is_io_use)
 981         projs->catchall_memproj = pn;
 982       else
 983         projs->fallthrough_memproj = pn;
 984       break;
 985     case TypeFunc::Parms:
 986       projs->resproj[0] = pn;
 987       break;
 988     default:
 989       assert(pn->_con <= max_res, "unexpected projection from allocation node.");
 990       projs->resproj[pn->_con-TypeFunc::Parms] = pn;
 991       break;
 992     }
 993   }
 994 
 995   // The resproj may not exist because the result could be ignored
 996   // and the exception object may not exist if an exception handler
 997   // swallows the exception, but all the others must exist and be found.
 998   do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
 999   assert(!do_asserts || projs->fallthrough_proj      != NULL, "must be found");
1000   assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
1001   assert(!do_asserts || projs->fallthrough_memproj   != NULL, "must be found");
1002   assert(!do_asserts || projs->fallthrough_ioproj    != NULL, "must be found");
1003   assert(!do_asserts || projs->catchall_catchproj    != NULL, "must be found");
1004   if (separate_io_proj) {
1005     assert(!do_asserts || projs->catchall_memproj    != NULL, "must be found");
1006     assert(!do_asserts || projs->catchall_ioproj     != NULL, "must be found");
1007   }
1008   return projs;
1009 }
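// Hedged usage sketch ('call', 'igvn' and 'new_ctrl' are hypothetical):
// code that rewires a call, e.g. during late inlining, collects the
// projections once and then replaces them one by one:
//
//   CallProjections* projs = call->extract_projections(false, false);
//   if (projs->fallthrough_catchproj != NULL) {
//     igvn->replace_node(projs->fallthrough_catchproj, new_ctrl);
//   }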
1010 
1011 Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1012 #ifdef ASSERT
1013   // Validate attached generator
1014   CallGenerator* cg = generator();
1015   if (cg != NULL) {
1016     assert((is_CallStaticJava()  && cg->is_mh_late_inline()) ||
1017            (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
1018   }
1019 #endif // ASSERT
1020   return SafePointNode::Ideal(phase, can_reshape);
1021 }
1022 
1023 bool CallNode::is_call_to_arraycopystub() const {
1024   if (_name != NULL && strstr(_name, "arraycopy") != NULL) {
1025     return true;
1026   }
1027   return false;
1028 }
1029 
1030 //=============================================================================
1031 uint CallJavaNode::size_of() const { return sizeof(*this); }
1032 bool CallJavaNode::cmp( const Node &n ) const {
1033   CallJavaNode &call = (CallJavaNode&)n;
1034   return CallNode::cmp(call) && _method == call._method &&
1035          _override_symbolic_info == call._override_symbolic_info;
1036 }
1037 
1038 void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
1039   // Copy debug information and adjust JVMState information
1040   uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain_sig()->cnt() : (uint)TypeFunc::Parms+1;
1041   uint new_dbg_start = tf()->domain_sig()->cnt();
1042   int jvms_adj  = new_dbg_start - old_dbg_start;
1043   assert (new_dbg_start == req(), "argument count mismatch");
1044   Compile* C = phase->C;
1045 
1046   // SafePointScalarObject node could be referenced several times in debug info.
1047   // Use Dict to record cloned nodes.
1048   Dict* sosn_map = new Dict(cmpkey,hashkey);
1049   for (uint i = old_dbg_start; i < sfpt->req(); i++) {
1050     Node* old_in = sfpt->in(i);
1051     // Clone old SafePointScalarObjectNodes, adjusting their field contents.
1052     if (old_in != NULL && old_in->is_SafePointScalarObject()) {
1053       SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
1054       bool new_node;
1055       Node* new_in = old_sosn->clone(sosn_map, new_node);
1056       if (new_node) { // New node?
1057         new_in->set_req(0, C->root()); // reset control edge
1058         new_in = phase->transform(new_in); // Register new node.
1059       }
1060       old_in = new_in;
1061     }
1062     add_req(old_in);
1063   }
1064 
1065   // JVMS may be shared so clone it before we modify it
1066   set_jvms(sfpt->jvms() != NULL ? sfpt->jvms()->clone_deep(C) : NULL);
1067   for (JVMState *jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
1068     jvms->set_map(this);
1069     jvms->set_locoff(jvms->locoff()+jvms_adj);
1070     jvms->set_stkoff(jvms->stkoff()+jvms_adj);
1071     jvms->set_monoff(jvms->monoff()+jvms_adj);
1072     jvms->set_scloff(jvms->scloff()+jvms_adj);
1073     jvms->set_endoff(jvms->endoff()+jvms_adj);
1074   }
1075 }
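// Hedged note: jvms_adj above is just the difference between the new
// and old debug-info start offsets. Debug info always begins right
// after the call's fixed arguments, so moving state from a call with
// N arguments to one with N+2 shifts locoff/stkoff/monoff/scloff/
// endoff all by +2.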
1076 
1077 #ifdef ASSERT
1078 bool CallJavaNode::validate_symbolic_info() const {
1079   if (method() == NULL) {
1080     return true; // call into runtime or uncommon trap
1081   }
1082   Bytecodes::Code bc = jvms()->method()->java_code_at_bci(jvms()->bci());
1083   if (EnableValhalla && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
1084     return true;
1085   }
1086   ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
1087   ciMethod* callee = method();
1088   if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
1089     assert(override_symbolic_info(), "should be set");
1090   }
1091   assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
1092   return true;
1093 }
1094 #endif
1095 
1096 #ifndef PRODUCT
1097 void CallJavaNode::dump_spec(outputStream* st) const {
1098   if( _method ) _method->print_short_name(st);
1099   CallNode::dump_spec(st);
1100 }
1101 
1102 void CallJavaNode::dump_compact_spec(outputStream* st) const {
1103   if (_method) {
1104     _method->print_short_name(st);
1105   } else {
1106     st->print("<?>");
1107   }
1108 }
1109 #endif
1110 
1111 //=============================================================================
1112 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
1113 bool CallStaticJavaNode::cmp( const Node &n ) const {
1114   CallStaticJavaNode &call = (CallStaticJavaNode&)n;
1115   return CallJavaNode::cmp(call);
1116 }
1117 
1118 Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1119   if (can_reshape && uncommon_trap_request() != 0) {
1120     if (remove_useless_allocation(phase, in(0), in(TypeFunc::Memory), in(TypeFunc::Parms))) {
1121       if (!in(0)->is_Region()) {
1122         PhaseIterGVN* igvn = phase->is_IterGVN();
1123         igvn->replace_input_of(this, 0, phase->C->top());
1124       }
1125       return this;
1126     }
1127   }
1128 
1129   CallGenerator* cg = generator();
1130   if (can_reshape && cg != NULL) {
1131     assert(IncrementalInlineMH, "required");
1132     assert(cg->call_node() == this, "mismatch");
1133     assert(cg->is_mh_late_inline(), "not virtual");
1134 
1135     // Check whether this MH call becomes a candidate for inlining.
1136     ciMethod* callee = cg->method();
1137     vmIntrinsics::ID iid = callee->intrinsic_id();
1138     if (iid == vmIntrinsics::_invokeBasic) {
1139       if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
1140         phase->C->prepend_late_inline(cg);
1141         set_generator(NULL);
1142       }
1143     } else if (iid == vmIntrinsics::_linkToNative) {
1144       if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP /* NEP */
1145           && in(TypeFunc::Parms + 1)->Opcode() == Op_ConL /* address */) {
1146         phase->C->prepend_late_inline(cg);
1147         set_generator(NULL);
1148       }

1162 int CallStaticJavaNode::uncommon_trap_request() const {
1163   if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
1164     return extract_uncommon_trap_request(this);
1165   }
1166   return 0;
1167 }
1168 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
1169 #ifndef PRODUCT
1170   if (!(call->req() > TypeFunc::Parms &&
1171         call->in(TypeFunc::Parms) != NULL &&
1172         call->in(TypeFunc::Parms)->is_Con() &&
1173         call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
1174     assert(in_dump() != 0, "OK if dumping");
1175     tty->print("[bad uncommon trap]");
1176     return 0;
1177   }
1178 #endif
1179   return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
1180 }
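// For reference (hedged, relying on the Deoptimization API as it
// exists elsewhere in HotSpot): the constant decoded above packs a
// deoptimization reason and action, built along the lines of
//
//   int req = Deoptimization::make_trap_request(
//                 Deoptimization::Reason_unreached,
//                 Deoptimization::Action_reinterpret);
//
// and unpacked with Deoptimization::trap_request_reason(req) and
// Deoptimization::trap_request_action(req).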
1181 
1182 bool CallStaticJavaNode::remove_useless_allocation(PhaseGVN *phase, Node* ctl, Node* mem, Node* unc_arg) {
 1183   // Split-if can cause the flattened array branch of an array load to
 1184   // end in an uncommon trap. In that case, the allocation of the
 1185   // loaded value and its initialization are useless. Eliminate them:
 1186   // use the JVM state of the allocation to create a new uncommon trap
 1187   // call at the load.
1188   if (ctl == NULL || ctl->is_top() || mem == NULL || mem->is_top() || !mem->is_MergeMem()) {
1189     return false;
1190   }
1191   PhaseIterGVN* igvn = phase->is_IterGVN();
1192   if (ctl->is_Region()) {
1193     bool res = false;
1194     for (uint i = 1; i < ctl->req(); i++) {
1195       MergeMemNode* mm = mem->clone()->as_MergeMem();
1196       for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
1197         Node* m = mms.memory();
1198         if (m->is_Phi() && m->in(0) == ctl) {
1199           mms.set_memory(m->in(i));
1200         }
1201       }
1202       if (remove_useless_allocation(phase, ctl->in(i), mm, unc_arg)) {
1203         res = true;
1204         if (!ctl->in(i)->is_Region()) {
1205           igvn->replace_input_of(ctl, i, phase->C->top());
1206         }
1207       }
1208       igvn->remove_dead_node(mm);
1209     }
1210     return res;
1211   }
1212   // verify the control flow is ok
1213   Node* call = ctl;
1214   MemBarNode* membar = NULL;
1215   for (;;) {
1216     if (call == NULL || call->is_top()) {
1217       return false;
1218     }
1219     if (call->is_Proj() || call->is_Catch() || call->is_MemBar()) {
1220       call = call->in(0);
1221     } else if (call->Opcode() == Op_CallStaticJava &&
1222                call->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
1223       assert(call->in(0)->is_Proj() && call->in(0)->in(0)->is_MemBar(), "missing membar");
1224       membar = call->in(0)->in(0)->as_MemBar();
1225       break;
1226     } else {
1227       return false;
1228     }
1229   }
1230 
1231   JVMState* jvms = call->jvms();
1232   if (phase->C->too_many_traps(jvms->method(), jvms->bci(), Deoptimization::trap_request_reason(uncommon_trap_request()))) {
1233     return false;
1234   }
1235 
1236   Node* alloc_mem = call->in(TypeFunc::Memory);
1237   if (alloc_mem == NULL || alloc_mem->is_top()) {
1238     return false;
1239   }
1240   if (!alloc_mem->is_MergeMem()) {
1241     alloc_mem = MergeMemNode::make(alloc_mem);
1242     igvn->register_new_node_with_optimizer(alloc_mem);
1243   }
1244 
1245   // and that there's no unexpected side effect
1246   for (MergeMemStream mms2(mem->as_MergeMem(), alloc_mem->as_MergeMem()); mms2.next_non_empty2(); ) {
1247     Node* m1 = mms2.is_empty() ? mms2.base_memory() : mms2.memory();
1248     Node* m2 = mms2.memory2();
1249 
 1250     for (uint i = 0; i < 100; i++) { // bounded walk up the memory graph
1251       if (m1 == m2) {
1252         break;
1253       } else if (m1->is_Proj()) {
1254         m1 = m1->in(0);
1255       } else if (m1->is_MemBar()) {
1256         m1 = m1->in(TypeFunc::Memory);
1257       } else if (m1->Opcode() == Op_CallStaticJava &&
1258                  m1->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
1259         if (m1 != call) {
1260           return false;
1261         }
1262         break;
1263       } else if (m1->is_MergeMem()) {
1264         MergeMemNode* mm = m1->as_MergeMem();
1265         int idx = mms2.alias_idx();
1266         if (idx == Compile::AliasIdxBot) {
1267           m1 = mm->base_memory();
1268         } else {
1269           m1 = mm->memory_at(idx);
1270         }
1271       } else {
1272         return false;
1273       }
1274     }
1275   }
1276   if (alloc_mem->outcnt() == 0) {
1277     igvn->remove_dead_node(alloc_mem);
1278   }
1279 
1280   // Remove membar preceding the call
1281   membar->remove(igvn);
1282 
1283   address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
1284   CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap", NULL);
1285   unc->init_req(TypeFunc::Control, call->in(0));
1286   unc->init_req(TypeFunc::I_O, call->in(TypeFunc::I_O));
1287   unc->init_req(TypeFunc::Memory, call->in(TypeFunc::Memory));
1288   unc->init_req(TypeFunc::FramePtr,  call->in(TypeFunc::FramePtr));
1289   unc->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
1290   unc->init_req(TypeFunc::Parms+0, unc_arg);
1291   unc->set_cnt(PROB_UNLIKELY_MAG(4));
1292   unc->copy_call_debug_info(igvn, call->as_CallStaticJava());
1293 
1294   igvn->replace_input_of(call, 0, phase->C->top());
1295 
1296   igvn->register_new_node_with_optimizer(unc);
1297 
1298   Node* ctrl = phase->transform(new ProjNode(unc, TypeFunc::Control));
1299   Node* halt = phase->transform(new HaltNode(ctrl, call->in(TypeFunc::FramePtr), "uncommon trap returned which should never happen"));
1300   phase->C->root()->add_req(halt);
1301 
1302   return true;
1303 }
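// Hedged summary of the rewiring above: the load_unknown_inline call
// is cut off from control (its input 0 becomes top) and a fresh
// "uncommon_trap" CallStaticJavaNode takes over its non-debug inputs.
// Since an uncommon trap never returns normally, the new call's
// control projection is terminated by a HaltNode hung off the root.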
1304 
1305 
1306 #ifndef PRODUCT
1307 void CallStaticJavaNode::dump_spec(outputStream *st) const {
1308   st->print("# Static ");
1309   if (_name != NULL) {
1310     st->print("%s", _name);
1311     int trap_req = uncommon_trap_request();
1312     if (trap_req != 0) {
1313       char buf[100];
1314       st->print("(%s)",
1315                  Deoptimization::format_trap_request(buf, sizeof(buf),
1316                                                      trap_req));
1317     }
1318     st->print(" ");
1319   }
1320   CallJavaNode::dump_spec(st);
1321 }
1322 
1323 void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
1324   if (_method) {
1325     _method->print_short_name(st);

1395 #ifndef PRODUCT
1396 void CallRuntimeNode::dump_spec(outputStream *st) const {
1397   st->print("# ");
1398   st->print("%s", _name);
1399   CallNode::dump_spec(st);
1400 }
1401 #endif
1402 uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
1403 bool CallLeafVectorNode::cmp( const Node &n ) const {
1404   CallLeafVectorNode &call = (CallLeafVectorNode&)n;
1405   return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
1406 }
1407 
1408 //=============================================================================
1409 uint CallNativeNode::size_of() const { return sizeof(*this); }
1410 bool CallNativeNode::cmp( const Node &n ) const {
1411   CallNativeNode &call = (CallNativeNode&)n;
1412   return CallNode::cmp(call) && !strcmp(_name,call._name)
1413     && _arg_regs == call._arg_regs && _ret_regs == call._ret_regs;
1414 }
1415 Node* CallNativeNode::match(const ProjNode *proj, const Matcher *matcher, const RegMask* mask) {
1416   switch (proj->_con) {
1417     case TypeFunc::Control:
1418     case TypeFunc::I_O:
1419     case TypeFunc::Memory:
1420       return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
1421     case TypeFunc::ReturnAdr:
1422     case TypeFunc::FramePtr:
1423       ShouldNotReachHere();
1424     case TypeFunc::Parms: {
1425       const Type* field_at_con = tf()->range_sig()->field_at(proj->_con);
1426       const BasicType bt = field_at_con->basic_type();
1427       OptoReg::Name optoreg = OptoReg::as_OptoReg(_ret_regs.at(proj->_con - TypeFunc::Parms));
1428       OptoRegPair regs;
1429       if (bt == T_DOUBLE || bt == T_LONG) {
1430         regs.set2(optoreg);
1431       } else {
1432         regs.set1(optoreg);
1433       }
1434       RegMask rm = RegMask(regs.first());
 1435       if (OptoReg::is_valid(regs.second()))
1436         rm.Insert(regs.second());
1437       return new MachProjNode(this, proj->_con, rm, field_at_con->ideal_reg());
1438     }
1439     case TypeFunc::Parms + 1: {
1440       assert(tf()->range_sig()->field_at(proj->_con) == Type::HALF, "Expected HALF");
1441       assert(_ret_regs.at(proj->_con - TypeFunc::Parms) == VMRegImpl::Bad(), "Unexpected register for Type::HALF");
1442       // 2nd half of doubles and longs
1443       return new MachProjNode(this, proj->_con, RegMask::Empty, (uint) OptoReg::Bad);
1444     }
1445     default:
1446       ShouldNotReachHere();
1447   }
1448   return NULL;
1449 }
1450 #ifndef PRODUCT
1451 void CallNativeNode::print_regs(const GrowableArray<VMReg>& regs, outputStream* st) {
1452   st->print("{ ");
1453   for (int i = 0; i < regs.length(); i++) {
1454     regs.at(i)->print_on(st);
1455     if (i < regs.length() - 1) {
1456       st->print(", ");
1457     }
1458   }
1459   st->print(" } ");
1460 }
1461 
1462 void CallNativeNode::dump_spec(outputStream *st) const {
1463   st->print("# ");
1464   st->print("%s ", _name);
1465   st->print("_arg_regs: ");
1466   print_regs(_arg_regs, st);
1467   st->print("_ret_regs: ");
1468   print_regs(_ret_regs, st);
1469   CallNode::dump_spec(st);
1470 }
1471 #endif
1472 
1473 //------------------------------calling_convention-----------------------------
1474 void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
1475   if (_entry_point == NULL) {
 1476     // A call with a null entry point is a special case: its inputs
 1477     // are multiple values returned from another call, so it must
 1478     // follow the return convention.
1479     SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
1480     return;
1481   }
1482   SharedRuntime::c_calling_convention(sig_bt, parm_regs, /*regs2=*/nullptr, argcnt);
1483 }
1484 
1485 void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1486 #ifdef ASSERT
1487   assert(tf()->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1488          "return vector size must match");
1489   const TypeTuple* d = tf()->domain_sig();
1490   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1491     Node* arg = in(i);
1492     assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1493            "vector argument size must match");
1494   }
1495 #endif
1496 
1497   SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
1498 }
1499 
1500 void CallNativeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1501   assert((tf()->domain_sig()->cnt() - TypeFunc::Parms) == argcnt, "arg counts must match!");
1502 #ifdef ASSERT
1503   for (uint i = 0; i < argcnt; i++) {
1504     assert(tf()->domain_sig()->field_at(TypeFunc::Parms + i)->basic_type() == sig_bt[i], "types must match!");
1505   }
1506 #endif
1507   for (uint i = 0; i < argcnt; i++) {
1508     switch (sig_bt[i]) {
1509       case T_BOOLEAN:
1510       case T_CHAR:
1511       case T_BYTE:
1512       case T_SHORT:
1513       case T_INT:
1514       case T_FLOAT:
1515         parm_regs[i].set1(_arg_regs.at(i));
1516         break;
1517       case T_LONG:
1518       case T_DOUBLE:
1519         assert((i + 1) < argcnt && sig_bt[i + 1] == T_VOID, "expecting half");
1520         parm_regs[i].set2(_arg_regs.at(i));
1521         break;
1522       case T_VOID: // Halves of longs and doubles
1523         assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
1524         assert(_arg_regs.at(i) == VMRegImpl::Bad(), "expecting bad reg");

1527       default:
1528         ShouldNotReachHere();
1529         break;
1530     }
1531   }
1532 }
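// Hedged example: for a native call taking a single long, the inputs
// to the loop above would look roughly like
//
//   sig_bt    = { T_LONG, T_VOID }
//   _arg_regs = { <first integer arg register>, VMRegImpl::Bad() }
//
// so parm_regs[0] is set with set2() on that register and the T_VOID
// slot merely represents the long's second half.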
1533 
1534 //=============================================================================
1535 //------------------------------calling_convention-----------------------------
1536 
1537 
1538 //=============================================================================
1539 #ifndef PRODUCT
1540 void CallLeafNode::dump_spec(outputStream *st) const {
1541   st->print("# ");
1542   st->print("%s", _name);
1543   CallNode::dump_spec(st);
1544 }
1545 #endif
1546 
1547 uint CallLeafNoFPNode::match_edge(uint idx) const {
1548   // Null entry point is a special case for which the target is in a
1549   // register. Need to match that edge.
1550   return entry_point() == NULL && idx == TypeFunc::Parms;
1551 }
1552 
1553 //=============================================================================
1554 
1555 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
1556   assert(verify_jvms(jvms), "jvms must match");
1557   int loc = jvms->locoff() + idx;
 1558   if (in(loc)->is_top() && idx > 0 && !c->is_top()) {
 1559     // If the current local idx is top then local idx - 1 could
 1560     // be a long/double that needs to be killed since top could
 1561     // represent the 2nd half of the long/double.
 1562     uint ideal = in(loc - 1)->ideal_reg();
1563     if (ideal == Op_RegD || ideal == Op_RegL) {
1564       // set other (low index) half to top
1565       set_req(loc - 1, in(loc));
1566     }
1567   }
1568   set_req(loc, c);
1569 }
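// Hedged example ('map' is a hypothetical SafePointNode*): if local 0
// holds a long and local 1 its top half, overwriting local 1 with an
// int must also kill local 0, or the debug info would still claim a
// live long there:
//
//   map->set_local(jvms, 1, int_value);  // also sets local 0 to top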
1570 
1571 uint SafePointNode::size_of() const { return sizeof(*this); }
1572 bool SafePointNode::cmp( const Node &n ) const {

1756 }
1757 
1758 //==============  SafePointScalarObjectNode  ==============
1759 
1760 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
1761 #ifdef ASSERT
1762                                                      Node* alloc,
1763 #endif
1764                                                      uint first_index,
1765                                                      uint n_fields,
1766                                                      bool is_auto_box) :
1767   TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
1768   _first_index(first_index),
1769   _n_fields(n_fields),
1770   _is_auto_box(is_auto_box)
1771 #ifdef ASSERT
1772   , _alloc(alloc)
1773 #endif
1774 {
1775 #ifdef ASSERT
1776   if (alloc != NULL && !alloc->is_Allocate()
1777       && !(alloc->Opcode() == Op_VectorBox)
1778       && (!alloc->is_CallStaticJava() || !alloc->as_CallStaticJava()->is_boxing_method())) {
1779     alloc->dump();
1780     assert(false, "unexpected call node");
1781   }
1782 #endif
1783   init_class_id(Class_SafePointScalarObject);
1784 }
1785 
1786 // Do not allow value-numbering for SafePointScalarObject node.
1787 uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1788 bool SafePointScalarObjectNode::cmp( const Node &n ) const {
1789   return (&n == this); // Always fail except on self
1790 }
1791 
1792 uint SafePointScalarObjectNode::ideal_reg() const {
1793   return 0; // No matching to machine instruction
1794 }
1795 
1796 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {

1815   new_node = true;
1816   SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
1817   sosn_map->Insert((void*)this, (void*)res);
1818   return res;
1819 }
1820 
1821 
1822 #ifndef PRODUCT
1823 void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
1824   st->print(" # fields@[%d..%d]", first_index(),
1825              first_index() + n_fields() - 1);
1826 }
1827 
1828 #endif
1829 
1830 //=============================================================================
1831 uint AllocateNode::size_of() const { return sizeof(*this); }
1832 
1833 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1834                            Node *ctrl, Node *mem, Node *abio,
1835                            Node *size, Node *klass_node,
1836                            Node* initial_test,
1837                            InlineTypeBaseNode* inline_type_node)
1838   : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
1839 {
1840   init_class_id(Class_Allocate);
1841   init_flags(Flag_is_macro);
1842   _is_scalar_replaceable = false;
1843   _is_non_escaping = false;
1844   _is_allocation_MemBar_redundant = false;
1845   _larval = false;
1846   Node *topnode = C->top();
1847 
1848   init_req( TypeFunc::Control  , ctrl );
1849   init_req( TypeFunc::I_O      , abio );
1850   init_req( TypeFunc::Memory   , mem );
1851   init_req( TypeFunc::ReturnAdr, topnode );
1852   init_req( TypeFunc::FramePtr , topnode );
1853   init_req( AllocSize          , size);
1854   init_req( KlassNode          , klass_node);
1855   init_req( InitialTest        , initial_test);
1856   init_req( ALength            , topnode);
1857   init_req( InlineTypeNode     , inline_type_node);
1858   // DefaultValue defaults to NULL
1859   // RawDefaultValue defaults to NULL
1860   C->add_macro_node(this);
1861 }
1862 
1863 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
1864 {
1865   assert(initializer != NULL &&
1866          initializer->is_object_constructor_or_class_initializer(),
 1867          "unexpected initializer method");
 1868   BCEscapeAnalyzer* analyzer = initializer->get_bcea();
1869   if (analyzer == NULL) {
1870     return;
1871   }
1872 
1873   // Allocation node is first parameter in its initializer
1874   if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
1875     _is_allocation_MemBar_redundant = true;
1876   }
1877 }
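// Hedged illustration: a constructor that only writes fields of the
// receiver leaves argument 0 arg_local, so the flag above is set; a
// constructor that publishes 'this' (say, by storing it into a static
// field) lets the receiver escape, the check fails, and the
// allocation's MemBar is kept.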
1878 
1879 Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
1880   Node* mark_node = NULL;
1881   if (EnableValhalla) {
1882     Node* klass_node = in(AllocateNode::KlassNode);
1883     Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
1884     mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
1885   } else {
1886     mark_node = phase->MakeConX(markWord::prototype().value());
1887   }
1888   mark_node = phase->transform(mark_node);
1889   // Avoid returning a constant (old node) here because this method is used by LoadNode::Ideal
1890   return new OrXNode(mark_node, phase->MakeConX(_larval ? markWord::larval_bit_in_place : 0));
1891 }
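// Hedged note: with EnableValhalla the mark word prototype is loaded
// from the klass, which lets inline classes carry their own prototype
// header; the legacy path can use the compile-time constant instead.
// The trailing OrX sets the larval bit only while the object is still
// under construction (_larval).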
1892 
1893 
1894 //=============================================================================
1895 Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1896   Node* res = SafePointNode::Ideal(phase, can_reshape);
1897   if (res != NULL) {
1898     return res;
1899   }
1900   // Don't bother trying to transform a dead node
1901   if (in(0) && in(0)->is_top())  return NULL;
1902 
1903   const Type* type = phase->type(Ideal_length());
1904   if (type->isa_int() && type->is_int()->_hi < 0) {
1905     if (can_reshape) {
1906       PhaseIterGVN *igvn = phase->is_IterGVN();
1907       // Unreachable fall through path (negative array length),
1908       // the allocation can only throw so disconnect it.
1909       Node* proj = proj_out_or_null(TypeFunc::Control);
1910       Node* catchproj = NULL;
1911       if (proj != NULL) {
1912         for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
1913           Node *cn = proj->fast_out(i);
1914           if (cn->is_Catch()) {
1915             catchproj = cn->as_Multi()->proj_out_or_null(CatchProjNode::fall_through_index);
1916             break;
1917           }
1918         }
1919       }

2308       this->collect_nodes_in_all_data(in_rel, true);
2309     }
2310     this->collect_nodes(out_rel, -2, false, false);
2311 }
2312 #endif
2313 
2314 //=============================================================================
2315 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2316 
2317   // perform any generic optimizations first (returns 'this' or NULL)
2318   Node *result = SafePointNode::Ideal(phase, can_reshape);
2319   if (result != NULL)  return result;
2320   // Don't bother trying to transform a dead node
2321   if (in(0) && in(0)->is_top())  return NULL;
2322 
2323   // Now see if we can optimize away this lock.  We don't actually
2324   // remove the locking here, we simply set the _eliminate flag which
2325   // prevents macro expansion from expanding the lock.  Since we don't
2326   // modify the graph, the value returned from this function is the
2327   // one computed above.
2328   const Type* obj_type = phase->type(obj_node());
2329   if (can_reshape && EliminateLocks && !is_non_esc_obj() &&
2330       !obj_type->isa_inlinetype() && !obj_type->is_inlinetypeptr()) {
2331     //
 2332     // If we are locking a non-escaped object, the lock/unlock is unnecessary
2333     //
2334     ConnectionGraph *cgr = phase->C->congraph();
2335     if (cgr != NULL && cgr->not_global_escape(obj_node())) {
2336       assert(!is_eliminated() || is_coarsened(), "sanity");
 2337       // The lock could be marked eliminated by the lock coarsening
 2338       // code during the first IGVN pass, before EA. Replace the
 2339       // coarsened flag to eliminate all associated locks/unlocks.
2340 #ifdef ASSERT
2341       this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
2342 #endif
2343       this->set_non_esc_obj();
2344       return result;
2345     }
2346 
2347     if (!phase->C->do_locks_coarsening()) {
2348       return result; // Compiling without locks coarsening
2349     }
2350     //

2506 }
2507 
2508 //=============================================================================
2509 uint UnlockNode::size_of() const { return sizeof(*this); }
2510 
2511 //=============================================================================
2512 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2513 
2514   // perform any generic optimizations first (returns 'this' or NULL)
2515   Node *result = SafePointNode::Ideal(phase, can_reshape);
2516   if (result != NULL)  return result;
2517   // Don't bother trying to transform a dead node
2518   if (in(0) && in(0)->is_top())  return NULL;
2519 
2520   // Now see if we can optimize away this unlock.  We don't actually
2521   // remove the unlocking here, we simply set the _eliminate flag which
2522   // prevents macro expansion from expanding the unlock.  Since we don't
2523   // modify the graph, the value returned from this function is the
2524   // one computed above.
2525   // Escape state is defined after Parse phase.
2526   const Type* obj_type = phase->type(obj_node());
2527   if (can_reshape && EliminateLocks && !is_non_esc_obj() &&
2528       !obj_type->isa_inlinetype() && !obj_type->is_inlinetypeptr()) {
2529     //
 2530     // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
2531     //
2532     ConnectionGraph *cgr = phase->C->congraph();
2533     if (cgr != NULL && cgr->not_global_escape(obj_node())) {
2534       assert(!is_eliminated() || is_coarsened(), "sanity");
 2535       // The lock could be marked eliminated by the lock coarsening
 2536       // code during the first IGVN pass, before EA. Replace the
 2537       // coarsened flag to eliminate all associated locks/unlocks.
2538 #ifdef ASSERT
2539       this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
2540 #endif
2541       this->set_non_esc_obj();
2542     }
2543   }
2544   return result;
2545 }
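// Hedged note: as in LockNode::Ideal above, elimination here is
// flag-only -- set_non_esc_obj() marks the lock/unlock pair and macro
// expansion later omits the actual locking code; Ideal() itself does
// not edit the graph.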
2546 
2547 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock)  const {
2548   if (C == NULL) {

2588     }
2589     // unrelated
2590     return false;
2591   }
2592 
2593   if (dest_t->isa_aryptr()) {
2594     // arraycopy or array clone
2595     if (t_oop->isa_instptr()) {
2596       return false;
2597     }
2598     if (!t_oop->isa_aryptr()) {
2599       return true;
2600     }
2601 
2602     const Type* elem = dest_t->is_aryptr()->elem();
2603     if (elem == Type::BOTTOM) {
 2604       // An array, but we don't know what the elements are
2605       return true;
2606     }
2607 
2608     dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
2609     t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
2610     uint dest_alias = phase->C->get_alias_index(dest_t);
2611     uint t_oop_alias = phase->C->get_alias_index(t_oop);
2612 
2613     return dest_alias == t_oop_alias;
2614   }
2615 
2616   return true;
2617 }
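// Hedged note on the array/array case above: both types are widened
// to unknown offsets first, so the test degenerates to "same C2 alias
// class": two int[] accesses conflict, while an int[] destination can
// never alias a long[] load.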