src/hotspot/share/opto/callnode.cpp

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"

  27 #include "ci/bcEscapeAnalyzer.hpp"
  28 #include "compiler/oopMap.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/c2/barrierSetC2.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "opto/callGenerator.hpp"
  33 #include "opto/callnode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/escape.hpp"

  37 #include "opto/locknode.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/matcher.hpp"
  40 #include "opto/parse.hpp"
  41 #include "opto/regalloc.hpp"
  42 #include "opto/regmask.hpp"
  43 #include "opto/rootnode.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "runtime/sharedRuntime.hpp"

  46 #include "utilities/powerOfTwo.hpp"
  47 #include "code/vmreg.hpp"
  48 
  49 // Portions of code courtesy of Clifford Click
  50 
  51 // Optimization - Graph Style
  52 
  53 //=============================================================================
  54 uint StartNode::size_of() const { return sizeof(*this); }
  55 bool StartNode::cmp( const Node &n ) const
  56 { return _domain == ((StartNode&)n)._domain; }
  57 const Type *StartNode::bottom_type() const { return _domain; }
  58 const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
  59 #ifndef PRODUCT
  60 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
  61 void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
  62 #endif
  63 
  64 //------------------------------Ideal------------------------------------------
  65 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  66   return remove_dead_region(phase, can_reshape) ? this : nullptr;
  67 }
  68 
  69 //------------------------------calling_convention-----------------------------
  70 void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  71   SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
  72 }
  73 
  74 //------------------------------Registers--------------------------------------
  75 const RegMask &StartNode::in_RegMask(uint) const {
  76   return RegMask::Empty;
  77 }
  78 
  79 //------------------------------match------------------------------------------
  80 // Construct projections for incoming parameters, and their RegMask info
  81 Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  82   switch (proj->_con) {
  83   case TypeFunc::Control:
  84   case TypeFunc::I_O:
  85   case TypeFunc::Memory:
  86     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  87   case TypeFunc::FramePtr:
  88     return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  89   case TypeFunc::ReturnAdr:
  90     return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  91   case TypeFunc::Parms:
  92   default: {
  93       uint parm_num = proj->_con - TypeFunc::Parms;
  94       const Type *t = _domain->field_at(proj->_con);
  95       if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
  96         return new ConNode(Type::TOP);
  97       uint ideal_reg = t->ideal_reg();
  98       RegMask &rm = match->_calling_convention_mask[parm_num];
  99       return new MachProjNode(this,proj->_con,rm,ideal_reg);
 100     }
 101   }
 102   return nullptr;
 103 }
 104 
 105 //------------------------------StartOSRNode----------------------------------
 106 // The method start node for an on stack replacement adapter
 107 
 108 //------------------------------osr_domain-----------------------------
 109 const TypeTuple *StartOSRNode::osr_domain() {
 110   const Type **fields = TypeTuple::fields(2);
 111   fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer
 112 
 113   return TypeTuple::make(TypeFunc::Parms+1, fields);
 114 }
 115 
 116 //=============================================================================
 117 const char * const ParmNode::names[TypeFunc::Parms+1] = {
 118   "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
 119 };
 120 
 121 #ifndef PRODUCT
 122 void ParmNode::dump_spec(outputStream *st) const {
 123   if( _con < TypeFunc::Parms ) {
 124     st->print("%s", names[_con]);
 125   } else {
 126     st->print("Parm%d: ",_con-TypeFunc::Parms);
 127     // Verbose and WizardMode dump bottom_type for all nodes
 128     if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
 129   }
 130 }
 131 
 132 void ParmNode::dump_compact_spec(outputStream *st) const {
 133   if (_con < TypeFunc::Parms) {
 134     st->print("%s", names[_con]);
 135   } else {

 481       if (cik->is_instance_klass()) {
 482         cik->print_name_on(st);
 483         iklass = cik->as_instance_klass();
 484       } else if (cik->is_type_array_klass()) {
 485         cik->as_array_klass()->base_element_type()->print_name_on(st);
 486         st->print("[%d]", spobj->n_fields());
 487       } else if (cik->is_obj_array_klass()) {
 488         ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
 489         if (cie->is_instance_klass()) {
 490           cie->print_name_on(st);
 491         } else if (cie->is_type_array_klass()) {
 492           cie->as_array_klass()->base_element_type()->print_name_on(st);
 493         } else {
 494           ShouldNotReachHere();
 495         }
 496         st->print("[%d]", spobj->n_fields());
 497         int ndim = cik->as_array_klass()->dimension() - 1;
 498         while (ndim-- > 0) {
 499           st->print("[]");
 500         }
 501       }
 502       st->print("={");
 503       uint nf = spobj->n_fields();
 504       if (nf > 0) {
 505         uint first_ind = spobj->first_index(mcall->jvms());
 506         Node* fld_node = mcall->in(first_ind);
 507         ciField* cifield;
 508         if (iklass != nullptr) {
 509           st->print(" [");
 510           cifield = iklass->nonstatic_field_at(0);
 511           cifield->print_name_on(st);
 512           format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
 513         } else {
 514           format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
 515         }
 516         for (uint j = 1; j < nf; j++) {
 517           fld_node = mcall->in(first_ind+j);
 518           if (iklass != nullptr) {
 519             st->print(", [");
 520             cifield = iklass->nonstatic_field_at(j);
 521             cifield->print_name_on(st);
 522             format_helper(regalloc, st, fld_node, ":", j, &scobjs);
 523           } else {
 524             format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
 525           }

 699     if (i == TypeFunc::Parms) st->print("(");
 700     Node* p = in(i);
 701     if (p != nullptr) {
 702       p->dump_idx(false, st, dc);
 703       st->print(" ");
 704     } else {
 705       st->print("_ ");
 706     }
 707   }
 708   st->print(")");
 709 }
 710 
 711 void CallNode::dump_spec(outputStream *st) const {
 712   st->print(" ");
 713   if (tf() != nullptr)  tf()->dump_on(st);
 714   if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
 715   if (jvms() != nullptr)  jvms()->dump_spec(st);
 716 }
 717 #endif
 718 
 719 const Type *CallNode::bottom_type() const { return tf()->range(); }
 720 const Type* CallNode::Value(PhaseGVN* phase) const {
 721   if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
 722   return tf()->range();


 723 }
 724 
 725 //------------------------------calling_convention-----------------------------
 726 void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
 727   // Use the standard compiler calling convention
 728   SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
 729 }
 730 
 731 
 732 //------------------------------match------------------------------------------
 733 // Construct projections for control, I/O, memory-fields, ..., and
 734 // return result(s) along with their RegMask info
 735 Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
 736   switch (proj->_con) {
 737   case TypeFunc::Control:
 738   case TypeFunc::I_O:
 739   case TypeFunc::Memory:
 740     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
 741 
 742   case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
 743     assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
 744     // 2nd half of doubles and longs
 745     return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
 746 
 747   case TypeFunc::Parms: {       // Normal returns
 748     uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
 749     OptoRegPair regs = Opcode() == Op_CallLeafVector
 750       ? match->vector_return_value(ideal_reg)      // Calls into assembly vector routine
 751       : is_CallRuntime()
 752         ? match->c_return_value(ideal_reg)  // Calls into C runtime
 753         : match->  return_value(ideal_reg); // Calls into compiled Java code
 754     RegMask rm = RegMask(regs.first());
 755 
 756     if (Opcode() == Op_CallLeafVector) {
 757       // If the return is in vector, compute appropriate regmask taking into account the whole range
 758       if(ideal_reg >= Op_VecS && ideal_reg <= Op_VecZ) {
 759         if(OptoReg::is_valid(regs.second())) {
 760           for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
 761             rm.Insert(r);
 762           }
 763         }
 764       }
 765     }
 766 
 767     if( OptoReg::is_valid(regs.second()) )
 768       rm.Insert( regs.second() );
 769     return new MachProjNode(this,proj->_con,rm,ideal_reg);
 770   }
 771 
 772   case TypeFunc::ReturnAdr:
 773   case TypeFunc::FramePtr:
 774   default:
 775     ShouldNotReachHere();
 776   }
 777   return nullptr;
 778 }
 779 
 780 // Do we Match on this edge index or not?  Match no edges
 781 uint CallNode::match_edge(uint idx) const {
 782   return 0;
 783 }
 784 
 785 //
 786 // Determine whether the call could modify the field of the specified
 787 // instance at the specified offset.
 788 //
 789 bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
 790   assert((t_oop != nullptr), "sanity");
 791   if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
 792     const TypeTuple* args = _tf->domain();
 793     Node* dest = nullptr;
 794     // Stubs that can be called once an ArrayCopyNode is expanded have
 795     // different signatures. Look for the second pointer argument,
 796     // which is the destination of the copy.
 797     for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 798       if (args->field_at(i)->isa_ptr()) {
 799         j++;
 800         if (j == 2) {
 801           dest = in(i);
 802           break;
 803         }
 804       }
 805     }
 806     guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
 807     if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
 808       return true;
 809     }
 810     return false;
 811   }
 812   if (t_oop->is_known_instance()) {

 821       Node* proj = proj_out_or_null(TypeFunc::Parms);
 822       if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
 823         return false;
 824       }
 825     }
 826     if (is_CallJava() && as_CallJava()->method() != nullptr) {
 827       ciMethod* meth = as_CallJava()->method();
 828       if (meth->is_getter()) {
 829         return false;
 830       }
 831       // May modify (by reflection) if a boxing object is passed
 832       // as an argument or returned.
 833       Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
 834       if (proj != nullptr) {
 835         const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
 836         if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
 837                                    (inst_t->instance_klass() == boxing_klass))) {
 838           return true;
 839         }
 840       }
 841       const TypeTuple* d = tf()->domain();
 842       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 843         const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
 844         if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
 845                                  (inst_t->instance_klass() == boxing_klass))) {
 846           return true;
 847         }
 848       }
 849       return false;
 850     }
 851   }
 852   return true;
 853 }
 854 
 855 // Does this call have a direct reference to n other than debug information?
 856 bool CallNode::has_non_debug_use(Node *n) {
 857   const TypeTuple * d = tf()->domain();
 858   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 859     Node *arg = in(i);
 860     if (arg == n) {
 861       return true;
 862     }
 863   }
 864   return false;
 865 }
 866 
 867 // Returns the unique CheckCastPP of a call
 868 // or 'this' if there are several CheckCastPP or unexpected uses
 869 // or returns null if there is none.
 870 Node *CallNode::result_cast() {
 871   Node *cast = nullptr;
 872 
 873   Node *p = proj_out_or_null(TypeFunc::Parms);
 874   if (p == nullptr)
 875     return nullptr;
 876 
 877   for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
 878     Node *use = p->fast_out(i);
 879     if (use->is_CheckCastPP()) {
 880       if (cast != nullptr) {
 881         return this;  // more than 1 CheckCastPP
 882       }
 883       cast = use;
 884     } else if (!use->is_Initialize() &&
 885                !use->is_AddP() &&
 886                use->Opcode() != Op_MemBarStoreStore) {
 887       // Expected uses are restricted to a CheckCastPP, an Initialize
 888       // node, a MemBarStoreStore (clone) and AddP nodes. If we
 889       // encounter any other use (a Phi node can be seen in rare
 890       // cases) return this to prevent incorrect optimizations.
 891       return this;
 892     }
 893   }
 894   return cast;
 895 }
 896 
 897 
 898 void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
 899   projs->fallthrough_proj      = nullptr;
 900   projs->fallthrough_catchproj = nullptr;
 901   projs->fallthrough_ioproj    = nullptr;
 902   projs->catchall_ioproj       = nullptr;
 903   projs->catchall_catchproj    = nullptr;
 904   projs->fallthrough_memproj   = nullptr;
 905   projs->catchall_memproj      = nullptr;
 906   projs->resproj               = nullptr;
 907   projs->exobj                 = nullptr;
 908 
 909   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 910     ProjNode *pn = fast_out(i)->as_Proj();
 911     if (pn->outcnt() == 0) continue;
 912     switch (pn->_con) {
 913     case TypeFunc::Control:
 914       {
 915         // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
 916         projs->fallthrough_proj = pn;
 917         const Node* cn = pn->unique_ctrl_out_or_null();
 918         if (cn != nullptr && cn->is_Catch()) {
 919           ProjNode *cpn = nullptr;
 920           for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
 921             cpn = cn->fast_out(k)->as_Proj();
 922             assert(cpn->is_CatchProj(), "must be a CatchProjNode");
 923             if (cpn->_con == CatchProjNode::fall_through_index)
 924               projs->fallthrough_catchproj = cpn;
 925             else {
 926               assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
 927               projs->catchall_catchproj = cpn;

 933     case TypeFunc::I_O:
 934       if (pn->_is_io_use)
 935         projs->catchall_ioproj = pn;
 936       else
 937         projs->fallthrough_ioproj = pn;
 938       for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
 939         Node* e = pn->out(j);
 940         if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
 941           assert(projs->exobj == nullptr, "only one");
 942           projs->exobj = e;
 943         }
 944       }
 945       break;
 946     case TypeFunc::Memory:
 947       if (pn->_is_io_use)
 948         projs->catchall_memproj = pn;
 949       else
 950         projs->fallthrough_memproj = pn;
 951       break;
 952     case TypeFunc::Parms:
 953       projs->resproj = pn;
 954       break;
 955     default:
 956       assert(false, "unexpected projection from allocation node.");


 957     }
 958   }
 959 
 960   // The resproj may not exist because the result could be ignored,
 961   // and the exception object may not exist if an exception handler
 962   // swallows the exception, but all the others must exist and be found.
 963   assert(projs->fallthrough_proj      != nullptr, "must be found");
 964   do_asserts = do_asserts && !Compile::current()->inlining_incrementally();

 965   assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
 966   assert(!do_asserts || projs->fallthrough_memproj   != nullptr, "must be found");
 967   assert(!do_asserts || projs->fallthrough_ioproj    != nullptr, "must be found");
 968   assert(!do_asserts || projs->catchall_catchproj    != nullptr, "must be found");
 969   if (separate_io_proj) {
 970     assert(!do_asserts || projs->catchall_memproj    != nullptr, "must be found");
 971     assert(!do_asserts || projs->catchall_ioproj     != nullptr, "must be found");
 972   }

 973 }
 974 
 975 Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
 976 #ifdef ASSERT
 977   // Validate attached generator
 978   CallGenerator* cg = generator();
 979   if (cg != nullptr) {
 980     assert(is_CallStaticJava()  && cg->is_mh_late_inline() ||
 981            is_CallDynamicJava() && cg->is_virtual_late_inline(), "mismatch");
 982   }
 983 #endif // ASSERT
 984   return SafePointNode::Ideal(phase, can_reshape);
 985 }
 986 
 987 bool CallNode::is_call_to_arraycopystub() const {
 988   if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
 989     return true;
 990   }
 991   return false;
 992 }
 993 
 994 //=============================================================================
 995 uint CallJavaNode::size_of() const { return sizeof(*this); }
 996 bool CallJavaNode::cmp( const Node &n ) const {
 997   CallJavaNode &call = (CallJavaNode&)n;
 998   return CallNode::cmp(call) && _method == call._method &&
 999          _override_symbolic_info == call._override_symbolic_info;
1000 }
1001 
1002 void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
1003   // Copy debug information and adjust JVMState information
1004   uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain()->cnt() : (uint)TypeFunc::Parms+1;
1005   uint new_dbg_start = tf()->domain()->cnt();
1006   int jvms_adj  = new_dbg_start - old_dbg_start;
1007   assert (new_dbg_start == req(), "argument count mismatch");
1008   Compile* C = phase->C;
1009 
1010   // SafePointScalarObject node could be referenced several times in debug info.
1011   // Use Dict to record cloned nodes.
1012   Dict* sosn_map = new Dict(cmpkey,hashkey);
1013   for (uint i = old_dbg_start; i < sfpt->req(); i++) {
1014     Node* old_in = sfpt->in(i);
1015     // Clone old SafePointScalarObjectNodes, adjusting their field contents.
1016     if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
1017       SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
1018       bool new_node;
1019       Node* new_in = old_sosn->clone(sosn_map, new_node);
1020       if (new_node) { // New node?
1021         new_in->set_req(0, C->root()); // reset control edge
1022         new_in = phase->transform(new_in); // Register new node.
1023       }
1024       old_in = new_in;
1025     }
1026     add_req(old_in);
1027   }
1028 
1029   // JVMS may be shared so clone it before we modify it
1030   set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
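        // Shift all debug-info offsets in the cloned JVMState chain by the
        // difference between the old and new fixed-argument counts.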
1031   for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
1032     jvms->set_map(this);
1033     jvms->set_locoff(jvms->locoff()+jvms_adj);
1034     jvms->set_stkoff(jvms->stkoff()+jvms_adj);
1035     jvms->set_monoff(jvms->monoff()+jvms_adj);
1036     jvms->set_scloff(jvms->scloff()+jvms_adj);
1037     jvms->set_endoff(jvms->endoff()+jvms_adj);
1038   }
1039 }
1040 
1041 #ifdef ASSERT
1042 bool CallJavaNode::validate_symbolic_info() const {
1043   if (method() == nullptr) {
1044     return true; // call into runtime or uncommon trap
1045   }




1046   ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
1047   ciMethod* callee = method();
1048   if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
1049     assert(override_symbolic_info(), "should be set");
1050   }
1051   assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
1052   return true;
1053 }
1054 #endif
1055 
1056 #ifndef PRODUCT
1057 void CallJavaNode::dump_spec(outputStream* st) const {
1058   if( _method ) _method->print_short_name(st);
1059   CallNode::dump_spec(st);
1060 }
1061 
1062 void CallJavaNode::dump_compact_spec(outputStream* st) const {
1063   if (_method) {
1064     _method->print_short_name(st);
1065   } else {
1066     st->print("<?>");
1067   }
1068 }
1069 #endif
1070 
1071 //=============================================================================
1072 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
1073 bool CallStaticJavaNode::cmp( const Node &n ) const {
1074   CallStaticJavaNode &call = (CallStaticJavaNode&)n;
1075   return CallJavaNode::cmp(call);
1076 }
1077 
1078 Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1079   CallGenerator* cg = generator();
1080   if (can_reshape && cg != nullptr) {
1081     assert(IncrementalInlineMH, "required");
1082     assert(cg->call_node() == this, "mismatch");
1083     assert(cg->is_mh_late_inline(), "not virtual");
1084 
1085     // Check whether this MH handle call becomes a candidate for inlining.
1086     ciMethod* callee = cg->method();
1087     vmIntrinsics::ID iid = callee->intrinsic_id();
1088     if (iid == vmIntrinsics::_invokeBasic) {
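            // A constant MethodHandle receiver means the call target is now
            // known, so queue the call again for late inlining.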
1089       if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
1090         phase->C->prepend_late_inline(cg);
1091         set_generator(nullptr);
1092       }
1093     } else if (iid == vmIntrinsics::_linkToNative) {
1094       // never retry
1095     } else {
1096       assert(callee->has_member_arg(), "wrong type of call?");
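            // linkTo* intrinsics take the MemberName as their trailing
            // argument; a constant MemberName likewise makes the target known.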
1097       if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
1098         phase->C->prepend_late_inline(cg);

1111 
1112 //----------------------------uncommon_trap_request----------------------------
1113 // If this is an uncommon trap, return the request code, else zero.
1114 int CallStaticJavaNode::uncommon_trap_request() const {
1115   return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
1116 }
1117 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
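        // The trap request is passed to the uncommon trap call as a constant
        // int in the first argument slot (TypeFunc::Parms).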
1118 #ifndef PRODUCT
1119   if (!(call->req() > TypeFunc::Parms &&
1120         call->in(TypeFunc::Parms) != nullptr &&
1121         call->in(TypeFunc::Parms)->is_Con() &&
1122         call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
1123     assert(in_dump() != 0, "OK if dumping");
1124     tty->print("[bad uncommon trap]");
1125     return 0;
1126   }
1127 #endif
1128   return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
1129 }
1130 
1131 #ifndef PRODUCT
1132 void CallStaticJavaNode::dump_spec(outputStream *st) const {
1133   st->print("# Static ");
1134   if (_name != nullptr) {
1135     st->print("%s", _name);
1136     int trap_req = uncommon_trap_request();
1137     if (trap_req != 0) {
1138       char buf[100];
1139       st->print("(%s)",
1140                  Deoptimization::format_trap_request(buf, sizeof(buf),
1141                                                      trap_req));
1142     }
1143     st->print(" ");
1144   }
1145   CallJavaNode::dump_spec(st);
1146 }
1147 
1148 void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
1149   if (_method) {
1150     _method->print_short_name(st);

1215 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
1216 bool CallRuntimeNode::cmp( const Node &n ) const {
1217   CallRuntimeNode &call = (CallRuntimeNode&)n;
1218   return CallNode::cmp(call) && !strcmp(_name,call._name);
1219 }
1220 #ifndef PRODUCT
1221 void CallRuntimeNode::dump_spec(outputStream *st) const {
1222   st->print("# ");
1223   st->print("%s", _name);
1224   CallNode::dump_spec(st);
1225 }
1226 #endif
1227 uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
1228 bool CallLeafVectorNode::cmp( const Node &n ) const {
1229   CallLeafVectorNode &call = (CallLeafVectorNode&)n;
1230   return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
1231 }
1232 
1233 //------------------------------calling_convention-----------------------------
1234 void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
1235   SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
1236 }
1237 
1238 void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1239 #ifdef ASSERT
1240   assert(tf()->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1241          "return vector size must match");
1242   const TypeTuple* d = tf()->domain();
1243   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1244     Node* arg = in(i);
1245     assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1246            "vector argument size must match");
1247   }
1248 #endif
1249 
1250   SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
1251 }
1252 
1253 //=============================================================================
1254 //------------------------------calling_convention-----------------------------
1255 
1256 
1257 //=============================================================================
1258 #ifndef PRODUCT
1259 void CallLeafNode::dump_spec(outputStream *st) const {
1260   st->print("# ");
1261   st->print("%s", _name);
1262   CallNode::dump_spec(st);
1263 }
1264 #endif
1265 
1266 //=============================================================================
1267 
1268 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
1269   assert(verify_jvms(jvms), "jvms must match");
1270   int loc = jvms->locoff() + idx;
1271   if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
1272     // If current local idx is top then local idx - 1 could
1273     // be a long/double that needs to be killed since top could
1274     // represent the 2nd half of the long/double.
1275     uint ideal = in(loc -1)->ideal_reg();
1276     if (ideal == Op_RegD || ideal == Op_RegL) {
1277       // set other (low index) half to top
1278       set_req(loc - 1, in(loc));
1279     }
1280   }
1281   set_req(loc, c);
1282 }
1283 
1284 uint SafePointNode::size_of() const { return sizeof(*this); }
1285 bool SafePointNode::cmp( const Node &n ) const {

1296   }
1297 }
1298 
1299 
1300 //----------------------------next_exception-----------------------------------
1301 SafePointNode* SafePointNode::next_exception() const {
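        // Exception states are chained through an extra precedence edge
        // stored at index req(), beyond the regular inputs.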
1302   if (len() == req()) {
1303     return nullptr;
1304   } else {
1305     Node* n = in(req());
1306     assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
1307     return (SafePointNode*) n;
1308   }
1309 }
1310 
1311 
1312 //------------------------------Ideal------------------------------------------
1313 // Skip over any collapsed Regions
1314 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1315   assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
1316   return remove_dead_region(phase, can_reshape) ? this : nullptr;
1317 }
1318 
1319 //------------------------------Identity---------------------------------------
1320 // Remove obviously duplicate safepoints
1321 Node* SafePointNode::Identity(PhaseGVN* phase) {
1322 
1323   // If you have back to back safepoints, remove one
1324   if (in(TypeFunc::Control)->is_SafePoint()) {
1325     Node* out_c = unique_ctrl_out_or_null();
1326     // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
1327     // outer loop's safepoint could confuse removal of the outer loop.
1328     if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
1329       return in(TypeFunc::Control);
1330     }
1331   }
1332 
1333   // Transforming long counted loops requires a safepoint node. Do not
1334   // eliminate a safepoint until loop opts are over.
1335   if (in(0)->is_Proj() && !phase->C->major_progress()) {
1336     Node *n0 = in(0)->in(0);

1453   return (TypeFunc::Parms == idx);
1454 }
1455 
1456 void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
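        // Loop safepoints are kept alive by a precedence edge from the Root
        // node; remove that edge so this safepoint can be eliminated.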
1457   assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
1458   int nb = igvn->C->root()->find_prec_edge(this);
1459   if (nb != -1) {
1460     igvn->delete_precedence_of(igvn->C->root(), nb);
1461   }
1462 }
1463 
1464 //==============  SafePointScalarObjectNode  ==============
1465 
1466 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint n_fields) :
1467   TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
1468   _first_index(first_index),
1469   _n_fields(n_fields),
1470   _alloc(alloc)
1471 {
1472 #ifdef ASSERT
1473   if (!alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) {
1474     alloc->dump();
1475     assert(false, "unexpected call node");
1476   }
1477 #endif
1478   init_class_id(Class_SafePointScalarObject);
1479 }
1480 
1481 // Do not allow value-numbering for SafePointScalarObject node.
1482 uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1483 bool SafePointScalarObjectNode::cmp( const Node &n ) const {
1484   return (&n == this); // Always fail except on self
1485 }
1486 
1487 uint SafePointScalarObjectNode::ideal_reg() const {
1488   return 0; // No matching to machine instruction
1489 }
1490 
1491 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
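        // Fields of a scalar-replaced object are debug info only, so any
        // location valid for a debug value of that register class will do.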
1492   return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1493 }

1558     new_node = false;
1559     return (SafePointScalarMergeNode*)cached;
1560   }
1561   new_node = true;
1562   SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
1563   sosn_map->Insert((void*)this, (void*)res);
1564   return res;
1565 }
1566 
1567 #ifndef PRODUCT
1568 void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
1569   st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
1570 }
1571 #endif
1572 
1573 //=============================================================================
1574 uint AllocateNode::size_of() const { return sizeof(*this); }
1575 
1576 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1577                            Node *ctrl, Node *mem, Node *abio,
1578                            Node *size, Node *klass_node, Node *initial_test)


1579   : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
1580 {
1581   init_class_id(Class_Allocate);
1582   init_flags(Flag_is_macro);
1583   _is_scalar_replaceable = false;
1584   _is_non_escaping = false;
1585   _is_allocation_MemBar_redundant = false;

1586   Node *topnode = C->top();
1587 
1588   init_req( TypeFunc::Control  , ctrl );
1589   init_req( TypeFunc::I_O      , abio );
1590   init_req( TypeFunc::Memory   , mem );
1591   init_req( TypeFunc::ReturnAdr, topnode );
1592   init_req( TypeFunc::FramePtr , topnode );
1593   init_req( AllocSize          , size);
1594   init_req( KlassNode          , klass_node);
1595   init_req( InitialTest        , initial_test);
1596   init_req( ALength            , topnode);
1597   init_req( ValidLengthTest    , topnode);



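        // Allocations are macro nodes: they stay in this high-level form
        // until macro expansion lowers them to actual allocation code.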
1598   C->add_macro_node(this);
1599 }
1600 
1601 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
1602 {
1603   assert(initializer != nullptr &&
1604          initializer->is_initializer() &&
1605          !initializer->is_static(),
1606              "unexpected initializer method");
1607   BCEscapeAnalyzer* analyzer = initializer->get_bcea();
1608   if (analyzer == nullptr) {
1609     return;
1610   }
1611 
1612   // The allocated object is the first parameter ('this') of its initializer
1613   if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
1614     _is_allocation_MemBar_redundant = true;
1615   }
1616 }
1617 Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {

1618   Node* mark_node = nullptr;
1619   // For now only enable fast locking for non-array types
1620   mark_node = phase->MakeConX(markWord::prototype().value());
1621   return mark_node;
1622 }
1623 
1624 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
1625 // CastII, if appropriate.  If we are not allowed to create new nodes, and
1626 // a CastII is appropriate, return null.
1627 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
1628   Node *length = in(AllocateNode::ALength);
1629   assert(length != nullptr, "length is not null");
1630 
1631   const TypeInt* length_type = phase->find_int_type(length);
1632   const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1633 
1634   if (ary_type != nullptr && length_type != nullptr) {
1635     const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1636     if (narrow_length_type != length_type) {
1637       // Assert one of:
1638       //   - the narrow_length is 0
1639       //   - the narrow_length is not wider than length
1640       assert(narrow_length_type == TypeInt::ZERO ||
1641              length_type->is_con() && narrow_length_type->is_con() &&

1980 
1981 void AbstractLockNode::dump_compact_spec(outputStream* st) const {
1982   st->print("%s", _kind_names[_kind]);
1983 }
1984 #endif
1985 
1986 //=============================================================================
1987 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1988 
1989   // perform any generic optimizations first (returns 'this' or null)
1990   Node *result = SafePointNode::Ideal(phase, can_reshape);
1991   if (result != nullptr)  return result;
1992   // Don't bother trying to transform a dead node
1993   if (in(0) && in(0)->is_top())  return nullptr;
1994 
1995   // Now see if we can optimize away this lock.  We don't actually
1996   // remove the locking here, we simply set the _eliminate flag which
1997   // prevents macro expansion from expanding the lock.  Since we don't
1998   // modify the graph, the value returned from this function is the
1999   // one computed above.
2000   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {

2001     //
2002     // If we are locking a non-escaped object, the lock/unlock is unnecessary
2003     //
2004     ConnectionGraph *cgr = phase->C->congraph();
2005     if (cgr != nullptr && cgr->not_global_escape(obj_node())) {
2006       assert(!is_eliminated() || is_coarsened(), "sanity");
2007       // The lock could have been marked eliminated by the lock coarsening
2008       // code during the first IGVN pass, before EA. Replace the coarsened
2009       // flag with non-escaping to eliminate all associated locks/unlocks.
2010 #ifdef ASSERT
2011       this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
2012 #endif
2013       this->set_non_esc_obj();
2014       return result;
2015     }
2016 
2017     if (!phase->C->do_locks_coarsening()) {
2018       return result; // Compiling without locks coarsening
2019     }
2020     //

2176 }
2177 
2178 //=============================================================================
2179 uint UnlockNode::size_of() const { return sizeof(*this); }
2180 
2181 //=============================================================================
2182 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2183 
2184   // perform any generic optimizations first (returns 'this' or null)
2185   Node *result = SafePointNode::Ideal(phase, can_reshape);
2186   if (result != nullptr)  return result;
2187   // Don't bother trying to transform a dead node
2188   if (in(0) && in(0)->is_top())  return nullptr;
2189 
2190   // Now see if we can optimize away this unlock.  We don't actually
2191   // remove the unlocking here, we simply set the _eliminate flag which
2192   // prevents macro expansion from expanding the unlock.  Since we don't
2193   // modify the graph, the value returned from this function is the
2194   // one computed above.
2195   // Escape state is defined after Parse phase.
2196   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {

2197     //
2198     // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
2199     //
2200     ConnectionGraph *cgr = phase->C->congraph();
2201     if (cgr != nullptr && cgr->not_global_escape(obj_node())) {
2202       assert(!is_eliminated() || is_coarsened(), "sanity");
2203       // The lock could have been marked eliminated by the lock coarsening
2204       // code during the first IGVN pass, before EA. Replace the coarsened
2205       // flag with non-escaping to eliminate all associated locks/unlocks.
2206 #ifdef ASSERT
2207       this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
2208 #endif
2209       this->set_non_esc_obj();
2210     }
2211   }
2212   return result;
2213 }
2214 
2215 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock)  const {
2216   if (C == nullptr) {

2256     }
2257     // unrelated
2258     return false;
2259   }
2260 
2261   if (dest_t->isa_aryptr()) {
2262     // arraycopy or array clone
2263     if (t_oop->isa_instptr()) {
2264       return false;
2265     }
2266     if (!t_oop->isa_aryptr()) {
2267       return true;
2268     }
2269 
2270     const Type* elem = dest_t->is_aryptr()->elem();
2271     if (elem == Type::BOTTOM) {
2272       // An array but we don't know what elements are
2273       return true;
2274     }
2275 
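          // Widen the destination type to an unknown offset so its alias
          // index covers all elements of the destination array.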
2276     dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();

2277     uint dest_alias = phase->C->get_alias_index(dest_t);
2278     uint t_oop_alias = phase->C->get_alias_index(t_oop);
2279 
2280     return dest_alias == t_oop_alias;
2281   }
2282 
2283   return true;
2284 }

src/hotspot/share/opto/callnode.cpp

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "ci/ciFlatArrayKlass.hpp"
  28 #include "ci/bcEscapeAnalyzer.hpp"
  29 #include "compiler/oopMap.hpp"
  30 #include "gc/shared/barrierSet.hpp"
  31 #include "gc/shared/c2/barrierSetC2.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "opto/callGenerator.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/escape.hpp"
  38 #include "opto/inlinetypenode.hpp"
  39 #include "opto/locknode.hpp"
  40 #include "opto/machnode.hpp"
  41 #include "opto/matcher.hpp"
  42 #include "opto/parse.hpp"
  43 #include "opto/regalloc.hpp"
  44 #include "opto/regmask.hpp"
  45 #include "opto/rootnode.hpp"
  46 #include "opto/runtime.hpp"
  47 #include "runtime/sharedRuntime.hpp"
  48 #include "runtime/stubRoutines.hpp"
  49 #include "utilities/powerOfTwo.hpp"
  50 #include "code/vmreg.hpp"
  51 
  52 // Portions of code courtesy of Clifford Click
  53 
  54 // Optimization - Graph Style
  55 
  56 //=============================================================================
  57 uint StartNode::size_of() const { return sizeof(*this); }
  58 bool StartNode::cmp( const Node &n ) const
  59 { return _domain == ((StartNode&)n)._domain; }
  60 const Type *StartNode::bottom_type() const { return _domain; }
  61 const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
  62 #ifndef PRODUCT
  63 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
  64 void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
  65 #endif
  66 
  67 //------------------------------Ideal------------------------------------------
  68 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  69   return remove_dead_region(phase, can_reshape) ? this : nullptr;
  70 }
  71 
  72 //------------------------------calling_convention-----------------------------
  73 void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  74   SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
  75 }
  76 
  77 //------------------------------Registers--------------------------------------
  78 const RegMask &StartNode::in_RegMask(uint) const {
  79   return RegMask::Empty;
  80 }
  81 
  82 //------------------------------match------------------------------------------
  83 // Construct projections for incoming parameters, and their RegMask info
  84 Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  85   switch (proj->_con) {
  86   case TypeFunc::Control:
  87   case TypeFunc::I_O:
  88   case TypeFunc::Memory:
  89     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  90   case TypeFunc::FramePtr:
  91     return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  92   case TypeFunc::ReturnAdr:
  93     return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  94   case TypeFunc::Parms:
  95   default: {
  96       uint parm_num = proj->_con - TypeFunc::Parms;
  97       const Type *t = _domain->field_at(proj->_con);
  98       if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
  99         return new ConNode(Type::TOP);
 100       uint ideal_reg = t->ideal_reg();
 101       RegMask &rm = match->_calling_convention_mask[parm_num];
 102       return new MachProjNode(this,proj->_con,rm,ideal_reg);
 103     }
 104   }
 105   return nullptr;
 106 }
 107 
 108 //=============================================================================
 109 const char * const ParmNode::names[TypeFunc::Parms+1] = {
 110   "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
 111 };
 112 
 113 #ifndef PRODUCT
 114 void ParmNode::dump_spec(outputStream *st) const {
 115   if( _con < TypeFunc::Parms ) {
 116     st->print("%s", names[_con]);
 117   } else {
 118     st->print("Parm%d: ",_con-TypeFunc::Parms);
 119     // Verbose and WizardMode dump bottom_type for all nodes
 120     if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
 121   }
 122 }
 123 
 124 void ParmNode::dump_compact_spec(outputStream *st) const {
 125   if (_con < TypeFunc::Parms) {
 126     st->print("%s", names[_con]);
 127   } else {

 473       if (cik->is_instance_klass()) {
 474         cik->print_name_on(st);
 475         iklass = cik->as_instance_klass();
 476       } else if (cik->is_type_array_klass()) {
 477         cik->as_array_klass()->base_element_type()->print_name_on(st);
 478         st->print("[%d]", spobj->n_fields());
 479       } else if (cik->is_obj_array_klass()) {
 480         ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
 481         if (cie->is_instance_klass()) {
 482           cie->print_name_on(st);
 483         } else if (cie->is_type_array_klass()) {
 484           cie->as_array_klass()->base_element_type()->print_name_on(st);
 485         } else {
 486           ShouldNotReachHere();
 487         }
 488         st->print("[%d]", spobj->n_fields());
 489         int ndim = cik->as_array_klass()->dimension() - 1;
 490         while (ndim-- > 0) {
 491           st->print("[]");
 492         }
 493       } else if (cik->is_flat_array_klass()) {
 494         ciKlass* cie = cik->as_flat_array_klass()->base_element_klass();
 495         cie->print_name_on(st);
 496         st->print("[%d]", spobj->n_fields());
 497         int ndim = cik->as_array_klass()->dimension() - 1;
 498         while (ndim-- > 0) {
 499           st->print("[]");
 500         }
 501       }
 502       st->print("={");
 503       uint nf = spobj->n_fields();
 504       if (nf > 0) {
 505         uint first_ind = spobj->first_index(mcall->jvms());
 506         if (iklass != nullptr && iklass->is_inlinetype()) {
 507           Node* init_node = mcall->in(first_ind++);
 508           if (!init_node->is_top()) {
 509             st->print(" [is_init");
 510             format_helper(regalloc, st, init_node, ":", -1, nullptr);
 511           }
 512         }
 513         Node* fld_node = mcall->in(first_ind);
 514         ciField* cifield;
 515         if (iklass != nullptr) {
 516           st->print(" [");
 517           cifield = iklass->nonstatic_field_at(0);
 518           cifield->print_name_on(st);
 519           format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
 520         } else {
 521           format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
 522         }
 523         for (uint j = 1; j < nf; j++) {
 524           fld_node = mcall->in(first_ind+j);
 525           if (iklass != nullptr) {
 526             st->print(", [");
 527             cifield = iklass->nonstatic_field_at(j);
 528             cifield->print_name_on(st);
 529             format_helper(regalloc, st, fld_node, ":", j, &scobjs);
 530           } else {
 531             format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
 532           }

 706     if (i == TypeFunc::Parms) st->print("(");
 707     Node* p = in(i);
 708     if (p != nullptr) {
 709       p->dump_idx(false, st, dc);
 710       st->print(" ");
 711     } else {
 712       st->print("_ ");
 713     }
 714   }
 715   st->print(")");
 716 }
 717 
 718 void CallNode::dump_spec(outputStream *st) const {
 719   st->print(" ");
 720   if (tf() != nullptr)  tf()->dump_on(st);
 721   if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
 722   if (jvms() != nullptr)  jvms()->dump_spec(st);
 723 }
 724 #endif
 725 
 726 const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
 727 const Type* CallNode::Value(PhaseGVN* phase) const {
 728   if (!in(0) || phase->type(in(0)) == Type::TOP) {
 729     return Type::TOP;
 730   }
 731   return tf()->range_cc();
 732 }
 733 
 734 //------------------------------calling_convention-----------------------------
 735 void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
 736   if (_entry_point == StubRoutines::store_inline_type_fields_to_buf()) {
 737     // The call to that stub is a special case: its inputs are
 738     // multiple values returned from a call and so it should follow
 739     // the return convention.
 740     SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
 741     return;
 742   }
 743   // Use the standard compiler calling convention
 744   SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
 745 }
 746 
 747 
 748 //------------------------------match------------------------------------------
 749 // Construct projections for control, I/O, memory-fields, ..., and
 750 // return result(s) along with their RegMask info
 751 Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
 752   uint con = proj->_con;
 753   const TypeTuple* range_cc = tf()->range_cc();
 754   if (con >= TypeFunc::Parms) {
 755     if (tf()->returns_inline_type_as_fields()) {
 756       // The call returns multiple values (inline type fields): we
 757       // create one projection per returned value.
 758       assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return");
 759       uint ideal_reg = range_cc->field_at(con)->ideal_reg();
 760       return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
 761     } else {
 762       if (con == TypeFunc::Parms) {
 763         uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
 764         OptoRegPair regs = Opcode() == Op_CallLeafVector
 765           ? match->vector_return_value(ideal_reg)      // Calls into assembly vector routine
 766           : match->c_return_value(ideal_reg);
 767         RegMask rm = RegMask(regs.first());
 768 
 769         if (Opcode() == Op_CallLeafVector) {
 770           // If the return is in vector, compute appropriate regmask taking into account the whole range
 771           if(ideal_reg >= Op_VecS && ideal_reg <= Op_VecZ) {
 772             if(OptoReg::is_valid(regs.second())) {
 773               for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
 774                 rm.Insert(r);
 775               }
 776             }

 777           }
 778         }
 779 
 780         if (OptoReg::is_valid(regs.second())) {
 781           rm.Insert(regs.second());
 782         }
 783         return new MachProjNode(this,con,rm,ideal_reg);
 784       } else {
 785         assert(con == TypeFunc::Parms+1, "only one return value");
 786         assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
 787         return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
 788       }
 789     }
 790   }
 791 
 792   switch (con) {
 793   case TypeFunc::Control:
 794   case TypeFunc::I_O:
 795   case TypeFunc::Memory:
 796     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
 797 
 798   case TypeFunc::ReturnAdr:
 799   case TypeFunc::FramePtr:
 800   default:
 801     ShouldNotReachHere();
 802   }
 803   return nullptr;
 804 }
 805 
 806 // Do we Match on this edge index or not?  Match no edges
 807 uint CallNode::match_edge(uint idx) const {
 808   return 0;
 809 }
 810 
 811 //
 812 // Determine whether the call could modify the field of the specified
 813 // instance at the specified offset.
 814 //
 815 bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
 816   assert((t_oop != nullptr), "sanity");
 817   if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
 818     const TypeTuple* args = _tf->domain_sig();
 819     Node* dest = nullptr;
 820     // Stubs that can be called once an ArrayCopyNode is expanded have
 821     // different signatures. Look for the second pointer argument,
 822     // which is the destination of the copy.
 823     for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 824       if (args->field_at(i)->isa_ptr()) {
 825         j++;
 826         if (j == 2) {
 827           dest = in(i);
 828           break;
 829         }
 830       }
 831     }
 832     guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
 833     if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
 834       return true;
 835     }
 836     return false;
 837   }
 838   if (t_oop->is_known_instance()) {

 847       Node* proj = proj_out_or_null(TypeFunc::Parms);
 848       if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
 849         return false;
 850       }
 851     }
 852     if (is_CallJava() && as_CallJava()->method() != nullptr) {
 853       ciMethod* meth = as_CallJava()->method();
 854       if (meth->is_getter()) {
 855         return false;
 856       }
 857       // May modify (by reflection) if a boxing object is passed
 858       // as an argument or returned.
 859       Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
 860       if (proj != nullptr) {
 861         const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
 862         if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
 863                                    (inst_t->instance_klass() == boxing_klass))) {
 864           return true;
 865         }
 866       }
 867       const TypeTuple* d = tf()->domain_cc();
 868       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 869         const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
 870         if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
 871                                  (inst_t->instance_klass() == boxing_klass))) {
 872           return true;
 873         }
 874       }
 875       return false;
 876     }
 877   }
 878   return true;
 879 }
 880 
 881 // Does this call have a direct reference to n other than debug information?
 882 bool CallNode::has_non_debug_use(Node* n) {
 883   const TypeTuple* d = tf()->domain_cc();
 884   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 885     if (in(i) == n) {

 886       return true;
 887     }
 888   }
 889   return false;
 890 }
 891 
 892 bool CallNode::has_debug_use(Node* n) {
 893   if (jvms() != nullptr) {
 894     for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
 895       if (in(i) == n) {
 896         return true;
 897       }
 898     }
 899   }
 900   return false;
 901 }
 902 
 903 // Returns the unique CheckCastPP of a call
 904 // or 'this' if there are several CheckCastPP or unexpected uses
 905 // or returns null if there is none.
 906 Node *CallNode::result_cast() {
 907   Node *cast = nullptr;
 908 
 909   Node *p = proj_out_or_null(TypeFunc::Parms);
 910   if (p == nullptr)
 911     return nullptr;
 912 
 913   for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
 914     Node *use = p->fast_out(i);
 915     if (use->is_CheckCastPP()) {
 916       if (cast != nullptr) {
 917         return this;  // more than 1 CheckCastPP
 918       }
 919       cast = use;
 920     } else if (!use->is_Initialize() &&
 921                !use->is_AddP() &&
 922                use->Opcode() != Op_MemBarStoreStore) {
 923       // Expected uses are restricted to a CheckCastPP, an Initialize
 924       // node, a MemBarStoreStore (clone) and AddP nodes. If we
 925       // encounter any other use (a Phi node can be seen in rare
 926       // cases) return this to prevent incorrect optimizations.
 927       return this;
 928     }
 929   }
 930   return cast;
 931 }
 932 
 933 
 934 CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) {
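        // A call may have several result projections when an inline type is
        // returned as multiple fields; find the largest projection index so
        // the resproj array can be sized accordingly.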
 935   uint max_res = TypeFunc::Parms-1;
 936   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 937     ProjNode *pn = fast_out(i)->as_Proj();
 938     max_res = MAX2(max_res, pn->_con);
 939   }
 940 
 941   assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");
 942 
 943   uint projs_size = sizeof(CallProjections);
 944   if (max_res > TypeFunc::Parms) {
 945     projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
 946   }
 947   char* projs_storage = resource_allocate_bytes(projs_size);
 948   CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);
 949 
 950   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 951     ProjNode *pn = fast_out(i)->as_Proj();
 952     if (pn->outcnt() == 0) continue;
 953     switch (pn->_con) {
 954     case TypeFunc::Control:
 955       {
 956         // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
 957         projs->fallthrough_proj = pn;
 958         const Node* cn = pn->unique_ctrl_out_or_null();
 959         if (cn != nullptr && cn->is_Catch()) {
 960           ProjNode *cpn = nullptr;
 961           for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
 962             cpn = cn->fast_out(k)->as_Proj();
 963             assert(cpn->is_CatchProj(), "must be a CatchProjNode");
 964             if (cpn->_con == CatchProjNode::fall_through_index)
 965               projs->fallthrough_catchproj = cpn;
 966             else {
 967               assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
 968               projs->catchall_catchproj = cpn;
 969             }
 970           }
 971         }
 972         break;
 973       }
 974     case TypeFunc::I_O:
 975       if (pn->_is_io_use)
 976         projs->catchall_ioproj = pn;
 977       else
 978         projs->fallthrough_ioproj = pn;
 979       for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
 980         Node* e = pn->out(j);
 981         if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
 982           assert(projs->exobj == nullptr, "only one");
 983           projs->exobj = e;
 984         }
 985       }
 986       break;
 987     case TypeFunc::Memory:
 988       if (pn->_is_io_use)
 989         projs->catchall_memproj = pn;
 990       else
 991         projs->fallthrough_memproj = pn;
 992       break;
 993     case TypeFunc::Parms:
 994       projs->resproj[0] = pn;
 995       break;
 996     default:
 997       assert(pn->_con <= max_res, "unexpected projection from allocation node.");
 998       projs->resproj[pn->_con-TypeFunc::Parms] = pn;
 999       break;
1000     }
1001   }
1002 
1003   // The resproj may not exist because the result could be ignored
1004   // and the exception object may not exist if an exception handler
1005   // swallows the exception, but all the others must exist and be found.
1006   do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
1007   assert(!do_asserts || projs->fallthrough_proj      != nullptr, "must be found");
1008   assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
1009   assert(!do_asserts || projs->fallthrough_memproj   != nullptr, "must be found");
1010   assert(!do_asserts || projs->fallthrough_ioproj    != nullptr, "must be found");
1011   assert(!do_asserts || projs->catchall_catchproj    != nullptr, "must be found");
1012   if (separate_io_proj) {
1013     assert(!do_asserts || projs->catchall_memproj    != nullptr, "must be found");
1014     assert(!do_asserts || projs->catchall_ioproj     != nullptr, "must be found");
1015   }
1016   return projs;
1017 }
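     // A typical use (sketch, assuming a CallNode* call): collect the
     // projections and then rewire them, e.g.
     //   CallProjections* projs = call->extract_projections(false, true);
     //   if (projs->fallthrough_proj != nullptr) { /* rewire control users */ }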
1018 
1019 Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1020 #ifdef ASSERT
1021   // Validate attached generator
1022   CallGenerator* cg = generator();
1023   if (cg != nullptr) {
1024     assert(is_CallStaticJava()  && cg->is_mh_late_inline() ||
1025            is_CallDynamicJava() && cg->is_virtual_late_inline(), "mismatch");
1026   }
1027 #endif // ASSERT
1028   return SafePointNode::Ideal(phase, can_reshape);
1029 }
1030 
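     // Does this call target an arraycopy stub (identified by its name)?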
1031 bool CallNode::is_call_to_arraycopystub() const {
1032   if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
1033     return true;
1034   }
1035   return false;
1036 }
1037 
1038 //=============================================================================
1039 uint CallJavaNode::size_of() const { return sizeof(*this); }
1040 bool CallJavaNode::cmp( const Node &n ) const {
1041   CallJavaNode &call = (CallJavaNode&)n;
1042   return CallNode::cmp(call) && _method == call._method &&
1043          _override_symbolic_info == call._override_symbolic_info;
1044 }
1045 
1046 void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
1047   // Copy debug information and adjust JVMState information
1048   uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain_sig()->cnt() : (uint)TypeFunc::Parms+1;
1049   uint new_dbg_start = tf()->domain_sig()->cnt();
1050   int jvms_adj  = new_dbg_start - old_dbg_start;
1051   assert (new_dbg_start == req(), "argument count mismatch");
1052   Compile* C = phase->C;
1053 
1054   // SafePointScalarObject node could be referenced several times in debug info.
1055   // Use Dict to record cloned nodes.
1056   Dict* sosn_map = new Dict(cmpkey,hashkey);
1057   for (uint i = old_dbg_start; i < sfpt->req(); i++) {
1058     Node* old_in = sfpt->in(i);
1059     // Clone old SafePointScalarObjectNodes, adjusting their field contents.
1060     if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
1061       SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
1062       bool new_node;
1063       Node* new_in = old_sosn->clone(sosn_map, new_node);
1064       if (new_node) { // New node?
1065         new_in->set_req(0, C->root()); // reset control edge
1066         new_in = phase->transform(new_in); // Register new node.
1067       }
1068       old_in = new_in;
1069     }
1070     add_req(old_in);
1071   }
1072 
1073   // JVMS may be shared so clone it before we modify it
1074   set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
1075   for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
1076     jvms->set_map(this);
1077     jvms->set_locoff(jvms->locoff()+jvms_adj);
1078     jvms->set_stkoff(jvms->stkoff()+jvms_adj);
1079     jvms->set_monoff(jvms->monoff()+jvms_adj);
1080     jvms->set_scloff(jvms->scloff()+jvms_adj);
1081     jvms->set_endoff(jvms->endoff()+jvms_adj);
1082   }
1083 }
1084 
1085 #ifdef ASSERT
1086 bool CallJavaNode::validate_symbolic_info() const {
1087   if (method() == nullptr) {
1088     return true; // call into runtime or uncommon trap
1089   }
1090   Bytecodes::Code bc = jvms()->method()->java_code_at_bci(jvms()->bci());
1091   if (EnableValhalla && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
1092     return true;
1093   }
1094   ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
1095   ciMethod* callee = method();
1096   if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
1097     assert(override_symbolic_info(), "should be set");
1098   }
1099   assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
1100   return true;
1101 }
1102 #endif
1103 
1104 #ifndef PRODUCT
1105 void CallJavaNode::dump_spec(outputStream* st) const {
1106   if (_method) _method->print_short_name(st);
1107   CallNode::dump_spec(st);
1108 }
1109 
1110 void CallJavaNode::dump_compact_spec(outputStream* st) const {
1111   if (_method) {
1112     _method->print_short_name(st);
1113   } else {
1114     st->print("<?>");
1115   }
1116 }
1117 #endif
1118 
1119 //=============================================================================
1120 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
1121 bool CallStaticJavaNode::cmp( const Node &n ) const {
1122   CallStaticJavaNode &call = (CallStaticJavaNode&)n;
1123   return CallJavaNode::cmp(call);
1124 }
1125 
1126 Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1127   if (can_reshape && uncommon_trap_request() != 0) {
1128     if (remove_useless_allocation(phase, in(0), in(TypeFunc::Memory), in(TypeFunc::Parms))) {
1129       if (!in(0)->is_Region()) {
1130         PhaseIterGVN* igvn = phase->is_IterGVN();
1131         igvn->replace_input_of(this, 0, phase->C->top());
1132       }
1133       return this;
1134     }
1135   }
1136 
1137   CallGenerator* cg = generator();
1138   if (can_reshape && cg != nullptr) {
1139     assert(IncrementalInlineMH, "required");
1140     assert(cg->call_node() == this, "mismatch");
1141     assert(cg->is_mh_late_inline(), "not virtual");
1142 
1143     // Check whether this MH handle call becomes a candidate for inlining.
1144     ciMethod* callee = cg->method();
1145     vmIntrinsics::ID iid = callee->intrinsic_id();
1146     if (iid == vmIntrinsics::_invokeBasic) {
1147       if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
1148         phase->C->prepend_late_inline(cg);
1149         set_generator(nullptr);
1150       }
1151     } else if (iid == vmIntrinsics::_linkToNative) {
1152       // never retry
1153     } else {
1154       assert(callee->has_member_arg(), "wrong type of call?");
1155       if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
1156         phase->C->prepend_late_inline(cg);

1169 
1170 //----------------------------uncommon_trap_request----------------------------
1171 // If this is an uncommon trap, return the request code, else zero.
1172 int CallStaticJavaNode::uncommon_trap_request() const {
1173   return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
1174 }
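     // Extract the trap request constant from the first argument of an
     // uncommon trap call.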
1175 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
1176 #ifndef PRODUCT
1177   if (!(call->req() > TypeFunc::Parms &&
1178         call->in(TypeFunc::Parms) != nullptr &&
1179         call->in(TypeFunc::Parms)->is_Con() &&
1180         call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
1181     assert(in_dump() != 0, "OK if dumping");
1182     tty->print("[bad uncommon trap]");
1183     return 0;
1184   }
1185 #endif
1186   return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
1187 }
1188 
1189 bool CallStaticJavaNode::remove_useless_allocation(PhaseGVN *phase, Node* ctl, Node* mem, Node* unc_arg) {
1190   // Split-if can cause the flat array branch of an array load to
1191   // end in an uncommon trap. In that case, the allocation of the
1192   // loaded value and its initialization is useless. Eliminate it: use
1193   // the JVM state of the allocation to create a new uncommon trap
1194   // call at the load.
1195   if (ctl == nullptr || ctl->is_top() || mem == nullptr || mem->is_top() || !mem->is_MergeMem()) {
1196     return false;
1197   }
1198   PhaseIterGVN* igvn = phase->is_IterGVN();
1199   if (ctl->is_Region()) {
1200     bool res = false;
1201     for (uint i = 1; i < ctl->req(); i++) {
1202       MergeMemNode* mm = mem->clone()->as_MergeMem();
1203       for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
1204         Node* m = mms.memory();
1205         if (m->is_Phi() && m->in(0) == ctl) {
1206           mms.set_memory(m->in(i));
1207         }
1208       }
1209       if (remove_useless_allocation(phase, ctl->in(i), mm, unc_arg)) {
1210         res = true;
1211         if (!ctl->in(i)->is_Region()) {
1212           igvn->replace_input_of(ctl, i, phase->C->top());
1213         }
1214       }
1215       igvn->remove_dead_node(mm);
1216     }
1217     return res;
1218   }
1219   // Verify the control flow: walk up from ctl to the expected load_unknown_inline call
1220   Node* call = ctl;
1221   MemBarNode* membar = nullptr;
1222   for (;;) {
1223     if (call == nullptr || call->is_top()) {
1224       return false;
1225     }
1226     if (call->is_Proj() || call->is_Catch() || call->is_MemBar()) {
1227       call = call->in(0);
1228     } else if (call->Opcode() == Op_CallStaticJava &&
1229                call->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
1230       assert(call->in(0)->is_Proj() && call->in(0)->in(0)->is_MemBar(), "missing membar");
1231       membar = call->in(0)->in(0)->as_MemBar();
1232       break;
1233     } else {
1234       return false;
1235     }
1236   }
1237 
1238   JVMState* jvms = call->jvms();
1239   if (phase->C->too_many_traps(jvms->method(), jvms->bci(), Deoptimization::trap_request_reason(uncommon_trap_request()))) {
1240     return false;
1241   }
1242 
1243   Node* alloc_mem = call->in(TypeFunc::Memory);
1244   if (alloc_mem == nullptr || alloc_mem->is_top()) {
1245     return false;
1246   }
1247   if (!alloc_mem->is_MergeMem()) {
1248     alloc_mem = MergeMemNode::make(alloc_mem);
1249     igvn->register_new_node_with_optimizer(alloc_mem);
1250   }
1251 
1252   // Verify that there is no unexpected side effect between the allocation and the load
1253   for (MergeMemStream mms2(mem->as_MergeMem(), alloc_mem->as_MergeMem()); mms2.next_non_empty2(); ) {
1254     Node* m1 = mms2.is_empty() ? mms2.base_memory() : mms2.memory();
1255     Node* m2 = mms2.memory2();
1256 
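         // Follow m1 up through projections, membars and memory merges; the
         // walk is capped at a fixed number of steps to bound compile time.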
1257     for (uint i = 0; i < 100; i++) {
1258       if (m1 == m2) {
1259         break;
1260       } else if (m1->is_Proj()) {
1261         m1 = m1->in(0);
1262       } else if (m1->is_MemBar()) {
1263         m1 = m1->in(TypeFunc::Memory);
1264       } else if (m1->Opcode() == Op_CallStaticJava &&
1265                  m1->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
1266         if (m1 != call) {
1267           return false;
1268         }
1269         break;
1270       } else if (m1->is_MergeMem()) {
1271         MergeMemNode* mm = m1->as_MergeMem();
1272         int idx = mms2.alias_idx();
1273         if (idx == Compile::AliasIdxBot) {
1274           m1 = mm->base_memory();
1275         } else {
1276           m1 = mm->memory_at(idx);
1277         }
1278       } else {
1279         return false;
1280       }
1281     }
1282   }
1283   if (alloc_mem->outcnt() == 0) {
1284     igvn->remove_dead_node(alloc_mem);
1285   }
1286 
1287   // Remove membar preceding the call
1288   membar->remove(igvn);
1289 
1290   address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
1291   CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap", nullptr);
1292   unc->init_req(TypeFunc::Control, call->in(0));
1293   unc->init_req(TypeFunc::I_O, call->in(TypeFunc::I_O));
1294   unc->init_req(TypeFunc::Memory, call->in(TypeFunc::Memory));
1295   unc->init_req(TypeFunc::FramePtr,  call->in(TypeFunc::FramePtr));
1296   unc->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
1297   unc->init_req(TypeFunc::Parms+0, unc_arg);
1298   unc->set_cnt(PROB_UNLIKELY_MAG(4));
1299   unc->copy_call_debug_info(igvn, call->as_CallStaticJava());
1300 
1301   igvn->replace_input_of(call, 0, phase->C->top());
1302 
1303   igvn->register_new_node_with_optimizer(unc);
1304 
1305   Node* ctrl = phase->transform(new ProjNode(unc, TypeFunc::Control));
1306   Node* halt = phase->transform(new HaltNode(ctrl, call->in(TypeFunc::FramePtr), "uncommon trap returned which should never happen"));
1307   igvn->add_input_to(phase->C->root(), halt);
1308 
1309   return true;
1310 }
1311 
1312 
1313 #ifndef PRODUCT
1314 void CallStaticJavaNode::dump_spec(outputStream *st) const {
1315   st->print("# Static ");
1316   if (_name != nullptr) {
1317     st->print("%s", _name);
1318     int trap_req = uncommon_trap_request();
1319     if (trap_req != 0) {
1320       char buf[100];
1321       st->print("(%s)",
1322                  Deoptimization::format_trap_request(buf, sizeof(buf),
1323                                                      trap_req));
1324     }
1325     st->print(" ");
1326   }
1327   CallJavaNode::dump_spec(st);
1328 }
1329 
1330 void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
1331   if (_method) {
1332     _method->print_short_name(st);

1397 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
1398 bool CallRuntimeNode::cmp( const Node &n ) const {
1399   CallRuntimeNode &call = (CallRuntimeNode&)n;
1400   return CallNode::cmp(call) && !strcmp(_name,call._name);
1401 }
1402 #ifndef PRODUCT
1403 void CallRuntimeNode::dump_spec(outputStream *st) const {
1404   st->print("# ");
1405   st->print("%s", _name);
1406   CallNode::dump_spec(st);
1407 }
1408 #endif
1409 uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
1410 bool CallLeafVectorNode::cmp( const Node &n ) const {
1411   CallLeafVectorNode &call = (CallLeafVectorNode&)n;
1412   return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
1413 }
1414 
1415 //------------------------------calling_convention-----------------------------
1416 void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
1417   if (_entry_point == nullptr) {
1418     // A call with a null entry point is a special case: its inputs are
1419     // multiple values returned from a call, so it should follow
1420     // the return convention.
1421     SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
1422     return;
1423   }
1424   SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
1425 }
1426 
1427 void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1428 #ifdef ASSERT
1429   assert(tf()->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1430          "return vector size must match");
1431   const TypeTuple* d = tf()->domain_sig();
1432   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1433     Node* arg = in(i);
1434     assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1435            "vector argument size must match");
1436   }
1437 #endif
1438 
1439   SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
1440 }
1441 
1442 //=============================================================================
1443 //------------------------------calling_convention-----------------------------
1444 
1445 
1446 //=============================================================================
1447 #ifndef PRODUCT
1448 void CallLeafNode::dump_spec(outputStream *st) const {
1449   st->print("# ");
1450   st->print("%s", _name);
1451   CallNode::dump_spec(st);
1452 }
1453 #endif
1454 
1455 uint CallLeafNoFPNode::match_edge(uint idx) const {
1456   // Null entry point is a special case for which the target is in a
1457   // register. Need to match that edge.
1458   return entry_point() == nullptr && idx == TypeFunc::Parms;
1459 }
1460 
1461 //=============================================================================
1462 
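     // Set the local at index idx of the given JVMState to c.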
1463 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
1464   assert(verify_jvms(jvms), "jvms must match");
1465   int loc = jvms->locoff() + idx;
1466   if (in(loc)->is_top() && idx > 0 && !c->is_top()) {
1467     // If the current local at idx is top then the local at idx - 1 could
1468     // be a long/double that needs to be killed since top could
1469     // represent the 2nd half of that long/double.
1470     uint ideal = in(loc -1)->ideal_reg();
1471     if (ideal == Op_RegD || ideal == Op_RegL) {
1472       // set other (low index) half to top
1473       set_req(loc - 1, in(loc));
1474     }
1475   }
1476   set_req(loc, c);
1477 }
1478 
1479 uint SafePointNode::size_of() const { return sizeof(*this); }
1480 bool SafePointNode::cmp( const Node &n ) const {

1491   }
1492 }
1493 
1494 
1495 //----------------------------next_exception-----------------------------------
1496 SafePointNode* SafePointNode::next_exception() const {
1497   if (len() == req()) {
1498     return nullptr;
1499   } else {
1500     Node* n = in(req());
1501     assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
1502     return (SafePointNode*) n;
1503   }
1504 }
1505 
1506 
1507 //------------------------------Ideal------------------------------------------
1508 // Skip over any collapsed Regions
1509 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1510   assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
1511   if (remove_dead_region(phase, can_reshape)) {
1512     return this;
1513   }
1514   // Scalarize inline types in safepoint debug info.
1515   // Delay this until all inlining is over to avoid getting inconsistent debug info.
1516   if (phase->C->scalarize_in_safepoints() && can_reshape && jvms() != nullptr) {
1517     for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
1518       Node* n = in(i)->uncast();
1519       if (n->is_InlineType()) {
1520         n->as_InlineType()->make_scalar_in_safepoints(phase->is_IterGVN());
1521       }
1522     }
1523   }
1524   return nullptr;
1525 }
1526 
1527 //------------------------------Identity---------------------------------------
1528 // Remove obviously duplicate safepoints
1529 Node* SafePointNode::Identity(PhaseGVN* phase) {
1530 
1531   // If you have back to back safepoints, remove one
1532   if (in(TypeFunc::Control)->is_SafePoint()) {
1533     Node* out_c = unique_ctrl_out_or_null();
1534     // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
1535     // outer loop's safepoint could confuse removal of the outer loop.
1536     if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
1537       return in(TypeFunc::Control);
1538     }
1539   }
1540 
1541   // Transforming long counted loops requires a safepoint node. Do not
1542   // eliminate a safepoint until loop opts are over.
1543   if (in(0)->is_Proj() && !phase->C->major_progress()) {
1544     Node *n0 = in(0)->in(0);

1661   return (TypeFunc::Parms == idx);
1662 }
1663 
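     // Remove the precedence edge on the root node that keeps this
     // safepoint alive.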
1664 void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
1665   assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
1666   int nb = igvn->C->root()->find_prec_edge(this);
1667   if (nb != -1) {
1668     igvn->delete_precedence_of(igvn->C->root(), nb);
1669   }
1670 }
1671 
1672 //==============  SafePointScalarObjectNode  ==============
1673 
1674 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint n_fields) :
1675   TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
1676   _first_index(first_index),
1677   _n_fields(n_fields),
1678   _alloc(alloc)
1679 {
1680 #ifdef ASSERT
1681   if (alloc != nullptr && !alloc->is_Allocate() && alloc->Opcode() != Op_VectorBox) {
1682     alloc->dump();
1683     assert(false, "unexpected call node");
1684   }
1685 #endif
1686   init_class_id(Class_SafePointScalarObject);
1687 }
1688 
1689 // Do not allow value-numbering for SafePointScalarObject node.
1690 uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1691 bool SafePointScalarObjectNode::cmp( const Node &n ) const {
1692   return (&n == this); // Always fail except on self
1693 }
1694 
1695 uint SafePointScalarObjectNode::ideal_reg() const {
1696   return 0; // No matching to machine instruction
1697 }
1698 
1699 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
1700   return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1701 }

1766     new_node = false;
1767     return (SafePointScalarMergeNode*)cached;
1768   }
1769   new_node = true;
1770   SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
1771   sosn_map->Insert((void*)this, (void*)res);
1772   return res;
1773 }
1774 
1775 #ifndef PRODUCT
1776 void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
1777   st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
1778 }
1779 #endif
1780 
1781 //=============================================================================
1782 uint AllocateNode::size_of() const { return sizeof(*this); }
1783 
1784 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1785                            Node *ctrl, Node *mem, Node *abio,
1786                            Node *size, Node *klass_node,
1787                            Node* initial_test,
1788                            InlineTypeNode* inline_type_node)
1789   : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
1790 {
1791   init_class_id(Class_Allocate);
1792   init_flags(Flag_is_macro);
1793   _is_scalar_replaceable = false;
1794   _is_non_escaping = false;
1795   _is_allocation_MemBar_redundant = false;
1796   _larval = false;
1797   Node *topnode = C->top();
1798 
1799   init_req( TypeFunc::Control  , ctrl );
1800   init_req( TypeFunc::I_O      , abio );
1801   init_req( TypeFunc::Memory   , mem );
1802   init_req( TypeFunc::ReturnAdr, topnode );
1803   init_req( TypeFunc::FramePtr , topnode );
1804   init_req( AllocSize          , size);
1805   init_req( KlassNode          , klass_node);
1806   init_req( InitialTest        , initial_test);
1807   init_req( ALength            , topnode);
1808   init_req( ValidLengthTest    , topnode);
1809   init_req( InlineType         , inline_type_node);
1810   // DefaultValue defaults to nullptr
1811   // RawDefaultValue defaults to nullptr
1812   C->add_macro_node(this);
1813 }
1814 
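     // The MemBar publishing a newly allocated object is redundant if the
     // initializer does not let the object escape (its receiver argument
     // stays local or on the stack): no other thread can observe the
     // object before the constructor completes.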
1815 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
1816 {
1817   assert(initializer != nullptr &&
1818          (initializer->is_object_constructor() || initializer->is_class_initializer()),
1819          "unexpected initializer method");
1820   BCEscapeAnalyzer* analyzer = initializer->get_bcea();
1821   if (analyzer == nullptr) {
1822     return;
1823   }
1824 
1825   // Allocation node is first parameter in its initializer
1826   if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
1827     _is_allocation_MemBar_redundant = true;
1828   }
1829 }
1830 
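     // Build the ideal mark word for objects created by this allocation.
     // With Valhalla, the prototype header is loaded from the klass (it may
     // encode inline-type properties); otherwise the default mark prototype
     // is used. The larval bit is OR'ed in for larval objects.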
1831 Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
1832   Node* mark_node = nullptr;
1833   if (EnableValhalla) {
1834     Node* klass_node = in(AllocateNode::KlassNode);
1835     Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
1836     mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
1837   } else {
1838     mark_node = phase->MakeConX(markWord::prototype().value());
1839   }
1840   mark_node = phase->transform(mark_node);
1841   // Avoid returning a constant (old node) here because this method is used by LoadNode::Ideal
1842   return new OrXNode(mark_node, phase->MakeConX(_larval ? markWord::larval_bit_in_place : 0));
1843 }
1844 
1845 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
1846 // CastII, if appropriate.  If we are not allowed to create new nodes, and
1847 // a CastII is appropriate, return null.
1848 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
1849   Node *length = in(AllocateNode::ALength);
1850   assert(length != nullptr, "length is not null");
1851 
1852   const TypeInt* length_type = phase->find_int_type(length);
1853   const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1854 
1855   if (ary_type != nullptr && length_type != nullptr) {
1856     const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1857     if (narrow_length_type != length_type) {
1858       // Assert one of:
1859       //   - the narrow_length is 0
1860       //   - the narrow_length is not wider than length
1861       assert(narrow_length_type == TypeInt::ZERO ||
1862              length_type->is_con() && narrow_length_type->is_con() &&

2201 
2202 void AbstractLockNode::dump_compact_spec(outputStream* st) const {
2203   st->print("%s", _kind_names[_kind]);
2204 }
2205 #endif
2206 
2207 //=============================================================================
2208 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2209 
2210   // perform any generic optimizations first (returns 'this' or null)
2211   Node *result = SafePointNode::Ideal(phase, can_reshape);
2212   if (result != nullptr)  return result;
2213   // Don't bother trying to transform a dead node
2214   if (in(0) && in(0)->is_top())  return nullptr;
2215 
2216   // Now see if we can optimize away this lock.  We don't actually
2217   // remove the locking here, we simply set the _eliminate flag which
2218   // prevents macro expansion from expanding the lock.  Since we don't
2219   // modify the graph, the value returned from this function is the
2220   // one computed above.
2221   const Type* obj_type = phase->type(obj_node());
2222   if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
2223     //
2224     // If we are locking a non-escaped object, the lock/unlock is unnecessary
2225     //
2226     ConnectionGraph *cgr = phase->C->congraph();
2227     if (cgr != nullptr && cgr->not_global_escape(obj_node())) {
2228       assert(!is_eliminated() || is_coarsened(), "sanity");
2229       // The lock could be marked eliminated by lock coarsening
2230       // code during first IGVN before EA. Replace the coarsened flag
2231       // with non-escaping to eliminate all associated locks/unlocks.
2232 #ifdef ASSERT
2233       this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
2234 #endif
2235       this->set_non_esc_obj();
2236       return result;
2237     }
2238 
2239     if (!phase->C->do_locks_coarsening()) {
2240       return result; // Compiling without locks coarsening
2241     }
2242     //

2398 }
2399 
2400 //=============================================================================
2401 uint UnlockNode::size_of() const { return sizeof(*this); }
2402 
2403 //=============================================================================
2404 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2405 
2406   // perform any generic optimizations first (returns 'this' or null)
2407   Node *result = SafePointNode::Ideal(phase, can_reshape);
2408   if (result != nullptr)  return result;
2409   // Don't bother trying to transform a dead node
2410   if (in(0) && in(0)->is_top())  return nullptr;
2411 
2412   // Now see if we can optimize away this unlock.  We don't actually
2413   // remove the unlocking here, we simply set the _eliminate flag which
2414   // prevents macro expansion from expanding the unlock.  Since we don't
2415   // modify the graph, the value returned from this function is the
2416   // one computed above.
2417   // Escape state is defined after Parse phase.
2418   const Type* obj_type = phase->type(obj_node());
2419   if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
2420     //
2421     // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
2422     //
2423     ConnectionGraph *cgr = phase->C->congraph();
2424     if (cgr != nullptr && cgr->not_global_escape(obj_node())) {
2425       assert(!is_eliminated() || is_coarsened(), "sanity");
2426       // The lock could be marked eliminated by lock coarsening
2427       // code during first IGVN before EA. Replace the coarsened flag
2428       // with non-escaping to eliminate all associated locks/unlocks.
2429 #ifdef ASSERT
2430       this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
2431 #endif
2432       this->set_non_esc_obj();
2433     }
2434   }
2435   return result;
2436 }
2437 
2438 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock)  const {
2439   if (C == nullptr) {

2479     }
2480     // unrelated
2481     return false;
2482   }
2483 
2484   if (dest_t->isa_aryptr()) {
2485     // arraycopy or array clone
2486     if (t_oop->isa_instptr()) {
2487       return false;
2488     }
2489     if (!t_oop->isa_aryptr()) {
2490       return true;
2491     }
2492 
2493     const Type* elem = dest_t->is_aryptr()->elem();
2494     if (elem == Type::BOTTOM) {
2495       // An array, but we don't know what its elements are
2496       return true;
2497     }
2498 
2499     dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
2500     t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
2501     uint dest_alias = phase->C->get_alias_index(dest_t);
2502     uint t_oop_alias = phase->C->get_alias_index(t_oop);
2503 
2504     return dest_alias == t_oop_alias;
2505   }
2506 
2507   return true;
2508 }
< prev index next >