/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "ci/ciSymbols.hpp"
#include "code/vmreg.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/escape.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/movenode.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
bool StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

//------------------------------calling_convention-----------------------------
void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::EMPTY;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return nullptr;
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}

void ParmNode::dump_compact_spec(outputStream *st) const {
  if (_con < TypeFunc::Parms) {
    st->print("%s", names[_con]);
  } else {
    st->print("%d:", _con-TypeFunc::Parms);
    // unconditionally dump bottom_type
    bottom_type()->dump_on(st);
  }
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return t->ideal_reg();
  }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

const Type* ReturnNode::Value(PhaseGVN* phase) const {
  return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}


#ifndef PRODUCT
void ReturnNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "returns"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("returns ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr);
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

const Type* RethrowNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "exception"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("exception ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
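// Note on the offset fields set below (a sketch of the debug-info layout, not
// asserted here): within the owning SafePointNode's inputs, each interpreter
// frame's values occupy the half-open ranges
//   locals              [_locoff, _stkoff)
//   expression stack    [_stkoff, _monoff)
//   monitors            [_monoff, _scloff)
//   scalarized objects  [_scloff, _endoff)
// so an empty range (e.g. _scloff == _endoff) means that section is absent.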
JVMState::JVMState(ciMethod* method, JVMState* caller) :
  _method(method),
  _receiver_info(nullptr) {
  assert(method != nullptr, "must be valid call site");
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  DEBUG_ONLY(_bci = -99);  // random garbage value
  DEBUG_ONLY(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == nullptr ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
JVMState::JVMState(int stack_size) :
  _method(nullptr),
  _receiver_info(nullptr) {
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  DEBUG_ONLY(_map = (SafePointNode*)-1);
  _caller = nullptr;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}

//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)    return false;
    if (p->_method == nullptr)       return true;   // bci is irrelevant
    if (p->_bci    != q->_bci)       return false;
    if (p->_reexecute != q->_reexecute)  return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                      return true;
    assert(p != nullptr && q != nullptr, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start()  const {
  DEBUG_ONLY(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  DEBUG_ONLY(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != nullptr; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}

#ifndef PRODUCT

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
 349 static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
 350   if (n == nullptr) { st->print(" null"); return; }
 351   if (n->is_SafePointScalarObject()) {
 352     // Scalar replacement.
 353     SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
 354     scobjs->append_if_missing(spobj);
 355     int sco_n = scobjs->find(spobj);
 356     assert(sco_n >= 0, "");
 357     st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
 358     return;
 359   }
 360   if (regalloc->node_regs_max_index() > 0 &&
 361       OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
 362     char buf[50];
 363     regalloc->dump_register(n,buf,sizeof(buf));
 364     st->print(" %s%d]=%s",msg,i,buf);
 365   } else {                      // No register, but might be constant
 366     const Type *t = n->bottom_type();
 367     switch (t->base()) {
 368     case Type::Int:
 369       st->print(" %s%d]=#" INT32_FORMAT,msg,i,t->is_int()->get_con());
 370       break;
 371     case Type::AnyPtr:
 372       assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
 373       st->print(" %s%d]=#null",msg,i);
 374       break;
 375     case Type::AryPtr:
 376     case Type::InstPtr:
 377       st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop()));
 378       break;
 379     case Type::KlassPtr:
 380     case Type::AryKlassPtr:
 381     case Type::InstKlassPtr:
 382       st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->exact_klass()));
 383       break;
 384     case Type::MetadataPtr:
 385       st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata()));
 386       break;
 387     case Type::NarrowOop:
 388       st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop()));
 389       break;
 390     case Type::RawPtr:
 391       st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr()));
 392       break;
 393     case Type::DoubleCon:
 394       st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
 395       break;
 396     case Type::FloatCon:
 397       st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
 398       break;
 399     case Type::Long:
 400       st->print(" %s%d]=#" INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con()));
 401       break;
 402     case Type::Half:
 403     case Type::Top:
 404       st->print(" %s%d]=_",msg,i);
 405       break;
 406     default: ShouldNotReachHere();
 407     }
 408   }
 409 }
 410 
 411 //---------------------print_method_with_lineno--------------------------------
 412 void JVMState::print_method_with_lineno(outputStream* st, bool show_name) const {
 413   if (show_name) _method->print_short_name(st);
 414 
 415   int lineno = _method->line_number_from_bci(_bci);
 416   if (lineno != -1) {
 417     st->print(" @ bci:%d (line %d)", _bci, lineno);
 418   } else {
 419     st->print(" @ bci:%d", _bci);
 420   }
 421 }
 422 
 423 //------------------------------format-----------------------------------------
 424 void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
 425   st->print("        #");
 426   if (_method) {
 427     print_method_with_lineno(st, true);
 428   } else {
 429     st->print_cr(" runtime stub ");
 430     return;
 431   }
 432   if (n->is_MachSafePoint()) {
 433     GrowableArray<SafePointScalarObjectNode*> scobjs;
 434     MachSafePointNode *mcall = n->as_MachSafePoint();
 435     uint i;
 436     // Print locals
 437     for (i = 0; i < (uint)loc_size(); i++)
 438       format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
 439     // Print stack
 440     for (i = 0; i < (uint)stk_size(); i++) {
 441       if ((uint)(_stkoff + i) >= mcall->len())
 442         st->print(" oob ");
 443       else
 444        format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
 445     }
 446     for (i = 0; (int)i < nof_monitors(); i++) {
 447       Node *box = mcall->monitor_box(this, i);
 448       Node *obj = mcall->monitor_obj(this, i);
 449       if (regalloc->node_regs_max_index() > 0 &&
 450           OptoReg::is_valid(regalloc->get_reg_first(box))) {
 451         box = BoxLockNode::box_node(box);
 452         format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
 453       } else {
 454         OptoReg::Name box_reg = BoxLockNode::reg(box);
 455         st->print(" MON-BOX%d=%s+%d",
 456                    i,
 457                    OptoReg::regname(OptoReg::c_frame_pointer),
 458                    regalloc->reg2offset(box_reg));
 459       }
 460       const char* obj_msg = "MON-OBJ[";
 461       if (EliminateLocks) {
 462         if (BoxLockNode::box_node(box)->is_eliminated())
 463           obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
 464       }
 465       format_helper(regalloc, st, obj, obj_msg, i, &scobjs);
 466     }
 467 
 468     for (i = 0; i < (uint)scobjs.length(); i++) {
 469       // Scalar replaced objects.
 470       st->cr();
 471       st->print("        # ScObj" INT32_FORMAT " ", i);
 472       SafePointScalarObjectNode* spobj = scobjs.at(i);
 473       ciKlass* cik = spobj->bottom_type()->is_oopptr()->exact_klass();
 474       assert(cik->is_instance_klass() ||
 475              cik->is_array_klass(), "Not supported allocation.");
 476       ciInstanceKlass *iklass = nullptr;
 477       if (cik->is_instance_klass()) {
 478         cik->print_name_on(st);
 479         iklass = cik->as_instance_klass();
 480       } else if (cik->is_type_array_klass()) {
 481         cik->as_array_klass()->base_element_type()->print_name_on(st);
 482         st->print("[%d]", spobj->n_fields());
 483       } else if (cik->is_obj_array_klass()) {
 484         ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
 485         if (cie->is_instance_klass()) {
 486           cie->print_name_on(st);
 487         } else if (cie->is_type_array_klass()) {
 488           cie->as_array_klass()->base_element_type()->print_name_on(st);
 489         } else {
 490           ShouldNotReachHere();
 491         }
 492         st->print("[%d]", spobj->n_fields());
 493         int ndim = cik->as_array_klass()->dimension() - 1;
 494         while (ndim-- > 0) {
 495           st->print("[]");
 496         }
 497       } else {
 498         assert(false, "unexpected type %s", cik->name()->as_utf8());
 499       }
 500       st->print("={");
 501       uint nf = spobj->n_fields();
 502       if (nf > 0) {
 503         uint first_ind = spobj->first_index(mcall->jvms());
 504         if (iklass != nullptr && iklass->is_inlinetype()) {
 505           Node* null_marker = mcall->in(first_ind++);
 506           if (!null_marker->is_top()) {
 507             st->print(" [null marker");
 508             format_helper(regalloc, st, null_marker, ":", -1, nullptr);
 509           }
 510         }
 511         Node* fld_node = mcall->in(first_ind);
        if (iklass != nullptr) {
          st->print(" [");
          iklass->nonstatic_field_at(0)->print_name_on(st);
          format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
        } else {
          format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
        }
        for (uint j = 1; j < nf; j++) {
          fld_node = mcall->in(first_ind+j);
          if (iklass != nullptr) {
            st->print(", [");
            iklass->nonstatic_field_at(j)->print_name_on(st);
            format_helper(regalloc, st, fld_node, ":", j, &scobjs);
          } else {
            format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
          }
        }
      }
      st->print(" }");
    }
  }
  st->cr();
  if (caller() != nullptr) caller()->format(regalloc, n, st);
}


void JVMState::dump_spec(outputStream *st) const {
  if (_method != nullptr) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == nullptr)  endcn = strchr(name, '(');
        if (endcn == nullptr)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    print_method_with_lineno(st, !printed);
    if(_reexecute == Reexecute_True)
      st->print(" reexecute");
  } else {
    st->print(" runtime stub");
  }
  if (caller() != nullptr)  caller()->dump_spec(st);
}


void JVMState::dump_on(outputStream* st) const {
  bool print_map = _map && !((uintptr_t)_map & 1) &&
                  ((caller() == nullptr) || (caller()->map() != _map));
  if (print_map) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != nullptr && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(Verbose ? 2 : 1);
  }
  if (caller() != nullptr) {
    caller()->dump_on(st);
  }
  st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
             depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
  if (_method == nullptr) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print("    bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->_reexecute = _reexecute;
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_scloff(_scloff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  n->set_receiver_info(_receiver_info);
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != nullptr; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

/**
 * Reset map for all callers
 */
void JVMState::set_map_deep(SafePointNode* map) {
  for (JVMState* p = this; p != nullptr; p = p->_caller) {
    p->set_map(map);
  }
}

// unlike set_map(), this is two-way setting.
void JVMState::bind_map(SafePointNode* map) {
  set_map(map);
  _map->set_jvms(this);
}

// Adapt offsets into the input array after adding or removing an edge.
// Prerequisite is that the JVMState is used by only one node.
void JVMState::adapt_position(int delta) {
  for (JVMState* jvms = this; jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_locoff(jvms->locoff() + delta);
    jvms->set_stkoff(jvms->stkoff() + delta);
    jvms->set_monoff(jvms->monoff() + delta);
    jvms->set_scloff(jvms->scloff() + delta);
    jvms->set_endoff(jvms->endoff() + delta);
  }
}
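
// For example (illustrative): if one extra input is inserted ahead of the
// debug info on the owning node, adapt_position(1) shifts every per-frame
// offset, in this frame and all caller frames, by one slot to match.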

// Mirror the stack size calculation in the deopt code
// How much stack space would we need at this point in the program in
// case of deoptimization?
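// (The walk below runs from the youngest frame, this, to the oldest via
// caller(); callee_parameters/callee_locals carry each callee's footprint
// into the size calculation of its caller's frame.)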
int JVMState::interpreter_frame_size() const {
  const JVMState* jvms = this;
  int size = 0;
  int callee_parameters = 0;
  int callee_locals = 0;
  int extra_args = method()->max_stack() - stk_size();

  while (jvms != nullptr) {
    int locks = jvms->nof_monitors();
    int temps = jvms->stk_size();
    bool is_top_frame = (jvms == this);
    ciMethod* method = jvms->method();

    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
                                                                 temps + callee_parameters,
                                                                 extra_args,
                                                                 locks,
                                                                 callee_parameters,
                                                                 callee_locals,
                                                                 is_top_frame);
    size += frame_size;

    callee_parameters = method->size_of_parameters();
    callee_locals = method->max_locals();
    extra_args = 0;
    jvms = jvms->caller();
  }
  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
}

// Compute receiver info for a compiled lambda form at call site.
ciInstance* JVMState::compute_receiver_info(ciMethod* callee) const {
  assert(callee != nullptr && callee->is_compiled_lambda_form(), "");
  if (has_method() && method()->is_compiled_lambda_form()) { // callee is not a MH invoker
    Node* recv = map()->argument(this, 0);
    assert(recv != nullptr, "");
    const TypeOopPtr* recv_toop = recv->bottom_type()->isa_oopptr();
    if (recv_toop != nullptr && recv_toop->const_oop() != nullptr) {
      return recv_toop->const_oop()->as_instance();
    }
  }
  return nullptr;
}

//=============================================================================
bool CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("(");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
  st->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  if (tf() != nullptr)  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != nullptr)  jvms()->dump_spec(st);
}

void AllocateNode::dump_spec(outputStream* st) const {
  st->print(" ");
  if (tf() != nullptr) {
    tf()->dump_on(st);
  }
  if (_cnt != COUNT_UNKNOWN) {
    st->print(" C=%f", _cnt);
  }
  const Node* const klass_node = in(KlassNode);
  if (klass_node != nullptr) {
    const TypeKlassPtr* const klass_ptr = klass_node->bottom_type()->isa_klassptr();

    if (klass_ptr != nullptr && klass_ptr->klass_is_exact()) {
      st->print(" allocationKlass:");
      klass_ptr->exact_klass()->print_name_on(st);
    }
  }
  if (jvms() != nullptr) {
    jvms()->dump_spec(st);
  }
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (in(0) == nullptr || phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  return tf()->range_cc();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  if (_entry_point == StubRoutines::store_inline_type_fields_to_buf()) {
    // The call to that stub is a special case: its inputs are
    // multiple values returned from a call and so it should follow
    // the return convention.
    SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
    return;
  }
  // Use the standard compiler calling convention
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  uint con = proj->_con;
  const TypeTuple* range_cc = tf()->range_cc();
  if (con >= TypeFunc::Parms) {
    if (tf()->returns_inline_type_as_fields()) {
      // The call returns multiple values (inline type fields): we
      // create one projection per returned value.
      assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return");
      uint ideal_reg = range_cc->field_at(con)->ideal_reg();
      return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
    } else {
      if (con == TypeFunc::Parms) {
        uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
        OptoRegPair regs = Opcode() == Op_CallLeafVector
          ? match->vector_return_value(ideal_reg)      // Calls into assembly vector routine
          : match->c_return_value(ideal_reg);
        RegMask rm = RegMask(regs.first());

        if (Opcode() == Op_CallLeafVector) {
          // If the return is in vector, compute appropriate regmask taking into account the whole range
          if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
            if(OptoReg::is_valid(regs.second())) {
              for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
                rm.insert(r);
              }
            }
          }
        }

        if (OptoReg::is_valid(regs.second())) {
          rm.insert(regs.second());
        }
        return new MachProjNode(this,con,rm,ideal_reg);
      } else {
        assert(con == TypeFunc::Parms+1, "only one return value");
        assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
        return new MachProjNode(this,con, RegMask::EMPTY, (uint)OptoReg::Bad);
      }
    }
  }

  switch (con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return nullptr;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//
// Determine whether the call could modify the field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const {
  assert((t_oop != nullptr), "sanity");
  if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
    const TypeTuple* args = _tf->domain_sig();
    Node* dest = nullptr;
    // Stubs that can be called once an ArrayCopyNode is expanded have
    // different signatures. Look for the second pointer argument,
    // that is the destination of the copy.
    for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
      if (args->field_at(i)->isa_ptr()) {
        j++;
        if (j == 2) {
          dest = in(i);
          break;
        }
      }
    }
    guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
    if (phase->type(dest)->isa_rawptr()) {
      // May happen for an arraycopy that initializes a newly allocated object. Conservatively return true.
      return true;
    }
    if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
      return true;
    }
    return false;
  }
  if (t_oop->is_known_instance()) {
    // The instance_id is set only for scalar-replaceable allocations which
    // are not passed as arguments according to Escape Analysis.
    return false;
  }
  if (t_oop->is_ptr_to_boxed_value()) {
    ciKlass* boxing_klass = t_oop->is_instptr()->instance_klass();
    if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
      // Skip unrelated boxing methods.
      Node* proj = proj_out_or_null(TypeFunc::Parms);
      if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
        return false;
      }
    }
    if (is_CallJava() && as_CallJava()->method() != nullptr) {
      ciMethod* meth = as_CallJava()->method();
      if (meth->is_getter()) {
        return false;
      }
      // May modify (by reflection) if a boxing object is passed
      // as an argument or returned.
      Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
      if (proj != nullptr) {
        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                   (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      const TypeTuple* d = tf()->domain_cc();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                 (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      return false;
    }
  }
  return true;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(const Node* n) {
  const TypeTuple* d = tf()->domain_cc();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    if (in(i) == n) {
      return true;
    }
  }
  return false;
}

bool CallNode::has_debug_use(const Node* n) const {
  if (jvms() != nullptr) {
    for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
      if (in(i) == n) {
        return true;
      }
    }
  }
  return false;
}

// Returns the unique CheckCastPP of a call,
// or 'this' if there are several CheckCastPPs or unexpected uses,
// or null if there is none.
Node *CallNode::result_cast() {
  Node *cast = nullptr;

  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == nullptr)
    return nullptr;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != nullptr) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}


CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts, bool allow_handlers) const {
  uint max_res = TypeFunc::Parms-1;
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    max_res = MAX2(max_res, pn->_con);
  }

  assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");

  uint projs_size = sizeof(CallProjections);
  if (max_res > TypeFunc::Parms) {
    projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
  }
  char* projs_storage = resource_allocate_bytes(projs_size);
  CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        projs->fallthrough_proj = pn;
        const Node* cn = pn->unique_ctrl_out_or_null();
        if (cn != nullptr && cn->is_Catch()) {
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            CatchProjNode* cpn = cn->fast_out(k)->as_CatchProj();
            assert(allow_handlers || !cpn->is_handler_proj(), "not allowed");
            if (cpn->_con == CatchProjNode::fall_through_index) {
              assert(cpn->handler_bci() == CatchProjNode::no_handler_bci, "");
              projs->fallthrough_catchproj = cpn;
            } else if (!cpn->is_handler_proj()) {
              projs->catchall_catchproj = cpn;
            }
          }
        }
        break;
      }
    case TypeFunc::I_O:
      if (pn->_is_io_use) {
        projs->catchall_ioproj = pn;
      } else {
        projs->fallthrough_ioproj = pn;
      }
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->outcnt() > 0) {
          CatchProjNode* ecpn = e->in(0)->isa_CatchProj();
          assert(allow_handlers || ecpn == nullptr || !ecpn->is_handler_proj(), "not allowed");
          if (ecpn != nullptr && ecpn->_con != CatchProjNode::fall_through_index && !ecpn->is_handler_proj()) {
            assert(projs->exobj == nullptr, "only one");
            projs->exobj = e;
          }
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj[0] = pn;
      break;
    default:
      assert(pn->_con <= max_res, "unexpected projection from allocation node.");
      projs->resproj[pn->_con-TypeFunc::Parms] = pn;
      break;
    }
  }

  // The resproj may not exist because the result could be ignored,
  // and the exception object may not exist if an exception handler
  // swallows the exception, but all the others must exist and be found.
  do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
  assert(!do_asserts || projs->fallthrough_proj      != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_memproj   != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_ioproj    != nullptr, "must be found");
  assert(!do_asserts || projs->catchall_catchproj    != nullptr, "must be found");
  if (separate_io_proj) {
    assert(!do_asserts || projs->catchall_memproj    != nullptr, "must be found");
    assert(!do_asserts || projs->catchall_ioproj     != nullptr, "must be found");
  }
  return projs;
}
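
// (Illustrative note: callers typically use extract_projections() to find and
// rewire all users of a call that is about to be replaced; see the
// substitutability-test replacement in CallStaticJavaNode::Ideal below.)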

Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
  // Validate attached generator
  CallGenerator* cg = generator();
  if (cg != nullptr) {
    assert((is_CallStaticJava()  && cg->is_mh_late_inline()) ||
           (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
  }
#endif // ASSERT
  return SafePointNode::Ideal(phase, can_reshape);
}

bool CallNode::is_call_to_arraycopystub() const {
  if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
    return true;
  }
  return false;
}

bool CallNode::is_call_to_multianewarray_stub() const {
  if (_name != nullptr &&
      strstr(_name, "multianewarray") != nullptr &&
      strstr(_name, "C2 runtime") != nullptr) {
    return true;
  }
  return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
bool CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}

void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain_sig()->cnt() : (uint)TypeFunc::Parms+1;
  uint new_dbg_start = tf()->domain_sig()->cnt();
  int jvms_adj  = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == req(), "argument count mismatch");
  Compile* C = phase->C;

  // SafePointScalarObject node could be referenced several times in debug info.
  // Use Dict to record cloned nodes.
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < sfpt->req(); i++) {
    Node* old_in = sfpt->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      bool new_node;
      Node* new_in = old_sosn->clone(sosn_map, new_node);
      if (new_node) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = phase->transform(new_in); // Register new node.
      }
      old_in = new_in;
    }
    add_req(old_in);
  }

  // JVMS may be shared so clone it before we modify it
  set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
  for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_map(this);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}

#ifdef ASSERT
bool CallJavaNode::validate_symbolic_info() const {
  if (method() == nullptr) {
    return true; // call into runtime or uncommon trap
  }
  Bytecodes::Code bc = jvms()->method()->java_code_at_bci(jvms()->bci());
  if (Arguments::is_valhalla_enabled() && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
    return true;
  }
  ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
  ciMethod* callee = method();
  if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
    assert(override_symbolic_info(), "should be set");
  }
  assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
  return true;
}
#endif

#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream* st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}

void CallJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else {
    st->print("<?>");
  }
}
#endif

void CallJavaNode::register_for_late_inline() {
  if (generator() != nullptr) {
    Compile::current()->prepend_late_inline(generator());
    set_generator(nullptr);
  } else {
    assert(false, "repeated inline attempt");
  }
}

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
bool CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (can_reshape && uncommon_trap_request() != 0) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (remove_unknown_flat_array_load(igvn, control(), memory(), in(TypeFunc::Parms))) {
      if (!control()->is_Region()) {
        igvn->replace_input_of(this, 0, phase->C->top());
      }
      return this;
    }
  }

  // Try to replace the runtime call to the substitutability test emitted by acmp if (at least) one operand is a known type
  if (can_reshape && !control()->is_top() && method() != nullptr && method()->holder() == phase->C->env()->ValueObjectMethods_klass() &&
      (method()->name() == ciSymbols::isSubstitutable_name())) {
    Node* left = in(TypeFunc::Parms);
    Node* right = in(TypeFunc::Parms + 1);
    if (!left->is_top() && !right->is_top() && (left->is_InlineType() || right->is_InlineType())) {
      if (!left->is_InlineType()) {
        swap(left, right);
      }
      InlineTypeNode* vt = left->as_InlineType();

      // Check if the field layout can be optimized
      if (vt->can_emit_substitutability_check(right)) {
        PhaseIterGVN* igvn = phase->is_IterGVN();
        if (UseAcmpFastPath) {
          // Sabotage the fast acmp path
          IfNode* fast_path_if = Parse::acmp_fast_path_if_from_substitutable_call(phase, this);
          if (fast_path_if != nullptr) {
            fast_path_if->set_req(1, phase->intcon(1));
            igvn->_worklist.push(fast_path_if);
          }
        }

        Node* ctrl = control();
        RegionNode* region = new RegionNode(1);
        Node* phi = new PhiNode(region, TypeInt::POS);

        Node* base = right;
        Node* ptr = right;
        if (!base->is_InlineType()) {
          // Parse time checks guarantee that both operands are non-null and have the same type
          base = igvn->register_new_node_with_optimizer(new CheckCastPPNode(ctrl, base, vt->bottom_type()));
          ptr = base;
        }
        // Emit IR for field-wise comparison
        vt->check_substitutability(igvn, region, phi, &ctrl, in(TypeFunc::Memory), base, ptr);

        // Equals
        region->add_req(ctrl);
        phi->add_req(igvn->intcon(1));

        ctrl = igvn->register_new_node_with_optimizer(region);
        Node* res = igvn->register_new_node_with_optimizer(phi);

        // Kill exception projections and return a tuple that will replace the call
        CallProjections* projs = extract_projections(false /*separate_io_proj*/);
        if (projs->fallthrough_catchproj != nullptr) {
          igvn->replace_node(projs->fallthrough_catchproj, ctrl);
        }
        if (projs->catchall_memproj != nullptr) {
          igvn->replace_node(projs->catchall_memproj, igvn->C->top());
        }
        if (projs->catchall_ioproj != nullptr) {
          igvn->replace_node(projs->catchall_ioproj, igvn->C->top());
        }
        if (projs->catchall_catchproj != nullptr) {
          igvn->replace_node(projs->catchall_catchproj, igvn->C->top());
        }
        return TupleNode::make(tf()->range_cc(), ctrl, i_o(), memory(), frameptr(), returnadr(), res);
      }
    }
  }

  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    if (cg->is_mh_late_inline()) {
      assert(IncrementalInlineMH, "required");
      assert(cg->call_node() == this, "mismatch");
      assert(cg->method()->is_method_handle_intrinsic(), "required");

      // Check whether this MH call becomes a candidate for inlining.
      ciMethod* callee = cg->method();
      vmIntrinsics::ID iid = callee->intrinsic_id();
      if (iid == vmIntrinsics::_invokeBasic) {
        if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
          register_for_late_inline();
        }
      } else if (iid == vmIntrinsics::_linkToNative) {
        // never retry
      } else {
        assert(callee->has_member_arg(), "wrong type of call?");
        if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
          register_for_late_inline();
        }
      }
    } else {
      assert(IncrementalInline, "required");
      assert(!cg->method()->is_method_handle_intrinsic(), "required");
      if (phase->C->print_inlining()) {
        phase->C->inline_printer()->record(cg->method(), cg->call_node()->jvms(), InliningResult::FAILURE,
          "static call node changed: trying again");
      }
      register_for_late_inline();
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}

//----------------------------is_uncommon_trap----------------------------
// Returns true if this is an uncommon trap.
bool CallStaticJavaNode::is_uncommon_trap() const {
  return (_name != nullptr && !strcmp(_name, "uncommon_trap"));
}

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != nullptr &&
        call->in(TypeFunc::Parms)->is_Con() &&
        call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}

// Split-if can cause the flat array branch of an array load with unknown type (see
// Parse::array_load) to end in an uncommon trap. In that case, the call to
// 'load_unknown_inline' is useless. Replace it with an uncommon trap with the same JVMState.
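// Expected control shape above 'ctl' when this applies (a sketch; the MemBar
// is only present if the compilation unit contains explicit flat array
// accesses): walking up through Proj/Catch/MemBar inputs from 'ctl' must
// reach a CallStaticJava to OptoRuntime::load_unknown_inline_Java().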
bool CallStaticJavaNode::remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg) {
  if (ctl == nullptr || ctl->is_top() || mem == nullptr || mem->is_top() || !mem->is_MergeMem()) {
    return false;
  }
  if (ctl->is_Region()) {
    bool res = false;
    for (uint i = 1; i < ctl->req(); i++) {
      MergeMemNode* mm = mem->clone()->as_MergeMem();
      for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
        Node* m = mms.memory();
        if (m->is_Phi() && m->in(0) == ctl) {
          mms.set_memory(m->in(i));
        }
      }
      if (remove_unknown_flat_array_load(igvn, ctl->in(i), mm, unc_arg)) {
        res = true;
        if (!ctl->in(i)->is_Region()) {
          igvn->replace_input_of(ctl, i, igvn->C->top());
        }
      }
      igvn->remove_dead_node(mm, PhaseIterGVN::NodeOrigin::Speculative);
    }
    return res;
  }
  // Verify the control flow is ok
  Node* call = ctl;
  MemBarNode* membar = nullptr;
  for (;;) {
    if (call == nullptr || call->is_top()) {
      return false;
    }
    if (call->is_Proj() || call->is_Catch() || call->is_MemBar()) {
      call = call->in(0);
    } else if (call->Opcode() == Op_CallStaticJava && !call->in(0)->is_top() &&
               call->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
      // If there are no explicit flat array accesses in the compilation unit,
      // there will be no membar here
      if (call->in(0)->is_Proj() && call->in(0)->in(0)->is_MemBar()) {
        membar = call->in(0)->in(0)->as_MemBar();
      }
      break;
    } else {
      return false;
    }
  }

  JVMState* jvms = call->jvms();
  if (igvn->C->too_many_traps(jvms->method(), jvms->bci(), Deoptimization::trap_request_reason(uncommon_trap_request()))) {
    return false;
  }

  Node* call_mem = call->in(TypeFunc::Memory);
  if (call_mem == nullptr || call_mem->is_top()) {
    return false;
  }
  if (!call_mem->is_MergeMem()) {
    call_mem = MergeMemNode::make(call_mem);
    igvn->register_new_node_with_optimizer(call_mem);
  }

  // Verify that there's no unexpected side effect
  for (MergeMemStream mms2(mem->as_MergeMem(), call_mem->as_MergeMem()); mms2.next_non_empty2(); ) {
    Node* m1 = mms2.is_empty() ? mms2.base_memory() : mms2.memory();
    Node* m2 = mms2.memory2();

    for (uint i = 0; i < 100; i++) {
      if (m1 == m2) {
        break;
      } else if (m1->is_Proj()) {
        m1 = m1->in(0);
      } else if (m1->is_MemBar()) {
        m1 = m1->in(TypeFunc::Memory);
      } else if (m1->Opcode() == Op_CallStaticJava &&
                 m1->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
        if (m1 != call) {
          if (call_mem->outcnt() == 0) {
            igvn->remove_dead_node(call_mem, PhaseIterGVN::NodeOrigin::Speculative);
          }
          return false;
        }
        break;
      } else if (m1->is_MergeMem()) {
        MergeMemNode* mm = m1->as_MergeMem();
        int idx = mms2.alias_idx();
        if (idx == Compile::AliasIdxBot) {
          m1 = mm->base_memory();
        } else {
          m1 = mm->memory_at(idx);
        }
      } else {
        if (call_mem->outcnt() == 0) {
          igvn->remove_dead_node(call_mem, PhaseIterGVN::NodeOrigin::Speculative);
        }
        return false;
      }
    }
  }
  if (call_mem->outcnt() == 0) {
    igvn->remove_dead_node(call_mem, PhaseIterGVN::NodeOrigin::Speculative);
  }

  // Remove membar preceding the call
  if (membar != nullptr) {
    membar->remove(igvn);
  }

  address call_addr = OptoRuntime::uncommon_trap_blob()->entry_point();
  CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap", nullptr);
  unc->init_req(TypeFunc::Control, call->in(0));
  unc->init_req(TypeFunc::I_O, call->in(TypeFunc::I_O));
  unc->init_req(TypeFunc::Memory, call->in(TypeFunc::Memory));
  unc->init_req(TypeFunc::FramePtr,  call->in(TypeFunc::FramePtr));
  unc->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
  unc->init_req(TypeFunc::Parms+0, unc_arg);
  unc->set_cnt(PROB_UNLIKELY_MAG(4));
  unc->copy_call_debug_info(igvn, call->as_CallStaticJava());

  // Replace the call with an uncommon trap
  igvn->replace_input_of(call, 0, igvn->C->top());

  igvn->register_new_node_with_optimizer(unc);

  Node* ctrl = igvn->transform(new ProjNode(unc, TypeFunc::Control));
  Node* halt = igvn->transform(new HaltNode(ctrl, call->in(TypeFunc::FramePtr), "uncommon trap returned which should never happen"));
  igvn->add_input_to(igvn->C->root(), halt);

  return true;
}


#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != nullptr) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                 Deoptimization::format_trap_request(buf, sizeof(buf),
                                                     trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}

void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else if (_name) {
    st->print("%s", _name);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
1480 uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
1481 bool CallDynamicJavaNode::cmp( const Node &n ) const {
1482   CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
1483   return CallJavaNode::cmp(call);
1484 }
1485 
1486 Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1487   CallGenerator* cg = generator();
1488   if (can_reshape && cg != nullptr) {
1489     if (cg->is_virtual_late_inline()) {
1490       assert(IncrementalInlineVirtual, "required");
1491       assert(cg->call_node() == this, "mismatch");
1492 
1493       if (cg->callee_method() == nullptr) {
1494         // Recover symbolic info for method resolution.
1495         ciMethod* caller = jvms()->method();
1496         ciBytecodeStream iter(caller);
1497         iter.force_bci(jvms()->bci());
1498 
1499         bool             not_used1;
1500         ciSignature*     not_used2;
1501         ciMethod*        orig_callee  = iter.get_method(not_used1, &not_used2);  // callee in the bytecode
1502         ciKlass*         holder       = iter.get_declared_method_holder();
1503         if (orig_callee->is_method_handle_intrinsic()) {
1504           assert(_override_symbolic_info, "required");
1505           orig_callee = method();
1506           holder = method()->holder();
1507         }
1508 
1509         ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
1510 
1511         Node* receiver_node = in(TypeFunc::Parms);
1512         const TypeOopPtr* receiver_type = phase->type(receiver_node)->isa_oopptr();
1513 
1514         int  not_used3;
1515         bool call_does_dispatch;
1516         ciMethod* callee = phase->C->optimize_virtual_call(caller, klass, holder, orig_callee, receiver_type, true /*is_virtual*/,
1517                                                            call_does_dispatch, not_used3);  // out-parameters
1518         if (!call_does_dispatch) {
1519           cg->set_callee_method(callee);
1520         }
1521       }
1522       if (cg->callee_method() != nullptr) {
1523         // Register for late inlining.
1524         register_for_late_inline(); // MH late inlining prepends to the list, so do the same
1525       }
1526     } else {
1527       assert(IncrementalInline, "required");
1528       if (phase->C->print_inlining()) {
1529         phase->C->inline_printer()->record(cg->method(), cg->call_node()->jvms(), InliningResult::FAILURE,
1530           "dynamic call node changed: trying again");
1531       }
1532       register_for_late_inline();
1533     }
1534   }
1535   return CallNode::Ideal(phase, can_reshape);
1536 }
1537 
1538 #ifndef PRODUCT
1539 void CallDynamicJavaNode::dump_spec(outputStream *st) const {
1540   st->print("# Dynamic ");
1541   CallJavaNode::dump_spec(st);
1542 }
1543 #endif
1544 
1545 //=============================================================================
1546 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
1547 bool CallRuntimeNode::cmp( const Node &n ) const {
1548   CallRuntimeNode &call = (CallRuntimeNode&)n;
1549   return CallNode::cmp(call) && !strcmp(_name,call._name);
1550 }
1551 #ifndef PRODUCT
1552 void CallRuntimeNode::dump_spec(outputStream *st) const {
1553   st->print("# ");
1554   st->print("%s", _name);
1555   CallNode::dump_spec(st);
1556 }
1557 #endif
1558 uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
1559 bool CallLeafVectorNode::cmp( const Node &n ) const {
1560   CallLeafVectorNode &call = (CallLeafVectorNode&)n;
1561   return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
1562 }
1563 
1564 //------------------------------calling_convention-----------------------------
1565 void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
1566   if (_entry_point == nullptr) {
1567     // A call with a null entry point is a special case: its inputs are
1568     // multiple values returned from a call, and so it should follow
1569     // the return convention.
1570     SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
1571     return;
1572   }
1573   SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
1574 }
1575 
1576 void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1577 #ifdef ASSERT
1578   assert(tf()->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1579          "return vector size must match");
1580   const TypeTuple* d = tf()->domain_sig();
1581   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1582     Node* arg = in(i);
1583     assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
1584            "vector argument size must match");
1585   }
1586 #endif
1587 
1588   SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
1589 }
1590 
1591 //=============================================================================
1592 //------------------------------calling_convention-----------------------------
1593 
1594 
1595 //=============================================================================
1596 bool CallLeafPureNode::is_unused() const {
1597   return proj_out_or_null(TypeFunc::Parms) == nullptr;
1598 }
1599 
1600 bool CallLeafPureNode::is_dead() const {
1601   return proj_out_or_null(TypeFunc::Control) == nullptr;
1602 }
1603 
1604 /* We make a tuple of the global input state + TOP for the output values.
1605  * We use this to delete a pure function that is not used: by replacing the call with
1606  * such a tuple, we let the output Projs' idealization pick the corresponding input of the
1607  * pure call, thus jumping over it and effectively removing the call from the graph.
1608  * This avoids doing the graph surgery manually and instead leaves it to IGVN,
1609  * which is specialized in doing it right. We also need tuple components for the output
1610  * values of the function to respect the return arity, and in case there is a projection
1611  * that would pick an output (which shouldn't happen at the moment).
1612  */
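// A rough before/after sketch of the replacement:
//   before: CallLeafPure -> { Proj(Control), Proj(Memory), ... }, no Proj(Parms)
//   after:  Tuple(Control, I_O, Memory, FramePtr, ReturnAdr, TOP, ...)
// Each Proj then folds into the matching tuple component during IGVN and the
// call vanishes from the graph.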
1613 TupleNode* CallLeafPureNode::make_tuple_of_input_state_and_top_return_values(const Compile* C) const {
1614   // Transparently propagate the input state, but not the parameters
1615   TupleNode* tuple = TupleNode::make(
1616       tf()->range_cc(),
1617       in(TypeFunc::Control),
1618       in(TypeFunc::I_O),
1619       in(TypeFunc::Memory),
1620       in(TypeFunc::FramePtr),
1621       in(TypeFunc::ReturnAdr));
1622 
1623   // And add TOPs for the return values
1624   for (uint i = TypeFunc::Parms; i < tf()->range_cc()->cnt(); i++) {
1625     tuple->set_req(i, C->top());
1626   }
1627 
1628   return tuple;
1629 }
1630 
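// Make a copy of this pure call hanging off the given control (or off this
// call's own control if 'control' is null). Since the call is pure, the copy
// gets TOP for I_O, Memory, FramePtr and ReturnAdr; only control and the
// actual parameters are carried over.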
1631 CallLeafPureNode* CallLeafPureNode::inline_call_leaf_pure_node(Node* control) const {
1632   Node* top = Compile::current()->top();
1633   if (control == nullptr) {
1634     control = in(TypeFunc::Control);
1635   }
1636 
1637   CallLeafPureNode* call = new CallLeafPureNode(tf(), entry_point(), _name);
1638   call->init_req(TypeFunc::Control, control);
1639   call->init_req(TypeFunc::I_O, top);
1640   call->init_req(TypeFunc::Memory, top);
1641   call->init_req(TypeFunc::ReturnAdr, top);
1642   call->init_req(TypeFunc::FramePtr, top);
1643   for (unsigned int i = 0; i < tf()->domain_cc()->cnt() - TypeFunc::Parms; i++) {
1644     call->init_req(TypeFunc::Parms + i, in(TypeFunc::Parms + i));
1645   }
1646 
1647   return call;
1648 }
1649 
1650 Node* CallLeafPureNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1651   if (is_dead()) {
1652     return nullptr;
1653   }
1654 
1655   // We need to wait until IGVN because during parsing, usages might still be missing
1656   // and we would remove the call immediately.
1657   if (can_reshape && is_unused()) {
1658     // The result is not used. We remove the call by replacing it with a tuple, that
1659     // is later disintegrated by the projections.
1660     return make_tuple_of_input_state_and_top_return_values(phase->C);
1661   }
1662 
1663   return CallRuntimeNode::Ideal(phase, can_reshape);
1664 }
1665 
1666 #ifndef PRODUCT
1667 void CallLeafNode::dump_spec(outputStream *st) const {
1668   st->print("# ");
1669   st->print("%s", _name);
1670   CallNode::dump_spec(st);
1671 }
1672 #endif
1673 
1674 uint CallLeafNoFPNode::match_edge(uint idx) const {
1675   // Null entry point is a special case for which the target is in a
1676   // register. Need to match that edge.
1677   return entry_point() == nullptr && idx == TypeFunc::Parms;
1678 }
1679 
1680 //=============================================================================
1681 
1682 void SafePointNode::set_local(const JVMState* jvms, uint idx, Node *c) {
1683   assert(verify_jvms(jvms), "jvms must match");
1684   int loc = jvms->locoff() + idx;
1685   if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
1686     // If the current local at idx is top then local idx - 1 could
1687     // be a long/double that needs to be killed since top could
1688     // represent the 2nd half of that long/double.
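    // Example: a long in local slots (i-1, i) is stored as (value, top);
    // writing a non-top value into slot i leaves slot i-1 without its
    // second half, so the stale long/double there must be killed too.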
1689     uint ideal = in(loc -1)->ideal_reg();
1690     if (ideal == Op_RegD || ideal == Op_RegL) {
1691       // set other (low index) half to top
1692       set_req(loc - 1, in(loc));
1693     }
1694   }
1695   set_req(loc, c);
1696 }
1697 
1698 uint SafePointNode::size_of() const { return sizeof(*this); }
1699 bool SafePointNode::cmp( const Node &n ) const {
1700   return (&n == this);          // Always fail except on self
1701 }
1702 
1703 //-------------------------set_next_exception----------------------------------
1704 void SafePointNode::set_next_exception(SafePointNode* n) {
1705   assert(n == nullptr || n->Opcode() == Op_SafePoint, "correct value for next_exception");
1706   if (len() == req()) {
1707     if (n != nullptr)  add_prec(n);
1708   } else {
1709     set_prec(req(), n);
1710   }
1711 }
1712 
1713 
1714 //----------------------------next_exception-----------------------------------
1715 SafePointNode* SafePointNode::next_exception() const {
1716   if (len() == req()) {
1717     return nullptr;
1718   } else {
1719     Node* n = in(req());
1720     assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
1721     return (SafePointNode*) n;
1722   }
1723 }
1724 
1725 
1726 //------------------------------Ideal------------------------------------------
1727 // Skip over any collapsed Regions
1728 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1729   assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
1730   if (remove_dead_region(phase, can_reshape)) {
1731     return this;
1732   }
1733   // Scalarize inline types in safepoint debug info.
1734   // Delay this until all inlining is over to avoid getting inconsistent debug info.
1735   if (phase->C->scalarize_in_safepoints() && can_reshape && jvms() != nullptr) {
1736     for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
1737       Node* n = in(i)->uncast();
1738       if (n->is_InlineType()) {
1739         n->as_InlineType()->make_scalar_in_safepoints(phase->is_IterGVN());
1740       }
1741     }
1742   }
1743   return nullptr;
1744 }
1745 
1746 //------------------------------Identity---------------------------------------
1747 // Remove obviously duplicate safepoints
1748 Node* SafePointNode::Identity(PhaseGVN* phase) {
1749 
1750   // If you have back to back safepoints, remove one
1751   if (in(TypeFunc::Control)->is_SafePoint()) {
1752     Node* out_c = unique_ctrl_out_or_null();
1753     // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
1754     // outer loop's safepoint could confuse removal of the outer loop.
1755     if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
1756       return in(TypeFunc::Control);
1757     }
1758   }
1759 
1760   // Transforming long counted loops requires a safepoint node. Do not
1761   // eliminate a safepoint until loop opts are over.
1762   if (in(0)->is_Proj() && !phase->C->major_progress()) {
1763     Node *n0 = in(0)->in(0);
1764     // Check if it is a call projection (except a leaf call)
1765     if( n0->is_Catch() ) {
1766       n0 = n0->in(0)->in(0);
1767       assert( n0->is_Call(), "expect a call here" );
1768     }
1769     if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
1770       // Don't remove a safepoint belonging to an OuterStripMinedLoopEndNode.
1771       // If the loop dies, they will be removed together.
1772       if (has_out_with(Op_OuterStripMinedLoopEnd)) {
1773         return this;
1774       }
1775       // Useless Safepoint, so remove it
1776       return in(TypeFunc::Control);
1777     }
1778   }
1779 
1780   return this;
1781 }
1782 
1783 //------------------------------Value------------------------------------------
1784 const Type* SafePointNode::Value(PhaseGVN* phase) const {
1785   if (phase->type(in(0)) == Type::TOP) {
1786     return Type::TOP;
1787   }
1788   if (in(0) == this) {
1789     return Type::TOP; // Dead infinite loop
1790   }
1791   return Type::CONTROL;
1792 }
1793 
1794 #ifndef PRODUCT
1795 void SafePointNode::dump_spec(outputStream *st) const {
1796   st->print(" SafePoint ");
1797   _replaced_nodes.dump(st);
1798 }
1799 #endif
1800 
1801 const RegMask &SafePointNode::in_RegMask(uint idx) const {
1802   if (idx < TypeFunc::Parms) {
1803     return RegMask::EMPTY;
1804   }
1805   // Values outside the domain represent debug info
1806   return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1807 }
1808 const RegMask &SafePointNode::out_RegMask() const {
1809   return RegMask::EMPTY;
1810 }
1811 
1812 
1813 void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
1814   assert((int)grow_by > 0, "sanity");
1815   int monoff = jvms->monoff();
1816   int scloff = jvms->scloff();
1817   int endoff = jvms->endoff();
1818   assert(endoff == (int)req(), "no other states or debug info after me");
1819   Node* top = Compile::current()->top();
1820   for (uint i = 0; i < grow_by; i++) {
1821     ins_req(monoff, top);
1822   }
1823   jvms->set_monoff(monoff + grow_by);
1824   jvms->set_scloff(scloff + grow_by);
1825   jvms->set_endoff(endoff + grow_by);
1826 }
1827 
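// A SafePoint's debug edges follow the JVMState layout
//   [ locals | expression stack | monitors | scalarized objects ]
// delimited by locoff/stkoff/monoff/scloff/endoff. Each monitor contributes
// two consecutive edges, the BoxLock (stack slot) followed by the locked
// object, which is the invariant push_monitor/pop_monitor maintain.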
1828 void SafePointNode::push_monitor(const FastLockNode *lock) {
1829   // Add a LockNode, which points to both the original BoxLockNode (the
1830   // stack space for the monitor) and the Object being locked.
1831   const int MonitorEdges = 2;
1832   assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
1833   assert(req() == jvms()->endoff(), "correct sizing");
1834   int nextmon = jvms()->scloff();
1835   ins_req(nextmon,   lock->box_node());
1836   ins_req(nextmon+1, lock->obj_node());
1837   jvms()->set_scloff(nextmon + MonitorEdges);
1838   jvms()->set_endoff(req());
1839 }
1840 
1841 void SafePointNode::pop_monitor() {
1842   // Delete last monitor from debug info
1843   DEBUG_ONLY(int num_before_pop = jvms()->nof_monitors());
1844   const int MonitorEdges = 2;
1845   assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
1846   int scloff = jvms()->scloff();
1847   int endoff = jvms()->endoff();
1848   int new_scloff = scloff - MonitorEdges;
1849   int new_endoff = endoff - MonitorEdges;
1850   jvms()->set_scloff(new_scloff);
1851   jvms()->set_endoff(new_endoff);
1852   while (scloff > new_scloff)  del_req_ordered(--scloff);
1853   assert(jvms()->nof_monitors() == num_before_pop-1, "");
1854 }
1855 
1856 Node *SafePointNode::peek_monitor_box() const {
1857   int mon = jvms()->nof_monitors() - 1;
1858   assert(mon >= 0, "must have a monitor");
1859   return monitor_box(jvms(), mon);
1860 }
1861 
1862 Node *SafePointNode::peek_monitor_obj() const {
1863   int mon = jvms()->nof_monitors() - 1;
1864   assert(mon >= 0, "must have a monitor");
1865   return monitor_obj(jvms(), mon);
1866 }
1867 
1868 Node* SafePointNode::peek_operand(uint off) const {
1869   assert(jvms()->sp() > 0, "must have an operand");
1870   assert(off < jvms()->sp(), "off is out-of-range");
1871   return stack(jvms(), jvms()->sp() - off - 1);
1872 }
1873 
1874 // Do we Match on this edge index or not?  Match no edges
1875 uint SafePointNode::match_edge(uint idx) const {
1876   return (TypeFunc::Parms == idx);
1877 }
1878 
1879 void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
1880   assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
1881   int nb = igvn->C->root()->find_prec_edge(this);
1882   if (nb != -1) {
1883     igvn->delete_precedence_of(igvn->C->root(), nb);
1884   }
1885 }
1886 
1887 void SafePointNode::remove_non_debug_edges(NodeEdgeTempStorage& non_debug_edges) {
1888   assert(non_debug_edges._state == NodeEdgeTempStorage::state_initial, "not processed");
1889   assert(non_debug_edges.is_empty(), "edges not processed");
1890 
1891   while (req() > jvms()->endoff()) {
1892     uint last = req() - 1;
1893     non_debug_edges.push(in(last));
1894     del_req(last);
1895   }
1896 
1897   assert(jvms()->endoff() == req(), "no extra edges past debug info allowed");
1898   DEBUG_ONLY(non_debug_edges._state = NodeEdgeTempStorage::state_populated);
1899 }
1900 
1901 void SafePointNode::restore_non_debug_edges(NodeEdgeTempStorage& non_debug_edges) {
1902   assert(non_debug_edges._state == NodeEdgeTempStorage::state_populated, "not populated");
1903   assert(jvms()->endoff() == req(), "no extra edges past debug info allowed");
1904 
1905   while (!non_debug_edges.is_empty()) {
1906     Node* non_debug_edge = non_debug_edges.pop();
1907     add_req(non_debug_edge);
1908   }
1909 
1910   assert(non_debug_edges.is_empty(), "edges not processed");
1911   DEBUG_ONLY(non_debug_edges._state = NodeEdgeTempStorage::state_processed);
1912 }
1913 
1914 //==============  SafePointScalarObjectNode  ==============
1915 
1916 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) :
1917   TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
1918   _first_index(first_index),
1919   _depth(depth),
1920   _n_fields(n_fields),
1921   _alloc(alloc)
1922 {
1923 #ifdef ASSERT
1924   if (alloc != nullptr && !alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) {
1925     alloc->dump();
1926     assert(false, "unexpected call node");
1927   }
1928 #endif
1929   init_class_id(Class_SafePointScalarObject);
1930 }
1931 
1932 // Do not allow value-numbering for SafePointScalarObject node.
1933 uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1934 bool SafePointScalarObjectNode::cmp( const Node &n ) const {
1935   return (&n == this); // Always fail except on self
1936 }
1937 
1938 uint SafePointScalarObjectNode::ideal_reg() const {
1939   return 0; // No matching to machine instruction
1940 }
1941 
1942 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
1943   return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1944 }
1945 
1946 const RegMask &SafePointScalarObjectNode::out_RegMask() const {
1947   return RegMask::EMPTY;
1948 }
1949 
1950 uint SafePointScalarObjectNode::match_edge(uint idx) const {
1951   return 0;
1952 }
1953 
1954 SafePointScalarObjectNode*
1955 SafePointScalarObjectNode::clone(Dict* sosn_map, bool& new_node) const {
1956   void* cached = (*sosn_map)[(void*)this];
1957   if (cached != nullptr) {
1958     new_node = false;
1959     return (SafePointScalarObjectNode*)cached;
1960   }
1961   new_node = true;
1962   SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
1963   sosn_map->Insert((void*)this, (void*)res);
1964   return res;
1965 }
1966 
1967 
1968 #ifndef PRODUCT
1969 void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
1970   st->print(" # fields@[%d..%d]", first_index(), first_index() + n_fields() - 1);
1971 }
1972 #endif
1973 
1974 //==============  SafePointScalarMergeNode  ==============
1975 
1976 SafePointScalarMergeNode::SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx) :
1977   TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
1978   _merge_pointer_idx(merge_pointer_idx)
1979 {
1980   init_class_id(Class_SafePointScalarMerge);
1981 }
1982 
1983 // Do not allow value-numbering for SafePointScalarMerge node.
1984 uint SafePointScalarMergeNode::hash() const { return NO_HASH; }
1985 bool SafePointScalarMergeNode::cmp( const Node &n ) const {
1986   return (&n == this); // Always fail except on self
1987 }
1988 
1989 uint SafePointScalarMergeNode::ideal_reg() const {
1990   return 0; // No matching to machine instruction
1991 }
1992 
1993 const RegMask &SafePointScalarMergeNode::in_RegMask(uint idx) const {
1994   return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1995 }
1996 
1997 const RegMask &SafePointScalarMergeNode::out_RegMask() const {
1998   return RegMask::EMPTY;
1999 }
2000 
2001 uint SafePointScalarMergeNode::match_edge(uint idx) const {
2002   return 0;
2003 }
2004 
2005 SafePointScalarMergeNode*
2006 SafePointScalarMergeNode::clone(Dict* sosn_map, bool& new_node) const {
2007   void* cached = (*sosn_map)[(void*)this];
2008   if (cached != nullptr) {
2009     new_node = false;
2010     return (SafePointScalarMergeNode*)cached;
2011   }
2012   new_node = true;
2013   SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
2014   sosn_map->Insert((void*)this, (void*)res);
2015   return res;
2016 }
2017 
2018 #ifndef PRODUCT
2019 void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
2020   st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
2021 }
2022 #endif
2023 
2024 //=============================================================================
2025 uint AllocateNode::size_of() const { return sizeof(*this); }
2026 
2027 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
2028                            Node *ctrl, Node *mem, Node *abio,
2029                            Node *size, Node *klass_node,
2030                            Node* initial_test,
2031                            InlineTypeNode* inline_type_node)
2032   : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
2033 {
2034   init_class_id(Class_Allocate);
2035   init_flags(Flag_is_macro);
2036   _is_scalar_replaceable = false;
2037   _is_non_escaping = false;
2038   _is_allocation_MemBar_redundant = false;
2039   Node *topnode = C->top();
2040 
2041   init_req( TypeFunc::Control  , ctrl );
2042   init_req( TypeFunc::I_O      , abio );
2043   init_req( TypeFunc::Memory   , mem );
2044   init_req( TypeFunc::ReturnAdr, topnode );
2045   init_req( TypeFunc::FramePtr , topnode );
2046   init_req( AllocSize          , size);
2047   init_req( KlassNode          , klass_node);
2048   init_req( InitialTest        , initial_test);
2049   init_req( ALength            , topnode);
2050   init_req( ValidLengthTest    , topnode);
2051   init_req( InlineType     , inline_type_node);
2052   // DefaultValue defaults to nullptr
2053   // RawDefaultValue defaults to nullptr
2054   C->add_macro_node(this);
2055 }
2056 
2057 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
2058 {
2059   assert(initializer != nullptr &&
2060          (initializer->is_object_constructor() || initializer->is_class_initializer()),
2061          "unexpected initializer method");
2062   BCEscapeAnalyzer* analyzer = initializer->get_bcea();
2063   if (analyzer == nullptr) {
2064     return;
2065   }
2066 
2067   // The allocated object is the first parameter (receiver) of its initializer
2068   if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
2069     _is_allocation_MemBar_redundant = true;
2070   }
2071 }
2072 
2073 Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
2074   Node* mark_node = nullptr;
2075   if (UseCompactObjectHeaders || Arguments::is_valhalla_enabled()) {
2076     Node* klass_node = in(AllocateNode::KlassNode);
2077     Node* proto_adr = phase->transform(AddPNode::make_with_base(phase->C->top(), klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
2078     mark_node = LoadNode::make(*phase, control, mem, proto_adr, phase->type(proto_adr)->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
2079   } else {
2080     // For now only enable fast locking for non-array types
2081     mark_node = phase->MakeConX(markWord::prototype().value());
2082   }
2083   return mark_node;
2084 }
2085 
2086 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
2087 // CastII, if appropriate.  If we are not allowed to create new nodes, and
2088 // a CastII is appropriate, return null.
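// For example (hypothetical types): if oop_type says the array holds at most
// 100 elements while the incoming length is a plain int, the length is
// narrowed to [0..100] by a CastII that is control dependent on the
// initialization.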
2089 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
2090   Node *length = in(AllocateNode::ALength);
2091   assert(length != nullptr, "length is not null");
2092 
2093   const TypeInt* length_type = phase->find_int_type(length);
2094   const TypeAryPtr* ary_type = oop_type->isa_aryptr();
2095 
2096   if (ary_type != nullptr && length_type != nullptr) {
2097     const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
2098     if (narrow_length_type != length_type) {
2099       // Assert one of:
2100       //   - the narrow_length is 0
2101       //   - the narrow_length is not wider than length
2102       assert(narrow_length_type == TypeInt::ZERO ||
2103              (length_type->is_con() && narrow_length_type->is_con() &&
2104               (narrow_length_type->_hi <= length_type->_lo)) ||
2105              (narrow_length_type->_hi <= length_type->_hi &&
2106               narrow_length_type->_lo >= length_type->_lo),
2107              "narrow type must be narrower than length type");
2108 
2109       // Return null if new nodes are not allowed
2110       if (!allow_new_nodes) {
2111         return nullptr;
2112       }
2113       // Create a cast which is control dependent on the initialization to
2114       // propagate the fact that the array length must be positive.
2115       InitializeNode* init = initialization();
2116       if (init != nullptr) {
2117         length = new CastIINode(init->proj_out_or_null(TypeFunc::Control), length, narrow_length_type);
2118       }
2119     }
2120   }
2121 
2122   return length;
2123 }
2124 
2125 //=============================================================================
2126 const TypeFunc* LockNode::_lock_type_Type = nullptr;
2127 
2128 uint LockNode::size_of() const { return sizeof(*this); }
2129 
2130 // Redundant lock elimination
2131 //
2132 // There are various patterns of locking where we release and
2133 // immediately reacquire a lock in a piece of code where no operations
2134 // occur in between that would be observable.  In those cases we can
2135 // skip releasing and reacquiring the lock without violating any
2136 // fairness requirements.  Doing this around a loop could cause a lock
2137 // to be held for a very long time so we concentrate on non-looping
2138 // control flow.  We also require that the operations are fully
2139 // redundant, meaning that we don't introduce new lock operations on
2140 // some paths just to be able to eliminate them on others, a la PRE.  This
2141 // would probably require some more extensive graph manipulation to
2142 // guarantee that the memory edges were all handled correctly.
2143 //
2144 // Assuming p is a simple predicate which can't trap in any way and s
2145 // is a synchronized method consider this code:
2146 //
2147 //   s();
2148 //   if (p)
2149 //     s();
2150 //   else
2151 //     s();
2152 //   s();
2153 //
2154 // 1. The unlocks of the first call to s can be eliminated if the
2155 // locks inside the then and else branches are eliminated.
2156 //
2157 // 2. The unlocks of the then and else branches can be eliminated if
2158 // the lock of the final call to s is eliminated.
2159 //
2160 // Either of these cases subsumes the simple case of sequential control flow
2161 //
2162 // Additionally we can eliminate versions without the else case:
2163 //
2164 //   s();
2165 //   if (p)
2166 //     s();
2167 //   s();
2168 //
2169 // 3. In this case we eliminate the unlock of the first s, the lock
2170 // and unlock in the then case and the lock in the final s.
2171 //
2172 // Note also that in all these cases the then/else pieces don't have
2173 // to be trivial as long as they begin and end with synchronization
2174 // operations.
2175 //
2176 //   s();
2177 //   if (p)
2178 //     s();
2179 //     f();
2180 //     s();
2181 //   s();
2182 //
2183 // The code will work properly for this case, leaving in the unlock
2184 // before the call to f and the relock after it.
2185 //
2186 // A potentially interesting case which isn't handled here is when the
2187 // locking is partially redundant.
2188 //
2189 //   s();
2190 //   if (p)
2191 //     s();
2192 //
2193 // This could be eliminated by putting an unlock on the else path and
2194 // eliminating the first unlock and the lock on the then side.
2195 // Alternatively, the unlock could be moved out of the then side so that it
2196 // comes after the merge, and the first unlock and the second lock
2197 // eliminated.  This might require less manipulation of the memory
2198 // state to get correct.
2199 //
2200 // Additionally we might allow work between an unlock and a lock before
2201 // giving up eliminating the locks.  The current code disallows any
2202 // conditional control flow between these operations.  A formulation
2203 // similar to partial redundancy elimination computing the
2204 // availability of unlocking and the anticipatability of locking at a
2205 // program point would allow detection of fully redundant locking with
2206 // some amount of work in between.  I'm not sure how often I really
2207 // think that would occur though.  Most of the cases I've seen
2208 // indicate it's likely non-trivial work would occur in between.
2209 // There may be other more complicated constructs where we could
2210 // eliminate locking but I haven't seen any others appear as hot or
2211 // interesting.
2212 //
2213 // Locking and unlocking have a canonical form in ideal that looks
2214 // roughly like this:
2215 //
2216 //              <obj>
2217 //                | \\------+
2218 //                |  \       \
2219 //                | BoxLock   \
2220 //                |  |   |     \
2221 //                |  |    \     \
2222 //                |  |   FastLock
2223 //                |  |   /
2224 //                |  |  /
2225 //                |  |  |
2226 //
2227 //               Lock
2228 //                |
2229 //            Proj #0
2230 //                |
2231 //            MembarAcquire
2232 //                |
2233 //            Proj #0
2234 //
2235 //            MembarRelease
2236 //                |
2237 //            Proj #0
2238 //                |
2239 //              Unlock
2240 //                |
2241 //            Proj #0
2242 //
2243 //
2244 // This code proceeds by processing Lock nodes during PhaseIterGVN
2245 // and searching back through its control for the proper code
2246 // patterns.  Once it finds a set of lock and unlock operations to
2247 // eliminate, they are marked as eliminatable, which causes the
2248 // expansion of the Lock and Unlock macro nodes to turn the operations into NOPs.
2249 //
2250 //=============================================================================
2251 
2252 //
2253 // Utility function to skip over uninteresting control nodes.  Nodes skipped are:
2254 //   - copy regions.  (These may not have been optimized away yet.)
2255 //   - eliminated locking nodes
2256 //
2257 static Node *next_control(Node *ctrl) {
2258   if (ctrl == nullptr)
2259     return nullptr;
2260   while (1) {
2261     if (ctrl->is_Region()) {
2262       RegionNode *r = ctrl->as_Region();
2263       Node *n = r->is_copy();
2264       if (n == nullptr)
2265         break;  // hit a region, return it
2266       else
2267         ctrl = n;
2268     } else if (ctrl->is_Proj()) {
2269       Node *in0 = ctrl->in(0);
2270       if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
2271         ctrl = in0->in(0);
2272       } else {
2273         break;
2274       }
2275     } else {
2276       break; // found an interesting control
2277     }
2278   }
2279   return ctrl;
2280 }
2281 //
2282 // Given a control, see if it's the control projection of an Unlock which
2283 // is operating on the same object as lock.
2284 //
2285 bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
2286                                             GrowableArray<AbstractLockNode*> &lock_ops) {
2287   ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : nullptr;
2288   if (ctrl_proj != nullptr && ctrl_proj->_con == TypeFunc::Control) {
2289     Node *n = ctrl_proj->in(0);
2290     if (n != nullptr && n->is_Unlock()) {
2291       UnlockNode *unlock = n->as_Unlock();
2292       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2293       Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
2294       Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
2295       if (lock_obj->eqv_uncast(unlock_obj) &&
2296           BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
2297           !unlock->is_eliminated()) {
2298         lock_ops.append(unlock);
2299         return true;
2300       }
2301     }
2302   }
2303   return false;
2304 }
2305 
2306 //
2307 // Find the lock matching an unlock.  Returns null if a safepoint
2308 // or complicated control is encountered first.
2309 LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
2310   LockNode *lock_result = nullptr;
2311   // find the matching lock, or an intervening safepoint
2312   Node *ctrl = next_control(unlock->in(0));
2313   while (1) {
2314     assert(ctrl != nullptr, "invalid control graph");
2315     assert(!ctrl->is_Start(), "missing lock for unlock");
2316     if (ctrl->is_top()) break;  // dead control path
2317     if (ctrl->is_Proj()) ctrl = ctrl->in(0);
2318     if (ctrl->is_SafePoint()) {
2319         break;  // found a safepoint (may be the lock we are searching for)
2320     } else if (ctrl->is_Region()) {
2321       // Check for a simple diamond pattern.  Punt on anything more complicated
2322       if (ctrl->req() == 3 && ctrl->in(1) != nullptr && ctrl->in(2) != nullptr) {
2323         Node *in1 = next_control(ctrl->in(1));
2324         Node *in2 = next_control(ctrl->in(2));
2325         if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
2326              (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
2327           ctrl = next_control(in1->in(0)->in(0));
2328         } else {
2329           break;
2330         }
2331       } else {
2332         break;
2333       }
2334     } else {
2335       ctrl = next_control(ctrl->in(0));  // keep searching
2336     }
2337   }
2338   if (ctrl->is_Lock()) {
2339     LockNode *lock = ctrl->as_Lock();
2340     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2341     Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
2342     Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
2343     if (lock_obj->eqv_uncast(unlock_obj) &&
2344         BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
2345       lock_result = lock;
2346     }
2347   }
2348   return lock_result;
2349 }
2350 
2351 // This code corresponds to case 3 above.
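// Roughly, the control shape being matched is:
//
//        Unlock
//          |
//         If
//        /    \
//   IfTrue    IfFalse
//      |         |
//   (node)     Lock
//
// where 'node' is the projection we entered through and the other
// projection's unique user must be a Lock on the same object and box.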
2352 
2353 bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
2354                                                        GrowableArray<AbstractLockNode*> &lock_ops) {
2355   Node* if_node = node->in(0);
2356   bool  if_true = node->is_IfTrue();
2357 
2358   if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
2359     Node *lock_ctrl = next_control(if_node->in(0));
2360     if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
2361       Node* lock1_node = nullptr;
2362       ProjNode* proj = if_node->as_If()->proj_out(!if_true);
2363       if (if_true) {
2364         if (proj->is_IfFalse() && proj->outcnt() == 1) {
2365           lock1_node = proj->unique_out();
2366         }
2367       } else {
2368         if (proj->is_IfTrue() && proj->outcnt() == 1) {
2369           lock1_node = proj->unique_out();
2370         }
2371       }
2372       if (lock1_node != nullptr && lock1_node->is_Lock()) {
2373         LockNode *lock1 = lock1_node->as_Lock();
2374         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2375         Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
2376         Node* lock1_obj = bs->step_over_gc_barrier(lock1->obj_node());
2377         if (lock_obj->eqv_uncast(lock1_obj) &&
2378             BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
2379             !lock1->is_eliminated()) {
2380           lock_ops.append(lock1);
2381           return true;
2382         }
2383       }
2384     }
2385   }
2386 
2387   lock_ops.trunc_to(0);
2388   return false;
2389 }
2390 
2391 bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
2392                                GrowableArray<AbstractLockNode*> &lock_ops) {
2393   // check each control merging at this point for a matching unlock.
2394   // in(0) should be self edge so skip it.
2395   for (int i = 1; i < (int)region->req(); i++) {
2396     Node *in_node = next_control(region->in(i));
2397     if (in_node != nullptr) {
2398       if (find_matching_unlock(in_node, lock, lock_ops)) {
2399         // found a match so keep on checking.
2400         continue;
2401       } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
2402         continue;
2403       }
2404 
2405       // If we fall through to here then it was some kind of node we
2406       // don't understand or there wasn't a matching unlock, so give
2407       // up trying to merge locks.
2408       lock_ops.trunc_to(0);
2409       return false;
2410     }
2411   }
2412   return true;
2414 }
2415 
2416 // Check that all locks/unlocks associated with object come from balanced regions.
2417 bool AbstractLockNode::is_balanced() {
2418   Node* obj = obj_node();
2419   for (uint j = 0; j < obj->outcnt(); j++) {
2420     Node* n = obj->raw_out(j);
2421     if (n->is_AbstractLock() &&
2422         n->as_AbstractLock()->obj_node()->eqv_uncast(obj)) {
2423       BoxLockNode* n_box = n->as_AbstractLock()->box_node()->as_BoxLock();
2424       if (n_box->is_unbalanced()) {
2425         return false;
2426       }
2427     }
2428   }
2429   return true;
2430 }
2431 
2432 const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"};
2433 
2434 const char * AbstractLockNode::kind_as_string() const {
2435   return _kind_names[_kind];
2436 }
2437 
2438 #ifndef PRODUCT
2439 //
2440 // Create a counter which counts the number of times this lock is acquired
2441 //
2442 void AbstractLockNode::create_lock_counter(JVMState* state) {
2443   _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
2444 }
2445 
2446 void AbstractLockNode::set_eliminated_lock_counter() {
2447   if (_counter) {
2448     // Update the counter to indicate that this lock was eliminated.
2449     // The counter update code will stay around even though the
2450     // optimizer will eliminate the lock operation itself.
2451     _counter->set_tag(NamedCounter::EliminatedLockCounter);
2452   }
2453 }
2454 
2455 void AbstractLockNode::dump_spec(outputStream* st) const {
2456   st->print("%s ", _kind_names[_kind]);
2457   CallNode::dump_spec(st);
2458 }
2459 
2460 void AbstractLockNode::dump_compact_spec(outputStream* st) const {
2461   st->print("%s", _kind_names[_kind]);
2462 }
2463 #endif
2464 
2465 //=============================================================================
2466 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2467 
2468   // perform any generic optimizations first (returns 'this' or null)
2469   Node *result = SafePointNode::Ideal(phase, can_reshape);
2470   if (result != nullptr)  return result;
2471   // Don't bother trying to transform a dead node
2472   if (in(0) && in(0)->is_top())  return nullptr;
2473 
2474   // Now see if we can optimize away this lock.  We don't actually
2475   // remove the locking here, we simply set the _eliminate flag which
2476   // prevents macro expansion from expanding the lock.  Since we don't
2477   // modify the graph, the value returned from this function is the
2478   // one computed above.
2479   const Type* obj_type = phase->type(obj_node());
2480   if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
2481     //
2482     // If we are locking a non-escaped object, the lock/unlock is unnecessary
2483     //
2484     ConnectionGraph *cgr = phase->C->congraph();
2485     if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
2486       assert(!is_eliminated() || is_coarsened(), "sanity");
2487       // The lock could be marked eliminated by lock coarsening
2488       // code during the first IGVN pass, before EA. Replace the coarsened flag
2489       // to eliminate all associated locks/unlocks.
2490 #ifdef ASSERT
2491       this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
2492 #endif
2493       this->set_non_esc_obj();
2494       return result;
2495     }
2496 
2497     if (!phase->C->do_locks_coarsening()) {
2498       return result; // Compiling without locks coarsening
2499     }
2500     //
2501     // Try lock coarsening
2502     //
2503     PhaseIterGVN* iter = phase->is_IterGVN();
2504     if (iter != nullptr && !is_eliminated()) {
2505 
2506       GrowableArray<AbstractLockNode*>   lock_ops;
2507 
2508       Node *ctrl = next_control(in(0));
2509 
2510       // now search back for a matching Unlock
2511       if (find_matching_unlock(ctrl, this, lock_ops)) {
2512         // found an unlock directly preceding this lock.  This is the
2513         // case of single unlock directly control dependent on a
2514         // single lock which is the trivial version of case 1 or 2.
2515       } else if (ctrl->is_Region() ) {
2516         if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
2517         // found lock preceded by multiple unlocks along all paths
2518         // joining at this point which is case 3 in description above.
2519         }
2520       } else {
2521         // see if this lock comes from either half of an if and the
2522         // see if this lock comes from either half of an if where the
2523         // predecessor merges unlocks and the other half of the if
2524         // performs a lock.
2525           // found unlock splitting to an if with locks on both branches.
2526         }
2527       }
2528 
2529       if (lock_ops.length() > 0) {
2530         // add ourselves to the list of locks to be eliminated.
2531         lock_ops.append(this);
2532 
2533   #ifndef PRODUCT
2534         if (PrintEliminateLocks) {
2535           int locks = 0;
2536           int unlocks = 0;
2537           if (Verbose) {
2538             tty->print_cr("=== Locks coarsening ===");
2539             tty->print("Obj: ");
2540             obj_node()->dump();
2541           }
2542           for (int i = 0; i < lock_ops.length(); i++) {
2543             AbstractLockNode* lock = lock_ops.at(i);
2544             if (lock->Opcode() == Op_Lock)
2545               locks++;
2546             else
2547               unlocks++;
2548             if (Verbose) {
2549               tty->print("Box %d: ", i);
2550               box_node()->dump();
2551               tty->print(" %d: ", i);
2552               lock->dump();
2553             }
2554           }
2555           tty->print_cr("=== Coarsened %d unlocks and %d locks", unlocks, locks);
2556         }
2557   #endif
2558 
2559         // for each of the identified locks, mark them
2560         // as eliminatable
2561         for (int i = 0; i < lock_ops.length(); i++) {
2562           AbstractLockNode* lock = lock_ops.at(i);
2563 
2564           // Mark it eliminated by coarsening and update any counters
2565 #ifdef ASSERT
2566           lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
2567 #endif
2568           lock->set_coarsened();
2569         }
2570         // Record this coarsened group.
2571         phase->C->add_coarsened_locks(lock_ops);
2572       } else if (ctrl->is_Region() &&
2573                  iter->_worklist.member(ctrl)) {
2574         // We weren't able to find any opportunities but the region this
2575         // lock is control dependent on hasn't been processed yet so put
2576         // this lock back on the worklist so we can check again once any
2577         // region simplification has occurred.
2578         iter->_worklist.push(this);
2579       }
2580     }
2581   }
2582 
2583   return result;
2584 }
2585 
2586 //=============================================================================
2587 bool LockNode::is_nested_lock_region() {
2588   return is_nested_lock_region(nullptr);
2589 }
2590 
2591 // c is used for access to the compilation log; no logging if it is null
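// A nested lock region typically comes from a shape like this (with the same
// object locked on both levels):
//   synchronized (obj) {      // outer monitor, lower stack slot
//     ...
//     synchronized (obj) {    // inner lock: nested, eliminable
//       ...
//     }
//   }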
2592 bool LockNode::is_nested_lock_region(Compile * c) {
2593   BoxLockNode* box = box_node()->as_BoxLock();
2594   int stk_slot = box->stack_slot();
2595   if (stk_slot <= 0) {
2596 #ifdef ASSERT
2597     this->log_lock_optimization(c, "eliminate_lock_INLR_1");
2598 #endif
2599     return false; // External lock, or it is not a Box (e.g., a Phi node).
2600   }
2601 
2602   // Ignore complex cases: merged locks or multiple locks.
2603   Node* obj = obj_node();
2604   LockNode* unique_lock = nullptr;
2605   Node* bad_lock = nullptr;
2606   if (!box->is_simple_lock_region(&unique_lock, obj, &bad_lock)) {
2607 #ifdef ASSERT
2608     this->log_lock_optimization(c, "eliminate_lock_INLR_2a", bad_lock);
2609 #endif
2610     return false;
2611   }
2612   if (unique_lock != this) {
2613 #ifdef ASSERT
2614     this->log_lock_optimization(c, "eliminate_lock_INLR_2b", (unique_lock != nullptr ? unique_lock : bad_lock));
2615     if (PrintEliminateLocks && Verbose) {
2616       tty->print_cr("=============== unique_lock != this ============");
2617       tty->print(" this: ");
2618       this->dump();
2619       tty->print(" box: ");
2620       box->dump();
2621       tty->print(" obj: ");
2622       obj->dump();
2623       if (unique_lock != nullptr) {
2624         tty->print(" unique_lock: ");
2625         unique_lock->dump();
2626       }
2627       if (bad_lock != nullptr) {
2628         tty->print(" bad_lock: ");
2629         bad_lock->dump();
2630       }
2631       tty->print_cr("===============");
2632     }
2633 #endif
2634     return false;
2635   }
2636 
2637   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2638   obj = bs->step_over_gc_barrier(obj);
2639   // Look for external lock for the same object.
2640   SafePointNode* sfn = this->as_SafePoint();
2641   JVMState* youngest_jvms = sfn->jvms();
2642   int max_depth = youngest_jvms->depth();
2643   for (int depth = 1; depth <= max_depth; depth++) {
2644     JVMState* jvms = youngest_jvms->of_depth(depth);
2645     int num_mon  = jvms->nof_monitors();
2646     // Loop over monitors
2647     for (int idx = 0; idx < num_mon; idx++) {
2648       Node* obj_node = sfn->monitor_obj(jvms, idx);
2649       obj_node = bs->step_over_gc_barrier(obj_node);
2650       BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
2651       if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
2652         box->set_nested();
2653         return true;
2654       }
2655     }
2656   }
2657 #ifdef ASSERT
2658   this->log_lock_optimization(c, "eliminate_lock_INLR_3");
2659 #endif
2660   return false;
2661 }
2662 
2663 //=============================================================================
2664 uint UnlockNode::size_of() const { return sizeof(*this); }
2665 
2666 //=============================================================================
2667 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2668 
2669   // perform any generic optimizations first (returns 'this' or null)
2670   Node *result = SafePointNode::Ideal(phase, can_reshape);
2671   if (result != nullptr)  return result;
2672   // Don't bother trying to transform a dead node
2673   if (in(0) && in(0)->is_top())  return nullptr;
2674 
2675   // Now see if we can optimize away this unlock.  We don't actually
2676   // remove the unlocking here, we simply set the _eliminate flag which
2677   // prevents macro expansion from expanding the unlock.  Since we don't
2678   // modify the graph, the value returned from this function is the
2679   // one computed above.
2680   // Escape state is defined after Parse phase.
2681   const Type* obj_type = phase->type(obj_node());
2682   if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
2683     //
2684     // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
2685     //
2686     ConnectionGraph *cgr = phase->C->congraph();
2687     if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
2688       assert(!is_eliminated() || is_coarsened(), "sanity");
2689       // The lock could be marked eliminated by lock coarsening
2690       // code during the first IGVN pass, before EA. Replace the coarsened flag
2691       // to eliminate all associated locks/unlocks.
2692 #ifdef ASSERT
2693       this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
2694 #endif
2695       this->set_non_esc_obj();
2696     }
2697   }
2698   return result;
2699 }
2700 
2701 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock)  const {
2702   if (C == nullptr) {
2703     return;
2704   }
2705   CompileLog* log = C->log();
2706   if (log != nullptr) {
2707     Node* box = box_node();
2708     Node* obj = obj_node();
2709     int box_id = box != nullptr ? box->_idx : -1;
2710     int obj_id = obj != nullptr ? obj->_idx : -1;
2711 
2712     log->begin_head("%s compile_id='%d' lock_id='%d' class='%s' kind='%s' box_id='%d' obj_id='%d' bad_id='%d'",
2713           tag, C->compile_id(), this->_idx,
2714           is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
2715           kind_as_string(), box_id, obj_id, (bad_lock != nullptr ? bad_lock->_idx : -1));
2716     log->stamp();
2717     log->end_head();
2718     JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
2719     while (p != nullptr) {
2720       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
2721       p = p->caller();
2722     }
2723     log->tail(tag);
2724   }
2725 }
2726 
2727 bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) const {
2728   if (dest_t->is_known_instance() && t_oop->is_known_instance()) {
2729     return dest_t->instance_id() == t_oop->instance_id();
2730   }
2731 
2732   if (dest_t->isa_instptr() && !dest_t->is_instptr()->instance_klass()->equals(phase->C->env()->Object_klass())) {
2733     // clone
2734     if (t_oop->isa_aryptr()) {
2735       return false;
2736     }
2737     if (!t_oop->isa_instptr()) {
2738       return true;
2739     }
2740     if (dest_t->maybe_java_subtype_of(t_oop) || t_oop->maybe_java_subtype_of(dest_t)) {
2741       return true;
2742     }
2743     // unrelated
2744     return false;
2745   }
2746 
2747   if (dest_t->isa_aryptr()) {
2748     // arraycopy or array clone
2749     if (t_oop->isa_instptr()) {
2750       return false;
2751     }
2752     if (!t_oop->isa_aryptr()) {
2753       return true;
2754     }
2755 
2756     const Type* elem = dest_t->is_aryptr()->elem();
2757     if (elem == Type::BOTTOM) {
2758       // An array but we don't know what elements are
2759       return true;
2760     }
2761 
2762     dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
2763     t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
2764     uint dest_alias = phase->C->get_alias_index(dest_t);
2765     uint t_oop_alias = phase->C->get_alias_index(t_oop);
2766 
2767     return dest_alias == t_oop_alias;
2768   }
2769 
2770   return true;
2771 }
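// In short: a clone of a concrete (non-Object) instance type can only modify
// instances of related instance types; an arraycopy or array clone can only
// modify arrays that share the destination's element alias; anything else
// conservatively answers true.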
2772 
2773 PowDNode::PowDNode(Compile* C, Node* base, Node* exp)
2774     : CallLeafPureNode(
2775         OptoRuntime::Math_DD_D_Type(),
2776         StubRoutines::dpow() != nullptr ? StubRoutines::dpow() : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
2777         "pow") {
2778   add_flag(Flag_is_macro);
2779   C->add_macro_node(this);
2780 
2781   init_req(TypeFunc::Parms + 0, base);
2782   init_req(TypeFunc::Parms + 1, C->top());  // double slot padding
2783   init_req(TypeFunc::Parms + 2, exp);
2784   init_req(TypeFunc::Parms + 3, C->top());  // double slot padding
2785 }
2786 
2787 const Type* PowDNode::Value(PhaseGVN* phase) const {
2788   const Type* t_base = phase->type(base());
2789   const Type* t_exp  = phase->type(exp());
2790 
2791   if (t_base == Type::TOP || t_exp == Type::TOP) {
2792     return Type::TOP;
2793   }
2794 
2795   const TypeD* base_con = t_base->isa_double_constant();
2796   const TypeD* exp_con  = t_exp->isa_double_constant();
2797   const TypeD* result_t = nullptr;
2798 
2799   // Constant folding: both inputs are constants
2800   if (base_con != nullptr && exp_con != nullptr) {
2801     result_t = TypeD::make(SharedRuntime::dpow(base_con->getd(), exp_con->getd()));
2802   }
2803 
2804   // Special cases when only the exponent is known:
2805   if (exp_con != nullptr) {
2806     double e = exp_con->getd();
2807 
2808     // If the second argument is positive or negative zero, then the result is 1.0.
2809     // i.e., pow(x, +/-0.0D) => 1.0
2810     if (e == 0.0) { // true for both -0.0 and +0.0
2811       result_t = TypeD::ONE;
2812     }
2813 
2814     // If the second argument is NaN, then the result is NaN.
2815     // i.e., pow(x, NaN) => NaN
2816     if (g_isnan(e)) {
2817       result_t = TypeD::make(NAN);
2818     }
2819   }
2820 
2821   if (result_t != nullptr) {
2822     // We can't simply return a TypeD here; it must be a tuple type to be compatible with call nodes.
2823     const Type** fields = TypeTuple::fields(2);
2824     fields[TypeFunc::Parms + 0] = result_t;
2825     fields[TypeFunc::Parms + 1] = Type::HALF;
2826     return TypeTuple::make(TypeFunc::Parms + 2, fields);
2827   }
2828 
2829   return tf()->range_cc();
2830 }
2831 
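// Strength-reduce pow() calls with a constant exponent:
//   pow(x, 1.0) => x
//   pow(x, 2.0) => x * x
//   pow(x, 0.5) => sqrt(x), guarded by a runtime check for x > 0
// Anything else is left to the generic CallLeafPureNode::Ideal().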
2832 Node* PowDNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2833   if (!can_reshape) {
2834     return nullptr;  // wait for igvn
2835   }
2836 
2837   PhaseIterGVN* igvn = phase->is_IterGVN();
2838   Node* base = this->base();
2839   Node* exp  = this->exp();
2840 
2841   const Type* t_exp  = phase->type(exp);
2842   const TypeD* exp_con  = t_exp->isa_double_constant();
2843 
2844   // Special cases when only the exponent is known:
2845   if (exp_con != nullptr) {
2846     double e = exp_con->getd();
2847 
2848     // If the second argument is 1.0, then the result is the same as the first argument.
2849     // i.e., pow(x, 1.0) => x
2850     if (e == 1.0) {
2851       return make_tuple_of_input_state_and_result(igvn, base);
2852     }
2853 
2854     // If the second argument is 2.0, then strength reduce to multiplications.
2855     // i.e., pow(x, 2.0) => x * x
2856     if (e == 2.0) {
2857       Node* mul = igvn->transform(new MulDNode(base, base));
2858       return make_tuple_of_input_state_and_result(igvn, mul);
2859     }
2860 
2861     // If the second argument is 0.5, then strength reduce to square roots.
2862     // i.e., pow(x, 0.5) => sqrt(x) iff x > 0
2863     if (e == 0.5 && Matcher::match_rule_supported(Op_SqrtD)) {
2864       Node* ctrl = in(TypeFunc::Control);
2865       Node* zero = igvn->zerocon(T_DOUBLE);
2866 
2867       // According to the API specs, pow(-0.0, 0.5) = 0.0 and sqrt(-0.0) = -0.0.
2868       // So pow(-0.0, 0.5) shouldn't be replaced with sqrt(-0.0).
2869       // Both -0.0 and +0.0 are excluded (routed to the slow path) since floating-point comparison doesn't distinguish -0.0 from +0.0.
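      //
      // The subgraph built below:
      //
      //   CmpD(base, 0.0) -> Bool(le) -> If
      //     IfTrue  (x <= 0): slow path, leaf call to pow(x, 0.5)
      //     IfFalse (x >  0): fast path, SqrtD(base)
      //
      // A Region merges the two control paths, and a Phi merges the call
      // result with the sqrt result.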
2870       Node* cmp = igvn->register_new_node_with_optimizer(new CmpDNode(base, zero));
2871       Node* test = igvn->register_new_node_with_optimizer(new BoolNode(cmp, BoolTest::le));
2872 
2873       IfNode* iff = new IfNode(ctrl, test, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
2874       igvn->register_new_node_with_optimizer(iff);
2875       Node* if_slow = igvn->register_new_node_with_optimizer(new IfTrueNode(iff));  // x <= 0
2876       Node* if_fast = igvn->register_new_node_with_optimizer(new IfFalseNode(iff)); // x > 0
2877 
2878       // slow path: call pow(x, 0.5)
2879       Node* call = igvn->register_new_node_with_optimizer(inline_call_leaf_pure_node(if_slow));
2880       Node* call_ctrl = igvn->register_new_node_with_optimizer(new ProjNode(call, TypeFunc::Control));
2881       Node* call_result = igvn->register_new_node_with_optimizer(new ProjNode(call, TypeFunc::Parms + 0));
2882 
2883       // fast path: sqrt(x)
2884       Node* sqrt = igvn->register_new_node_with_optimizer(new SqrtDNode(igvn->C, if_fast, base));
2885 
2886       // merge paths
2887       RegionNode* region = new RegionNode(3);
2888       igvn->register_new_node_with_optimizer(region);
2889       region->init_req(1, call_ctrl); // slow path
2890       region->init_req(2, if_fast);   // fast path
2891 
2892       PhiNode* phi = new PhiNode(region, Type::DOUBLE);
2893       igvn->register_new_node_with_optimizer(phi);
2894       phi->init_req(1, call_result); // slow: pow() result
2895       phi->init_req(2, sqrt);        // fast: sqrt() result
2896 
2897       igvn->C->set_has_split_ifs(true); // Has chance for split-if optimization
2898 
2899       return make_tuple_of_input_state_and_result(igvn, phi, region);
2900     }
2901   }
2902 
2903   return CallLeafPureNode::Ideal(phase, can_reshape);
2904 }
2905 
2906 // We can't simply have Ideal() return a Con or MulNode since the users still expect a Call node, but we can
2907 // produce a tuple that follows the same pattern so users can still get control, I/O, memory, etc.
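// For example, when pow(x, 1.0) is reduced to x, the returned tuple is
// (control, i_o, memory, frameptr, returnadr, x, top), so the projections
// hanging off the old call can fold to the matching tuple fields during IGVN.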
2908 TupleNode* PowDNode::make_tuple_of_input_state_and_result(PhaseIterGVN* phase, Node* result, Node* control) {
2909   if (control == nullptr) {
2910     control = in(TypeFunc::Control);
2911   }
2912 
2913   Compile* C = phase->C;
2914   C->remove_macro_node(this);
2915   TupleNode* tuple = TupleNode::make(
2916       tf()->range_cc(),
2917       control,
2918       in(TypeFunc::I_O),
2919       in(TypeFunc::Memory),
2920       in(TypeFunc::FramePtr),
2921       in(TypeFunc::ReturnAdr),
2922       result,
2923       C->top());
2924   return tuple;
2925 }
--- EOF ---