src/hotspot/share/opto/macro.cpp

   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "compiler/compileLog.hpp"
  26 #include "gc/shared/collectedHeap.inline.hpp"
  27 #include "gc/shared/tlab_globals.hpp"
  28 #include "libadt/vectset.hpp"
  29 #include "memory/universe.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/arraycopynode.hpp"
  32 #include "opto/callnode.hpp"
  33 #include "opto/castnode.hpp"
  34 #include "opto/cfgnode.hpp"
  35 #include "opto/compile.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/graphKit.hpp"
  38 #include "opto/intrinsicnode.hpp"
  39 #include "opto/locknode.hpp"
  40 #include "opto/loopnode.hpp"
  41 #include "opto/macro.hpp"
  42 #include "opto/memnode.hpp"
  43 #include "opto/narrowptrnode.hpp"
  44 #include "opto/node.hpp"
  45 #include "opto/opaquenode.hpp"
  46 #include "opto/phaseX.hpp"
  47 #include "opto/reachability.hpp"
  48 #include "opto/rootnode.hpp"
  49 #include "opto/runtime.hpp"
  50 #include "opto/subnode.hpp"
  51 #include "opto/subtypenode.hpp"
  52 #include "opto/type.hpp"
  53 #include "prims/jvmtiExport.hpp"
  54 #include "runtime/continuation.hpp"
  55 #include "runtime/sharedRuntime.hpp"
  56 #include "utilities/globalDefinitions.hpp"
  57 #include "utilities/macros.hpp"
  58 #include "utilities/powerOfTwo.hpp"
  59 #if INCLUDE_G1GC
  60 #include "gc/g1/g1ThreadLocalData.hpp"
  61 #endif // INCLUDE_G1GC
  62 
  63 
  64 //
  65 // Replace any references to "oldref" in inputs to "use" with "newref".
  66 // Returns the number of replacements made.
  67 //
  68 int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  69   int nreplacements = 0;
  70   uint req = use->req();
  71   for (uint j = 0; j < use->len(); j++) {
  72     Node *uin = use->in(j);
  73     if (uin == oldref) {
  74       if (j < req)
  75         use->set_req(j, newref);
  76       else
  77         use->set_prec(j, newref);
  78       nreplacements++;
  79     } else if (j >= req && uin == nullptr) {
  80       break;
  81     }
  82   }
  83   return nreplacements;
  84 }
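//
// Illustrative example (hypothetical indices): for a use with req() == 2 and
// one precedence edge, a match at j == 1 is rewired via set_req(1, newref),
// while a match at j == 2 (j >= req) is rewired via set_prec(2, newref);
// the function then returns 2.
//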
  85 
  86 void PhaseMacroExpand::migrate_outs(Node *old, Node *target) {
  87   assert(old != nullptr, "sanity");
  88   for (DUIterator_Fast imax, i = old->fast_outs(imax); i < imax; i++) {
  89     Node* use = old->fast_out(i);
  90     _igvn.rehash_node_delayed(use);
  91     imax -= replace_input(use, old, target);
  92     // back up iterator
  93     --i;
  94   }
  95   assert(old->outcnt() == 0, "all uses must be deleted");
  96 }
  97 
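// Emit a test of "word" against zero. The non-zero (slow) outcome is treated
// as unlikely (PROB_MIN): the fast projection (IfFalse) is wired into
// region->in(edge) and the slow projection (IfTrue) is returned to the caller.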
  98 Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word) {
  99   Node* cmp = word;
 100   Node* bol = transform_later(new BoolNode(cmp, BoolTest::ne));
 101   IfNode* iff = new IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
 102   transform_later(iff);
 103 
 104   // Fast path taken.
 105   Node *fast_taken = transform_later(new IfFalseNode(iff));
 106 
 107   // Fast path not-taken, i.e. slow path
 108   Node *slow_taken = transform_later(new IfTrueNode(iff));
 109 
  110   region->init_req(edge, fast_taken); // Capture fast-control
  111   return slow_taken;
 112 }
 113 
 114 //--------------------copy_predefined_input_for_runtime_call--------------------
 115 void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call) {
 116   // Set fixed predefined input arguments
 129   // Slow-path call
 130   CallNode* call = leaf_name
 131     ? (CallNode*)new CallLeafNode      ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
 132     : (CallNode*)new CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), TypeRawPtr::BOTTOM );
 133 
 134   // Slow path call has no side-effects, uses few values
 135   copy_predefined_input_for_runtime_call(slow_path, oldcall, call );
 136   if (parm0 != nullptr)  call->init_req(TypeFunc::Parms+0, parm0);
 137   if (parm1 != nullptr)  call->init_req(TypeFunc::Parms+1, parm1);
 138   if (parm2 != nullptr)  call->init_req(TypeFunc::Parms+2, parm2);
 139   call->copy_call_debug_info(&_igvn, oldcall);
 140   call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
 141   _igvn.replace_node(oldcall, call);
 142   transform_later(call);
 143 
 144   return call;
 145 }
 146 
 147 void PhaseMacroExpand::eliminate_gc_barrier(Node* p2x) {
 148   BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
 149   bs->eliminate_gc_barrier(this, p2x);
 150 #ifndef PRODUCT
 151   if (PrintOptoStatistics) {
 152     AtomicAccess::inc(&PhaseMacroExpand::_GC_barriers_removed_counter);
 153   }
 154 #endif
 155 }
 156 
 157 // Search for a memory operation for the specified memory slice.
 158 static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) {
 159   Node *orig_mem = mem;
 160   Node *alloc_mem = alloc->as_Allocate()->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
 161   assert(alloc_mem != nullptr, "Allocation without a memory projection.");
 162   const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
 163   while (true) {
 164     if (mem == alloc_mem || mem == start_mem ) {
 165       return mem;  // hit one of our sentinels
 166     } else if (mem->is_MergeMem()) {
 167       mem = mem->as_MergeMem()->memory_at(alias_idx);
 168     } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) {
 169       Node *in = mem->in(0);
 172       if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) {
 173         return in;
 174       } else if (in->is_Call()) {
 175         CallNode *call = in->as_Call();
 176         if (call->may_modify(tinst, phase)) {
 177           assert(call->is_ArrayCopy(), "ArrayCopy is the only call node that doesn't make allocation escape");
 178           if (call->as_ArrayCopy()->modifies(offset, offset, phase, false)) {
 179             return in;
 180           }
 181         }
 182         mem = in->in(TypeFunc::Memory);
 183       } else if (in->is_MemBar()) {
 184         ArrayCopyNode* ac = nullptr;
 185         if (ArrayCopyNode::may_modify(tinst, in->as_MemBar(), phase, ac)) {
 186           if (ac != nullptr) {
 187             assert(ac->is_clonebasic(), "Only basic clone is a non escaping clone");
 188             return ac;
 189           }
 190         }
 191         mem = in->in(TypeFunc::Memory);
 192       } else {
 193 #ifdef ASSERT
 194         in->dump();
 195         mem->dump();
 196         assert(false, "unexpected projection");
 197 #endif
 198       }
 199     } else if (mem->is_Store()) {
 200       const TypePtr* atype = mem->as_Store()->adr_type();
 201       int adr_idx = phase->C->get_alias_index(atype);
 202       if (adr_idx == alias_idx) {
 203         assert(atype->isa_oopptr(), "address type must be oopptr");
 204         int adr_offset = atype->offset();
 205         uint adr_iid = atype->is_oopptr()->instance_id();
 206         // Array element references have the same alias_idx
 207         // but different offsets and different instance_ids.
 208         if (adr_offset == offset && adr_iid == alloc->_idx) {
 209           return mem;
 210         }
 211       } else {
 212         assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
 213       }
 214       mem = mem->in(MemNode::Memory);
 215     } else if (mem->is_ClearArray()) {
 216       if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
 217         // Cannot bypass initialization of the instance
 218         // we are looking for.
 219         DEBUG_ONLY(intptr_t offset;)
 220         assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
 221         InitializeNode* init = alloc->as_Allocate()->initialization();
 222         // We are looking for stored value, return Initialize node
 223         // or memory edge from Allocate node.
 224         if (init != nullptr) {
 229       }
 230       // Otherwise skip it (the call updated 'mem' value).
 231     } else if (mem->Opcode() == Op_SCMemProj) {
 232       mem = mem->in(0);
 233       Node* adr = nullptr;
 234       if (mem->is_LoadStore()) {
 235         adr = mem->in(MemNode::Address);
 236       } else {
 237         assert(mem->Opcode() == Op_EncodeISOArray ||
 238                mem->Opcode() == Op_StrCompressedCopy, "sanity");
 239         adr = mem->in(3); // Destination array
 240       }
 241       const TypePtr* atype = adr->bottom_type()->is_ptr();
 242       int adr_idx = phase->C->get_alias_index(atype);
 243       if (adr_idx == alias_idx) {
 244         DEBUG_ONLY(mem->dump();)
 245         assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
 246         return nullptr;
 247       }
 248       mem = mem->in(MemNode::Memory);
 249     } else if (mem->Opcode() == Op_StrInflatedCopy) {
 250       Node* adr = mem->in(3); // Destination array
 251       const TypePtr* atype = adr->bottom_type()->is_ptr();
 252       int adr_idx = phase->C->get_alias_index(atype);
 253       if (adr_idx == alias_idx) {
 254         DEBUG_ONLY(mem->dump();)
 255         assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
 256         return nullptr;
 257       }
 258       mem = mem->in(MemNode::Memory);
 259     } else {
 260       return mem;
 261     }
 262     assert(mem != orig_mem, "dead memory loop");
 263   }
 264 }
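// Illustrative walk (a hedged sketch, not the only possible path): a typical
// successful scan steps
//   MergeMem -> memory_at(alias_idx) -> Store at the same offset/instance_id
// and returns that Store, while reaching start_mem or the allocation's memory
// projection means the field still holds its default zero value.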
 265 
 266 // Determine if there is an interfering store between a rematerialization load and an arraycopy that is in the process
 267 // of being elided. Starting from the given rematerialization load, this method starts a BFS traversal upwards through
 268 // the memory graph towards the provided ArrayCopyNode. For every node encountered on the traversal, check that it is
 269 // independent of the provided rematerialization load. Returns false if every node on the traversal is independent and
 309 // Generate loads from source of the arraycopy for fields of destination needed at a deoptimization point.
 310 // Returns nullptr if the load cannot be created because the arraycopy is not suitable for elimination
 311 // (e.g. copy inside the array with non-constant offsets) or the inputs do not match our assumptions (e.g.
 312 // the arraycopy does not actually write something at the provided offset).
 313 Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type* ftype, AllocateNode* alloc) {
 314   assert((ctl == ac->control() && mem == ac->memory()) != (mem != ac->memory() && ctl->is_Proj() && ctl->as_Proj()->is_uncommon_trap_proj()),
 315     "Either the control and memory are the same as for the arraycopy or they are pinned in an uncommon trap.");
 316   BasicType bt = ft;
 317   const Type *type = ftype;
 318   if (ft == T_NARROWOOP) {
 319     bt = T_OBJECT;
 320     type = ftype->make_oopptr();
 321   }
 322   Node* base = ac->in(ArrayCopyNode::Src);
 323   Node* adr = nullptr;
 324   const TypePtr* adr_type = nullptr;
 325 
 326   if (ac->is_clonebasic()) {
 327     assert(ac->in(ArrayCopyNode::Src) != ac->in(ArrayCopyNode::Dest), "clone source equals destination");
 328     adr = _igvn.transform(AddPNode::make_with_base(base, _igvn.MakeConX(offset)));
 329     adr_type = _igvn.type(base)->is_ptr()->add_offset(offset);
 330   } else {
 331     if (!ac->modifies(offset, offset, &_igvn, true)) {
 332       // If the arraycopy does not copy to this offset, we cannot generate a rematerialization load for it.
 333       return nullptr;
 334     }
 335     assert(ac->in(ArrayCopyNode::Dest) == alloc->result_cast(), "arraycopy destination should be allocation's result");
 336     uint shift = exact_log2(type2aelembytes(bt));
 337     Node* src_pos = ac->in(ArrayCopyNode::SrcPos);
 338     Node* dest_pos = ac->in(ArrayCopyNode::DestPos);
 339     const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int();
 340     const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int();
 341 
 342     if (src_pos_t->is_con() && dest_pos_t->is_con()) {
 343       intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset;
 344       adr = _igvn.transform(AddPNode::make_with_base(base, _igvn.MakeConX(off)));
 345       adr_type = _igvn.type(base)->is_ptr()->add_offset(off);
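      // Worked example (illustrative, assuming a 16-byte array header): for an
      // int array (shift == 2) with src_pos == 0, dest_pos == 2 and a field at
      // offset 24 (destination element 2), off == ((0 - 2) << 2) + 24 == 16,
      // i.e. the value is loaded from source element 0.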
 346       if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
 347         // Don't emit a new load from src if src == dst but try to get the value from memory instead
 348         return value_from_mem(ac, ctl, ft, ftype, adr_type->isa_oopptr(), alloc);
 349       }
 350     } else {
 351       Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
 352 #ifdef _LP64
 353       diff = _igvn.transform(new ConvI2LNode(diff));
 354 #endif
 355       diff = _igvn.transform(new LShiftXNode(diff, _igvn.intcon(shift)));
 356 
 357       Node* off = _igvn.transform(new AddXNode(_igvn.MakeConX(offset), diff));
 358       adr = _igvn.transform(AddPNode::make_with_base(base, off));
 359       adr_type = _igvn.type(base)->is_ptr()->add_offset(Type::OffsetBot);
 360       if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
 361         // Non-constant offset into the array: we can't statically
 362         // determine the value.
 363         return nullptr;
 364       }
 365     }
 366   }
 367   assert(adr != nullptr && adr_type != nullptr, "sanity");
 368 
 369   // Create the rematerialization load ...
 370   MergeMemNode* mergemem = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem();
 371   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 372   Node* res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemem, adr, adr_type, type, bt);
 373   assert(res != nullptr, "load should have been created");
 374 
 375   // ... and ensure that pinning the rematerialization load inside the uncommon path is safe.
 376   if (mem != ac->memory() && ctl->is_Proj() && ctl->as_Proj()->is_uncommon_trap_proj() && res->is_Load() &&
 377       has_interfering_store(ac, res->as_Load(), &_igvn)) {
 378     // Not safe: use control and memory from the arraycopy to ensure correct memory state.
 379     _igvn.remove_dead_node(res, PhaseIterGVN::NodeOrigin::Graph); // Clean up the unusable rematerialization load.
 380     return make_arraycopy_load(ac, offset, ac->control(), ac->memory(), ft, ftype, alloc);
 381   }
 382 
 383   if (ftype->isa_narrowoop()) {
 384     // PhaseMacroExpand::scalar_replacement adds DecodeN nodes
 385     res = _igvn.transform(new EncodePNode(res, ftype));
 386   }
 387   return res;
 388 }
 389 
 390 //
 391 // Given a Memory Phi, compute a value Phi containing the values from stores
 392 // on the input paths.
 393 // Note: this function is recursive; its depth is limited by the "level" argument.
 394 // Returns the computed Phi, or nullptr if it cannot be computed.
 395 Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level) {
 396   assert(mem->is_Phi(), "sanity");
 397   int alias_idx = C->get_alias_index(adr_t);
 398   int offset = adr_t->offset();
 399   int instance_id = adr_t->instance_id();
 400 
 401   // Check if an appropriate value phi already exists.
 402   Node* region = mem->in(0);
 403   for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
 404     Node* phi = region->fast_out(k);
 405     if (phi->is_Phi() && phi != mem &&
 406         phi->as_Phi()->is_same_inst_field(phi_type, (int)mem->_idx, instance_id, alias_idx, offset)) {
 407       return phi;
 408     }
 409   }
 410   // Check if an appropriate new value phi already exists.
 411   Node* new_phi = value_phis->find(mem->_idx);
 412   if (new_phi != nullptr)
 413     return new_phi;
 414 
 415   if (level <= 0) {
 416     return nullptr; // Give up: phi tree too deep
 417   }
 418   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
 419   Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
 420   assert(alloc_mem != nullptr, "Allocation without a memory projection.");
 421 
 422   uint length = mem->req();
 423   GrowableArray <Node *> values(length, length, nullptr);
 424 
 425   // create a new Phi for the value
 426   PhiNode *phi = new PhiNode(mem->in(0), phi_type, nullptr, mem->_idx, instance_id, alias_idx, offset);
 427   transform_later(phi);
 428   value_phis->push(phi, mem->_idx);
 429 
 430   for (uint j = 1; j < length; j++) {
 431     Node *in = mem->in(j);
 432     if (in == nullptr || in->is_top()) {
 433       values.at_put(j, in);
 434     } else  {
 435       Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
 436       if (val == start_mem || val == alloc_mem) {
 437         // hit a sentinel, return appropriate 0 value
 438         values.at_put(j, _igvn.zerocon(ft));
 439         continue;
 440       }
 441       if (val->is_Initialize()) {
 442         val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
 443       }
 444       if (val == nullptr) {
 445         return nullptr;  // can't find a value on this path
 446       }
 447       if (val == mem) {
 448         values.at_put(j, mem);
 449       } else if (val->is_Store()) {
 450         Node* n = val->in(MemNode::ValueIn);
 451         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 452         n = bs->step_over_gc_barrier(n);
 453         if (is_subword_type(ft)) {
 454           n = Compile::narrow_value(ft, n, phi_type, &_igvn, true);
 455         }
 456         values.at_put(j, n);
 457       } else if (val->is_Proj() && val->in(0) == alloc) {
 458         values.at_put(j, _igvn.zerocon(ft));
 459       } else if (val->is_Phi()) {
 460         val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
 461         if (val == nullptr) {
 462           return nullptr;
 463         }
 464         values.at_put(j, val);
 465       } else if (val->Opcode() == Op_SCMemProj) {
 466         assert(val->in(0)->is_LoadStore() ||
 467                val->in(0)->Opcode() == Op_EncodeISOArray ||
 468                val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity");
 469         assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
 470         return nullptr;
 471       } else if (val->is_ArrayCopy()) {
 472         Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), val->in(TypeFunc::Memory), ft, phi_type, alloc);
 473         if (res == nullptr) {
 474           return nullptr;
 475         }
 476         values.at_put(j, res);
 477       } else if (val->is_top()) {
 478         // This indicates that this path into the phi is dead. Top will eventually also propagate into the Region.
 479         // IGVN will clean this up later.
 480         values.at_put(j, val);
 481       } else {
 482         DEBUG_ONLY( val->dump(); )
 483         assert(false, "unknown node on this path");
 484         return nullptr;  // unknown node on this path
 485       }
 486     }
 487   }
 488   // Set Phi's inputs
 489   for (uint j = 1; j < length; j++) {
 490     if (values.at(j) == mem) {
 491       phi->init_req(j, phi);
 492     } else {
 493       phi->init_req(j, values.at(j));
 494     }
 495   }
 496   return phi;
 497 }
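// Illustrative result (hedged example values): for a memory Phi merging a path
// that stores ConI(42) into the field with a path whose memory chain scans back
// to the allocation's memory projection, the computed value phi is
//   Phi(region, ConI(42), zerocon(ft)).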
 498 
 499 // Search the last value stored into the object's field.
 500 Node* PhaseMacroExpand::value_from_mem(Node* origin, Node* ctl, BasicType ft, const Type* ftype, const TypeOopPtr* adr_t, AllocateNode* alloc) {
 501   assert(adr_t->is_known_instance_field(), "instance required");
 502   int instance_id = adr_t->instance_id();
 503   assert((uint)instance_id == alloc->_idx, "wrong allocation");
 504 
 505   int alias_idx = C->get_alias_index(adr_t);
 506   int offset = adr_t->offset();
 507   Node* orig_mem = origin->in(TypeFunc::Memory);
 508   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
 509   Node *alloc_ctrl = alloc->in(TypeFunc::Control);
 510   Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
 511   assert(alloc_mem != nullptr, "Allocation without a memory projection.");
 512   VectorSet visited;
 513 
 514   bool done = orig_mem == alloc_mem;
 515   Node *mem = orig_mem;
 516   while (!done) {
 517     if (visited.test_set(mem->_idx)) {
 518       return nullptr;  // found a loop, give up
 519     }
 520     mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
 521     if (mem == start_mem || mem == alloc_mem) {
 522       done = true;  // hit a sentinel, return appropriate 0 value
 523     } else if (mem->is_Initialize()) {
 524       mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
 525       if (mem == nullptr) {
 526         done = true; // Something went wrong.
 527       } else if (mem->is_Store()) {
 528         const TypePtr* atype = mem->as_Store()->adr_type();
 529         assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
 530         done = true;
 531       }
 532     } else if (mem->is_Store()) {
 533       const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
 534       assert(atype != nullptr, "address type must be oopptr");
 535       assert(C->get_alias_index(atype) == alias_idx &&
 536              atype->is_known_instance_field() && atype->offset() == offset &&
 537              atype->instance_id() == instance_id, "store is correct memory slice");
 538       done = true;
 539     } else if (mem->is_Phi()) {
 540       // try to find a phi's unique input
 541       Node *unique_input = nullptr;
 542       Node *top = C->top();
 543       for (uint i = 1; i < mem->req(); i++) {
 544         Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
 545         if (n == nullptr || n == top || n == mem) {
 546           continue;
 547         } else if (unique_input == nullptr) {
 548           unique_input = n;
 549         } else if (unique_input != n) {
 550           unique_input = top;
 551           break;
 552         }
 553       }
 554       if (unique_input != nullptr && unique_input != top) {
 555         mem = unique_input;
 556       } else {
 557         done = true;
 558       }
 559     } else if (mem->is_ArrayCopy()) {
 560       done = true;
 561     } else if (mem->is_top()) {
 562       // The slice is on a dead path. Returning nullptr would lead to an elimination
 563       // bailout, but we want to prevent that. Just forwarding the top is also legal,
 564       // and IGVN will clean things up and remove whatever receives top.
 565       return mem;
 566     } else {
 567       DEBUG_ONLY( mem->dump(); )
 568       assert(false, "unexpected node");
 569     }
 570   }
 571   if (mem != nullptr) {
 572     if (mem == start_mem || mem == alloc_mem) {
 573       // hit a sentinel, return appropriate 0 value
 574       return _igvn.zerocon(ft);
 575     } else if (mem->is_Store()) {
 576       Node* n = mem->in(MemNode::ValueIn);
 577       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 578       n = bs->step_over_gc_barrier(n);
 579       return n;
 580     } else if (mem->is_Phi()) {
 581       // attempt to produce a Phi reflecting the values on the input paths of the Phi
 582       Node_Stack value_phis(8);
 583       Node* phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
 584       if (phi != nullptr) {
 585         return phi;
 586       } else {
 587         // Kill all new Phis
 588         while (value_phis.is_nonempty()) {
 589           Node* n = value_phis.node();
 590           _igvn.replace_node(n, C->top());
 591           value_phis.pop();
 592         }
 593       }
 594     } else if (mem->is_ArrayCopy()) {
 595       // Rematerialize the scalar-replaced array. If possible, pin the loads to the uncommon path of the uncommon trap.
 596       // Check for each element of the source array, whether it was modified. If not, pin both memory and control to
 597       // the uncommon path. Otherwise, use the control and memory state of the arraycopy. Control and memory state must
 598       // come from the same source to prevent anti-dependence problems in the backend.
 599       ArrayCopyNode* ac = mem->as_ArrayCopy();
 600       Node* ac_ctl = ac->control();
 601       Node* ac_mem = ac->memory();
 602       if (ctl->is_Proj() && ctl->as_Proj()->is_uncommon_trap_proj()) {
 603         // pin the loads in the uncommon trap path
 604         ac_ctl = ctl;
 605         ac_mem = orig_mem;
 606       }
 607       return make_arraycopy_load(ac, offset, ac_ctl, ac_mem, ft, ftype, alloc);
 608     }
 609   }
 610   // Something went wrong.
 611   return nullptr;
 612 }
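// Typical call site (see create_scalarized_object_description() below):
//   Node* field_val = value_from_mem(sfpt, sfpt->control(), basic_elem_type,
//                                    field_type, field_addr_type, alloc);
// A nullptr result means the field's value could not be reconstructed and the
// allocation cannot be scalar-replaced at that safepoint.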
 613 
 614 // Check the possibility of scalar replacement.
 615 bool PhaseMacroExpand::can_eliminate_allocation(PhaseIterGVN* igvn, AllocateNode *alloc, GrowableArray <SafePointNode *>* safepoints) {
 616   //  Scan the uses of the allocation to check for anything that would
 617   //  prevent us from eliminating it.
 618   NOT_PRODUCT( const char* fail_eliminate = nullptr; )
 619   DEBUG_ONLY( Node* disq_node = nullptr; )
 620   bool can_eliminate = true;
 621   bool reduce_merge_precheck = (safepoints == nullptr);
 622 
 623   Node* res = alloc->result_cast();
 624   const TypeOopPtr* res_type = nullptr;
 625   if (res == nullptr) {
 626     // All users were eliminated.
 627   } else if (!res->is_CheckCastPP()) {
 628     NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
 629     can_eliminate = false;
 630   } else {
 631     res_type = igvn->type(res)->isa_oopptr();
 632     if (res_type == nullptr) {
 633       NOT_PRODUCT(fail_eliminate = "Neither instance nor array allocation";)
 634       can_eliminate = false;
 635     } else if (!res_type->klass_is_exact()) {
 636       NOT_PRODUCT(fail_eliminate = "Not an exact type.";)
 637       can_eliminate = false;
 638     } else if (res_type->isa_aryptr()) {
 639       int length = alloc->in(AllocateNode::ALength)->find_int_con(-1);
 640       if (length < 0) {
 641         NOT_PRODUCT(fail_eliminate = "Array's size is not constant";)
 642         can_eliminate = false;
 643       }
 644     }
 645   }
 646 
 647   if (can_eliminate && res != nullptr) {
 648     BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
 649     for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
 650                                j < jmax && can_eliminate; j++) {
 651       Node* use = res->fast_out(j);
 652 
 653       if (use->is_AddP()) {
 654         const TypePtr* addp_type = igvn->type(use)->is_ptr();
 655         int offset = addp_type->offset();
 656 
 657         if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
 658           NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
 659           can_eliminate = false;
 660           break;
 661         }
 662         for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
 663                                    k < kmax && can_eliminate; k++) {
 664           Node* n = use->fast_out(k);
 665           if (n->is_Mem() && n->as_Mem()->is_mismatched_access()) {
 666             DEBUG_ONLY(disq_node = n);
 667             NOT_PRODUCT(fail_eliminate = "Mismatched access");
 668             can_eliminate = false;
 669           }
 670           if (!n->is_Store() && n->Opcode() != Op_CastP2X && !bs->is_gc_pre_barrier_node(n) && !reduce_merge_precheck) {
 671             DEBUG_ONLY(disq_node = n;)
 672             if (n->is_Load() || n->is_LoadStore()) {
 673               NOT_PRODUCT(fail_eliminate = "Field load";)
 674             } else {
 675               NOT_PRODUCT(fail_eliminate = "Non-store field reference";)
 676             }
 677             can_eliminate = false;
 678           }
 679         }
 680       } else if (use->is_ArrayCopy() &&
 681                  (use->as_ArrayCopy()->is_clonebasic() ||
 682                   use->as_ArrayCopy()->is_arraycopy_validated() ||
 683                   use->as_ArrayCopy()->is_copyof_validated() ||
 684                   use->as_ArrayCopy()->is_copyofrange_validated()) &&
 685                  use->in(ArrayCopyNode::Dest) == res) {
 686         // ok to eliminate
 687       } else if (use->is_ReachabilityFence() && OptimizeReachabilityFences) {
 688         // ok to eliminate
 689       } else if (use->is_SafePoint()) {
 690         SafePointNode* sfpt = use->as_SafePoint();
 691         if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
 692           // Object is passed as argument.
 693           DEBUG_ONLY(disq_node = use;)
 694           NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
 695           can_eliminate = false;
 696         }
 697         Node* sfptMem = sfpt->memory();
 698         if (sfptMem == nullptr || sfptMem->is_top()) {
 699           DEBUG_ONLY(disq_node = use;)
 700           NOT_PRODUCT(fail_eliminate = "null or TOP memory";)
 701           can_eliminate = false;
 702         } else if (!reduce_merge_precheck) {
 703           safepoints->append_if_missing(sfpt);
 704         }
 705       } else if (reduce_merge_precheck &&
 706                  (use->is_Phi() || use->is_EncodeP() ||
 707                   use->Opcode() == Op_MemBarRelease ||
 708                   (UseStoreStoreForCtor && use->Opcode() == Op_MemBarStoreStore))) {
 709         // Nothing to do
 710       } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
 711         if (use->is_Phi()) {
 712           if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
 713             NOT_PRODUCT(fail_eliminate = "Object is return value";)
 714           } else {
 715             NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
 716           }
 717           DEBUG_ONLY(disq_node = use;)
 718         } else {
 719           if (use->Opcode() == Op_Return) {
 720             NOT_PRODUCT(fail_eliminate = "Object is return value";)
 721           } else {
 722             NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
 723           }
 724           DEBUG_ONLY(disq_node = use;)
 725         }
 726         can_eliminate = false;
 727       }
 728     }
 729   }
 730 
 731 #ifndef PRODUCT
 732   if (PrintEliminateAllocations && safepoints != nullptr) {
 733     if (can_eliminate) {
 734       tty->print("Scalar ");
 735       if (res == nullptr)
 736         alloc->dump();
 737       else
 738         res->dump();
 739     } else if (alloc->_is_scalar_replaceable) {
 740       tty->print("NotScalar (%s)", fail_eliminate);
 741       if (res == nullptr)
 742         alloc->dump();
 743       else
 744         res->dump();
 745 #ifdef ASSERT
 746       if (disq_node != nullptr) {
 747           tty->print("  >>>> ");
 748           disq_node->dump();
 749       }
 750 #endif /*ASSERT*/
 751     }
 752   }
 753 
 754   if (TraceReduceAllocationMerges && !can_eliminate && reduce_merge_precheck) {
 755     tty->print_cr("\tCan't eliminate allocation because '%s': ", fail_eliminate != nullptr ? fail_eliminate : "");
 756     DEBUG_ONLY(if (disq_node != nullptr) disq_node->dump();)
 757   }
 758 #endif
 759   return can_eliminate;
 833     // CheckCastPP result was not updated in the stack slot, and so
 834     // we ended up using the CastPP. That means that the field knows
 835     // that it should get an oop from an interface, but the value lost
 836     // that information, and so it is not a subtype.
 837     // There may be other issues, feel free to investigate further!
 838     if (!is_java_primitive(value_bt)) { return; }
 839 
 840     tty->print_cr("value not compatible for field: %s vs %s",
 841                   type2name(value_bt),
 842                   type2name(field_bt));
 843     tty->print("value_type: ");
 844     value_type->dump();
 845     tty->cr();
 846     tty->print("field_type: ");
 847     field_type->dump();
 848     tty->cr();
 849     assert(false, "value_type does not fit field_type");
 850   }
 851 #endif
 852 
 853 SafePointScalarObjectNode* PhaseMacroExpand::create_scalarized_object_description(AllocateNode *alloc, SafePointNode* sfpt) {
 854   assert(sfpt->jvms()->endoff() == sfpt->req(), "no extra edges past debug info allowed");
 855 
 856   // Fields of scalar objs are referenced only at the end
 857   // of regular debuginfo at the last (youngest) JVMS.
 858   // Record relative start index.
 859   ciInstanceKlass* iklass    = nullptr;
 860   BasicType basic_elem_type  = T_ILLEGAL;
 861   const Type* field_type     = nullptr;
 862   const TypeOopPtr* res_type = nullptr;
 863   int nfields                = 0;
 864   int array_base             = 0;
 865   int element_size           = 0;
 866   uint first_ind             = (sfpt->req() - sfpt->jvms()->scloff());
 867   Node* res                  = alloc->result_cast();
 868 
 869   assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");
 870   assert(sfpt->jvms() != nullptr, "missed JVMS");
 871 
 872   if (res != nullptr) { // Could be null when there are no users
 873     res_type = _igvn.type(res)->isa_oopptr();
 874 
 875     if (res_type->isa_instptr()) {
 876       // find the fields of the class which will be needed for safepoint debug information
 877       iklass = res_type->is_instptr()->instance_klass();
 878       nfields = iklass->nof_nonstatic_fields();
 879     } else {
 880       // find the array's elements which will be needed for safepoint debug information
 881       nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
 882       assert(nfields >= 0, "must be an array klass.");
 883       basic_elem_type = res_type->is_aryptr()->elem()->array_element_basic_type();
 884       array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
 885       element_size = type2aelembytes(basic_elem_type);
 886       field_type = res_type->is_aryptr()->elem();
 887     }
 888   }
 889 
 890   SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(res_type, alloc, first_ind, sfpt->jvms()->depth(), nfields);
 891   sobj->init_req(0, C->root());
 892   transform_later(sobj);
 893 
 894   // Scan object's fields adding an input to the safepoint for each field.
 895   for (int j = 0; j < nfields; j++) {
 896     intptr_t offset;
 897     ciField* field = nullptr;
 898     if (iklass != nullptr) {
 899       field = iklass->nonstatic_field_at(j);
 900       offset = field->offset_in_bytes();
 901       ciType* elem_type = field->type();
 902       basic_elem_type = field->layout_type();
 903 
 904       // The following code is taken from Parse::do_get_xxx().
 905       if (is_reference_type(basic_elem_type)) {
 906         if (!elem_type->is_loaded()) {
 907           field_type = TypeInstPtr::BOTTOM;
 908         } else if (field != nullptr && field->is_static_constant()) {
 909           ciObject* con = field->constant_value().as_object();
 910           // Do not "join" in the previous type; it doesn't add value,
 911           // and may yield a vacuous result if the field is of interface type.
 912           field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
 913           assert(field_type != nullptr, "field singleton type must be consistent");
 914         } else {
 915           field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
 916         }
 917         if (UseCompressedOops) {
 918           field_type = field_type->make_narrowoop();
 919           basic_elem_type = T_NARROWOOP;
 920         }
 921       } else {
 922         field_type = Type::get_const_basic_type(basic_elem_type);
 923       }
 924     } else {
 925       offset = array_base + j * (intptr_t)element_size;
 926     }
 927 
 928     const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr();
 929 
 930     Node* field_val = value_from_mem(sfpt, sfpt->control(), basic_elem_type, field_type, field_addr_type, alloc);
 931 
 932     // We weren't able to find a value for this field,
 933     // so give up on eliminating this allocation.
 934     if (field_val == nullptr) {
 935       uint last = sfpt->req() - 1;
 936       for (int k = 0;  k < j; k++) {
 937         sfpt->del_req(last--);
 938       }
 939       _igvn._worklist.push(sfpt);
 940 
 941 #ifndef PRODUCT
 942       if (PrintEliminateAllocations) {
 943         if (field != nullptr) {
 944           tty->print("=== At SafePoint node %d can't find value of field: ", sfpt->_idx);
 945           field->print();
 946           int field_idx = C->get_alias_index(field_addr_type);
 947           tty->print(" (alias_idx=%d)", field_idx);
 948         } else { // Array's element
 949           tty->print("=== At SafePoint node %d can't find value of array element [%d]", sfpt->_idx, j);
 950         }
 951         tty->print(", which prevents elimination of: ");
 952         if (res == nullptr)
 953           alloc->dump();
 954         else
 955           res->dump();
 956       }
 957 #endif
 958 
 959       return nullptr;
 960     }
 961 
 962     if (UseCompressedOops && field_type->isa_narrowoop()) {
 963       // Enable the "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
 964       // to be able to scalar-replace the allocation.
 965       if (field_val->is_EncodeP()) {
 966         field_val = field_val->in(1);
 967       } else {
 968         field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
 969       }
 970     }
 971     DEBUG_ONLY(verify_type_compatability(field_val->bottom_type(), field_type);)
 972     sfpt->add_req(field_val);
 973   }
 974 
 975   sfpt->jvms()->set_endoff(sfpt->req());
 976 
 977   return sobj;
 978 }
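// Illustrative outcome (hedged example): for an eliminated 'new Point(1, 2)'
// with two int fields, the safepoint gains a SafePointScalarObjectNode
// describing the object plus one appended input per field (here ConI(1) and
// ConI(2)), and jvms()->endoff() is advanced past the appended values.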
 979 
 980 // Do scalar replacement.
 981 bool PhaseMacroExpand::scalar_replacement(AllocateNode* alloc, GrowableArray<SafePointNode*>& safepoints) {
 982   GrowableArray<SafePointNode*> safepoints_done;
 983   Node* res = alloc->result_cast();
 984   assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");
 985 
 986   // Process the safepoint uses
 987   while (safepoints.length() > 0) {
 988     SafePointNode* sfpt = safepoints.pop();
 989 
 990     SafePointNode::NodeEdgeTempStorage non_debug_edges_worklist(igvn());
 991 
 992     // All sfpt inputs are implicitly included into debug info during the scalarization process below.
 993     // Keep non-debug inputs separately, so they stay non-debug.
 994     sfpt->remove_non_debug_edges(non_debug_edges_worklist);
 995 
 996     SafePointScalarObjectNode* sobj = create_scalarized_object_description(alloc, sfpt);
 997 
 998     if (sobj == nullptr) {
 999       sfpt->restore_non_debug_edges(non_debug_edges_worklist);
1000       undo_previous_scalarizations(safepoints_done, alloc);
1001       return false;
1002     }
1003 
1004     // Now make a pass over the debug information replacing any references
1005     // to the allocated object with "sobj"
1006     JVMState *jvms = sfpt->jvms();
1007     sfpt->replace_edges_in_range(res, sobj, jvms->debug_start(), jvms->debug_end(), &_igvn);
1008     non_debug_edges_worklist.remove_edge_if_present(res); // drop scalarized input from non-debug info
1009     sfpt->restore_non_debug_edges(non_debug_edges_worklist);
1010     _igvn._worklist.push(sfpt);
1011 
1012     // keep it for rollback
1013     safepoints_done.append_if_missing(sfpt);
1014   }
1015 
1016   return true;
1017 }
1018 
1019 static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
1020   Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
1021   Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory);
1022   if (ctl_proj != nullptr) {
1023     igvn.replace_node(ctl_proj, n->in(0));
1024   }
1025   if (mem_proj != nullptr) {
1026     igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
1027   }
1028 }
1029 
1030 // Process users of eliminated allocation.
1031 void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
1032   Node* res = alloc->result_cast();
1033   if (res != nullptr) {
1034     for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
1035       Node *use = res->last_out(j);
1036       uint oc1 = res->outcnt();
1037 
1038       if (use->is_AddP()) {
1039         for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
1040           Node *n = use->last_out(k);
1041           uint oc2 = use->outcnt();
1042           if (n->is_Store()) {
1043 #ifdef ASSERT
1044             // Verify that there are no dependent MemBarVolatile nodes,
1045             // they should be removed during IGVN, see MemBarNode::Ideal().
1046             for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
1047                                        p < pmax; p++) {
1048               Node* mb = n->fast_out(p);
1049               assert(mb->is_Initialize() || !mb->is_MemBar() ||
1050                      mb->req() <= MemBarNode::Precedent ||
1051                      mb->in(MemBarNode::Precedent) != n,
1052                      "MemBarVolatile should be eliminated for non-escaping object");
1053             }
1054 #endif
1055             _igvn.replace_node(n, n->in(MemNode::Memory));
1056           } else {
1057             eliminate_gc_barrier(n);
1058           }
1059           k -= (oc2 - use->outcnt());
1060         }
1061         _igvn.remove_dead_node(use, PhaseIterGVN::NodeOrigin::Graph);
1062       } else if (use->is_ArrayCopy()) {
1063         // Disconnect ArrayCopy node
1064         ArrayCopyNode* ac = use->as_ArrayCopy();
1065         if (ac->is_clonebasic()) {
1066           Node* membar_after = ac->proj_out(TypeFunc::Control)->unique_ctrl_out();
1067           disconnect_projections(ac, _igvn);
1068           assert(alloc->in(TypeFunc::Memory)->is_Proj() && alloc->in(TypeFunc::Memory)->in(0)->Opcode() == Op_MemBarCPUOrder, "mem barrier expected before allocation");
1069           Node* membar_before = alloc->in(TypeFunc::Memory)->in(0);
1070           disconnect_projections(membar_before->as_MemBar(), _igvn);
1071           if (membar_after->is_MemBar()) {
1072             disconnect_projections(membar_after->as_MemBar(), _igvn);
1073           }
1074         } else {
1075           assert(ac->is_arraycopy_validated() ||
1076                  ac->is_copyof_validated() ||
1077                  ac->is_copyofrange_validated(), "unsupported");
1078           CallProjections callprojs;
1079           ac->extract_projections(&callprojs, true);
1080 
1081           _igvn.replace_node(callprojs.fallthrough_ioproj, ac->in(TypeFunc::I_O));
1082           _igvn.replace_node(callprojs.fallthrough_memproj, ac->in(TypeFunc::Memory));
1083           _igvn.replace_node(callprojs.fallthrough_catchproj, ac->in(TypeFunc::Control));
1084 
1085           // Set control to top. IGVN will remove the remaining projections
1086           ac->set_req(0, top());
1087           ac->replace_edge(res, top(), &_igvn);
1088 
1089           // Disconnect src right away: it can help find new
1090           // opportunities for allocation elimination
1091           Node* src = ac->in(ArrayCopyNode::Src);
1092           ac->replace_edge(src, top(), &_igvn);
1093           // src can be top at this point if src and dest of the
1094           // arraycopy were the same
1095           if (src->outcnt() == 0 && !src->is_top()) {
1096             _igvn.remove_dead_node(src, PhaseIterGVN::NodeOrigin::Graph);
1097           }
1098         }
1099         _igvn._worklist.push(ac);
1100       } else if (use->is_ReachabilityFence() && OptimizeReachabilityFences) {
1101         use->as_ReachabilityFence()->clear_referent(_igvn); // redundant fence; will be removed during IGVN
1102       } else {
1103         eliminate_gc_barrier(use);
1104       }
1105       j -= (oc1 - res->outcnt());
1106     }
1107     assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
1108     _igvn.remove_dead_node(res, PhaseIterGVN::NodeOrigin::Graph);
1109   }
1110 
1111   //
1112   // Process other users of allocation's projections
1113   //
1114   if (_callprojs.resproj != nullptr && _callprojs.resproj->outcnt() != 0) {
1115     // First disconnect stores captured by Initialize node.
1116     // If Initialize node is eliminated first in the following code,
1117     // it will kill such stores and DUIterator_Last will assert.
1118     for (DUIterator_Fast jmax, j = _callprojs.resproj->fast_outs(jmax);  j < jmax; j++) {
1119       Node* use = _callprojs.resproj->fast_out(j);
1120       if (use->is_AddP()) {
1121         // raw memory addresses used only by the initialization
1122         _igvn.replace_node(use, C->top());
1123         --j; --jmax;
1124       }
1125     }
1126     for (DUIterator_Last jmin, j = _callprojs.resproj->last_outs(jmin); j >= jmin; ) {
1127       Node* use = _callprojs.resproj->last_out(j);
1128       uint oc1 = _callprojs.resproj->outcnt();
1129       if (use->is_Initialize()) {
1130         // Eliminate Initialize node.
1131         InitializeNode *init = use->as_Initialize();
1132         Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
1133         if (ctrl_proj != nullptr) {
1134           _igvn.replace_node(ctrl_proj, init->in(TypeFunc::Control));
1135 #ifdef ASSERT
1136           // If the InitializeNode has no memory out, it will die, and tmp will become null
1137           Node* tmp = init->in(TypeFunc::Control);
1138           assert(tmp == nullptr || tmp == _callprojs.fallthrough_catchproj, "allocation control projection");
1139 #endif
1140         }
1141         Node* mem = init->in(TypeFunc::Memory);
1142 #ifdef ASSERT
1143         if (init->number_of_projs(TypeFunc::Memory) > 0) {
1144           if (mem->is_MergeMem()) {
1145             assert(mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == _callprojs.fallthrough_memproj, "allocation memory projection");
1146           } else {
1147             assert(mem == _callprojs.fallthrough_memproj, "allocation memory projection");
1148           }
1149         }
1150 #endif
1151         init->replace_mem_projs_by(mem, &_igvn);
1152         assert(init->outcnt() == 0, "should only have had a control and some memory projections, and we removed them");
1153       } else  {
1154         assert(false, "only Initialize or AddP expected");
1155       }
1156       j -= (oc1 - _callprojs.resproj->outcnt());
1157     }
1158   }
1159   if (_callprojs.fallthrough_catchproj != nullptr) {
1160     _igvn.replace_node(_callprojs.fallthrough_catchproj, alloc->in(TypeFunc::Control));
1161   }
1162   if (_callprojs.fallthrough_memproj != nullptr) {
1163     _igvn.replace_node(_callprojs.fallthrough_memproj, alloc->in(TypeFunc::Memory));
1164   }
1165   if (_callprojs.catchall_memproj != nullptr) {
1166     _igvn.replace_node(_callprojs.catchall_memproj, C->top());
1167   }
1168   if (_callprojs.fallthrough_ioproj != nullptr) {
1169     _igvn.replace_node(_callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
1170   }
1171   if (_callprojs.catchall_ioproj != nullptr) {
1172     _igvn.replace_node(_callprojs.catchall_ioproj, C->top());
1173   }
1174   if (_callprojs.catchall_catchproj != nullptr) {
1175     _igvn.replace_node(_callprojs.catchall_catchproj, C->top());
1176   }
1177 }
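// Net effect of the code above: the allocation's fallthrough control, memory
// and I/O projections are short-circuited to the allocation's own inputs,
// while the exceptional (catchall) projections become top, so the whole call
// disappears from the graph.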
1178 
1179 bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
1180   // If reallocation fails during deoptimization we'll pop all
1181   // interpreter frames for this compiled frame and that won't play
1182   // nice with JVMTI popframe.
1183   // We avoid this issue by eager reallocation when the popframe request
1184   // is received.
1185   if (!EliminateAllocations || !alloc->_is_non_escaping) {
1186     return false;
1187   }
1188   Node* klass = alloc->in(AllocateNode::KlassNode);
1189   const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
1190   Node* res = alloc->result_cast();
1191   // Eliminate boxing allocations which are not used,
1192   // regardless of their scalar-replaceable status.
1193   bool boxing_alloc = C->eliminate_boxing() &&
1194                       tklass->isa_instklassptr() &&
1195                       tklass->is_instklassptr()->instance_klass()->is_box_klass();
1196   if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != nullptr))) {
1197     return false;
1198   }
1199 
1200   alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
1201 
1202   GrowableArray <SafePointNode *> safepoints;
1203   if (!can_eliminate_allocation(&_igvn, alloc, &safepoints)) {
1204     return false;
1205   }
1206 
1207   if (!alloc->_is_scalar_replaceable) {
1208     assert(res == nullptr, "sanity");
1209     // We can only eliminate the allocation if all debug info references
1210     // are already replaced with SafePointScalarObject because
1211     // we can't search for a field's value without an instance_id.
1212     if (safepoints.length() > 0) {
1213       return false;
1214     }
1215   }
1216 
1217   if (!scalar_replacement(alloc, safepoints)) {
1218     return false;
1219   }
1220 
1221   CompileLog* log = C->log();
1222   if (log != nullptr) {
1223     log->head("eliminate_allocation type='%d'",
1224               log->identify(tklass->exact_klass()));
1225     JVMState* p = alloc->jvms();
1226     while (p != nullptr) {
1227       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1228       p = p->caller();
1229     }
1230     log->tail("eliminate_allocation");
1231   }
1232 
1233   process_users_of_allocation(alloc);
1234 
1235 #ifndef PRODUCT
1236   if (PrintEliminateAllocations) {
1237     if (alloc->is_AllocateArray())
1238       tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
1239     else
1240       tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
1241   }
1242 #endif
1243 
1244   return true;
1245 }
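// Illustrative CompileLog fragment emitted above (element ids are
// compilation-local; the values shown are placeholders):
//   <eliminate_allocation type='927'>
//     <jvms bci='11' method='813'/>
//   </eliminate_allocation>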
1246 
1247 bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
1248   // EA should remove all uses of non-escaping boxing node.
1249   if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != nullptr) {
1250     return false;
1251   }
1252 
1253   assert(boxing->result_cast() == nullptr, "unexpected boxing node result");
1254 
1255   boxing->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
1256 
1257   const TypeTuple* r = boxing->tf()->range();
1258   assert(r->cnt() > TypeFunc::Parms, "sanity");
1259   const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
1260   assert(t != nullptr, "sanity");
1261 
1262   CompileLog* log = C->log();
1263   if (log != nullptr) {
1264     log->head("eliminate_boxing type='%d'",
1265               log->identify(t->instance_klass()));
1266     JVMState* p = boxing->jvms();
1267     while (p != nullptr) {
1268       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1269       p = p->caller();
1270     }
1271     log->tail("eliminate_boxing");
1272   }
1273 
1274   process_users_of_allocation(boxing);
1275 
1276 #ifndef PRODUCT
1277   if (PrintEliminateAllocations) {
1341 // oop flavor.
1342 //
1343 //=============================================================================
1344 // FastAllocateSizeLimit value is in DOUBLEWORDS.
1345 // Allocations bigger than this always go the slow route.
1346 // This value must be small enough that allocation attempts that need to
1347 // trigger exceptions go the slow route.  Also, it must be small enough so
1348 // that heap_top + size_in_bytes does not wrap around the 4Gig limit.
1349 //=============================================================================
1350 // %%% Here is an old comment from parseHelper.cpp; is it outdated?
1351 // The allocator will coalesce int->oop copies away.  See comment in
1352 // coalesce.cpp about how this works.  It depends critically on the exact
1353 // code shape produced here, so if you are changing this code shape
1354 // make sure the GC info for the heap-top is correct in and around the
1355 // slow-path call.
1356 //
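// Shape produced below (illustrative sketch):
//
//                  ctrl
//                /      \
//        fast path      slow_region
//   (inline allocation       |
//    + optional prefetch)    CallStaticJava(slow_call_address)
//                \      /
//             result_region
//   (result_phi_rawoop / result_phi_rawmem / result_phi_i_o)
//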
1357 
1358 void PhaseMacroExpand::expand_allocate_common(
1359             AllocateNode* alloc, // allocation node to be expanded
1360             Node* length,  // array length for an array allocation
1361             const TypeFunc* slow_call_type, // Type of slow call
1362             address slow_call_address,  // Address of slow call
1363             Node* valid_length_test // whether length is valid or not
1364     )
1365 {
1366   Node* ctrl = alloc->in(TypeFunc::Control);
1367   Node* mem  = alloc->in(TypeFunc::Memory);
1368   Node* i_o  = alloc->in(TypeFunc::I_O);
1369   Node* size_in_bytes     = alloc->in(AllocateNode::AllocSize);
1370   Node* klass_node        = alloc->in(AllocateNode::KlassNode);
1371   Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);
1372   assert(ctrl != nullptr, "must have control");
1373 
1374   // We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
1375   // They will not be used if "always_slow" is set.
1376   enum { slow_result_path = 1, fast_result_path = 2 };
1377   Node *result_region = nullptr;
1378   Node *result_phi_rawmem = nullptr;
1379   Node *result_phi_rawoop = nullptr;
1380   Node *result_phi_i_o = nullptr;
1425 #endif
1426       yank_alloc_node(alloc);
1427       return;
1428     }
1429   }
1430 
1431   enum { too_big_or_final_path = 1, need_gc_path = 2 };
1432   Node *slow_region = nullptr;
1433   Node *toobig_false = ctrl;
1434 
1435   // generate the initial test if necessary
1436   if (initial_slow_test != nullptr ) {
1437     assert (expand_fast_path, "Only need test if there is a fast path");
1438     slow_region = new RegionNode(3);
1439 
1440     // Now make the initial failure test.  Usually a too-big test but
1441     // might be a TRUE for finalizers.
1442     IfNode *toobig_iff = new IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
1443     transform_later(toobig_iff);
1444     // Plug the failing-too-big test into the slow-path region
1445     Node *toobig_true = new IfTrueNode( toobig_iff );
1446     transform_later(toobig_true);
1447     slow_region    ->init_req( too_big_or_final_path, toobig_true );
1448     toobig_false = new IfFalseNode( toobig_iff );
1449     transform_later(toobig_false);
1450   } else {
1451     // No initial test, just fall into next case
1452     assert(allocation_has_use || !expand_fast_path, "Should already have been handled");
1453     toobig_false = ctrl;
1454     DEBUG_ONLY(slow_region = NodeSentinel);
1455   }
1456 
1457   // If we are here, there are several possibilities
1458   // - expand_fast_path is false - then only a slow path is expanded. That's it.
1459   // no_initial_check means a constant allocation.
1460   // - If the check always evaluates to false -> expand_fast_path is false (see above).
1461   // - If the check always evaluates to true -> go directly into the fast path (but it may bail out to the slow path).
1462   // if !allocation_has_use the fast path is empty
1463   // if !allocation_has_use && no_initial_check
1464   // - Then there is no fast path that can fall out to the slow path -> no allocation code at all,
1465   //   removed by yank_alloc_node above.
1466 
1467   Node *slow_mem = mem;  // save the current memory state for slow path
1468   // generate the fast allocation code unless we know that the initial test will always go slow
1469   if (expand_fast_path) {
1470     // Fast path modifies only raw memory.
1471     if (mem->is_MergeMem()) {
1472       mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1473     }
1474 
1475     // allocate the Region and Phi nodes for the result
1476     result_region = new RegionNode(3);
1477     result_phi_rawmem = new PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
1478     result_phi_i_o    = new PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch
1479 
1480     // Grab regular I/O before optional prefetch may change it.
1481     // Slow-path does no I/O so just set it to the original I/O.
1482     result_phi_i_o->init_req(slow_result_path, i_o);
1483 
1484     // Name successful fast-path variables
1485     Node* fast_oop_ctrl;
1486     Node* fast_oop_rawmem;

1487     if (allocation_has_use) {
1488       Node* needgc_ctrl = nullptr;
1489       result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);
1490 
1491       intx prefetch_lines = length != nullptr ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
1492       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
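           // The GC's barrier set emits the inline TLAB/eden allocation here
           // (including prefetch set-up): it returns the new raw oop, fills in
           // fast_oop_ctrl/fast_oop_rawmem, and sets needgc_ctrl to the control
           // path that must fall back to the slow call.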
1493       Node* fast_oop = bs->obj_allocate(this, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl,
1494                                         fast_oop_ctrl, fast_oop_rawmem,
1495                                         prefetch_lines);
1496 
1497       if (initial_slow_test != nullptr) {
1498         // This completes all paths into the slow merge point
1499         slow_region->init_req(need_gc_path, needgc_ctrl);
1500         transform_later(slow_region);
1501       } else {
1502         // No initial slow path needed!
1503         // Just fall from the need-GC path straight into the VM call.
1504         slow_region = needgc_ctrl;
1505       }
1506 

1524     result_phi_i_o   ->init_req(fast_result_path, i_o);
1525     result_phi_rawmem->init_req(fast_result_path, fast_oop_rawmem);
1526   } else {
1527     slow_region = ctrl;
1528     result_phi_i_o = i_o; // Rename it to use in the following code.
1529   }
1530 
1531   // Generate slow-path call
1532   CallNode *call = new CallStaticJavaNode(slow_call_type, slow_call_address,
1533                                OptoRuntime::stub_name(slow_call_address),
1534                                TypePtr::BOTTOM);
1535   call->init_req(TypeFunc::Control,   slow_region);
1536   call->init_req(TypeFunc::I_O,       top());    // does no i/o
1537   call->init_req(TypeFunc::Memory,    slow_mem); // may gc ptrs
1538   call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
1539   call->init_req(TypeFunc::FramePtr,  alloc->in(TypeFunc::FramePtr));
1540 
1541   call->init_req(TypeFunc::Parms+0, klass_node);
1542   if (length != nullptr) {
1543     call->init_req(TypeFunc::Parms+1, length);



1544   }
1545 
1546   // Copy debug information and adjust JVMState information, then replace
1547   // allocate node with the call
1548   call->copy_call_debug_info(&_igvn, alloc);
1549   // For array allocations, copy the valid length check to the call node so Compile::final_graph_reshaping() can verify
1550   // that the call has the expected number of CatchProj nodes (in case the allocation always fails and the fallthrough
1551   // path dies).
1552   if (valid_length_test != nullptr) {
1553     call->add_req(valid_length_test);
1554   }
1555   if (expand_fast_path) {
1556     call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
1557   } else {
1558     // Hook i_o projection to avoid its elimination during allocation
1559     // replacement (when only a slow call is generated).
1560     call->set_req(TypeFunc::I_O, result_phi_i_o);
1561   }
1562   _igvn.replace_node(alloc, call);
1563   transform_later(call);
1564 
1565   // Identify the output projections from the allocate node and
1566   // adjust any references to them.
1567   // The control and io projections look like:
1568   //
1569   //        v---Proj(ctrl) <-----+   v---CatchProj(ctrl)
1570   //  Allocate                   Catch
1571   //        ^---Proj(io) <-------+   ^---CatchProj(io)
1572   //
1573   //  We are interested in the CatchProj nodes.
1574   //
1575   call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
1576 
1577   // An allocate node has separate memory projections for the uses on
1578   // the control and i_o paths. Replace the control memory projection with
1579   // result_phi_rawmem (unless we are only generating a slow call when
1580   // both memory projections are combined)
1581   if (expand_fast_path && _callprojs.fallthrough_memproj != nullptr) {
1582     migrate_outs(_callprojs.fallthrough_memproj, result_phi_rawmem);
1583   }
1584   // Now change uses of catchall_memproj to use fallthrough_memproj and delete
1585   // catchall_memproj so we end up with a call that has only 1 memory projection.
1586   if (_callprojs.catchall_memproj != nullptr ) {
1587     if (_callprojs.fallthrough_memproj == nullptr) {
1588       _callprojs.fallthrough_memproj = new ProjNode(call, TypeFunc::Memory);
1589       transform_later(_callprojs.fallthrough_memproj);
1590     }
1591     migrate_outs(_callprojs.catchall_memproj, _callprojs.fallthrough_memproj);
1592     _igvn.remove_dead_node(_callprojs.catchall_memproj, PhaseIterGVN::NodeOrigin::Graph);
1593   }
1594 
1595   // An allocate node has separate i_o projections for the uses on the control
1596   // and i_o paths. Always replace the control i_o projection with the result i_o;
1597   // otherwise the incoming i_o becomes dead when only a slow call is generated
1598   // (this differs from the memory projections, where both projections are
1599   // combined in that case).
1600   if (_callprojs.fallthrough_ioproj != nullptr) {
1601     migrate_outs(_callprojs.fallthrough_ioproj, result_phi_i_o);
1602   }
1603   // Now change uses of catchall_ioproj to use fallthrough_ioproj and delete
1604   // catchall_ioproj so we end up with a call that has only 1 i_o projection.
1605   if (_callprojs.catchall_ioproj != nullptr ) {
1606     if (_callprojs.fallthrough_ioproj == nullptr) {
1607       _callprojs.fallthrough_ioproj = new ProjNode(call, TypeFunc::I_O);
1608       transform_later(_callprojs.fallthrough_ioproj);
1609     }
1610     migrate_outs(_callprojs.catchall_ioproj, _callprojs.fallthrough_ioproj);
1611     _igvn.remove_dead_node(_callprojs.catchall_ioproj, PhaseIterGVN::NodeOrigin::Graph);
1612   }
1613 
1614   // If we generated only a slow call, we are done.
1615   if (!expand_fast_path) {
1616     // Now we can unhook i_o.
1617     if (result_phi_i_o->outcnt() > 1) {
1618       call->set_req(TypeFunc::I_O, top());
1619     } else {
1620       assert(result_phi_i_o->unique_ctrl_out() == call, "sanity");
1621       // Case of a new array whose negative size is known at compile time.
1622       // The AllocateArrayNode::Ideal() optimization disconnects the unreachable
1623       // following code since the runtime call will throw an exception.
1624       // As a result there will be no users of i_o after the call.
1625       // Leave i_o attached to this call to avoid problems in the preceding graph.
1626     }
1627     return;
1628   }
1629 
1630   if (_callprojs.fallthrough_catchproj != nullptr) {
1631     ctrl = _callprojs.fallthrough_catchproj->clone();
1632     transform_later(ctrl);
1633     _igvn.replace_node(_callprojs.fallthrough_catchproj, result_region);
1634   } else {
1635     ctrl = top();
1636   }
1637   Node *slow_result;
1638   if (_callprojs.resproj == nullptr) {
1639     // no uses of the allocation result
1640     slow_result = top();
1641   } else {
1642     slow_result = _callprojs.resproj->clone();
1643     transform_later(slow_result);
1644     _igvn.replace_node(_callprojs.resproj, result_phi_rawoop);
1645   }
1646 
1647   // Plug slow-path into result merge point
1648   result_region->init_req( slow_result_path, ctrl);
1649   transform_later(result_region);
1650   if (allocation_has_use) {
1651     result_phi_rawoop->init_req(slow_result_path, slow_result);
1652     transform_later(result_phi_rawoop);
1653   }
1654   result_phi_rawmem->init_req(slow_result_path, _callprojs.fallthrough_memproj);
1655   transform_later(result_phi_rawmem);
1656   transform_later(result_phi_i_o);
1657   // This completes all paths into the result merge point
1658 }
1659 
1660 // Remove alloc node that has no uses.
1661 void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
1662   Node* ctrl = alloc->in(TypeFunc::Control);
1663   Node* mem  = alloc->in(TypeFunc::Memory);
1664   Node* i_o  = alloc->in(TypeFunc::I_O);
1665 
1666   alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
1667   if (_callprojs.resproj != nullptr) {
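         // The only remaining users of the result projection of this use-less
         // allocation are MemBars; remove each of them.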
1668     for (DUIterator_Fast imax, i = _callprojs.resproj->fast_outs(imax); i < imax; i++) {
1669       Node* use = _callprojs.resproj->fast_out(i);
1670       use->isa_MemBar()->remove(&_igvn);
1671       --imax;
1672       --i; // back up iterator
1673     }
1674     assert(_callprojs.resproj->outcnt() == 0, "all uses must be deleted");
1675     _igvn.remove_dead_node(_callprojs.resproj, PhaseIterGVN::NodeOrigin::Graph);
1676   }
1677   if (_callprojs.fallthrough_catchproj != nullptr) {
1678     migrate_outs(_callprojs.fallthrough_catchproj, ctrl);
1679     _igvn.remove_dead_node(_callprojs.fallthrough_catchproj, PhaseIterGVN::NodeOrigin::Graph);
1680   }
1681   if (_callprojs.catchall_catchproj != nullptr) {
1682     _igvn.rehash_node_delayed(_callprojs.catchall_catchproj);
1683     _callprojs.catchall_catchproj->set_req(0, top());
1684   }
1685   if (_callprojs.fallthrough_proj != nullptr) {
1686     Node* catchnode = _callprojs.fallthrough_proj->unique_ctrl_out();
1687     _igvn.remove_dead_node(catchnode, PhaseIterGVN::NodeOrigin::Graph);
1688     _igvn.remove_dead_node(_callprojs.fallthrough_proj, PhaseIterGVN::NodeOrigin::Graph);
1689   }
1690   if (_callprojs.fallthrough_memproj != nullptr) {
1691     migrate_outs(_callprojs.fallthrough_memproj, mem);
1692     _igvn.remove_dead_node(_callprojs.fallthrough_memproj, PhaseIterGVN::NodeOrigin::Graph);
1693   }
1694   if (_callprojs.fallthrough_ioproj != nullptr) {
1695     migrate_outs(_callprojs.fallthrough_ioproj, i_o);
1696     _igvn.remove_dead_node(_callprojs.fallthrough_ioproj, PhaseIterGVN::NodeOrigin::Graph);
1697   }
1698   if (_callprojs.catchall_memproj != nullptr) {
1699     _igvn.rehash_node_delayed(_callprojs.catchall_memproj);
1700     _callprojs.catchall_memproj->set_req(0, top());
1701   }
1702   if (_callprojs.catchall_ioproj != nullptr) {
1703     _igvn.rehash_node_delayed(_callprojs.catchall_ioproj);
1704     _callprojs.catchall_ioproj->set_req(0, top());
1705   }
1706 #ifndef PRODUCT
1707   if (PrintEliminateAllocations) {
1708     if (alloc->is_AllocateArray()) {
1709       tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
1710     } else {
1711       tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
1712     }
1713   }
1714 #endif
1715   _igvn.remove_dead_node(alloc, PhaseIterGVN::NodeOrigin::Graph);
1716 }
1717 
1718 void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeNode* init,
1719                                                 Node*& fast_oop_ctrl, Node*& fast_oop_rawmem) {
1720   // If initialization is performed by an array copy, any required
1721   // MemBarStoreStore was already added. If the object does not
1722   // escape, no MemBarStoreStore is needed. If the object does not
1723   // escape in its initializer and a memory barrier (MemBarStoreStore or
1724   // stronger) is already added at the exit of the initializer, none is needed either.

1818     Node* thread = new ThreadLocalNode();
1819     transform_later(thread);
1820 
1821     call->init_req(TypeFunc::Parms + 0, thread);
1822     call->init_req(TypeFunc::Parms + 1, oop);
1823     call->init_req(TypeFunc::Control, ctrl);
1824     call->init_req(TypeFunc::I_O    , top()); // does no i/o
1825     call->init_req(TypeFunc::Memory , rawmem);
1826     call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
1827     call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
1828     transform_later(call);
1829     ctrl = new ProjNode(call, TypeFunc::Control);
1830     transform_later(ctrl);
1831     rawmem = new ProjNode(call, TypeFunc::Memory);
1832     transform_later(rawmem);
1833   }
1834 }
1835 
1836 // Helper for PhaseMacroExpand::expand_allocate_common.
1837 // Initializes the newly-allocated storage.
1838 Node*
1839 PhaseMacroExpand::initialize_object(AllocateNode* alloc,
1840                                     Node* control, Node* rawmem, Node* object,
1841                                     Node* klass_node, Node* length,
1842                                     Node* size_in_bytes) {
1843   InitializeNode* init = alloc->initialization();
1844   // Store the klass & mark bits
1845   Node* mark_node = alloc->make_ideal_mark(&_igvn, control, rawmem);
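       // The mark word is usually a compile-time constant; when it is not
       // (e.g. with compact object headers, where it also encodes the klass),
       // register the computed node with IGVN.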
1846   if (!mark_node->is_Con()) {
1847     transform_later(mark_node);
1848   }
1849   rawmem = make_store_raw(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type());
1850 
1851   if (!UseCompactObjectHeaders) {
1852     rawmem = make_store_raw(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
1853   }
1854   int header_size = alloc->minimum_header_size();  // conservatively small
1855 
1856   // Array length
1857   if (length != nullptr) {         // Arrays need length field
1858     rawmem = make_store_raw(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
1859     // conservatively small header size:
1860     header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1861     if (_igvn.type(klass_node)->isa_aryklassptr()) {   // we know the exact header size in most cases:
1862       BasicType elem = _igvn.type(klass_node)->is_klassptr()->as_instance_type()->isa_aryptr()->elem()->array_element_basic_type();
1863       if (is_reference_type(elem, true)) {
1864         elem = T_OBJECT;
1865       }
1866       header_size = Klass::layout_helper_header_size(Klass::array_layout_helper(elem));
1867     }
1868   }
1869 
1870   // Clear the object body, if necessary.
1871   if (init == nullptr) {
1872     // The init has somehow disappeared; be cautious and clear everything.
1873     //
1874     // This can happen if a node is allocated but an uncommon trap occurs
1875     // immediately.  In this case, the Initialize gets associated with the
1876     // trap, and may be placed in a different (outer) loop, if the Allocate
1877     // is in a loop.  If (this is rare) the inner loop gets unrolled, then
1878     // there can be two Allocates to one Initialize.  The answer in all these
1879     // edge cases is safety first.  It is always safe to clear immediately
1880     // within an Allocate, and then (maybe or maybe not) clear some more later.
1881     if (!(UseTLAB && ZeroTLAB)) {
1882       rawmem = ClearArrayNode::clear_memory(control, rawmem, object,


1883                                             header_size, size_in_bytes,
1884                                             true,
1885                                             &_igvn);
1886     }
1887   } else {
1888     if (!init->is_complete()) {
1889       // Try to win by zeroing only what the init does not store.
1890       // We can also try to do some peephole optimizations,
1891       // such as combining some adjacent subword stores.
1892       rawmem = init->complete_stores(control, rawmem, object,
1893                                      header_size, size_in_bytes, &_igvn);
1894     }
1895     // We have no more use for this link, since the AllocateNode goes away:
1896     init->set_req(InitializeNode::RawAddress, top());
1897     // (If we keep the link, it just confuses the register allocator,
1898     // which thinks it sees a real use of the address by the membar.)
1899   }
1900 
1901   return rawmem;
1902 }

2037       for (intx i = 0; i < lines; i++) {
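             // Each prefetch consumes the previous one through the fake i_o
             // edge, which keeps the prefetches ordered without control edges.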
2038         prefetch_adr = AddPNode::make_off_heap(new_eden_top,
2039                                                _igvn.MakeConX(distance));
2040         transform_later(prefetch_adr);
2041         prefetch = new PrefetchAllocationNode(i_o, prefetch_adr);
2042         // Do not let it float too high, since if eden_top == eden_end,
2043         // both might be null.
2044         if (i == 0) { // Set control for first prefetch, next follows it
2045           prefetch->init_req(0, needgc_false);
2046         }
2047         transform_later(prefetch);
2048         distance += step_size;
2049         i_o = prefetch;
2050       }
2051    }
2052    return i_o;
2053 }
2054 
2055 
2056 void PhaseMacroExpand::expand_allocate(AllocateNode *alloc) {
2057   expand_allocate_common(alloc, nullptr,
2058                          OptoRuntime::new_instance_Type(),
2059                          OptoRuntime::new_instance_Java(), nullptr);
2060 }
2061 
2062 void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) {
2063   Node* length = alloc->in(AllocateNode::ALength);
2064   Node* valid_length_test = alloc->in(AllocateNode::ValidLengthTest);
2065   InitializeNode* init = alloc->initialization();
2066   Node* klass_node = alloc->in(AllocateNode::KlassNode);

2067   const TypeAryKlassPtr* ary_klass_t = _igvn.type(klass_node)->isa_aryklassptr();



2068   address slow_call_address;  // Address of slow call
2069   if (init != nullptr && init->is_complete_with_arraycopy() &&
2070       ary_klass_t && ary_klass_t->elem()->isa_klassptr() == nullptr) {
2071     // Don't zero a primitive-type array during slow allocation in the VM
2072     // since it will be initialized later by an arraycopy in compiled code.
2073     slow_call_address = OptoRuntime::new_array_nozero_Java();

2074   } else {
2075     slow_call_address = OptoRuntime::new_array_Java();







2076   }
2077   expand_allocate_common(alloc, length,
2078                          OptoRuntime::new_array_Type(),
2079                          slow_call_address, valid_length_test);
2080 }
2081 
2082 //-------------------mark_eliminated_box----------------------------------
2083 //
2084 // During EA, obj may point to several objects, but after a few ideal graph
2085 // transformations (CCP) it may point to only one non-escaping object
2086 // (though still through a phi); the corresponding locks and unlocks will be
2087 // marked for elimination. Later, obj could be replaced with a new node (a new
2088 // phi) which does not have escape information. And later, after some graph
2089 // reshaping, other locks and unlocks (which were not marked for elimination
2090 // before) get connected to this new obj (phi), but they still will not be
2091 // marked for elimination since the new obj has no escape information.
2092 // Mark all associated (same box and obj) lock and unlock nodes for
2093 // elimination if some of them are marked already.
2094 void PhaseMacroExpand::mark_eliminated_box(Node* box, Node* obj) {
2095   BoxLockNode* oldbox = box->as_BoxLock();
2096   if (oldbox->is_eliminated()) {
2097     return; // This BoxLock node was processed already.
2098   }

2270 #ifdef ASSERT
2271   if (!alock->is_coarsened()) {
2272     // Check that new "eliminated" BoxLock node is created.
2273     BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
2274     assert(oldbox->is_eliminated(), "should be done already");
2275   }
2276 #endif
2277 
2278   alock->log_lock_optimization(C, "eliminate_lock");
2279 
2280 #ifndef PRODUCT
2281   if (PrintEliminateLocks) {
2282     tty->print_cr("++++ Eliminated: %d %s '%s'", alock->_idx, (alock->is_Lock() ? "Lock" : "Unlock"), alock->kind_as_string());
2283   }
2284 #endif
2285 
2286   Node* mem  = alock->in(TypeFunc::Memory);
2287   Node* ctrl = alock->in(TypeFunc::Control);
2288   guarantee(ctrl != nullptr, "missing control projection, cannot replace_node() with null");
2289 
2290   alock->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
2291   // There are 2 projections from the lock.  The lock node will
2292   // be deleted when its last use is subsumed below.
2293   assert(alock->outcnt() == 2 &&
2294          _callprojs.fallthrough_proj != nullptr &&
2295          _callprojs.fallthrough_memproj != nullptr,
2296          "Unexpected projections from Lock/Unlock");
2297 
2298   Node* fallthroughproj = _callprojs.fallthrough_proj;
2299   Node* memproj_fallthrough = _callprojs.fallthrough_memproj;
2300 
2301   // The memory projection from a lock/unlock is RawMem
2302   // The input to a Lock is merged memory, so extract its RawMem input
2303   // (unless the MergeMem has been optimized away.)
2304   if (alock->is_Lock()) {
2305     // Search for MemBarAcquireLock node and delete it also.
2306     MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
2307     assert(membar != nullptr && membar->Opcode() == Op_MemBarAcquireLock, "");
2308     Node* ctrlproj = membar->proj_out(TypeFunc::Control);
2309     Node* memproj = membar->proj_out(TypeFunc::Memory);
2310     _igvn.replace_node(ctrlproj, fallthroughproj);
2311     _igvn.replace_node(memproj, memproj_fallthrough);
2312 
2313     // Delete FastLock node also if this Lock node is unique user
2314     // (a loop peeling may clone a Lock node).
2315     Node* flock = alock->as_Lock()->fastlock_node();
2316     if (flock->outcnt() == 1) {
2317       assert(flock->unique_out() == alock, "sanity");
2318       _igvn.replace_node(flock, top());
2319     }

2350   assert(!box->as_BoxLock()->is_eliminated(), "sanity");
2351 
2352   // Make the merge point
2353   Node *region;
2354   Node *mem_phi;
2355   Node *slow_path;
2356 
2357   region  = new RegionNode(3);
2358   // create a Phi for the memory state
2359   mem_phi = new PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
2360 
2361   // Optimize test; set region slot 2
2362   slow_path = opt_bits_test(ctrl, region, 2, flock);
2363   mem_phi->init_req(2, mem);
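       // Slot 2 is the fast path: FastLock acquired the lock inline, so the
       // memory state is unchanged.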
2364 
2365   // Make slow path call
2366   CallNode* call = make_slow_call(lock, OptoRuntime::complete_monitor_enter_Type(),
2367                                   OptoRuntime::complete_monitor_locking_Java(), nullptr, slow_path,
2368                                   obj, box, nullptr);
2369 
2370   call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
2371 
2372   // Slow path can only throw asynchronous exceptions, which are always
2373   // de-opted.  So the compiler thinks the slow-call can never throw an
2374   // exception.  If it DOES throw an exception, we would need the debug
2375   // info removed first (since if it throws there is no monitor).
2376   assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr &&
2377          _callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock");
2378 
2379   // Capture slow path
2380   // disconnect fall-through projection from call and create a new one
2381   // hook up users of fall-through projection to region
2382   Node *slow_ctrl = _callprojs.fallthrough_proj->clone();
2383   transform_later(slow_ctrl);
2384   _igvn.hash_delete(_callprojs.fallthrough_proj);
2385   _callprojs.fallthrough_proj->disconnect_inputs(C);
2386   region->init_req(1, slow_ctrl);
2387   // region inputs are now complete
2388   transform_later(region);
2389   _igvn.replace_node(_callprojs.fallthrough_proj, region);
2390 
2391   Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
2392 
2393   mem_phi->init_req(1, memproj);
2394 
2395   transform_later(mem_phi);
2396 
2397   _igvn.replace_node(_callprojs.fallthrough_memproj, mem_phi);
2398 }
2399 
2400 //------------------------------expand_unlock_node----------------------
2401 void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
2402 
2403   Node* ctrl = unlock->in(TypeFunc::Control);
2404   Node* mem = unlock->in(TypeFunc::Memory);
2405   Node* obj = unlock->obj_node();
2406   Node* box = unlock->box_node();
2407 
2408   assert(!box->as_BoxLock()->is_eliminated(), "sanity");
2409 
2410   // No need for a null check on unlock
2411 
2412   // Make the merge point
2413   Node* region = new RegionNode(3);
2414 
2415   FastUnlockNode *funlock = new FastUnlockNode( ctrl, obj, box );
2416   funlock = transform_later( funlock )->as_FastUnlock();
2417   // Optimize test; set region slot 2
2418   Node *slow_path = opt_bits_test(ctrl, region, 2, funlock);
2419   Node *thread = transform_later(new ThreadLocalNode());
2420 
2421   CallNode *call = make_slow_call((CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(),
2422                                   CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C),
2423                                   "complete_monitor_unlocking_C", slow_path, obj, box, thread);
2424 
2425   call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
2426   assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr &&
2427          _callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock");
2428 
2429   // No exceptions for unlocking
2430   // Capture slow path
2431   // disconnect fall-through projection from call and create a new one
2432   // hook up users of fall-through projection to region
2433   Node *slow_ctrl = _callprojs.fallthrough_proj->clone();
2434   transform_later(slow_ctrl);
2435   _igvn.hash_delete(_callprojs.fallthrough_proj);
2436   _callprojs.fallthrough_proj->disconnect_inputs(C);
2437   region->init_req(1, slow_ctrl);
2438   // region inputs are now complete
2439   transform_later(region);
2440   _igvn.replace_node(_callprojs.fallthrough_proj, region);
2441 
2442   if (_callprojs.fallthrough_memproj != nullptr) {
2443     // create a Phi for the memory state
2444     Node* mem_phi = new PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
2445     Node* memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
2446     mem_phi->init_req(1, memproj);
2447     mem_phi->init_req(2, mem);
2448     transform_later(mem_phi);
2449     _igvn.replace_node(_callprojs.fallthrough_memproj, mem_phi);
2450   }
2451 }
2452 
2453 void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) {
2454   assert(check->in(SubTypeCheckNode::Control) == nullptr, "should be pinned");
2455   Node* bol = check->unique_out();
2456   Node* obj_or_subklass = check->in(SubTypeCheckNode::ObjOrSubKlass);
2457   Node* superklass = check->in(SubTypeCheckNode::SuperKlass);
2458   assert(bol->is_Bool() && bol->as_Bool()->_test._test == BoolTest::ne, "unexpected bool node");
2459 
2460   for (DUIterator_Last imin, i = bol->last_outs(imin); i >= imin; --i) {
2461     Node* iff = bol->last_out(i);
2462     assert(iff->is_If(), "where's the if?");
2463 
2464     if (iff->in(0)->is_top()) {
2465       _igvn.replace_input_of(iff, 1, C->top());
2466       continue;
2467     }
2468 
2469     IfTrueNode* iftrue = iff->as_If()->true_proj();
2470     IfFalseNode* iffalse = iff->as_If()->false_proj();
2471     Node* ctrl = iff->in(0);
2472 
2473     Node* subklass = nullptr;
2474     if (_igvn.type(obj_or_subklass)->isa_klassptr()) {
2475       subklass = obj_or_subklass;
2476     } else {
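           // We were handed an oop rather than a klass: load the object's
           // klass to use as the subklass in the check.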
2477       Node* k_adr = basic_plus_adr(obj_or_subklass, oopDesc::klass_offset_in_bytes());
2478       subklass = _igvn.transform(LoadKlassNode::make(_igvn, C->immutable_memory(), k_adr, TypeInstPtr::KLASS));
2479     }
2480 
2481     Node* not_subtype_ctrl = Phase::gen_subtype_check(subklass, superklass, &ctrl, nullptr, _igvn, check->method(), check->bci());
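         // gen_subtype_check leaves 'ctrl' as the control path on which the
         // subtype test succeeds and returns the path on which it fails.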
2482 
2483     _igvn.replace_input_of(iff, 0, C->top());
2484     _igvn.replace_node(iftrue, not_subtype_ctrl);
2485     _igvn.replace_node(iffalse, ctrl);
2486   }
2487   _igvn.replace_node(check, C->top());
2488 }
2489 
2490 // Refine the strip-mined loop nodes in the macro nodes list.
2491 void PhaseMacroExpand::refine_strip_mined_loop_macro_nodes() {
2492   for (int i = C->macro_count(); i > 0; i--) {
2493     Node* n = C->macro_node(i - 1);
2494     if (n->is_OuterStripMinedLoop()) {
2495       n->as_OuterStripMinedLoop()->adjust_strip_mined_loop(&_igvn);
2496     }
2497   }
2498 }
2499 
2500 //---------------------------eliminate_macro_nodes----------------------
2501 // Eliminate scalar replaced allocations and associated locks.
2502 void PhaseMacroExpand::eliminate_macro_nodes() {
2503   if (C->macro_count() == 0)
2504     return;

2505 
2506   if (StressMacroElimination) {
2507     C->shuffle_macro_nodes();
2508   }
2509   NOT_PRODUCT(int membar_before = count_MemBar(C);)
2510 
2511   // Before elimination, we may re-mark (change to Nested or NonEscObj)
2512   // all associated (same box and obj) lock and unlock nodes.
2513   int cnt = C->macro_count();
2514   for (int i=0; i < cnt; i++) {
2515     Node *n = C->macro_node(i);
2516     if (n->is_AbstractLock()) { // Lock and Unlock nodes
2517       mark_eliminated_locking_nodes(n->as_AbstractLock());
2518     }
2519   }
2520   // Re-marking may break consistency of Coarsened locks.
2521   if (!C->coarsened_locks_consistent()) {
2522     return; // recompile without Coarsened locks if broken
2523   } else {
2524     // After coarsened locks are eliminated, locking regions
2525     // become unbalanced. We should not run any more
2526     // lock elimination optimizations on them.
2527     C->mark_unbalanced_boxes();
2528   }
2529 
2530   // First, attempt to eliminate locks
2531   bool progress = true;
2532   while (progress) {
2533     progress = false;
2534     for (int i = C->macro_count(); i > 0; i = MIN2(i - 1, C->macro_count())) { // more than 1 element can be eliminated at once
2535       Node* n = C->macro_node(i - 1);
2536       bool success = false;
2537       DEBUG_ONLY(int old_macro_count = C->macro_count();)
2538       if (n->is_AbstractLock()) {
2539         success = eliminate_locking_node(n->as_AbstractLock());
2540 #ifndef PRODUCT
2541         if (success && PrintOptoStatistics) {
2542           AtomicAccess::inc(&PhaseMacroExpand::_monitor_objects_removed_counter);
2543         }
2544 #endif
2545       }
2546       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2547       progress = progress || success;
2548       if (success) {
2549         C->print_method(PHASE_AFTER_MACRO_ELIMINATION_STEP, 5, n);




2550       }
2551     }
2552   }
2553   // Next, attempt to eliminate allocations
2554   progress = true;
2555   while (progress) {
2556     progress = false;
2557     for (int i = C->macro_count(); i > 0; i = MIN2(i - 1, C->macro_count())) { // more than 1 element can be eliminated at once
2558       Node* n = C->macro_node(i - 1);
2559       bool success = false;
2560       DEBUG_ONLY(int old_macro_count = C->macro_count();)
2561       switch (n->class_id()) {
2562       case Node::Class_Allocate:
2563       case Node::Class_AllocateArray:
2564         success = eliminate_allocate_node(n->as_Allocate());
2565 #ifndef PRODUCT
2566         if (success && PrintOptoStatistics) {
2567           AtomicAccess::inc(&PhaseMacroExpand::_objs_scalar_replaced_counter);
2568         }
2569 #endif
2570         break;
2571       case Node::Class_CallStaticJava:
2572         success = eliminate_boxing_node(n->as_CallStaticJava());



2573         break;

2574       case Node::Class_Lock:
2575       case Node::Class_Unlock:
2576         assert(!n->as_AbstractLock()->is_eliminated(), "sanity");







2577         break;
2578       case Node::Class_ArrayCopy:
2579         break;
2580       case Node::Class_OuterStripMinedLoop:
2581         break;
2582       case Node::Class_SubTypeCheck:
2583         break;
2584       case Node::Class_Opaque1:
2585         break;


2586       default:
2587         assert(n->Opcode() == Op_LoopLimit ||
2588                n->Opcode() == Op_ModD ||
2589                n->Opcode() == Op_ModF ||
2590                n->Opcode() == Op_PowD ||
2591                n->is_OpaqueConstantBool()    ||
2592                n->is_OpaqueInitializedAssertionPredicate() ||
2593                n->Opcode() == Op_MaxL      ||
2594                n->Opcode() == Op_MinL      ||
2595                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(n),
2596                "unknown node type in macro list");
2597       }
2598       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2599       progress = progress || success;
2600       if (success) {
2601         C->print_method(PHASE_AFTER_MACRO_ELIMINATION_STEP, 5, n);
2602       }
2603     }
















2604   }
2605 #ifndef PRODUCT
2606   if (PrintOptoStatistics) {
2607     int membar_after = count_MemBar(C);
2608     AtomicAccess::add(&PhaseMacroExpand::_memory_barriers_removed_counter, membar_before - membar_after);
2609   }
2610 #endif
2611 }
2612 
2613 void PhaseMacroExpand::eliminate_opaque_looplimit_macro_nodes() {
2614   if (C->macro_count() == 0) {
2615     return;
2616   }
2617   refine_strip_mined_loop_macro_nodes();
2618   // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
2619   bool progress = true;
2620   while (progress) {
2621     progress = false;
2622     for (int i = C->macro_count(); i > 0; i--) {
2623       Node* n = C->macro_node(i-1);
2624       bool success = false;
2625       DEBUG_ONLY(int old_macro_count = C->macro_count();)
2626       if (n->Opcode() == Op_LoopLimit) {
2627         // Remove it from the macro list and put it on the IGVN worklist to optimize.
2628         C->remove_macro_node(n);
2629         _igvn._worklist.push(n);
2630         success = true;
2631       } else if (n->Opcode() == Op_CallStaticJava) {
2632         // Remove it from the macro list and put it on the IGVN worklist to optimize.
2633         C->remove_macro_node(n);
2634         _igvn._worklist.push(n);
2635         success = true;



2636       } else if (n->is_Opaque1()) {
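             // Opaque1 nodes only shield loop-limit computations from being
             // optimized too early; loop opts are over, so strip them now.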
2637         _igvn.replace_node(n, n->in(1));
2638         success = true;
2639       } else if (n->is_OpaqueConstantBool()) {
2640         // Tests with OpaqueConstantBool nodes are implicitly known. Replace the node with true/false. In debug builds,
2641         // we leave the test in the graph to have an additional sanity check at runtime. If the test fails (i.e. a bug),
2642         // we will execute a Halt node.
2643 #ifdef ASSERT
2644         _igvn.replace_node(n, n->in(1));
2645 #else
2646         _igvn.replace_node(n, _igvn.intcon(n->as_OpaqueConstantBool()->constant()));
2647 #endif
2648         success = true;
2649       } else if (n->is_OpaqueInitializedAssertionPredicate()) {
2650           // Initialized Assertion Predicates must always evaluate to true. Therefore, we get rid of them in product
2651           // builds as they are useless. In debug builds we keep them as additional verification code. Even though
2652           // loop opts are already over, we want to keep Initialized Assertion Predicates alive as long as possible to
2653           // enable folding of dead control paths within which cast nodes become top due to impossible types -
2654           // even after loop opts are over. Therefore, we delay the removal of these opaque nodes until now.
2655 #ifdef ASSERT

2724     // Worst case is a macro node gets expanded into about 200 nodes.
2725     // Allow 50% more for optimization.
2726     if (C->check_node_count(300, "out of nodes before macro expansion")) {
2727       return true;
2728     }
2729 
2730     DEBUG_ONLY(int old_macro_count = C->macro_count();)
2731     switch (n->class_id()) {
2732     case Node::Class_Lock:
2733       expand_lock_node(n->as_Lock());
2734       break;
2735     case Node::Class_Unlock:
2736       expand_unlock_node(n->as_Unlock());
2737       break;
2738     case Node::Class_ArrayCopy:
2739       expand_arraycopy_node(n->as_ArrayCopy());
2740       break;
2741     case Node::Class_SubTypeCheck:
2742       expand_subtypecheck_node(n->as_SubTypeCheck());
2743       break;







2744     default:
2745       switch (n->Opcode()) {
2746       case Op_ModD:
2747       case Op_ModF:
2748       case Op_PowD: {
2749         CallLeafPureNode* call_macro = n->as_CallLeafPure();
2750         CallLeafPureNode* call = call_macro->inline_call_leaf_pure_node();
2751         _igvn.replace_node(call_macro, call);
2752         transform_later(call);
2753         break;
2754       }
2755       default:
2756         assert(false, "unknown node type in macro list");
2757       }
2758     }
2759     assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
2760     if (C->failing())  return true;
2761     C->print_method(PHASE_AFTER_MACRO_EXPANSION_STEP, 5, n);
2762 
2763     // Clean up the graph so we're less likely to hit the maximum node

  25 #include "ci/ciFlatArrayKlass.hpp"
  26 #include "ci/ciInlineKlass.hpp"
  27 #include "ci/ciInstanceKlass.hpp"
  28 #include "compiler/compileLog.hpp"
  29 #include "gc/shared/collectedHeap.inline.hpp"
  30 #include "gc/shared/tlab_globals.hpp"
  31 #include "libadt/vectset.hpp"
  32 #include "memory/universe.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/castnode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/compile.hpp"
  39 #include "opto/convertnode.hpp"
  40 #include "opto/graphKit.hpp"
  41 #include "opto/inlinetypenode.hpp"
  42 #include "opto/intrinsicnode.hpp"
  43 #include "opto/locknode.hpp"
  44 #include "opto/loopnode.hpp"
  45 #include "opto/macro.hpp"
  46 #include "opto/memnode.hpp"
  47 #include "opto/narrowptrnode.hpp"
  48 #include "opto/node.hpp"
  49 #include "opto/opaquenode.hpp"
  50 #include "opto/opcodes.hpp"
  51 #include "opto/phaseX.hpp"
  52 #include "opto/reachability.hpp"
  53 #include "opto/rootnode.hpp"
  54 #include "opto/runtime.hpp"
  55 #include "opto/subnode.hpp"
  56 #include "opto/subtypenode.hpp"
  57 #include "opto/type.hpp"
  58 #include "prims/jvmtiExport.hpp"
  59 #include "runtime/continuation.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "utilities/globalDefinitions.hpp"
  63 #include "utilities/macros.hpp"
  64 #include "utilities/powerOfTwo.hpp"
  65 #if INCLUDE_G1GC
  66 #include "gc/g1/g1ThreadLocalData.hpp"
  67 #endif // INCLUDE_G1GC
  68 
  69 
  70 //
  71 // Replace any references to "oldref" in inputs to "use" with "newref".
  72 // Returns the number of replacements made.
  73 //
  74 int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  75   int nreplacements = 0;
  76   uint req = use->req();
  77   for (uint j = 0; j < use->len(); j++) {
  78     Node *uin = use->in(j);
  79     if (uin == oldref) {
  80       if (j < req)
  81         use->set_req(j, newref);
  82       else
  83         use->set_prec(j, newref);
  84       nreplacements++;
  85     } else if (j >= req && uin == nullptr) {
  86       break;
  87     }
  88   }
  89   return nreplacements;
  90 }
  91 











  92 
  93 Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word) {
  94   Node* cmp = word;
  95   Node* bol = transform_later(new BoolNode(cmp, BoolTest::ne));
  96   IfNode* iff = new IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
  97   transform_later(iff);
  98 
  99   // Fast path taken.
 100   Node *fast_taken = transform_later(new IfFalseNode(iff));
 101 
 102   // Fast path not-taken, i.e. slow path
 103   Node *slow_taken = transform_later(new IfTrueNode(iff));
 104 
 105   region->init_req(edge, fast_taken); // Capture fast-control
 106   return slow_taken;
 107 }
 108 
 109 //--------------------copy_predefined_input_for_runtime_call--------------------
 110 void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call) {
 111   // Set fixed predefined input arguments

 124   // Slow-path call
 125   CallNode *call = leaf_name
 126     ? (CallNode*)new CallLeafNode      ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
 127     : (CallNode*)new CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), TypeRawPtr::BOTTOM );
 128 
 129   // Slow path call has no side-effects, uses few values
 130   copy_predefined_input_for_runtime_call(slow_path, oldcall, call);
 131   if (parm0 != nullptr)  call->init_req(TypeFunc::Parms+0, parm0);
 132   if (parm1 != nullptr)  call->init_req(TypeFunc::Parms+1, parm1);
 133   if (parm2 != nullptr)  call->init_req(TypeFunc::Parms+2, parm2);
 134   call->copy_call_debug_info(&_igvn, oldcall);
 135   call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
 136   _igvn.replace_node(oldcall, call);
 137   transform_later(call);
 138 
 139   return call;
 140 }
 141 
 142 void PhaseMacroExpand::eliminate_gc_barrier(Node* p2x) {
 143   BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
 144   bs->eliminate_gc_barrier(&_igvn, p2x);
 145 #ifndef PRODUCT
 146   if (PrintOptoStatistics) {
 147     AtomicAccess::inc(&PhaseMacroExpand::_GC_barriers_removed_counter);
 148   }
 149 #endif
 150 }
 151 
 152 // Search for a memory operation for the specified memory slice.
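     // It walks the memory graph upwards from 'mem', stepping over nodes that
     // cannot affect the slice, until it finds a store to the field, the
     // allocation's Initialize, an interfering call or arraycopy, or one of
     // the sentinels (start_mem or the allocation's own memory projection).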
 153 static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) {
 154   Node *orig_mem = mem;
 155   Node *alloc_mem = alloc->as_Allocate()->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
 156   assert(alloc_mem != nullptr, "Allocation without a memory projection.");
 157   const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
 158   while (true) {
 159     if (mem == alloc_mem || mem == start_mem ) {
 160       return mem;  // hit one of our sentinels
 161     } else if (mem->is_MergeMem()) {
 162       mem = mem->as_MergeMem()->memory_at(alias_idx);
 163     } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) {
 164       Node *in = mem->in(0);

 167       if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) {
 168         return in;
 169       } else if (in->is_Call()) {
 170         CallNode *call = in->as_Call();
 171         if (call->may_modify(tinst, phase)) {
 172           assert(call->is_ArrayCopy(), "ArrayCopy is the only call node that doesn't make allocation escape");
 173           if (call->as_ArrayCopy()->modifies(offset, offset, phase, false)) {
 174             return in;
 175           }
 176         }
 177         mem = in->in(TypeFunc::Memory);
 178       } else if (in->is_MemBar()) {
 179         ArrayCopyNode* ac = nullptr;
 180         if (ArrayCopyNode::may_modify(tinst, in->as_MemBar(), phase, ac)) {
 181           if (ac != nullptr) {
 182             assert(ac->is_clonebasic(), "Only basic clone is a non escaping clone");
 183             return ac;
 184           }
 185         }
 186         mem = in->in(TypeFunc::Memory);
 187       } else if (in->is_LoadFlat() || in->is_StoreFlat()) {
 188         mem = in->in(TypeFunc::Memory);
 189       } else {
 190 #ifdef ASSERT
 191         in->dump();
 192         mem->dump();
 193         assert(false, "unexpected projection");
 194 #endif
 195       }
 196     } else if (mem->is_Store()) {
 197       const TypePtr* atype = mem->as_Store()->adr_type();
 198       int adr_idx = phase->C->get_alias_index(atype);
 199       if (adr_idx == alias_idx) {
 200         assert(atype->isa_oopptr(), "address type must be oopptr");
 201         int adr_offset = atype->flat_offset();
 202         uint adr_iid = atype->is_oopptr()->instance_id();
 203         // References to array elements have the same alias_idx
 204         // but different offsets and different instance_ids.
 205         if (adr_offset == offset && adr_iid == alloc->_idx) {
 206           return mem;
 207         }
 208       } else {
 209         assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
 210       }
 211       mem = mem->in(MemNode::Memory);
 212     } else if (mem->is_ClearArray()) {
 213       if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
 214         // Cannot bypass initialization of the instance
 215         // we are looking for.
 216         DEBUG_ONLY(intptr_t offset;)
 217         assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
 218         InitializeNode* init = alloc->as_Allocate()->initialization();
 219         // We are looking for stored value, return Initialize node
 220         // or memory edge from Allocate node.
 221         if (init != nullptr) {

 226       }
 227       // Otherwise skip it (the call updated 'mem' value).
 228     } else if (mem->Opcode() == Op_SCMemProj) {
 229       mem = mem->in(0);
 230       Node* adr = nullptr;
 231       if (mem->is_LoadStore()) {
 232         adr = mem->in(MemNode::Address);
 233       } else {
 234         assert(mem->Opcode() == Op_EncodeISOArray ||
 235                mem->Opcode() == Op_StrCompressedCopy, "sanity");
 236         adr = mem->in(3); // Destination array
 237       }
 238       const TypePtr* atype = adr->bottom_type()->is_ptr();
 239       int adr_idx = phase->C->get_alias_index(atype);
 240       if (adr_idx == alias_idx) {
 241         DEBUG_ONLY(mem->dump();)
 242         assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
 243         return nullptr;
 244       }
 245       mem = mem->in(MemNode::Memory);
 246     } else if (mem->Opcode() == Op_StrInflatedCopy) {
 247       Node* adr = mem->in(3); // Destination array
 248       const TypePtr* atype = adr->bottom_type()->is_ptr();
 249       int adr_idx = phase->C->get_alias_index(atype);
 250       if (adr_idx == alias_idx) {
 251         DEBUG_ONLY(mem->dump();)
 252         assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
 253         return nullptr;
 254       }
 255       mem = mem->in(MemNode::Memory);
 256     } else {
 257       return mem;
 258     }
 259     assert(mem != orig_mem, "dead memory loop");
 260   }
 261 }
 262 
 263 // Determine if there is an interfering store between a rematerialization load and an arraycopy that is in the process
 264 // of being elided. Starting from the given rematerialization load, this method performs a BFS traversal upwards through
 265 // the memory graph towards the provided ArrayCopyNode. For every node encountered on the traversal, check that it is
 266 // independent from the provided rematerialization load. Returns false if every node on the traversal is independent, and true otherwise.

 306 // Generate loads from source of the arraycopy for fields of destination needed at a deoptimization point.
 307 // Returns nullptr if the load cannot be created because the arraycopy is not suitable for elimination
 308 // (e.g. copy inside the array with non-constant offsets) or the inputs do not match our assumptions (e.g.
 309 // the arraycopy does not actually write something at the provided offset).
 310 Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type* ftype, AllocateNode* alloc) {
 311   assert((ctl == ac->control() && mem == ac->memory()) != (mem != ac->memory() && ctl->is_Proj() && ctl->as_Proj()->is_uncommon_trap_proj()),
 312     "Either the control and memory are the same as for the arraycopy or they are pinned in an uncommon trap.");
 313   BasicType bt = ft;
 314   const Type *type = ftype;
 315   if (ft == T_NARROWOOP) {
 316     bt = T_OBJECT;
 317     type = ftype->make_oopptr();
 318   }
 319   Node* base = ac->in(ArrayCopyNode::Src);
 320   Node* adr = nullptr;
 321   const TypePtr* adr_type = nullptr;
 322 
 323   if (ac->is_clonebasic()) {
 324     assert(ac->in(ArrayCopyNode::Src) != ac->in(ArrayCopyNode::Dest), "clone source equals destination");
 325     adr = _igvn.transform(AddPNode::make_with_base(base, _igvn.MakeConX(offset)));
 326     adr_type = _igvn.type(base)->is_ptr();
 327     if (adr_type->isa_aryptr()) {
 328       adr_type = adr_type->is_aryptr()->add_field_offset_and_offset(offset);
 329     } else {
 330       adr_type = adr_type->add_offset(offset);
 331     }
 332   } else {
 333     if (!ac->modifies(offset, offset, &_igvn, true)) {
 334       // If the arraycopy does not copy to this offset, we cannot generate a rematerialization load for it.
 335       return nullptr;
 336     }
 337     assert(ac->in(ArrayCopyNode::Dest) == alloc->result_cast(), "arraycopy destination should be allocation's result");
 338     uint shift = exact_log2(type2aelembytes(bt));
 339     Node* src_pos = ac->in(ArrayCopyNode::SrcPos);
 340     Node* dest_pos = ac->in(ArrayCopyNode::DestPos);
 341     const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int();
 342     const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int();
 343 
 344     adr_type = _igvn.type(base)->is_aryptr();
 345     if (((const TypeAryPtr*)adr_type)->is_flat()) {
 346       shift = ((const TypeAryPtr*)adr_type)->flat_log_elem_size();
 347     }
 348     if (src_pos_t->is_con() && dest_pos_t->is_con()) {
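           // With constant positions, the source element that filled this
           // destination offset lies exactly (src_pos - dest_pos) elements away.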
 349       intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset;
 350       adr = _igvn.transform(AddPNode::make_with_base(base, base, _igvn.MakeConX(off)));
 351       adr_type = _igvn.type(adr)->is_aryptr();
 352       assert(adr_type == _igvn.type(base)->is_aryptr()->add_field_offset_and_offset(off), "incorrect address type");
 353       if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
 354         // Don't emit a new load from src if src == dst but try to get the value from memory instead
 355         return value_from_mem(ac, ctl, ft, ftype, (const TypeAryPtr*)adr_type, alloc);
 356       }
 357     } else {
 358       if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
 359         // Non-constant offset in the array: we can't statically
 360         // determine the value
 361         return nullptr;
 362       }
 363       Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
 364 #ifdef _LP64
 365       diff = _igvn.transform(new ConvI2LNode(diff));
 366 #endif
 367       diff = _igvn.transform(new LShiftXNode(diff, _igvn.intcon(shift)));
 368 
 369       Node* off = _igvn.transform(new AddXNode(_igvn.MakeConX(offset), diff));
 370       adr = _igvn.transform(AddPNode::make_with_base(base, base, off));
 371       // In the case of a flat inline type array, each field has its
 372       // own slice so we need to extract the field being accessed from
 373       // the address computation
 374       adr_type = ((TypeAryPtr*)adr_type)->add_field_offset_and_offset(offset)->add_offset(Type::OffsetBot)->is_aryptr();
 375       adr = _igvn.transform(new CastPPNode(ctl, adr, adr_type));

 376     }
 377   }
 378   assert(adr != nullptr && adr_type != nullptr, "sanity");
 379 
 380   // Create the rematerialization load ...
 381   MergeMemNode* mergemem = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem();
 382   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 383   Node* res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemem, adr, adr_type, type, bt);
 384   assert(res != nullptr, "load should have been created");
 385 
 386   // ... and ensure that pinning the rematerialization load inside the uncommon path is safe.
 387   if (mem != ac->memory() && ctl->is_Proj() && ctl->as_Proj()->is_uncommon_trap_proj() && res->is_Load() &&
 388       has_interfering_store(ac, res->as_Load(), &_igvn)) {
 389     // Not safe: use control and memory from the arraycopy to ensure correct memory state.
 390     _igvn.remove_dead_node(res, PhaseIterGVN::NodeOrigin::Graph); // Clean up the unusable rematerialization load.
 391     return make_arraycopy_load(ac, offset, ac->control(), ac->memory(), ft, ftype, alloc);
 392   }
 393 
 394   if (ftype->isa_narrowoop()) {
 395     // PhaseMacroExpand::scalar_replacement adds DecodeN nodes
 396     res = _igvn.transform(new EncodePNode(res, ftype));
 397   }
 398   return res;
 399 }
 400 
 401 //
 402 // Given a Memory Phi, compute a value Phi containing the values from stores
 403 // on the input paths.
 404 // Note: this function is recursive; its depth is limited by the "level" argument.
 405 // Returns the computed Phi, or null if it cannot compute it.
 406 Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level) {
 407   assert(mem->is_Phi(), "sanity");
 408   int alias_idx = C->get_alias_index(adr_t);
 409   int offset = adr_t->flat_offset();
 410   int instance_id = adr_t->instance_id();
 411 
 412   // Check if an appropriate value phi already exists.
 413   Node* region = mem->in(0);
 414   for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
 415     Node* phi = region->fast_out(k);
 416     if (phi->is_Phi() && phi != mem &&
 417         phi->as_Phi()->is_same_inst_field(phi_type, (int)mem->_idx, instance_id, alias_idx, offset)) {
 418       return phi;
 419     }
 420   }
 421   // Check if an appropriate new value phi already exists.
 422   Node* new_phi = value_phis->find(mem->_idx);
 423   if (new_phi != nullptr)
 424     return new_phi;
 425 
 426   if (level <= 0) {
 427     return nullptr; // Give up: phi tree too deep
 428   }
 429   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
 430   Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
 431   assert(alloc_mem != nullptr, "Allocation without a memory projection.");
 432 
 433   uint length = mem->req();
 434   GrowableArray <Node *> values(length, length, nullptr);
 435 
 436   // create a new Phi for the value
 437   PhiNode *phi = new PhiNode(mem->in(0), phi_type, nullptr, mem->_idx, instance_id, alias_idx, offset);
 438   transform_later(phi);
 439   value_phis->push(phi, mem->_idx);
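       // Register the new phi before walking the inputs so that cycles in the
       // memory graph map onto cycles among the value phis.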
 440 
 441   for (uint j = 1; j < length; j++) {
 442     Node *in = mem->in(j);
 443     if (in == nullptr || in->is_top()) {
 444       values.at_put(j, in);
 445     } else {
 446       Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
 447       if (val == start_mem || val == alloc_mem) {
 448         // hit a sentinel, return appropriate value
 449         Node* init_value = value_from_alloc(ft, adr_t, alloc);
 450         if (init_value == nullptr) {
 451           return nullptr;
 452         } else {
 453           values.at_put(j, init_value);
 454           continue;
 455         }
 456       }
 457       if (val->is_Initialize()) {
 458         val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
 459       }
 460       if (val == nullptr) {
 461         return nullptr;  // can't find a value on this path
 462       }
 463       if (val == mem) {
 464         values.at_put(j, mem);
 465       } else if (val->is_Store()) {
 466         Node* n = val->in(MemNode::ValueIn);
 467         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 468         n = bs->step_over_gc_barrier(n);
 469         if (is_subword_type(ft)) {
 470           n = Compile::narrow_value(ft, n, phi_type, &_igvn, true);
 471         }
 472         values.at_put(j, n);
 473       } else if (val->is_Proj() && val->in(0) == alloc) {
 474         Node* init_value = value_from_alloc(ft, adr_t, alloc);
 475         if (init_value == nullptr) {
 476           return nullptr;
 477         } else {
 478           values.at_put(j, init_value);
 479         }
 480       } else if (val->is_Phi()) {
 481         val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
 482         if (val == nullptr) {
 483           return nullptr;
 484         }
 485         values.at_put(j, val);
 486       } else if (val->Opcode() == Op_SCMemProj) {
 487         assert(val->in(0)->is_LoadStore() ||
 488                val->in(0)->Opcode() == Op_EncodeISOArray ||
 489                val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity");
 490         assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
 491         return nullptr;
 492       } else if (val->is_ArrayCopy()) {
 493         Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), val->in(TypeFunc::Memory), ft, phi_type, alloc);
 494         if (res == nullptr) {
 495           return nullptr;
 496         }
 497         values.at_put(j, res);
 498       } else if (val->is_top()) {
 499         // This indicates that this path into the phi is dead. Top will eventually also propagate into the Region.
 500         // IGVN will clean this up later.
 501         values.at_put(j, val);
 502       } else {
 503         DEBUG_ONLY( val->dump(); )
 504         assert(false, "unknown node on this path");
 505         return nullptr;  // unknown node on this path
 506       }
 507     }
 508   }
 509   // Set Phi's inputs
 510   for (uint j = 1; j < length; j++) {
 511     if (values.at(j) == mem) {
 512       phi->init_req(j, phi);
 513     } else {
 514       phi->init_req(j, values.at(j));
 515     }
 516   }
 517   return phi;
 518 }
 519 
 520 // Extract the initial value of a field in an allocation
 521 Node* PhaseMacroExpand::value_from_alloc(BasicType ft, const TypeOopPtr* adr_t, AllocateNode* alloc) {
 522   Node* init_value = alloc->in(AllocateNode::InitValue);
 523   if (init_value == nullptr) {
 524     assert(alloc->in(AllocateNode::RawInitValue) == nullptr, "conflicting InitValue and RawInitValue");
 525     return _igvn.zerocon(ft);
 526   }
 527 
 528   const TypeAryPtr* ary_t = adr_t->isa_aryptr();
 529   assert(ary_t != nullptr, "must be a pointer into an array");
 530 
 531   // If this is not a flat array, then it must be an oop array whose elements are init_value
 532   if (ary_t->is_not_flat()) {
 533 #ifdef ASSERT
 534     BasicType init_bt = init_value->bottom_type()->basic_type();
 535     assert(ft == init_bt ||
 536            (!is_java_primitive(ft) && !is_java_primitive(init_bt) && type2aelembytes(ft, true) == type2aelembytes(init_bt, true)) ||
 537            (is_subword_type(ft) && init_bt == T_INT),
 538            "invalid init_value of type %s for field of type %s", type2name(init_bt), type2name(ft));
 539 #endif // ASSERT
 540     return init_value;
 541   }
 542 
 543   assert(ary_t->klass_is_exact() && ary_t->is_flat(), "must be an exact flat array");
 544   assert(ary_t->field_offset().get() != Type::OffsetBot, "unknown offset");
 545   if (init_value->is_EncodeP()) {
 546     init_value = init_value->in(1);
 547   }
 548   // Cannot look through init_value if it is an oop
 549   if (!init_value->is_InlineType()) {
 550     return nullptr;
 551   }
 552 
 553   ciInlineKlass* vk = init_value->bottom_type()->inline_klass();
 554   if (ary_t->field_offset().get() == vk->null_marker_offset_in_payload()) {
 555     init_value = init_value->as_InlineType()->get_null_marker();
 556   } else {
 557     init_value = init_value->as_InlineType()->field_value_by_offset(ary_t->field_offset().get() + vk->payload_offset(), true);
 558   }
 559 
 560   if (ft == T_NARROWOOP) {
 561     assert(init_value->bottom_type()->isa_ptr(), "must be a pointer");
 562     init_value = transform_later(new EncodePNode(init_value, init_value->bottom_type()->make_narrowoop()));
 563   }
 564 
 565 #ifdef ASSERT
 566   BasicType init_bt = init_value->bottom_type()->basic_type();
 567   assert(ft == init_bt ||
 568          (!is_java_primitive(ft) && !is_java_primitive(init_bt) && type2aelembytes(ft, true) == type2aelembytes(init_bt, true)) ||
 569          (is_subword_type(ft) && init_bt == T_INT),
 570          "invalid init_value of type %s for field of type %s", type2name(init_bt), type2name(ft));
 571 #endif // ASSERT
 572 
 573   return init_value;
 574 }
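     //
     // A worked example with a hypothetical layout: for a flat, nullable element
     // type with payload_offset() == 4 and null_marker_offset_in_payload() == 8,
     // a read at field_offset 8 selects the null marker via get_null_marker(),
     // while a read at field_offset 0 extracts the field from the InlineTypeNode
     // via field_value_by_offset(0 + 4, true).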
 575 
 576 // Search for the last value stored into the object's field.
 577 Node* PhaseMacroExpand::value_from_mem(Node* origin, Node* ctl, BasicType ft, const Type* ftype, const TypeOopPtr* adr_t, AllocateNode* alloc) {
 578   assert(adr_t->is_known_instance_field(), "instance required");
 579   int instance_id = adr_t->instance_id();
 580   assert((uint)instance_id == alloc->_idx, "wrong allocation");
 581 
 582   int alias_idx = C->get_alias_index(adr_t);
 583   int offset = adr_t->flat_offset();
 584   Node* orig_mem = origin->in(TypeFunc::Memory);
 585   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
 586   Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
 587   assert(alloc_mem != nullptr, "Allocation without a memory projection.");
 588   VectorSet visited;
 589 
 590   bool done = orig_mem == alloc_mem;
 591   Node *mem = orig_mem;
 592   while (!done) {
 593     if (visited.test_set(mem->_idx)) {
 594       return nullptr;  // found a loop, give up
 595     }
 596     mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
 597     if (mem == start_mem || mem == alloc_mem) {
 598       done = true;  // hit a sentinel, return appropriate init value
 599     } else if (mem->is_Initialize()) {
 600       mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
 601       if (mem == nullptr) {
 602         done = true; // Something went wrong.
 603       } else if (mem->is_Store()) {
 604         const TypePtr* atype = mem->as_Store()->adr_type();
 605         assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
 606         done = true;
 607       }
 608     } else if (mem->is_Store()) {
 609       const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
 610       assert(atype != nullptr, "address type must be oopptr");
 611       assert(C->get_alias_index(atype) == alias_idx &&
 612              atype->is_known_instance_field() && atype->flat_offset() == offset &&
 613              atype->instance_id() == instance_id, "store is correct memory slice");
 614       done = true;
 615     } else if (mem->is_Phi()) {
 616       // try to find a phi's unique input
 617       Node *unique_input = nullptr;
 618       Node *top = C->top();
 619       for (uint i = 1; i < mem->req(); i++) {
 620         Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
 621         if (n == nullptr || n == top || n == mem) {
 622           continue;
 623         } else if (unique_input == nullptr) {
 624           unique_input = n;
 625         } else if (unique_input != n) {
 626           unique_input = top;
 627           break;
 628         }
 629       }
 630       if (unique_input != nullptr && unique_input != top) {
 631         mem = unique_input;
 632       } else {
 633         done = true;
 634       }
 635     } else if (mem->is_ArrayCopy()) {
 636       done = true;
 637     } else if (mem->is_top()) {
 638       // The slice is on a dead path. Returning nullptr would lead to an elimination
 639       // bailout, which we want to avoid. Forwarding top is also legal; IGVN will
 640       // clean things up and remove whatever receives it.
 641       return mem;
 642     } else {
 643       DEBUG_ONLY( mem->dump(); )
 644       assert(false, "unexpected node");
 645     }
 646   }
 647   if (mem != nullptr) {
 648     if (mem == start_mem || mem == alloc_mem) {
 649       // hit a sentinel, return appropriate value
 650       return value_from_alloc(ft, adr_t, alloc);
 651     } else if (mem->is_Store()) {
 652       Node* n = mem->in(MemNode::ValueIn);
 653       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 654       n = bs->step_over_gc_barrier(n);
 655       return n;
 656     } else if (mem->is_Phi()) {
 657       // attempt to produce a Phi reflecting the values on the input paths of the Phi
 658       Node_Stack value_phis(8);
 659       Node* phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
 660       if (phi != nullptr) {
 661         return phi;
 662       } else {
 663         // Kill all new Phis
 664         while(value_phis.is_nonempty()) {
 665           Node* n = value_phis.node();
 666           _igvn.replace_node(n, C->top());
 667           value_phis.pop();
 668         }
 669       }
 670     } else if (mem->is_ArrayCopy()) {
 671       // Rematerialize the scalar-replaced array. If possible, pin the loads to the uncommon trap path.
 672       // Check, for each element of the source array, whether it was modified. If not, pin both memory and control to
 673       // the uncommon path. Otherwise, use the control and memory state of the arraycopy. Control and memory state must
 674       // come from the same source to prevent anti-dependence problems in the backend.
 675       ArrayCopyNode* ac = mem->as_ArrayCopy();
 676       Node* ac_ctl = ac->control();
 677       Node* ac_mem = ac->memory();
 678       if (ctl->is_Proj() && ctl->as_Proj()->is_uncommon_trap_proj()) {
 679         // pin the loads in the uncommon trap path
 680         ac_ctl = ctl;
 681         ac_mem = orig_mem;
 682       }
 683       return make_arraycopy_load(ac, offset, ac_ctl, ac_mem, ft, ftype, alloc);
 684     }
 685   }
 686   // Something went wrong.
 687   return nullptr;
 688 }
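     //
     // The chain walk above can be pictured as follows (a sketch): starting at the
     // safepoint's memory input, scan_mem_chain() repeatedly skips memory nodes that
     // cannot affect this slice until one of the following is reached:
     //   start_mem / alloc_mem -> the field still holds its initial value
     //   Store                 -> the answer is the stored value (stepping over GC barriers)
     //   Phi                   -> merge per-path values (see value_from_mem_phi)
     //   ArrayCopy             -> load the value from the copy's source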
 689 
 690 // Search the last value stored into the inline type's fields (for flat arrays).
 691 Node* PhaseMacroExpand::inline_type_from_mem(ciInlineKlass* vk, const TypeAryPtr* elem_adr_type, int elem_idx, int offset_in_element, bool null_free, AllocateNode* alloc, SafePointNode* sfpt) {
 692   auto report_failure = [&](int field_offset_in_element) {
 693 #ifndef PRODUCT
 694     if (PrintEliminateAllocations) {
 695       ciInlineKlass* elem_klass = elem_adr_type->elem()->inline_klass();
 696       int offset = field_offset_in_element + elem_klass->payload_offset();
 697       ciField* flattened_field = elem_klass->get_field_by_offset(offset, false);
 698       assert(flattened_field != nullptr, "must have a field of type %s at offset %d", elem_klass->name()->as_utf8(), offset);
 699       tty->print("=== At SafePoint node %d can't find value of field [%s] of array element [%d]", sfpt->_idx, flattened_field->name()->as_utf8(), elem_idx);
 700       tty->print(", which prevents elimination of: ");
 701       alloc->dump();
 702     }
 703 #endif // PRODUCT
 704   };
 705 
 706   // Create a new InlineTypeNode and retrieve the field values from memory
 707   InlineTypeNode* vt = InlineTypeNode::make_uninitialized(_igvn, vk, false);
 708   transform_later(vt);
 709   if (null_free) {
 710     vt->set_null_marker(_igvn);
 711   } else {
 712     int nm_offset_in_element = offset_in_element + vk->null_marker_offset_in_payload();
 713     const TypeAryPtr* nm_adr_type = elem_adr_type->with_field_offset(nm_offset_in_element);
 714     Node* nm_value = value_from_mem(sfpt, sfpt->control(), T_BOOLEAN, TypeInt::BOOL, nm_adr_type, alloc);
 715     if (nm_value != nullptr) {
 716       vt->set_null_marker(_igvn, nm_value);
 717     } else {
 718       report_failure(nm_offset_in_element);
 719       return nullptr;
 720     }
 721   }
 722 
 723   for (int i = 0; i < vk->nof_declared_nonstatic_fields(); ++i) {
 724     ciField* field = vt->field(i);
 725     ciType* field_type = field->type();
 726     int field_offset_in_element = offset_in_element + field->offset_in_bytes() - vk->payload_offset();
 727     Node* field_value = nullptr;
 728     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 729     if (field->is_flat()) {
 730       field_value = inline_type_from_mem(field_type->as_inline_klass(), elem_adr_type, elem_idx, field_offset_in_element, field->is_null_free(), alloc, sfpt);
 731     } else {
 732       const Type* ft = Type::get_const_type(field_type);
 733       BasicType bt = type2field[field_type->basic_type()];
 734       if (UseCompressedOops && !is_java_primitive(bt)) {
 735         ft = ft->make_narrowoop();
 736         bt = T_NARROWOOP;
 737       }
 738       // Each inline type field has its own memory slice
 739       const TypeAryPtr* field_adr_type = elem_adr_type->with_field_offset(field_offset_in_element);
 740       field_value = value_from_mem(sfpt, sfpt->control(), bt, ft, field_adr_type, alloc);
 741       if (field_value == nullptr) {
 742         report_failure(field_offset_in_element);
 743       } else if (ft->isa_narrowoop()) {
 744         assert(UseCompressedOops, "unexpected narrow oop");
 745         if (field_value->is_EncodeP()) {
 746           field_value = field_value->in(1);
 747         } else if (!field_value->is_InlineType()) {
 748           field_value = transform_later(new DecodeNNode(field_value, field_value->get_ptr_type()));
 749         }
 750       }
 751     }
 752     if (field_value != nullptr) {
 753       vt->set_field_value(i, field_value);
 754     } else {
 755       return nullptr;
 756     }
 757   }
 758   return vt;
 759 }
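     //
     // Example with a hypothetical layout: for a nullable flat element at
     // offset_in_element 0, where vk->null_marker_offset_in_payload() == 12 and
     // vk->payload_offset() == 4, the null marker is searched at field offset 12,
     // and a field with offset_in_bytes() == 8 at 0 + 8 - 4 == 4; each lookup uses
     // its own memory slice via with_field_offset().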
 760 
 761 // Check the possibility of scalar replacement.
 762 bool PhaseMacroExpand::can_eliminate_allocation(PhaseIterGVN* igvn, AllocateNode *alloc, GrowableArray <SafePointNode *>* safepoints) {
 763   //  Scan the uses of the allocation to check for anything that would
 764   //  prevent us from eliminating it.
 765   NOT_PRODUCT( const char* fail_eliminate = nullptr; )
 766   DEBUG_ONLY( Node* disq_node = nullptr; )
 767   bool can_eliminate = true;
 768   bool reduce_merge_precheck = (safepoints == nullptr);
 769 
 770   Unique_Node_List worklist;
 771   Node* res = alloc->result_cast();
 772   const TypeOopPtr* res_type = nullptr;
 773   if (res == nullptr) {
 774     // All users were eliminated.
 775   } else if (!res->is_CheckCastPP()) {
 776     NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
 777     can_eliminate = false;
 778   } else {
 779     worklist.push(res);
 780     res_type = igvn->type(res)->isa_oopptr();
 781     if (res_type == nullptr) {
 782       NOT_PRODUCT(fail_eliminate = "Neither instance nor array allocation";)
 783       can_eliminate = false;
 784     } else if (!res_type->klass_is_exact()) {
 785       NOT_PRODUCT(fail_eliminate = "Not an exact type.";)
 786       can_eliminate = false;
 787     } else if (res_type->isa_aryptr()) {
 788       int length = alloc->in(AllocateNode::ALength)->find_int_con(-1);
 789       if (length < 0) {
 790         NOT_PRODUCT(fail_eliminate = "Array's size is not constant";)
 791         can_eliminate = false;
 792       }
 793     }
 794   }
 795 
 796   while (can_eliminate && worklist.size() > 0) {
 797     BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
 798     res = worklist.pop();
 799     for (DUIterator_Fast jmax, j = res->fast_outs(jmax); j < jmax && can_eliminate; j++) {
 800       Node* use = res->fast_out(j);
 801 
 802       if (use->is_AddP()) {
 803         const TypePtr* addp_type = igvn->type(use)->is_ptr();
 804         int offset = addp_type->offset();
 805 
 806         if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
 807           NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
 808           can_eliminate = false;
 809           break;
 810         }
 811         for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
 812                                    k < kmax && can_eliminate; k++) {
 813           Node* n = use->fast_out(k);
 814           if ((n->is_Mem() && n->as_Mem()->is_mismatched_access()) || n->is_LoadFlat() || n->is_StoreFlat()) {
 815             DEBUG_ONLY(disq_node = n);
 816             NOT_PRODUCT(fail_eliminate = "Mismatched access");
 817             can_eliminate = false;
 818           }
 819           if (!n->is_Store() && n->Opcode() != Op_CastP2X && !bs->is_gc_pre_barrier_node(n) && !reduce_merge_precheck) {
 820             DEBUG_ONLY(disq_node = n;)
 821             if (n->is_Load() || n->is_LoadStore()) {
 822               NOT_PRODUCT(fail_eliminate = "Field load";)
 823             } else {
 824               NOT_PRODUCT(fail_eliminate = "Non-store field reference";)
 825             }
 826             can_eliminate = false;
 827           }
 828         }
 829       } else if (use->is_ArrayCopy() &&
 830                  (use->as_ArrayCopy()->is_clonebasic() ||
 831                   use->as_ArrayCopy()->is_arraycopy_validated() ||
 832                   use->as_ArrayCopy()->is_copyof_validated() ||
 833                   use->as_ArrayCopy()->is_copyofrange_validated()) &&
 834                  use->in(ArrayCopyNode::Dest) == res) {
 835         // ok to eliminate
 836       } else if (use->is_ReachabilityFence() && OptimizeReachabilityFences) {
 837         // ok to eliminate
 838       } else if (use->is_SafePoint()) {
 839         SafePointNode* sfpt = use->as_SafePoint();
 840         if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
 841           // Object is passed as argument.
 842           DEBUG_ONLY(disq_node = use;)
 843           NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
 844           can_eliminate = false;
 845         }
 846         Node* sfptMem = sfpt->memory();
 847         if (sfptMem == nullptr || sfptMem->is_top()) {
 848           DEBUG_ONLY(disq_node = use;)
 849           NOT_PRODUCT(fail_eliminate = "null or TOP memory";)
 850           can_eliminate = false;
 851         } else if (!reduce_merge_precheck) {
 852           assert(!res->is_Phi() || !res->as_Phi()->can_be_inline_type(), "Inline type allocations should not have safepoint uses");
 853           safepoints->append_if_missing(sfpt);
 854         }
 855       } else if (use->is_InlineType() && use->as_InlineType()->get_oop() == res) {
 856         // Look at uses
 857         for (DUIterator_Fast kmax, k = use->fast_outs(kmax); k < kmax; k++) {
 858           Node* u = use->fast_out(k);
 859           if (u->is_InlineType()) {
 860             // Use in flat field can be eliminated
 861             InlineTypeNode* vt = u->as_InlineType();
 862             for (uint i = 0; i < vt->field_count(); ++i) {
 863               if (vt->field_value(i) == use && !vt->field(i)->is_flat()) {
 864                 can_eliminate = false; // Use in non-flat field
 865                 break;
 866               }
 867             }
 868           } else {
 869             // Add other uses to the worklist to process individually
 870             worklist.push(use);
 871           }
 872         }
 873       } else if (use->Opcode() == Op_StoreX && use->in(MemNode::Address) == res) {
 874         // Store to mark word of inline type larval buffer
 875         assert(res_type->is_inlinetypeptr(), "Unexpected store to mark word");
 876       } else if (res_type->is_inlinetypeptr() && (use->Opcode() == Op_MemBarRelease || use->Opcode() == Op_MemBarStoreStore)) {
 877         // Inline type buffer allocations are followed by a membar
 878       } else if (reduce_merge_precheck &&
 879                  (use->is_Phi() || use->is_EncodeP() ||
 880                   use->Opcode() == Op_MemBarRelease ||
 881                   (UseStoreStoreForCtor && use->Opcode() == Op_MemBarStoreStore))) {
 882         // Nothing to do
 883       } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
 884         if (use->is_Phi()) {
 885           if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
 886             NOT_PRODUCT(fail_eliminate = "Object is return value";)
 887           } else {
 888             NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
 889           }
 890           DEBUG_ONLY(disq_node = use;)
 891         } else {
 892           if (use->Opcode() == Op_Return) {
 893             NOT_PRODUCT(fail_eliminate = "Object is return value";)
 894           } else {
 895             NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
 896           }
 897           DEBUG_ONLY(disq_node = use;)
 898         }
 899         can_eliminate = false;
 900       } else {
 901         assert(use->Opcode() == Op_CastP2X, "should be");
 902         assert(!use->has_out_with(Op_OrL), "should have been removed because oop is never null");
 903       }
 904     }
 905   }
 906 
 907 #ifndef PRODUCT
 908   if (PrintEliminateAllocations && safepoints != nullptr) {
 909     if (can_eliminate) {
 910       tty->print("Scalar ");
 911       if (res == nullptr)
 912         alloc->dump();
 913       else
 914         res->dump();
 915     } else {
 916       tty->print("NotScalar (%s)", fail_eliminate);
 917       if (res == nullptr)
 918         alloc->dump();
 919       else
 920         res->dump();
 921 #ifdef ASSERT
 922       if (disq_node != nullptr) {
 923           tty->print("  >>>> ");
 924           disq_node->dump();
 925       }
 926 #endif /*ASSERT*/
 927     }
 928   }
 929 
 930   if (TraceReduceAllocationMerges && !can_eliminate && reduce_merge_precheck) {
 931     tty->print_cr("\tCan't eliminate allocation because '%s': ", fail_eliminate != nullptr ? fail_eliminate : "");
 932     DEBUG_ONLY(if (disq_node != nullptr) disq_node->dump();)
 933   }
 934 #endif
 935   return can_eliminate;

1009     // CheckCastPP result was not updated in the stack slot, and so
1010     // we ended up using the CastPP. That means that the field knows
1011     // that it should get an oop from an interface, but the value lost
1012     // that information, and so it is not a subtype.
1013     // There may be other issues, feel free to investigate further!
1014     if (!is_java_primitive(value_bt)) { return; }
1015 
1016     tty->print_cr("value not compatible for field: %s vs %s",
1017                   type2name(value_bt),
1018                   type2name(field_bt));
1019     tty->print("value_type: ");
1020     value_type->dump();
1021     tty->cr();
1022     tty->print("field_type: ");
1023     field_type->dump();
1024     tty->cr();
1025     assert(false, "value_type does not fit field_type");
1026   }
1027 #endif
1028 
1029 void PhaseMacroExpand::process_field_value_at_safepoint(const Type* field_type, Node* field_val, SafePointNode* sfpt, Unique_Node_List* value_worklist) {
1030   if (UseCompressedOops && field_type->isa_narrowoop()) {
1031     // Enable the "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
1032     // to be able to scalar replace the allocation.
1033     if (field_val->is_EncodeP()) {
1034       field_val = field_val->in(1);
1035     } else if (!field_val->is_InlineType()) {
1036       field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
1037     }
1038   }
1039 
1040   // Keep track of inline types to scalarize them later
1041   if (field_val->is_InlineType()) {
1042     value_worklist->push(field_val);
1043   } else if (field_val->is_Phi()) {
1044     PhiNode* phi = field_val->as_Phi();
1045     // Eagerly replace inline type phis now since we could be removing an inline type allocation
1046     // whose fields must all be scalarized in safepoints.
1047     field_val = phi->try_push_inline_types_down(&_igvn, true);
1048     if (field_val->is_InlineType()) {
1049       value_worklist->push(field_val);
1050     }
1051   }
1052   DEBUG_ONLY(verify_type_compatability(field_val->bottom_type(), field_type);)
1053   sfpt->add_req(field_val);
1054 }
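     //
     // The narrow oop handling above enables, e.g. (a sketch):
     //   field_val == EncodeP(CheckCastPP(Allocate))  -->  CheckCastPP(Allocate)
     // so a referenced allocation can still be scalar replaced. A value that is
     // neither an EncodeP nor an InlineTypeNode is wrapped in a DecodeN instead,
     // keeping the recorded debug value consistent with the narrow field type.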
1055 
1056 bool PhaseMacroExpand::add_array_elems_to_safepoint(AllocateNode* alloc, const TypeAryPtr* array_type, SafePointNode* sfpt, Unique_Node_List* value_worklist) {
1057   const Type* elem_type = array_type->elem();
1058   BasicType basic_elem_type = elem_type->array_element_basic_type();
1059 
1060   intptr_t elem_size;
1061   uint header_size;
1062   if (array_type->is_flat()) {
1063     elem_size = array_type->flat_elem_size();
1064     header_size = arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT);
1065   } else {
1066     elem_size = type2aelembytes(basic_elem_type);
1067     header_size = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
1068   }
1069 
1070   int n_elems = alloc->in(AllocateNode::ALength)->get_int();
1071   for (int elem_idx = 0; elem_idx < n_elems; elem_idx++) {
1072     intptr_t elem_offset = header_size + elem_idx * elem_size;
1073     const TypeAryPtr* elem_adr_type = array_type->with_offset(elem_offset);
1074     Node* elem_val;
1075     if (array_type->is_flat()) {
1076       ciInlineKlass* elem_klass = elem_type->inline_klass();
1077       assert(elem_klass->maybe_flat_in_array(), "must be flat in array");
1078       elem_val = inline_type_from_mem(elem_klass, elem_adr_type, elem_idx, 0, array_type->is_null_free(), alloc, sfpt);
1079     } else {
1080       elem_val = value_from_mem(sfpt, sfpt->control(), basic_elem_type, elem_type, elem_adr_type, alloc);
1081 #ifndef PRODUCT
1082       if (PrintEliminateAllocations && elem_val == nullptr) {
1083         tty->print("=== At SafePoint node %d can't find value of array element [%d]", sfpt->_idx, elem_idx);
1084         tty->print(", which prevents elimination of: ");
1085         alloc->dump();
1086       }
1087 #endif // PRODUCT
1088     }
1089     if (elem_val == nullptr) {
1090       return false;
1091     }
1092 
1093     process_field_value_at_safepoint(elem_type, elem_val, sfpt, value_worklist);
1094   }
1095 
1096   return true;
1097 }
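     //
     // Element addressing, by example (hypothetical numbers): for an array with
     // header_size == 16 and elem_size == 4, element 3 lives at offset
     // 16 + 3 * 4 == 28, so its address type is array_type->with_offset(28).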
1098 
1099 // Recursively adds all flattened fields of a type 'iklass' inside 'base' to 'sfpt'.
1100 // 'offset_minus_header' refers to the offset of the payload of 'iklass' inside 'base' minus the
1101 // payload offset of 'iklass'. If 'base' is of type 'iklass' then 'offset_minus_header' == 0.
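     // For example (hypothetical layout): a flat field stored at offset 12 in 'base'
     // whose type has payload_offset() == 4 gives offset_minus_header == 12 - 4 == 8,
     // so a field of 'iklass' with offset_in_bytes() == 6 is searched at 8 + 6 == 14.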
1102 bool PhaseMacroExpand::add_inst_fields_to_safepoint(ciInstanceKlass* iklass, AllocateNode* alloc, Node* base, int offset_minus_header, SafePointNode* sfpt, Unique_Node_List* value_worklist) {
1103   const TypeInstPtr* base_type = _igvn.type(base)->is_instptr();
1104   auto report_failure = [&](int offset) {
1105 #ifndef PRODUCT
1106     if (PrintEliminateAllocations) {
1107       ciInstanceKlass* base_klass = base_type->instance_klass();
1108       ciField* flattened_field = base_klass->get_field_by_offset(offset, false);
1109       assert(flattened_field != nullptr, "must have a field of type %s at offset %d", base_klass->name()->as_utf8(), offset);
1110       tty->print("=== At SafePoint node %d can't find value of field: ", sfpt->_idx);
1111       flattened_field->print();
1112       int field_idx = C->alias_type(flattened_field)->index();
1113       tty->print(" (alias_idx=%d)", field_idx);
1114       tty->print(", which prevents elimination of: ");
1115       base->dump();
1116     }
1117 #endif // PRODUCT
1118   };
1119 
1120   for (int i = 0; i < iklass->nof_declared_nonstatic_fields(); i++) {
1121     ciField* field = iklass->declared_nonstatic_field_at(i);
1122     if (field->is_flat()) {
1123       ciInlineKlass* fvk = field->type()->as_inline_klass();
1124       int field_offset_minus_header = offset_minus_header + field->offset_in_bytes() - fvk->payload_offset();
1125       bool success = add_inst_fields_to_safepoint(fvk, alloc, base, field_offset_minus_header, sfpt, value_worklist);
1126       if (!success) {
1127         return false;
1128       }
1129 
1130       // The null marker of a field is added right after we scalarize that field
1131       if (!field->is_null_free()) {
1132         int nm_offset = offset_minus_header + field->null_marker_offset();
1133         Node* null_marker = value_from_mem(sfpt, sfpt->control(), T_BOOLEAN, TypeInt::BOOL, base_type->with_offset(nm_offset), alloc);
1134         if (null_marker == nullptr) {
1135           report_failure(nm_offset);
1136           return false;
1137         }
1138         process_field_value_at_safepoint(TypeInt::BOOL, null_marker, sfpt, value_worklist);
1139       }
1140 
1141       continue;
1142     }
1143 
1144     int offset = offset_minus_header + field->offset_in_bytes();
1145     ciType* elem_type = field->type();
1146     BasicType basic_elem_type = field->layout_type();
1147 
1148     const Type* field_type;
1149     if (is_reference_type(basic_elem_type)) {
1150       if (!elem_type->is_loaded()) {
1151         field_type = TypeInstPtr::BOTTOM;
1152       } else {
1153         field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
1154       }
1155       if (UseCompressedOops) {
1156         field_type = field_type->make_narrowoop();
1157         basic_elem_type = T_NARROWOOP;
1158       }
1159     } else {
1160       field_type = Type::get_const_basic_type(basic_elem_type);
1161     }
1162 
1163     const TypeInstPtr* field_addr_type = base_type->add_offset(offset)->isa_instptr();
1164     Node* field_val = value_from_mem(sfpt, sfpt->control(), basic_elem_type, field_type, field_addr_type, alloc);
1165     if (field_val == nullptr) {
1166       report_failure(offset);
1167       return false;
1168     }
1169     process_field_value_at_safepoint(field_type, field_val, sfpt, value_worklist);
1170   }
1171 
1172   return true;
1173 }
1174 
1175 SafePointScalarObjectNode* PhaseMacroExpand::create_scalarized_object_description(AllocateNode* alloc, SafePointNode* sfpt,
1176                                                                                   Unique_Node_List* value_worklist) {
1177   assert(sfpt->jvms()->endoff() == sfpt->req(), "no extra edges past debug info allowed");
1178 
1179   // Fields of scalar objs are referenced only at the end
1180   // of regular debuginfo at the last (youngest) JVMS.
1181   // Record relative start index.
1182   ciInstanceKlass* iklass    = nullptr;
1183   const TypeOopPtr* res_type = nullptr;
1184   int nfields                = 0;
1185   uint first_ind             = (sfpt->req() - sfpt->jvms()->scloff());
1186   Node* res                  = alloc->result_cast();
1187 
1188   assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");
1189   assert(sfpt->jvms() != nullptr, "missed JVMS");
1190   uint before_sfpt_req = sfpt->req();
1191 
1192   if (res != nullptr) { // Could be null when there are no users
1193     res_type = _igvn.type(res)->isa_oopptr();
1194 
1195     if (res_type->isa_instptr()) {
1196       // find the fields of the class which will be needed for safepoint debug information
1197       iklass = res_type->is_instptr()->instance_klass();
1198       nfields = iklass->nof_nonstatic_fields();
1199     } else {
1200       // find the array's elements which will be needed for safepoint debug information
1201       nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
1202       assert(nfields >= 0, "must be an array klass.");
1203     }
1204 
1205     if (res->bottom_type()->is_inlinetypeptr()) {
1206       // Nullable inline types have a null marker field which is added to the safepoint when scalarizing them (see
1207       // InlineTypeNode::make_scalar_in_safepoint()). With circular inline types, we stop scalarizing at depth 1
1208       // to avoid an endless recursion. Therefore, we do not have a SafePointScalarObjectNode here, yet.
1209       // We are about to create a SafePointScalarObjectNode as if this were a normal object. Add an additional int input
1210       // with value 1 which sets the null marker to true to indicate that the object is always non-null. This input is
1211       // checked later in PhaseOutput::FillLocArray() for inline types.
1212       sfpt->add_req(_igvn.intcon(1));
1213     }
1214   }
1215 
1216   SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(res_type, alloc, first_ind, sfpt->jvms()->depth(), nfields);
1217   sobj->init_req(0, C->root());
1218   transform_later(sobj);
1219 
1220   if (res == nullptr) {
1221     sfpt->jvms()->set_endoff(sfpt->req());
1222     return sobj;
1223   }
1224 
1225   bool success;
1226   if (iklass == nullptr) {
1227     success = add_array_elems_to_safepoint(alloc, res_type->is_aryptr(), sfpt, value_worklist);
1228   } else {
1229     success = add_inst_fields_to_safepoint(iklass, alloc, res, 0, sfpt, value_worklist);
1230   }
1231 
1232   // We weren't able to find a value for this field; remove all the fields added to the safepoint.
1233   if (!success) {
1234     for (uint i = sfpt->req() - 1; i >= before_sfpt_req; i--) {
1235       sfpt->del_req(i);
1236     }
1237     _igvn._worklist.push(sfpt);
1238     return nullptr;
1239   }
1240 
1241   sfpt->jvms()->set_endoff(sfpt->req());
1242   return sobj;
1243 }
1244 
1245 // Do scalar replacement.
1246 bool PhaseMacroExpand::scalar_replacement(AllocateNode* alloc, GrowableArray<SafePointNode*>& safepoints) {
1247   GrowableArray<SafePointNode*> safepoints_done;
1248   Node* res = alloc->result_cast();
1249   assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");
1250   const TypeOopPtr* res_type = nullptr;
1251   if (res != nullptr) { // Could be null when there are no users
1252     res_type = _igvn.type(res)->isa_oopptr();
1253   }
1254 
1255   // Process the safepoint uses
1256   Unique_Node_List value_worklist;
1257   while (safepoints.length() > 0) {
1258     SafePointNode* sfpt = safepoints.pop();
1259 
1260     SafePointNode::NodeEdgeTempStorage non_debug_edges_worklist(igvn());
1261 
1262     // All sfpt inputs are implicitly included into debug info during the scalarization process below.
1263     // Keep non-debug inputs separately, so they stay non-debug.
1264     sfpt->remove_non_debug_edges(non_debug_edges_worklist);
1265 
1266     SafePointScalarObjectNode* sobj = create_scalarized_object_description(alloc, sfpt, &value_worklist);
1267 
1268     if (sobj == nullptr) {
1269       sfpt->restore_non_debug_edges(non_debug_edges_worklist);
1270       undo_previous_scalarizations(safepoints_done, alloc);
1271       return false;
1272     }
1273 
1274     // Now make a pass over the debug information replacing any references
1275     // to the allocated object with "sobj"
1276     JVMState *jvms = sfpt->jvms();
1277     sfpt->replace_edges_in_range(res, sobj, jvms->debug_start(), jvms->debug_end(), &_igvn);
1278     non_debug_edges_worklist.remove_edge_if_present(res); // drop scalarized input from non-debug info
1279     sfpt->restore_non_debug_edges(non_debug_edges_worklist);
1280     _igvn._worklist.push(sfpt);
1281 
1282     // keep it for rollback
1283     safepoints_done.append_if_missing(sfpt);
1284   }
1285   // Scalarize inline types that were added to the safepoint.
1286   // Don't allow linking a constant oop (if available) for flat array elements
1287   // because Deoptimization::reassign_flat_array_elements needs field values.
1288   bool allow_oop = (res_type != nullptr) && !res_type->is_flat();
1289   for (uint i = 0; i < value_worklist.size(); ++i) {
1290     InlineTypeNode* vt = value_worklist.at(i)->as_InlineType();
1291     vt->make_scalar_in_safepoints(&_igvn, allow_oop);
1292   }
1293   return true;
1294 }
1295 
1296 static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
1297   Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
1298   Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory);
1299   if (ctl_proj != nullptr) {
1300     igvn.replace_node(ctl_proj, n->in(0));
1301   }
1302   if (mem_proj != nullptr) {
1303     igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
1304   }
1305 }
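     //
     // Typical use, sketched: for a MemBar M with
     //   ctl: Proj(M, Control),  mem: Proj(M, Memory)
     // disconnect_projections(M, igvn) rewires the projections' users directly to
     // M's control and memory inputs, typically leaving M dead for IGVN to collect.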
1306 
1307 // Process users of eliminated allocation.
1308 void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc, bool inline_alloc) {
1309   Unique_Node_List worklist;
1310   Node* res = alloc->result_cast();
1311   if (res != nullptr) {
1312     worklist.push(res);
1313   }
1314   while (worklist.size() > 0) {
1315     res = worklist.pop();
1316     for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
1317       Node *use = res->last_out(j);
1318       uint oc1 = res->outcnt();
1319 
1320       if (use->is_AddP()) {
1321         for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
1322           Node *n = use->last_out(k);
1323           uint oc2 = use->outcnt();
1324           if (n->is_Store()) {
1325             for (DUIterator_Fast pmax, p = n->fast_outs(pmax); p < pmax; p++) {
1326               MemBarNode* mb = n->fast_out(p)->isa_MemBar();
1327               if (mb != nullptr && mb->req() <= MemBarNode::Precedent && mb->in(MemBarNode::Precedent) == n) {
1328                 // MemBarVolatiles should have been removed by MemBarNode::Ideal() for non-inline allocations
1329                 assert(inline_alloc, "MemBarVolatile should be eliminated for non-escaping object");
1330                 mb->remove(&_igvn);
1331               }
1332             }
1333             _igvn.replace_node(n, n->in(MemNode::Memory));
1334           } else {
1335             eliminate_gc_barrier(n);
1336           }
1337           k -= (oc2 - use->outcnt());
1338         }
1339         _igvn.remove_dead_node(use, PhaseIterGVN::NodeOrigin::Graph);
1340       } else if (use->is_ArrayCopy()) {
1341         // Disconnect ArrayCopy node
1342         ArrayCopyNode* ac = use->as_ArrayCopy();
1343         if (ac->is_clonebasic()) {
1344           Node* membar_after = ac->proj_out(TypeFunc::Control)->unique_ctrl_out();
1345           disconnect_projections(ac, _igvn);
1346           assert(alloc->in(TypeFunc::Memory)->is_Proj() && alloc->in(TypeFunc::Memory)->in(0)->Opcode() == Op_MemBarCPUOrder, "mem barrier expected before allocation");
1347           Node* membar_before = alloc->in(TypeFunc::Memory)->in(0);
1348           disconnect_projections(membar_before->as_MemBar(), _igvn);
1349           if (membar_after->is_MemBar()) {
1350             disconnect_projections(membar_after->as_MemBar(), _igvn);
1351           }
1352         } else {
1353           assert(ac->is_arraycopy_validated() ||
1354                  ac->is_copyof_validated() ||
1355                  ac->is_copyofrange_validated(), "unsupported");
1356           CallProjections* callprojs = ac->extract_projections(true);
1357 
1358           _igvn.replace_node(callprojs->fallthrough_ioproj, ac->in(TypeFunc::I_O));
1359           _igvn.replace_node(callprojs->fallthrough_memproj, ac->in(TypeFunc::Memory));
1360           _igvn.replace_node(callprojs->fallthrough_catchproj, ac->in(TypeFunc::Control));
1361 
1362           // Set control to top. IGVN will remove the remaining projections
1363           ac->set_req(0, top());
1364           ac->replace_edge(res, top(), &_igvn);
1365 
1366           // Disconnect src right away: it can help find new
1367           // opportunities for allocation elimination
1368           Node* src = ac->in(ArrayCopyNode::Src);
1369           ac->replace_edge(src, top(), &_igvn);
1370           // src can be top at this point if src and dest of the
1371           // arraycopy were the same
1372           if (src->outcnt() == 0 && !src->is_top()) {
1373             _igvn.remove_dead_node(src, PhaseIterGVN::NodeOrigin::Graph);
1374           }
1375         }
1376         _igvn._worklist.push(ac);
1377       } else if (use->is_InlineType()) {
1378         assert(use->as_InlineType()->get_oop() == res, "unexpected inline type ptr use");
1379         // Cut off oop input and remove known instance id from type
1380         _igvn.rehash_node_delayed(use);
1381         use->as_InlineType()->set_oop(_igvn, _igvn.zerocon(T_OBJECT));
1382         use->as_InlineType()->set_is_buffered(_igvn, false);
1383         const TypeOopPtr* toop = _igvn.type(use)->is_oopptr()->cast_to_instance_id(TypeOopPtr::InstanceBot);
1384         _igvn.set_type(use, toop);
1385         use->as_InlineType()->set_type(toop);
1386         // Process users
1387         for (DUIterator_Fast kmax, k = use->fast_outs(kmax); k < kmax; k++) {
1388           Node* u = use->fast_out(k);
1389           if (!u->is_InlineType() && !u->is_StoreFlat()) {
1390             worklist.push(u);
1391           }
1392         }
1393       } else if (use->Opcode() == Op_StoreX && use->in(MemNode::Address) == res) {
1394         // Store to mark word of inline type larval buffer
1395         assert(inline_alloc, "Unexpected store to mark word");
1396         _igvn.replace_node(use, use->in(MemNode::Memory));
1397       } else if (use->Opcode() == Op_MemBarRelease || use->Opcode() == Op_MemBarStoreStore) {
1398         // Inline type buffer allocations are followed by a membar
1399         assert(inline_alloc, "Unexpected MemBarRelease");
1400         use->as_MemBar()->remove(&_igvn);
1401       } else if (use->is_ReachabilityFence() && OptimizeReachabilityFences) {
1402         use->as_ReachabilityFence()->clear_referent(_igvn); // redundant fence; will be removed during IGVN
1403       } else {
1404         eliminate_gc_barrier(use);
1405       }
1406       j -= (oc1 - res->outcnt());
1407     }
1408     assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
1409     _igvn.remove_dead_node(res, PhaseIterGVN::NodeOrigin::Graph);
1410   }
1411 
1412   //
1413   // Process other users of allocation's projections
1414   //
1415   if (_callprojs->resproj[0] != nullptr && _callprojs->resproj[0]->outcnt() != 0) {
1416     // First disconnect stores captured by Initialize node.
1417     // If Initialize node is eliminated first in the following code,
1418     // it will kill such stores and DUIterator_Last will assert.
1419     for (DUIterator_Fast jmax, j = _callprojs->resproj[0]->fast_outs(jmax);  j < jmax; j++) {
1420       Node* use = _callprojs->resproj[0]->fast_out(j);
1421       if (use->is_AddP()) {
1422         // raw memory addresses used only by the initialization
1423         _igvn.replace_node(use, C->top());
1424         --j; --jmax;
1425       }
1426     }
1427     for (DUIterator_Last jmin, j = _callprojs->resproj[0]->last_outs(jmin); j >= jmin; ) {
1428       Node* use = _callprojs->resproj[0]->last_out(j);
1429       uint oc1 = _callprojs->resproj[0]->outcnt();
1430       if (use->is_Initialize()) {
1431         // Eliminate Initialize node.
1432         InitializeNode *init = use->as_Initialize();
1433         Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
1434         if (ctrl_proj != nullptr) {
1435           _igvn.replace_node(ctrl_proj, init->in(TypeFunc::Control));
1436 #ifdef ASSERT
1437           // If the InitializeNode has no memory out, it will die, and tmp will become null
1438           Node* tmp = init->in(TypeFunc::Control);
1439           assert(tmp == nullptr || tmp == _callprojs->fallthrough_catchproj, "allocation control projection");
1440 #endif
1441         }
1442         Node* mem = init->in(TypeFunc::Memory);
1443 #ifdef ASSERT
1444         if (init->number_of_projs(TypeFunc::Memory) > 0) {
1445           if (mem->is_MergeMem()) {
1446             assert(mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == _callprojs->fallthrough_memproj, "allocation memory projection");
1447           } else {
1448             assert(mem == _callprojs->fallthrough_memproj, "allocation memory projection");
1449           }
1450         }
1451 #endif
1452         init->replace_mem_projs_by(mem, &_igvn);
1453         assert(init->outcnt() == 0, "should only have had a control and some memory projections, and we removed them");
1454       } else if (use->Opcode() == Op_MemBarStoreStore) {
1455         // Inline type buffer allocations are followed by a membar
1456         assert(inline_alloc, "Unexpected MemBarStoreStore");
1457         use->as_MemBar()->remove(&_igvn);
1458       } else  {
1459         assert(false, "only Initialize or AddP expected");
1460       }
1461       j -= (oc1 - _callprojs->resproj[0]->outcnt());
1462     }
1463   }
1464   if (_callprojs->fallthrough_catchproj != nullptr) {
1465     _igvn.replace_node(_callprojs->fallthrough_catchproj, alloc->in(TypeFunc::Control));
1466   }
1467   if (_callprojs->fallthrough_memproj != nullptr) {
1468     _igvn.replace_node(_callprojs->fallthrough_memproj, alloc->in(TypeFunc::Memory));
1469   }
1470   if (_callprojs->catchall_memproj != nullptr) {
1471     _igvn.replace_node(_callprojs->catchall_memproj, C->top());
1472   }
1473   if (_callprojs->fallthrough_ioproj != nullptr) {
1474     _igvn.replace_node(_callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
1475   }
1476   if (_callprojs->catchall_ioproj != nullptr) {
1477     _igvn.replace_node(_callprojs->catchall_ioproj, C->top());
1478   }
1479   if (_callprojs->catchall_catchproj != nullptr) {
1480     _igvn.replace_node(_callprojs->catchall_catchproj, C->top());
1481   }
1482 }
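     //
     // After the rewiring above, the allocation is fully bypassed (a sketch):
     //   fallthrough ctl/mem/io projections -> the allocation's own ctl/mem/io inputs
     //   catchall    ctl/mem/io projections -> top (the exception path is dead)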
1483 
1484 bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
1485   // If reallocation fails during deoptimization we'll pop all
1486   // interpreter frames for this compiled frame and that won't play
1487   // nice with JVMTI popframe.
1488   // We avoid this issue by eager reallocation when the popframe request
1489   // is received.
1490   if (!EliminateAllocations) {
1491     return false;
1492   }
1493   Node* klass = alloc->in(AllocateNode::KlassNode);
1494   const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
1495 
1496   // Attempt to eliminate inline type buffer allocations
1497   // regardless of usage and escape/replaceable status.
1498   bool inline_alloc = tklass->isa_instklassptr() &&
1499                       tklass->is_instklassptr()->instance_klass()->is_inlinetype();
1500   if (!alloc->_is_non_escaping && !inline_alloc) {
1501     return false;
1502   }
1503   // Eliminate boxing allocations which are not used
1504   // regardless of scalar replaceable status.
1505   Node* res = alloc->result_cast();
1506   bool boxing_alloc = (res == nullptr) && C->eliminate_boxing() &&
1507                       tklass->isa_instklassptr() &&
1508                       tklass->is_instklassptr()->instance_klass()->is_box_klass();
1509   if (!alloc->_is_scalar_replaceable && !boxing_alloc && !inline_alloc) {
1510     return false;
1511   }
1512 
1513   _callprojs = alloc->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
1514 
1515   GrowableArray <SafePointNode *> safepoints;
1516   if (!can_eliminate_allocation(&_igvn, alloc, &safepoints)) {
1517     return false;
1518   }
1519 
1520   if (!alloc->_is_scalar_replaceable) {
1521     assert(res == nullptr || inline_alloc, "sanity");
1522     // We can only eliminate allocation if all debug info references
1523     // are already replaced with SafePointScalarObject because
1524     // we can't search for a fields value without instance_id.
1525     if (safepoints.length() > 0) {
1526       return false;
1527     }
1528   }
1529 
1530   if (!scalar_replacement(alloc, safepoints)) {
1531     return false;
1532   }
1533 
1534   CompileLog* log = C->log();
1535   if (log != nullptr) {
1536     log->head("eliminate_allocation type='%d'",
1537               log->identify(tklass->exact_klass()));
1538     JVMState* p = alloc->jvms();
1539     while (p != nullptr) {
1540       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1541       p = p->caller();
1542     }
1543     log->tail("eliminate_allocation");
1544   }
1545 
1546   process_users_of_allocation(alloc, inline_alloc);
1547 
1548 #ifndef PRODUCT
1549   if (PrintEliminateAllocations) {
1550     if (alloc->is_AllocateArray())
1551       tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
1552     else
1553       tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
1554   }
1555 #endif
1556 
1557   return true;
1558 }
1559 
1560 bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
1561   // EA should remove all uses of a non-escaping boxing node.
1562   if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != nullptr) {
1563     return false;
1564   }
1565 
1566   assert(boxing->result_cast() == nullptr, "unexpected boxing node result");
1567 
1568   _callprojs = boxing->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
1569 
1570   const TypeTuple* r = boxing->tf()->range_sig();
1571   assert(r->cnt() > TypeFunc::Parms, "sanity");
1572   const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
1573   assert(t != nullptr, "sanity");
1574 
1575   CompileLog* log = C->log();
1576   if (log != nullptr) {
1577     log->head("eliminate_boxing type='%d'",
1578               log->identify(t->instance_klass()));
1579     JVMState* p = boxing->jvms();
1580     while (p != nullptr) {
1581       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1582       p = p->caller();
1583     }
1584     log->tail("eliminate_boxing");
1585   }
1586 
1587   process_users_of_allocation(boxing);
1588 
1589 #ifndef PRODUCT
1590   if (PrintEliminateAllocations) {

1654 // oop flavor.
1655 //
1656 //=============================================================================
1657 // FastAllocateSizeLimit value is in DOUBLEWORDS.
1658 // Allocations bigger than this always go the slow route.
1659 // This value must be small enough that allocation attempts that need to
1660 // trigger exceptions go the slow route.  Also, it must be small enough so
1661 // that heap_top + size_in_bytes does not wrap around the 4Gig limit.
1662 //=============================================================================//
1663 // %%% Here is an old comment from parseHelper.cpp; is it outdated?
1664 // The allocator will coalesce int->oop copies away.  See comment in
1665 // coalesce.cpp about how this works.  It depends critically on the exact
1666 // code shape produced here, so if you are changing this code shape
1667 // make sure the GC info for the heap-top is correct in and around the
1668 // slow-path call.
1669 //
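     //
     // Shape produced below for the general case (a sketch):
     //
     //            ctrl
     //           /    \
     //     fast path   slow_region --> CallStaticJava(slow_call_address)
     //           \    /
     //       result_region (+ Phis for raw memory, raw oop, and i_o)
     //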
1670 
1671 void PhaseMacroExpand::expand_allocate_common(
1672             AllocateNode* alloc, // allocation node to be expanded
1673             Node* length,  // array length for an array allocation
1674             Node* init_val, // value to initialize the array with
1675             const TypeFunc* slow_call_type, // Type of slow call
1676             address slow_call_address,  // Address of slow call
1677             Node* valid_length_test // whether length is valid or not
1678     )
1679 {
1680   Node* ctrl = alloc->in(TypeFunc::Control);
1681   Node* mem  = alloc->in(TypeFunc::Memory);
1682   Node* i_o  = alloc->in(TypeFunc::I_O);
1683   Node* size_in_bytes     = alloc->in(AllocateNode::AllocSize);
1684   Node* klass_node        = alloc->in(AllocateNode::KlassNode);
1685   Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);
1686   assert(ctrl != nullptr, "must have control");
1687 
1688   // We need a Region and corresponding Phis to merge the slow-path and fast-path results.
1689   // They will not be used if "always_slow" is set.
1690   enum { slow_result_path = 1, fast_result_path = 2 };
1691   Node *result_region = nullptr;
1692   Node *result_phi_rawmem = nullptr;
1693   Node *result_phi_rawoop = nullptr;
1694   Node *result_phi_i_o = nullptr;

1739 #endif
1740       yank_alloc_node(alloc);
1741       return;
1742     }
1743   }
1744 
1745   enum { too_big_or_final_path = 1, need_gc_path = 2 };
1746   Node *slow_region = nullptr;
1747   Node *toobig_false = ctrl;
1748 
1749   // generate the initial test if necessary
1750   if (initial_slow_test != nullptr) {
1751     assert(expand_fast_path, "Only need test if there is a fast path");
1752     slow_region = new RegionNode(3);
1753 
1754     // Now make the initial failure test.  Usually a too-big test but
1755     // might be a TRUE for finalizers.
1756     IfNode *toobig_iff = new IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
1757     transform_later(toobig_iff);
1758     // Plug the failing-too-big test into the slow-path region
1759     Node* toobig_true = new IfTrueNode(toobig_iff);
1760     transform_later(toobig_true);
1761     slow_region    ->init_req( too_big_or_final_path, toobig_true );
1762     toobig_false = new IfFalseNode(toobig_iff);
1763     transform_later(toobig_false);
1764   } else {
1765     // No initial test, just fall into next case
1766     assert(allocation_has_use || !expand_fast_path, "Should already have been handled");
1767     toobig_false = ctrl;
1768     DEBUG_ONLY(slow_region = NodeSentinel);
1769   }
1770 
1771   // If we are here, there are several possibilities:
1772   // - expand_fast_path is false: then only a slow path is expanded. That's it.
1773   // no_initial_check means a constant allocation.
1774   // - If the check always evaluates to false -> expand_fast_path is false (see above).
1775   // - If the check always evaluates to true -> directly into the fast path (but may bail out to the slow path).
1776   // If !allocation_has_use, the fast path is empty.
1777   // If !allocation_has_use && no_initial_check:
1778   // - Then there is no fast path that can fall out to the slow path -> no allocation code at all;
1779   //   removed by yank_alloc_node above.
1780 
1781   Node *slow_mem = mem;  // save the current memory state for slow path
1782   // generate the fast allocation code unless we know that the initial test will always go slow
1783   if (expand_fast_path) {
1784     // Fast path modifies only raw memory.
1785     if (mem->is_MergeMem()) {
1786       mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1787     }
1788 
1789     // allocate the Region and Phi nodes for the result
1790     result_region = new RegionNode(3);
1791     result_phi_rawmem = new PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
1792     result_phi_i_o    = new PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch
1793 
1794     // Grab regular I/O before optional prefetch may change it.
1795     // Slow-path does no I/O so just set it to the original I/O.
1796     result_phi_i_o->init_req(slow_result_path, i_o);
1797 
1798     // Name successful fast-path variables
1799     Node* fast_oop_ctrl;
1800     Node* fast_oop_rawmem;
1801 
1802     if (allocation_has_use) {
1803       Node* needgc_ctrl = nullptr;
1804       result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);
1805 
1806       intx prefetch_lines = length != nullptr ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
1807       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1808       Node* fast_oop = bs->obj_allocate(this, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl,
1809                                         fast_oop_ctrl, fast_oop_rawmem,
1810                                         prefetch_lines);
1811 
1812       if (initial_slow_test != nullptr) {
1813         // This completes all paths into the slow merge point
1814         slow_region->init_req(need_gc_path, needgc_ctrl);
1815         transform_later(slow_region);
1816       } else {
1817         // No initial slow path needed!
1818         // Just fall from the need-GC path straight into the VM call.
1819         slow_region = needgc_ctrl;
1820       }
1821 

1839     result_phi_i_o   ->init_req(fast_result_path, i_o);
1840     result_phi_rawmem->init_req(fast_result_path, fast_oop_rawmem);
1841   } else {
1842     slow_region = ctrl;
1843     result_phi_i_o = i_o; // Rename it to use in the following code.
1844   }
1845 
1846   // Generate slow-path call
1847   CallNode *call = new CallStaticJavaNode(slow_call_type, slow_call_address,
1848                                OptoRuntime::stub_name(slow_call_address),
1849                                TypePtr::BOTTOM);
1850   call->init_req(TypeFunc::Control,   slow_region);
1851   call->init_req(TypeFunc::I_O,       top());    // does no i/o
1852   call->init_req(TypeFunc::Memory,    slow_mem); // may gc ptrs
1853   call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
1854   call->init_req(TypeFunc::FramePtr,  alloc->in(TypeFunc::FramePtr));
1855 
1856   call->init_req(TypeFunc::Parms+0, klass_node);
1857   if (length != nullptr) {
1858     call->init_req(TypeFunc::Parms+1, length);
1859     if (init_val != nullptr) {
1860       call->init_req(TypeFunc::Parms+2, init_val);
1861     }
1862   }
1863 
1864   // Copy debug information and adjust JVMState information, then replace
1865   // the allocate node with the call.
1866   call->copy_call_debug_info(&_igvn, alloc);
1867   // For array allocations, copy the valid length check to the call node so Compile::final_graph_reshaping() can verify
1868   // that the call has the expected number of CatchProj nodes (in case the allocation always fails and the fallthrough
1869   // path dies).
1870   if (valid_length_test != nullptr) {
1871     call->add_req(valid_length_test);
1872   }
1873   if (expand_fast_path) {
1874     call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
1875   } else {
1876     // Hook i_o projection to avoid its elimination during allocation
1877     // replacement (when only a slow call is generated).
1878     call->set_req(TypeFunc::I_O, result_phi_i_o);
1879   }
1880   _igvn.replace_node(alloc, call);
1881   transform_later(call);
1882 
1883   // Identify the output projections from the allocate node and
1884   // adjust any references to them.
1885   // The control and io projections look like:
1886   //
1887   //        v---Proj(ctrl) <-----+   v---CatchProj(ctrl)
1888   //  Allocate                   Catch
1889   //        ^---Proj(io) <-------+   ^---CatchProj(io)
1890   //
1891   //  We are interested in the CatchProj nodes.
1892   //
1893   _callprojs = call->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
1894 
1895   // An allocate node has separate memory projections for the uses on
1896   // the control and i_o paths. Replace the control memory projection with
1897   // result_phi_rawmem (unless we are only generating a slow call when
1898   // both memory projections are combined)
1899   if (expand_fast_path && _callprojs->fallthrough_memproj != nullptr) {
1900     _igvn.replace_in_uses(_callprojs->fallthrough_memproj, result_phi_rawmem);
1901   }
1902   // Now change uses of catchall_memproj to use fallthrough_memproj and delete
1903   // catchall_memproj so we end up with a call that has only 1 memory projection.
1904   if (_callprojs->catchall_memproj != nullptr) {
1905     if (_callprojs->fallthrough_memproj == nullptr) {
1906       _callprojs->fallthrough_memproj = new ProjNode(call, TypeFunc::Memory);
1907       transform_later(_callprojs->fallthrough_memproj);
1908     }
1909     _igvn.replace_in_uses(_callprojs->catchall_memproj, _callprojs->fallthrough_memproj);
1910     _igvn.remove_dead_node(_callprojs->catchall_memproj, PhaseIterGVN::NodeOrigin::Graph);
1911   }
1912 
1913   // An allocate node has separate i_o projections for the uses on the control
1914   // and i_o paths. Always replace the control i_o projection with the result
1915   // i_o, otherwise the incoming i_o becomes dead when only a slow call is
1916   // generated (unlike the memory projections, which are combined in that
1917   // case).
1918   if (_callprojs->fallthrough_ioproj != nullptr) {
1919     _igvn.replace_in_uses(_callprojs->fallthrough_ioproj, result_phi_i_o);
1920   }
1921   // Now change uses of catchall_ioproj to use fallthrough_ioproj and delete
1922   // catchall_ioproj so we end up with a call that has only 1 i_o projection.
1923   if (_callprojs->catchall_ioproj != nullptr) {
1924     if (_callprojs->fallthrough_ioproj == nullptr) {
1925       _callprojs->fallthrough_ioproj = new ProjNode(call, TypeFunc::I_O);
1926       transform_later(_callprojs->fallthrough_ioproj);
1927     }
1928     _igvn.replace_in_uses(_callprojs->catchall_ioproj, _callprojs->fallthrough_ioproj);
1929     _igvn.remove_dead_node(_callprojs->catchall_ioproj, PhaseIterGVN::NodeOrigin::Graph);
1930   }
1931 
1932   // if we generated only a slow call, we are done
1933   if (!expand_fast_path) {
1934     // Now we can unhook i_o.
1935     if (result_phi_i_o->outcnt() > 1) {
1936       call->set_req(TypeFunc::I_O, top());
1937     } else {
1938       assert(result_phi_i_o->unique_ctrl_out() == call, "sanity");
1939       // Case of a new array whose negative size is known during compilation:
1940       // the AllocateArrayNode::Ideal() optimization disconnects the unreachable
1941       // following code since the call to the runtime will throw an exception.
1942       // As a result there will be no users of i_o after the call.
1943       // Leave i_o attached to this call to avoid problems in the preceding graph.
1944     }
1945     return;
1946   }
1947 
1948   if (_callprojs->fallthrough_catchproj != nullptr) {
1949     ctrl = _callprojs->fallthrough_catchproj->clone();
1950     transform_later(ctrl);
1951     _igvn.replace_node(_callprojs->fallthrough_catchproj, result_region);
1952   } else {
1953     ctrl = top();
1954   }
1955   Node *slow_result;
1956   if (_callprojs->resproj[0] == nullptr) {
1957     // no uses of the allocation result
1958     slow_result = top();
1959   } else {
1960     slow_result = _callprojs->resproj[0]->clone();
1961     transform_later(slow_result);
1962     _igvn.replace_node(_callprojs->resproj[0], result_phi_rawoop);
1963   }
1964 
1965   // Plug slow-path into result merge point
1966   result_region->init_req( slow_result_path, ctrl);
1967   transform_later(result_region);
1968   if (allocation_has_use) {
1969     result_phi_rawoop->init_req(slow_result_path, slow_result);
1970     transform_later(result_phi_rawoop);
1971   }
1972   result_phi_rawmem->init_req(slow_result_path, _callprojs->fallthrough_memproj);
1973   transform_later(result_phi_rawmem);
1974   transform_later(result_phi_i_o);
1975   // This completes all paths into the result merge point
1976 }
1977 
1978 // Remove alloc node that has no uses.
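     // Since the result has no real uses, the call is bypassed rather than
     // expanded: the result's uses (only MemBars at this point) are removed,
     // the fall-through projections are rewired to the corresponding Allocate
     // inputs (ctrl/mem/i_o), and the exception-path projections are killed by
     // setting their control to top.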
1979 void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
1980   Node* ctrl = alloc->in(TypeFunc::Control);
1981   Node* mem  = alloc->in(TypeFunc::Memory);
1982   Node* i_o  = alloc->in(TypeFunc::I_O);
1983 
1984   _callprojs = alloc->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
1985   if (_callprojs->resproj[0] != nullptr) {
1986     for (DUIterator_Fast imax, i = _callprojs->resproj[0]->fast_outs(imax); i < imax; i++) {
1987       Node* use = _callprojs->resproj[0]->fast_out(i);
1988       use->as_MemBar()->remove(&_igvn); // the only uses left here are MemBars
1989       --imax;
1990       --i; // back up iterator
1991     }
1992     assert(_callprojs->resproj[0]->outcnt() == 0, "all uses must be deleted");
1993     _igvn.remove_dead_node(_callprojs->resproj[0], PhaseIterGVN::NodeOrigin::Graph);
1994   }
1995   if (_callprojs->fallthrough_catchproj != nullptr) {
1996     _igvn.replace_in_uses(_callprojs->fallthrough_catchproj, ctrl);
1997     _igvn.remove_dead_node(_callprojs->fallthrough_catchproj, PhaseIterGVN::NodeOrigin::Graph);
1998   }
1999   if (_callprojs->catchall_catchproj != nullptr) {
2000     _igvn.rehash_node_delayed(_callprojs->catchall_catchproj);
2001     _callprojs->catchall_catchproj->set_req(0, top());
2002   }
2003   if (_callprojs->fallthrough_proj != nullptr) {
2004     Node* catchnode = _callprojs->fallthrough_proj->unique_ctrl_out();
2005     _igvn.remove_dead_node(catchnode, PhaseIterGVN::NodeOrigin::Graph);
2006     _igvn.remove_dead_node(_callprojs->fallthrough_proj, PhaseIterGVN::NodeOrigin::Graph);
2007   }
2008   if (_callprojs->fallthrough_memproj != nullptr) {
2009     _igvn.replace_in_uses(_callprojs->fallthrough_memproj, mem);
2010     _igvn.remove_dead_node(_callprojs->fallthrough_memproj, PhaseIterGVN::NodeOrigin::Graph);
2011   }
2012   if (_callprojs->fallthrough_ioproj != nullptr) {
2013     _igvn.replace_in_uses(_callprojs->fallthrough_ioproj, i_o);
2014     _igvn.remove_dead_node(_callprojs->fallthrough_ioproj, PhaseIterGVN::NodeOrigin::Graph);
2015   }
2016   if (_callprojs->catchall_memproj != nullptr) {
2017     _igvn.rehash_node_delayed(_callprojs->catchall_memproj);
2018     _callprojs->catchall_memproj->set_req(0, top());
2019   }
2020   if (_callprojs->catchall_ioproj != nullptr) {
2021     _igvn.rehash_node_delayed(_callprojs->catchall_ioproj);
2022     _callprojs->catchall_ioproj->set_req(0, top());
2023   }
2024 #ifndef PRODUCT
2025   if (PrintEliminateAllocations) {
2026     if (alloc->is_AllocateArray()) {
2027       tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
2028     } else {
2029       tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
2030     }
2031   }
2032 #endif
2033   _igvn.remove_dead_node(alloc, PhaseIterGVN::NodeOrigin::Graph);
2034 }
2035 
2036 void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeNode* init,
2037                                                 Node*& fast_oop_ctrl, Node*& fast_oop_rawmem) {
2038   // If initialization is performed by an array copy, any required
2039   // MemBarStoreStore was already added. If the object does not
2040   // escape, there is no need for a MemBarStoreStore. If the object does
2041   // not escape in its initializer and a memory barrier (MemBarStoreStore
2042   // or stronger) was already added at the exit of the initializer, there is also no need

2136     Node* thread = new ThreadLocalNode();
2137     transform_later(thread);
2138 
2139     call->init_req(TypeFunc::Parms + 0, thread);
2140     call->init_req(TypeFunc::Parms + 1, oop);
2141     call->init_req(TypeFunc::Control, ctrl);
2142     call->init_req(TypeFunc::I_O    , top()); // does no i/o
2143     call->init_req(TypeFunc::Memory , rawmem);
2144     call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
2145     call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
2146     transform_later(call);
2147     ctrl = new ProjNode(call, TypeFunc::Control);
2148     transform_later(ctrl);
2149     rawmem = new ProjNode(call, TypeFunc::Memory);
2150     transform_later(rawmem);
2151   }
2152 }
2153 
2154 // Helper for PhaseMacroExpand::expand_allocate_common.
2155 // Initializes the newly-allocated storage.
2156 Node* PhaseMacroExpand::initialize_object(AllocateNode* alloc,
2157                                           Node* control, Node* rawmem, Node* object,
2158                                           Node* klass_node, Node* length,
2159                                           Node* size_in_bytes) {
2160   InitializeNode* init = alloc->initialization();
2161   // Store the klass & mark bits
2162   Node* mark_node = alloc->make_ideal_mark(&_igvn, control, rawmem);
2163   if (!mark_node->is_Con()) {
2164     transform_later(mark_node);
2165   }
2166   rawmem = make_store_raw(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type());
2167 
2168   if (!UseCompactObjectHeaders) {
2169     rawmem = make_store_raw(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
2170   }
2171   int header_size = alloc->minimum_header_size();  // conservatively small
2172 
2173   // Array length
2174   if (length != nullptr) {         // Arrays need length field
2175     rawmem = make_store_raw(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
2176     // conservatively small header size:
2177     header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
2178     if (_igvn.type(klass_node)->isa_aryklassptr()) {   // we know the exact header size in most cases:
2179       BasicType elem = _igvn.type(klass_node)->is_klassptr()->as_instance_type()->isa_aryptr()->elem()->array_element_basic_type();
2180       if (is_reference_type(elem, true)) {
2181         elem = T_OBJECT;
2182       }
2183       header_size = Klass::layout_helper_header_size(Klass::array_layout_helper(elem));
2184     }
2185   }
2186 
2187   // Clear the object body, if necessary.
2188   if (init == nullptr) {
2189     // The init has somehow disappeared; be cautious and clear everything.
2190     //
2191     // This can happen if a node is allocated but an uncommon trap occurs
2192     // immediately.  In this case, the Initialize gets associated with the
2193     // trap, and may be placed in a different (outer) loop, if the Allocate
2194     // is in a loop.  If (this is rare) the inner loop gets unrolled, then
2195     // there can be two Allocates to one Initialize.  The answer in all these
2196     // edge cases is safety first.  It is always safe to clear immediately
2197     // within an Allocate, and then (maybe or maybe not) clear some more later.
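         // (With UseTLAB && ZeroTLAB the TLAB memory is already zeroed, so no
         // explicit clearing is needed.)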
2198     if (!(UseTLAB && ZeroTLAB)) {
2199       rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
2200                                             alloc->in(AllocateNode::InitValue),
2201                                             alloc->in(AllocateNode::RawInitValue),
2202                                             header_size, size_in_bytes,
2203                                             true,
2204                                             &_igvn);
2205     }
2206   } else {
2207     if (!init->is_complete()) {
2208       // Try to win by zeroing only what the init does not store.
2209       // We can also try to do some peephole optimizations,
2210       // such as combining some adjacent subword stores.
2211       rawmem = init->complete_stores(control, rawmem, object,
2212                                      header_size, size_in_bytes, &_igvn);
2213     }
2214     // We have no more use for this link, since the AllocateNode goes away:
2215     init->set_req(InitializeNode::RawAddress, top());
2216     // (If we keep the link, it just confuses the register allocator,
2217     // who thinks he sees a real use of the address by the membar.)
2218   }
2219 
2220   return rawmem;
2221 }

2356       for (intx i = 0; i < lines; i++) {
2357         prefetch_adr = AddPNode::make_off_heap(new_eden_top,
2358                                                _igvn.MakeConX(distance));
2359         transform_later(prefetch_adr);
2360         prefetch = new PrefetchAllocationNode(i_o, prefetch_adr);
2361         // Do not let it float too high, since if eden_top == eden_end,
2362         // both might be null.
2363         if (i == 0) { // Set control for first prefetch, next follows it
2364           prefetch->init_req(0, needgc_false);
2365         }
2366         transform_later(prefetch);
2367         distance += step_size;
2368         i_o = prefetch;
2369       }
2370   }
2371   return i_o;
2372 }
2373 
2374 
2375 void PhaseMacroExpand::expand_allocate(AllocateNode *alloc) {
2376   expand_allocate_common(alloc, nullptr, nullptr,
2377                          OptoRuntime::new_instance_Type(),
2378                          OptoRuntime::new_instance_Java(), nullptr);
2379 }
2380 
2381 void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) {
2382   Node* length = alloc->in(AllocateNode::ALength);
2383   Node* valid_length_test = alloc->in(AllocateNode::ValidLengthTest);
2384   InitializeNode* init = alloc->initialization();
2385   Node* klass_node = alloc->in(AllocateNode::KlassNode);
2386   Node* init_value = alloc->in(AllocateNode::InitValue);
2387   const TypeAryKlassPtr* ary_klass_t = _igvn.type(klass_node)->isa_aryklassptr();
2388   assert(!ary_klass_t || !ary_klass_t->klass_is_exact() || !ary_klass_t->exact_klass()->is_obj_array_klass() ||
2389          ary_klass_t->is_refined_type(), "Must be a refined array klass");
2390   const TypeFunc* slow_call_type;
2391   address slow_call_address;  // Address of slow call
2392   if (init != nullptr && init->is_complete_with_arraycopy() &&
2393       ary_klass_t && ary_klass_t->elem()->isa_klassptr() == nullptr) {
2394     // Don't zero type array during slow allocation in VM since
2395     // it will be initialized later by arraycopy in compiled code.
2396     slow_call_address = OptoRuntime::new_array_nozero_Java();
2397     slow_call_type = OptoRuntime::new_array_nozero_Type();
2398   } else {
2399     slow_call_address = OptoRuntime::new_array_Java();
2400     slow_call_type = OptoRuntime::new_array_Type();
2401 
2402     if (init_value == nullptr) {
2403       init_value = _igvn.zerocon(T_OBJECT);
2404     } else if (UseCompressedOops) {
2405       init_value = transform_later(new DecodeNNode(init_value, init_value->bottom_type()->make_ptr()));
2406     }
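         // Note: with compressed oops the incoming init value is a narrow oop and
         // is decoded above, since the runtime entry takes a regular oop argument.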
2407   }
2408   expand_allocate_common(alloc, length, init_value,
2409                          slow_call_type,
2410                          slow_call_address, valid_length_test);
2411 }
2412 
2413 //-------------------mark_eliminated_box----------------------------------
2414 //
2415 // During escape analysis, obj may point to several objects, but after a few
2416 // ideal graph transformations (CCP) it may point to only one non-escaping
2417 // object (though still through a phi), and the corresponding locks and
2418 // unlocks will be marked for elimination. Later, obj could be replaced with
2419 // a new node (a new phi) which does not have escape information. And later,
2420 // after some graph reshaping, other locks and unlocks (which were not marked
2421 // for elimination before) are connected to this new obj (phi) but they will
2422 // still not be marked for elimination since the new obj has no escape
2423 // information. So mark all associated (same box and obj) lock and unlock
2424 // nodes for elimination if some of them were marked already.
2425 void PhaseMacroExpand::mark_eliminated_box(Node* box, Node* obj) {
2426   BoxLockNode* oldbox = box->as_BoxLock();
2427   if (oldbox->is_eliminated()) {
2428     return; // This BoxLock node was processed already.
2429   }

2601 #ifdef ASSERT
2602   if (!alock->is_coarsened()) {
2603     // Check that new "eliminated" BoxLock node is created.
2604     BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
2605     assert(oldbox->is_eliminated(), "should be done already");
2606   }
2607 #endif
2608 
2609   alock->log_lock_optimization(C, "eliminate_lock");
2610 
2611 #ifndef PRODUCT
2612   if (PrintEliminateLocks) {
2613     tty->print_cr("++++ Eliminated: %d %s '%s'", alock->_idx, (alock->is_Lock() ? "Lock" : "Unlock"), alock->kind_as_string());
2614   }
2615 #endif
2616 
2617   Node* mem  = alock->in(TypeFunc::Memory);
2618   Node* ctrl = alock->in(TypeFunc::Control);
2619   guarantee(ctrl != nullptr, "missing control projection, cannot replace_node() with null");
2620 
2621   _callprojs = alock->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
2622   // There are 2 projections from the lock.  The lock node will
2623   // be deleted when its last use is subsumed below.
2624   assert(alock->outcnt() == 2 &&
2625          _callprojs->fallthrough_proj != nullptr &&
2626          _callprojs->fallthrough_memproj != nullptr,
2627          "Unexpected projections from Lock/Unlock");
2628 
2629   Node* fallthroughproj = _callprojs->fallthrough_proj;
2630   Node* memproj_fallthrough = _callprojs->fallthrough_memproj;
2631 
2632   // The memory projection from a lock/unlock is RawMem
2633   // The input to a Lock is merged memory, so extract its RawMem input
2634   // (unless the MergeMem has been optimized away.)
2635   if (alock->is_Lock()) {
2636     // Search for MemBarAcquireLock node and delete it also.
2637     MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
2638     assert(membar != nullptr && membar->Opcode() == Op_MemBarAcquireLock, "");
2639     Node* ctrlproj = membar->proj_out(TypeFunc::Control);
2640     Node* memproj = membar->proj_out(TypeFunc::Memory);
2641     _igvn.replace_node(ctrlproj, fallthroughproj);
2642     _igvn.replace_node(memproj, memproj_fallthrough);
2643 
2644     // Delete FastLock node also if this Lock node is unique user
2645     // (a loop peeling may clone a Lock node).
2646     Node* flock = alock->as_Lock()->fastlock_node();
2647     if (flock->outcnt() == 1) {
2648       assert(flock->unique_out() == alock, "sanity");
2649       _igvn.replace_node(flock, top());
2650     }

2681   assert(!box->as_BoxLock()->is_eliminated(), "sanity");
2682 
2683   // Make the merge point
2684   Node *region;
2685   Node *mem_phi;
2686   Node *slow_path;
2687 
2688   region  = new RegionNode(3);
2689   // create a Phi for the memory state
2690   mem_phi = new PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
2691 
2692   // Optimize test; set region slot 2
2693   slow_path = opt_bits_test(ctrl, region, 2, flock);
2694   mem_phi->init_req(2, mem);
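       // Region/phi slot 2 now carries the fast path (the lock-bits test
       // succeeded); slot 1 is wired below to the fall-through of the slow call.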
2695 
2696   // Make slow path call
2697   CallNode* call = make_slow_call(lock, OptoRuntime::complete_monitor_enter_Type(),
2698                                   OptoRuntime::complete_monitor_locking_Java(), nullptr, slow_path,
2699                                   obj, box, nullptr);
2700 
2701   _callprojs = call->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
2702 
2703   // The slow path can only throw asynchronous exceptions, which always
2704   // deoptimize.  So the compiler assumes the slow call can never throw an
2705   // exception.  If it DOES throw an exception, the debug info would need
2706   // to be removed first (since if it throws there is no monitor).
2707   assert(_callprojs->fallthrough_ioproj == nullptr && _callprojs->catchall_ioproj == nullptr &&
2708          _callprojs->catchall_memproj == nullptr && _callprojs->catchall_catchproj == nullptr, "Unexpected projection from Lock");
2709 
2710   // Capture slow path
2711   // disconnect fall-through projection from call and create a new one
2712   // hook up users of fall-through projection to region
2713   Node *slow_ctrl = _callprojs->fallthrough_proj->clone();
2714   transform_later(slow_ctrl);
2715   _igvn.hash_delete(_callprojs->fallthrough_proj);
2716   _callprojs->fallthrough_proj->disconnect_inputs(C);
2717   region->init_req(1, slow_ctrl);
2718   // region inputs are now complete
2719   transform_later(region);
2720   _igvn.replace_node(_callprojs->fallthrough_proj, region);
2721 
2722   Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
2723 
2724   mem_phi->init_req(1, memproj);
2725 
2726   transform_later(mem_phi);
2727 
2728   _igvn.replace_node(_callprojs->fallthrough_memproj, mem_phi);
2729 }
2730 
2731 //------------------------------expand_unlock_node----------------------
2732 void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
2733 
2734   Node* ctrl = unlock->in(TypeFunc::Control);
2735   Node* mem = unlock->in(TypeFunc::Memory);
2736   Node* obj = unlock->obj_node();
2737   Node* box = unlock->box_node();
2738 
2739   assert(!box->as_BoxLock()->is_eliminated(), "sanity");
2740 
2741   // No need for a null check on unlock
2742 
2743   // Make the merge point
2744   Node* region = new RegionNode(3);
2745 
2746   FastUnlockNode *funlock = new FastUnlockNode( ctrl, obj, box );
2747   funlock = transform_later( funlock )->as_FastUnlock();
2748   // Optimize test; set region slot 2
2749   Node *slow_path = opt_bits_test(ctrl, region, 2, funlock);
2750   Node *thread = transform_later(new ThreadLocalNode());
2751 
2752   CallNode *call = make_slow_call((CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(),
2753                                   CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C),
2754                                   "complete_monitor_unlocking_C", slow_path, obj, box, thread);
2755 
2756   _callprojs = call->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
2757   assert(_callprojs->fallthrough_ioproj == nullptr && _callprojs->catchall_ioproj == nullptr &&
2758          _callprojs->catchall_memproj == nullptr && _callprojs->catchall_catchproj == nullptr, "Unexpected projection from Lock");
2759 
2760   // No exceptions for unlocking
2761   // Capture slow path
2762   // disconnect fall-through projection from call and create a new one
2763   // hook up users of fall-through projection to region
2764   Node *slow_ctrl = _callprojs->fallthrough_proj->clone();
2765   transform_later(slow_ctrl);
2766   _igvn.hash_delete(_callprojs->fallthrough_proj);
2767   _callprojs->fallthrough_proj->disconnect_inputs(C);
2768   region->init_req(1, slow_ctrl);
2769   // region inputs are now complete
2770   transform_later(region);
2771   _igvn.replace_node(_callprojs->fallthrough_proj, region);
2772 
2773   if (_callprojs->fallthrough_memproj != nullptr) {
2774     // create a Phi for the memory state
2775     Node* mem_phi = new PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
2776     Node* memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
2777     mem_phi->init_req(1, memproj);
2778     mem_phi->init_req(2, mem);
2779     transform_later(mem_phi);
2780     _igvn.replace_node(_callprojs->fallthrough_memproj, mem_phi);
2781   }
2782 }
2783 
2784 // An inline type might be returned from the call but we don't know its
2785 // type. Either we get a buffered inline type (and nothing needs to be
2786 // done), or one of the values being returned is the klass of the inline
2787 // type, in which case we need to allocate an inline type instance of that
2788 // type and initialize it with the other values being returned. For the
2789 // allocation we first try a fast path and initialize the value with the
2790 // inline klass's pack handler, or we fall back to a runtime call.
2791 void PhaseMacroExpand::expand_mh_intrinsic_return(CallStaticJavaNode* call) {
2792   assert(call->method()->is_method_handle_intrinsic(), "must be a method handle intrinsic call");
2793   Node* ret = call->proj_out_or_null(TypeFunc::Parms);
2794   if (ret == nullptr) {
2795     return;
2796   }
2797   const TypeFunc* tf = call->_tf;
2798   const TypeTuple* domain = OptoRuntime::store_inline_type_fields_Type()->domain_cc();
2799   const TypeFunc* new_tf = TypeFunc::make(tf->domain_sig(), tf->domain_cc(), tf->range_sig(), domain, true);
2800   call->_tf = new_tf;
2801   // Make sure the change of type is applied before projections are processed by igvn
2802   _igvn.set_type(call, call->Value(&_igvn));
2803   _igvn.set_type(ret, ret->Value(&_igvn));
2804 
2805   // Before any new projection is added:
2806   CallProjections* projs = call->extract_projections(true, true);
2807 
2808   // Create temporary hook nodes that will be replaced below.
2809   // Add an input to prevent hook nodes from being dead.
2810   Node* ctl = new Node(call);
2811   Node* mem = new Node(ctl);
2812   Node* io = new Node(ctl);
2813   Node* ex_ctl = new Node(ctl);
2814   Node* ex_mem = new Node(ctl);
2815   Node* ex_io = new Node(ctl);
2816   Node* res = new Node(ctl);
2817 
2818   // Allocate a new buffered inline type only if the call did not already return one
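       // The low bit of the returned word tags the two cases: if it is set, the
       // word is a tagged InlineKlass pointer (the tag is cleared with the -2 mask
       // below to recover the klass); otherwise it is already a buffered oop.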
2819   Node* cast = transform_later(new CastP2XNode(ctl, res));
2820   Node* mask = MakeConX(0x1);
2821   Node* masked = transform_later(new AndXNode(cast, mask));
2822   Node* cmp = transform_later(new CmpXNode(masked, mask));
2823   Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq));
2824   IfNode* allocation_iff = new IfNode(ctl, bol, PROB_MAX, COUNT_UNKNOWN);
2825   transform_later(allocation_iff);
2826   Node* allocation_ctl = transform_later(new IfTrueNode(allocation_iff));
2827   Node* no_allocation_ctl = transform_later(new IfFalseNode(allocation_iff));
2828   Node* no_allocation_res = transform_later(new CheckCastPPNode(no_allocation_ctl, res, TypeInstPtr::BOTTOM));
2829 
2830   // Try to allocate a new buffered inline instance either from TLAB or eden space
2831   Node* needgc_ctrl = nullptr; // needgc means the slow case, i.e. allocation failed
2832   CallLeafNoFPNode* handler_call;
2833   const bool alloc_in_place = UseTLAB;
2834   if (alloc_in_place) {
2835     Node* fast_oop_ctrl = nullptr;
2836     Node* fast_oop_rawmem = nullptr;
2837     Node* mask2 = MakeConX(-2);
2838     Node* masked2 = transform_later(new AndXNode(cast, mask2));
2839     Node* rawklassptr = transform_later(new CastX2PNode(masked2));
2840     Node* klass_node = transform_later(new CheckCastPPNode(allocation_ctl, rawklassptr, TypeInstKlassPtr::OBJECT_OR_NULL));
2841     Node* layout_val = make_load_raw(nullptr, mem, klass_node, in_bytes(Klass::layout_helper_offset()), TypeInt::INT, T_INT);
2842     Node* size_in_bytes = ConvI2X(layout_val);
2843     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2844     Node* fast_oop = bs->obj_allocate(this, mem, allocation_ctl, size_in_bytes, io, needgc_ctrl,
2845                                       fast_oop_ctrl, fast_oop_rawmem,
2846                                       AllocateInstancePrefetchLines);
2847     // Allocation succeeded: first initialize the header of the buffered inline
2848     // instance, then initialize its fields with an inline-class-specific handler.
2849     Node* mark_word_node;
2850     if (UseCompactObjectHeaders) {
2851       // COH: We need to load the prototype from the klass at runtime since it encodes the klass pointer already.
2852       mark_word_node = make_load_raw(fast_oop_ctrl, fast_oop_rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2853     } else {
2854       // Otherwise, use the static prototype.
2855       mark_word_node = makecon(TypeRawPtr::make((address)markWord::inline_type_prototype().value()));
2856     }
2857 
2858     fast_oop_rawmem = make_store_raw(fast_oop_ctrl, fast_oop_rawmem, fast_oop, oopDesc::mark_offset_in_bytes(), mark_word_node, T_ADDRESS);
2859     if (!UseCompactObjectHeaders) {
2860       // Without COH, the klass pointer and the klass gap still need to be initialized explicitly.
2861       fast_oop_rawmem = make_store_raw(fast_oop_ctrl, fast_oop_rawmem, fast_oop, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
2862       fast_oop_rawmem = make_store_raw(fast_oop_ctrl, fast_oop_rawmem, fast_oop, oopDesc::klass_gap_offset_in_bytes(), intcon(0), T_INT);
2863     }
2864     Node* members  = make_load_raw(fast_oop_ctrl, fast_oop_rawmem, klass_node, in_bytes(InlineKlass::adr_members_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2865     Node* pack_handler = make_load_raw(fast_oop_ctrl, fast_oop_rawmem, members, in_bytes(InlineKlass::pack_handler_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2866     handler_call = new CallLeafNoFPNode(OptoRuntime::pack_inline_type_Type(),
2867                                         nullptr,
2868                                         "pack handler",
2869                                         TypeRawPtr::BOTTOM);
2870     handler_call->init_req(TypeFunc::Control, fast_oop_ctrl);
2871     handler_call->init_req(TypeFunc::Memory, fast_oop_rawmem);
2872     handler_call->init_req(TypeFunc::I_O, top());
2873     handler_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
2874     handler_call->init_req(TypeFunc::ReturnAdr, top());
2875     handler_call->init_req(TypeFunc::Parms, pack_handler);
2876     handler_call->init_req(TypeFunc::Parms+1, fast_oop);
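         // The remaining inputs of handler_call (the values returned in registers)
         // are filled in further down, together with those of the slow call.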
2877   } else {
2878     needgc_ctrl = allocation_ctl;
2879   }
2880 
2881   // Allocation failed, fall back to a runtime call
2882   CallStaticJavaNode* slow_call = new CallStaticJavaNode(OptoRuntime::store_inline_type_fields_Type(),
2883                                                          StubRoutines::store_inline_type_fields_to_buf(),
2884                                                          "store_inline_type_fields",
2885                                                          TypePtr::BOTTOM);
2886   slow_call->init_req(TypeFunc::Control, needgc_ctrl);
2887   slow_call->init_req(TypeFunc::Memory, mem);
2888   slow_call->init_req(TypeFunc::I_O, io);
2889   slow_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
2890   slow_call->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
2891   slow_call->init_req(TypeFunc::Parms, res);
2892 
2893   Node* slow_ctl = transform_later(new ProjNode(slow_call, TypeFunc::Control));
2894   Node* slow_mem = transform_later(new ProjNode(slow_call, TypeFunc::Memory));
2895   Node* slow_io = transform_later(new ProjNode(slow_call, TypeFunc::I_O));
2896   Node* slow_res = transform_later(new ProjNode(slow_call, TypeFunc::Parms));
2897   Node* slow_catc = transform_later(new CatchNode(slow_ctl, slow_io, 2));
2898   Node* slow_norm = transform_later(new CatchProjNode(slow_catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci));
2899   Node* slow_excp = transform_later(new CatchProjNode(slow_catc, CatchProjNode::catch_all_index,    CatchProjNode::no_handler_bci));
2900 
2901   Node* ex_r = new RegionNode(3);
2902   Node* ex_mem_phi = new PhiNode(ex_r, Type::MEMORY, TypePtr::BOTTOM);
2903   Node* ex_io_phi = new PhiNode(ex_r, Type::ABIO);
2904   ex_r->init_req(1, slow_excp);
2905   ex_mem_phi->init_req(1, slow_mem);
2906   ex_io_phi->init_req(1, slow_io);
2907   ex_r->init_req(2, ex_ctl);
2908   ex_mem_phi->init_req(2, ex_mem);
2909   ex_io_phi->init_req(2, ex_io);
2910   transform_later(ex_r);
2911   transform_later(ex_mem_phi);
2912   transform_later(ex_io_phi);
2913 
2914   // We don't know how many values are returned. This assumes the
2915   // worst case, that all available registers are used.
2916   for (uint i = TypeFunc::Parms+1; i < domain->cnt(); i++) {
2917     if (domain->field_at(i) == Type::HALF) {
2918       slow_call->init_req(i, top());
2919       if (alloc_in_place) {
2920         handler_call->init_req(i+1, top());
2921       }
2922       continue;
2923     }
2924     Node* proj = transform_later(new ProjNode(call, i));
2925     slow_call->init_req(i, proj);
2926     if (alloc_in_place) {
2927       handler_call->init_req(i+1, proj);
2928     }
2929   }
2930   // We can safepoint at that new call
2931   slow_call->copy_call_debug_info(&_igvn, call);
2932   transform_later(slow_call);
2933   if (alloc_in_place) {
2934     transform_later(handler_call);
2935   }
2936 
2937   Node* fast_ctl = nullptr;
2938   Node* fast_res = nullptr;
2939   MergeMemNode* fast_mem = nullptr;
2940   if (alloc_in_place) {
2941     fast_ctl = transform_later(new ProjNode(handler_call, TypeFunc::Control));
2942     Node* rawmem = transform_later(new ProjNode(handler_call, TypeFunc::Memory));
2943     fast_res = transform_later(new ProjNode(handler_call, TypeFunc::Parms));
2944     fast_mem = MergeMemNode::make(mem);
2945     fast_mem->set_memory_at(Compile::AliasIdxRaw, rawmem);
2946     transform_later(fast_mem);
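         // The pack handler modifies only raw memory, so its raw memory projection
         // is merged into the incoming memory state on the fast path.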
2947   }
2948 
2949   Node* r = new RegionNode(alloc_in_place ? 4 : 3);
2950   Node* mem_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
2951   Node* io_phi = new PhiNode(r, Type::ABIO);
2952   Node* res_phi = new PhiNode(r, TypeInstPtr::BOTTOM);
2953   r->init_req(1, no_allocation_ctl);
2954   mem_phi->init_req(1, mem);
2955   io_phi->init_req(1, io);
2956   res_phi->init_req(1, no_allocation_res);
2957   r->init_req(2, slow_norm);
2958   mem_phi->init_req(2, slow_mem);
2959   io_phi->init_req(2, slow_io);
2960   res_phi->init_req(2, slow_res);
2961   if (alloc_in_place) {
2962     r->init_req(3, fast_ctl);
2963     mem_phi->init_req(3, fast_mem);
2964     io_phi->init_req(3, io);
2965     res_phi->init_req(3, fast_res);
2966   }
2967   transform_later(r);
2968   transform_later(mem_phi);
2969   transform_later(io_phi);
2970   transform_later(res_phi);
2971 
2972   // Do not let stores that initialize this buffer be reordered with a subsequent
2973   // store that would make this buffer accessible by other threads.
2974   MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
2975   transform_later(mb);
2976   mb->init_req(TypeFunc::Memory, mem_phi);
2977   mb->init_req(TypeFunc::Control, r);
2978   r = new ProjNode(mb, TypeFunc::Control);
2979   transform_later(r);
2980   mem_phi = new ProjNode(mb, TypeFunc::Memory);
2981   transform_later(mem_phi);
2982 
2983   assert(projs->nb_resproj == 1, "unexpected number of results");
2984   _igvn.replace_in_uses(projs->fallthrough_catchproj, r);
2985   _igvn.replace_in_uses(projs->fallthrough_memproj, mem_phi);
2986   _igvn.replace_in_uses(projs->fallthrough_ioproj, io_phi);
2987   _igvn.replace_in_uses(projs->resproj[0], res_phi);
2988   _igvn.replace_in_uses(projs->catchall_catchproj, ex_r);
2989   _igvn.replace_in_uses(projs->catchall_memproj, ex_mem_phi);
2990   _igvn.replace_in_uses(projs->catchall_ioproj, ex_io_phi);
2991   // The CatchNode should not use the ex_io_phi. Re-connect it to the catchall_ioproj.
2992   Node* cn = projs->fallthrough_catchproj->in(0);
2993   _igvn.replace_input_of(cn, 1, projs->catchall_ioproj);
2994 
2995   _igvn.replace_node(ctl, projs->fallthrough_catchproj);
2996   _igvn.replace_node(mem, projs->fallthrough_memproj);
2997   _igvn.replace_node(io, projs->fallthrough_ioproj);
2998   _igvn.replace_node(res, projs->resproj[0]);
2999   _igvn.replace_node(ex_ctl, projs->catchall_catchproj);
3000   _igvn.replace_node(ex_mem, projs->catchall_memproj);
3001   _igvn.replace_node(ex_io, projs->catchall_ioproj);
3002 }
3003 
3004 void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) {
3005   assert(check->in(SubTypeCheckNode::Control) == nullptr, "should be pinned");
3006   Node* bol = check->unique_out();
3007   Node* obj_or_subklass = check->in(SubTypeCheckNode::ObjOrSubKlass);
3008   Node* superklass = check->in(SubTypeCheckNode::SuperKlass);
3009   assert(bol->is_Bool() && bol->as_Bool()->_test._test == BoolTest::ne, "unexpected bool node");
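       // The Bool tests BoolTest::ne ("is not a subtype"): for each If user, the
       // true projection is replaced by the not-a-subtype control produced by
       // gen_subtype_check() and the false projection by the remaining (subtype)
       // control.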
3010 
3011   for (DUIterator_Last imin, i = bol->last_outs(imin); i >= imin; --i) {
3012     Node* iff = bol->last_out(i);
3013     assert(iff->is_If(), "where's the if?");
3014 
3015     if (iff->in(0)->is_top()) {
3016       _igvn.replace_input_of(iff, 1, C->top());
3017       continue;
3018     }
3019 
3020     IfTrueNode* iftrue = iff->as_If()->true_proj();
3021     IfFalseNode* iffalse = iff->as_If()->false_proj();
3022     Node* ctrl = iff->in(0);
3023 
3024     Node* subklass = nullptr;
3025     if (_igvn.type(obj_or_subklass)->isa_klassptr()) {
3026       subklass = obj_or_subklass;
3027     } else {
3028       Node* k_adr = basic_plus_adr(obj_or_subklass, oopDesc::klass_offset_in_bytes());
3029       subklass = _igvn.transform(LoadKlassNode::make(_igvn, C->immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
3030     }
3031 
3032     Node* not_subtype_ctrl = Phase::gen_subtype_check(subklass, superklass, &ctrl, nullptr, _igvn, check->method(), check->bci());
3033 
3034     _igvn.replace_input_of(iff, 0, C->top());
3035     _igvn.replace_node(iftrue, not_subtype_ctrl);
3036     _igvn.replace_node(iffalse, ctrl);
3037   }
3038   _igvn.replace_node(check, C->top());
3039 }
3040 
3041 // FlatArrayCheckNode (array1 array2 ...) is expanded into:
3042 //
3043 // long mark = array1.mark | array2.mark | ...;
3044 // long locked_bit = markWord::unlocked_value & array1.mark & array2.mark & ...;
3045 // if (locked_bit == 0) {
3046 //   // One array is locked, load prototype header from the klass
3047 //   mark = array1.klass.proto | array2.klass.proto | ...
3048 // }
3049 // if ((mark & markWord::flat_array_bit_in_place) == 0) {
3050 //    ...
3051 // }
3052 void PhaseMacroExpand::expand_flatarraycheck_node(FlatArrayCheckNode* check) {
3053   bool array_inputs = _igvn.type(check->in(FlatArrayCheckNode::ArrayOrKlass))->isa_oopptr() != nullptr;
3054   if (array_inputs) {
3055     Node* mark = MakeConX(0);
3056     Node* locked_bit = MakeConX(markWord::unlocked_value);
3057     Node* mem = check->in(FlatArrayCheckNode::Memory);
3058     for (uint i = FlatArrayCheckNode::ArrayOrKlass; i < check->req(); ++i) {
3059       Node* ary = check->in(i);
3060       const TypeOopPtr* t = _igvn.type(ary)->isa_oopptr();
3061       assert(t != nullptr, "Mixing array and klass inputs");
3062       assert(!t->is_flat() && !t->is_not_flat(), "Should have been optimized out");
3063       Node* mark_adr = basic_plus_adr(ary, oopDesc::mark_offset_in_bytes());
3064       Node* mark_load = _igvn.transform(LoadNode::make(_igvn, nullptr, mem, mark_adr, mark_adr->bottom_type()->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
3065       mark = _igvn.transform(new OrXNode(mark, mark_load));
3066       locked_bit = _igvn.transform(new AndXNode(locked_bit, mark_load));
3067     }
3068     assert(!mark->is_Con(), "Should have been optimized out");
3069     Node* cmp = _igvn.transform(new CmpXNode(locked_bit, MakeConX(0)));
3070     Node* is_unlocked = _igvn.transform(new BoolNode(cmp, BoolTest::ne));
3071 
3072     // The BoolNode might be shared; replace each If user
3073     Node* old_bol = check->unique_out();
3074     assert(old_bol->is_Bool() && old_bol->as_Bool()->_test._test == BoolTest::ne, "unexpected condition");
3075     for (DUIterator_Last imin, i = old_bol->last_outs(imin); i >= imin; --i) {
3076       IfNode* old_iff = old_bol->last_out(i)->as_If();
3077       Node* ctrl = old_iff->in(0);
3078       RegionNode* region = new RegionNode(3);
3079       Node* mark_phi = new PhiNode(region, TypeX_X);
3080 
3081       // Check if array is unlocked
3082       IfNode* iff = _igvn.transform(new IfNode(ctrl, is_unlocked, PROB_MAX, COUNT_UNKNOWN))->as_If();
3083 
3084       // Unlocked: Use bits from mark word
3085       region->init_req(1, _igvn.transform(new IfTrueNode(iff)));
3086       mark_phi->init_req(1, mark);
3087 
3088       // Locked: Load prototype header from klass
3089       ctrl = _igvn.transform(new IfFalseNode(iff));
3090       Node* proto = MakeConX(0);
3091       for (uint i = FlatArrayCheckNode::ArrayOrKlass; i < check->req(); ++i) {
3092         Node* ary = check->in(i);
3093         // Make loads control dependent to make sure they are only executed if array is locked
3094         Node* klass_adr = basic_plus_adr(ary, oopDesc::klass_offset_in_bytes());
3095         Node* klass = _igvn.transform(LoadKlassNode::make(_igvn, C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
3096         Node* proto_adr = basic_plus_adr(top(), klass, in_bytes(Klass::prototype_header_offset()));
3097         Node* proto_load = _igvn.transform(LoadNode::make(_igvn, ctrl, C->immutable_memory(), proto_adr, proto_adr->bottom_type()->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
3098         proto = _igvn.transform(new OrXNode(proto, proto_load));
3099       }
3100       region->init_req(2, ctrl);
3101       mark_phi->init_req(2, proto);
3102 
3103       // Check if flat array bits are set
3104       Node* mask = MakeConX(markWord::flat_array_bit_in_place);
3105       Node* masked = _igvn.transform(new AndXNode(_igvn.transform(mark_phi), mask));
3106       cmp = _igvn.transform(new CmpXNode(masked, MakeConX(0)));
3107       Node* is_not_flat = _igvn.transform(new BoolNode(cmp, BoolTest::eq));
3108 
3109       ctrl = _igvn.transform(region);
3110       iff = _igvn.transform(new IfNode(ctrl, is_not_flat, PROB_MAX, COUNT_UNKNOWN))->as_If();
3111       _igvn.replace_node(old_iff, iff);
3112     }
3113     _igvn.replace_node(check, C->top());
3114   } else {
3115     // Fall back to layout helper check
3116     Node* lhs = intcon(0);
3117     for (uint i = FlatArrayCheckNode::ArrayOrKlass; i < check->req(); ++i) {
3118       Node* array_or_klass = check->in(i);
3119       Node* klass = nullptr;
3120       const TypePtr* t = _igvn.type(array_or_klass)->is_ptr();
3121       assert(!t->is_flat() && !t->is_not_flat(), "Should have been optimized out");
3122       if (t->isa_oopptr() != nullptr) {
3123         Node* klass_adr = basic_plus_adr(array_or_klass, oopDesc::klass_offset_in_bytes());
3124         klass = transform_later(LoadKlassNode::make(_igvn, C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
3125       } else {
3126         assert(t->isa_klassptr(), "Unexpected input type");
3127         klass = array_or_klass;
3128       }
3129       Node* lh_addr = basic_plus_adr(top(), klass, in_bytes(Klass::layout_helper_offset()));
3130       Node* lh_val = _igvn.transform(LoadNode::make(_igvn, nullptr, C->immutable_memory(), lh_addr, lh_addr->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered));
3131       lhs = _igvn.transform(new OrINode(lhs, lh_val));
3132     }
3133     Node* masked = transform_later(new AndINode(lhs, intcon(Klass::_lh_array_tag_flat_value_bit_inplace)));
3134     Node* cmp = transform_later(new CmpINode(masked, intcon(0)));
3135     Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq));
3136     Node* m2b = transform_later(new Conv2BNode(masked));
3137     // The matcher expects the input to If/CMove nodes to be produced by a Bool(CmpI..)
3138     // pattern, but the input to other potential users (e.g. Phi) to be some
3139     // other pattern (e.g. a Conv2B node, possibly idealized as a CMoveI).
3140     Node* old_bol = check->unique_out();
3141     for (DUIterator_Last imin, i = old_bol->last_outs(imin); i >= imin; --i) {
3142       Node* user = old_bol->last_out(i);
3143       for (uint j = 0; j < user->req(); j++) {
3144         Node* n = user->in(j);
3145         if (n == old_bol) {
3146           _igvn.replace_input_of(user, j, (user->is_If() || user->is_CMove()) ? bol : m2b);
3147         }
3148       }
3149     }
3150     _igvn.replace_node(check, C->top());
3151   }
3152 }
3153 
3154 // Refine strip-mined loop nodes in the macro node list.
3155 void PhaseMacroExpand::refine_strip_mined_loop_macro_nodes() {
3156   for (int i = C->macro_count(); i > 0; i--) {
3157     Node* n = C->macro_node(i - 1);
3158     if (n->is_OuterStripMinedLoop()) {
3159       n->as_OuterStripMinedLoop()->adjust_strip_mined_loop(&_igvn);
3160     }
3161   }
3162 }
3163 
3164 //---------------------------eliminate_macro_nodes----------------------
3165 // Eliminate scalar replaced allocations and associated locks.
3166 void PhaseMacroExpand::eliminate_macro_nodes(bool eliminate_locks) {
3167   if (C->macro_count() == 0) {
3168     return;
3169   }
3170 
3171   if (StressMacroElimination) {
3172     C->shuffle_macro_nodes();
3173   }
3174   NOT_PRODUCT(int membar_before = count_MemBar(C);)
3175 
3176   int iteration = 0;
3177   while (C->macro_count() > 0) {
3178     if (iteration++ > 100) {
3179       assert(false, "Too slow convergence of macro elimination");
3180       break;
3181     }
3182 
3183     // Postpone lock elimination to after EA, when most allocations have been
3184     // eliminated, because allocations might block lock elimination if their escape
3185     // state isn't determined yet and we only get one chance at eliminating the lock.
3186     if (eliminate_locks) {
3187       // Before elimination may re-mark (change to Nested or NonEscObj)
3188       // all associated (same box and obj) lock and unlock nodes.
3189       int cnt = C->macro_count();
3190       for (int i=0; i < cnt; i++) {
3191         Node *n = C->macro_node(i);
3192         if (n->is_AbstractLock()) { // Lock and Unlock nodes
3193           mark_eliminated_locking_nodes(n->as_AbstractLock());
3194         }
3195       }
3196       // Re-marking may break consistency of Coarsened locks.
3197       if (!C->coarsened_locks_consistent()) {
3198         return; // recompile without Coarsened locks if broken
3199       } else {
3200         // After coarsened locks are eliminated, locking regions
3201         // become unbalanced. We should not run any further lock
3202         // elimination optimizations on them.
3203         C->mark_unbalanced_boxes();
3204       }
3205     }
3206 
3207     bool progress = false;
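         // Walk the macro list backwards; the index is re-clamped with MIN2 because
         // a single elimination step can remove several macro nodes from the list.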
3208     for (int i = C->macro_count(); i > 0; i = MIN2(i - 1, C->macro_count())) { // more than 1 element can be eliminated at once
3209       Node* n = C->macro_node(i - 1);
3210       bool success = false;
3211       DEBUG_ONLY(int old_macro_count = C->macro_count();)
3212       switch (n->class_id()) {
3213       case Node::Class_Allocate:
3214       case Node::Class_AllocateArray:
3215         success = eliminate_allocate_node(n->as_Allocate());
3216 #ifndef PRODUCT
3217         if (success && PrintOptoStatistics) {
3218           AtomicAccess::inc(&PhaseMacroExpand::_objs_scalar_replaced_counter);
3219         }
3220 #endif
3221         break;
3222       case Node::Class_CallStaticJava: {
3223         CallStaticJavaNode* call = n->as_CallStaticJava();
3224         if (!call->method()->is_method_handle_intrinsic()) {
3225           success = eliminate_boxing_node(n->as_CallStaticJava());
3226         }
3227         break;
3228       }
3229       case Node::Class_Lock:
3230       case Node::Class_Unlock:
3231         if (eliminate_locks) {
3232           success = eliminate_locking_node(n->as_AbstractLock());
3233 #ifndef PRODUCT
3234           if (success && PrintOptoStatistics) {
3235             AtomicAccess::inc(&PhaseMacroExpand::_monitor_objects_removed_counter);
3236           }
3237 #endif
3238         }
3239         break;
3240       case Node::Class_ArrayCopy:
3241         break;
3242       case Node::Class_OuterStripMinedLoop:
3243         break;
3244       case Node::Class_SubTypeCheck:
3245         break;
3246       case Node::Class_Opaque1:
3247         break;
3248       case Node::Class_FlatArrayCheck:
3249         break;
3250       default:
3251         assert(n->Opcode() == Op_LoopLimit ||
3252                n->Opcode() == Op_ModD ||
3253                n->Opcode() == Op_ModF ||
3254                n->Opcode() == Op_PowD ||
3255                n->is_OpaqueConstantBool()    ||
3256                n->is_OpaqueInitializedAssertionPredicate() ||
3257                n->Opcode() == Op_MaxL      ||
3258                n->Opcode() == Op_MinL      ||
3259                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(n),
3260                "unknown node type in macro list");
3261       }
3262       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
3263       progress = progress || success;
3264       if (success) {
3265         C->print_method(PHASE_AFTER_MACRO_ELIMINATION_STEP, 5, n);
3266       }
3267     }
3268 
3269     // Ensure the graph after PhaseMacroExpand::eliminate_macro_nodes is canonical (no igvn
3270     // transformation is pending). If an allocation is used only in safepoints, elimination of
3271     // other macro nodes can remove all these safepoints, allowing the allocation to be removed.
3272     // Hence, after igvn we retry removing macro nodes if progress has been made
3273     // in this iteration.
3274     _igvn.set_delay_transform(false);
3275     _igvn.optimize();
3276     if (C->failing()) {
3277       return;
3278     }
3279     _igvn.set_delay_transform(true);
3280 
3281     if (!progress) {
3282       break;
3283     }
3284   }
3285 #ifndef PRODUCT
3286   if (PrintOptoStatistics) {
3287     int membar_after = count_MemBar(C);
3288     AtomicAccess::add(&PhaseMacroExpand::_memory_barriers_removed_counter, membar_before - membar_after);
3289   }
3290 #endif
3291 }
3292 
3293 void PhaseMacroExpand::eliminate_opaque_looplimit_macro_nodes() {
3294   if (C->macro_count() == 0) {
3295     return;
3296   }
3297   refine_strip_mined_loop_macro_nodes();
3298   // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
3299   bool progress = true;
3300   while (progress) {
3301     progress = false;
3302     for (int i = C->macro_count(); i > 0; i--) {
3303       Node* n = C->macro_node(i-1);
3304       bool success = false;
3305       DEBUG_ONLY(int old_macro_count = C->macro_count();)
3306       if (n->Opcode() == Op_LoopLimit) {
3307         // Remove it from macro list and put on IGVN worklist to optimize.
3308         C->remove_macro_node(n);
3309         _igvn._worklist.push(n);
3310         success = true;
3311       } else if (n->Opcode() == Op_CallStaticJava) {
3312         CallStaticJavaNode* call = n->as_CallStaticJava();
3313         if (!call->method()->is_method_handle_intrinsic()) {
3314           // Remove it from macro list and put on IGVN worklist to optimize.
3315           C->remove_macro_node(n);
3316           _igvn._worklist.push(n);
3317           success = true;
3318         }
3319       } else if (n->is_Opaque1()) {
3320         _igvn.replace_node(n, n->in(1));
3321         success = true;
3322       } else if (n->is_OpaqueConstantBool()) {
3323         // Tests with OpaqueConstantBool nodes are implicitly known. Replace the node with true/false. In debug builds,
3324         // we leave the test in the graph to have an additional sanity check at runtime. If the test fails (i.e. a bug),
3325         // we will execute a Halt node.
3326 #ifdef ASSERT
3327         _igvn.replace_node(n, n->in(1));
3328 #else
3329         _igvn.replace_node(n, _igvn.intcon(n->as_OpaqueConstantBool()->constant()));
3330 #endif
3331         success = true;
3332       } else if (n->is_OpaqueInitializedAssertionPredicate()) {
3333         // Initialized Assertion Predicates must always evaluate to true. Therefore, we get rid of them in product
3334         // builds as they are useless. In debug builds we keep them as additional verification code. Even though
3335         // loop opts are already over, we want to keep Initialized Assertion Predicates alive as long as possible to
3336         // enable folding of dead control paths in which cast nodes become top due to impossible types - even after
3337         // loop opts are over. Therefore, we delay the removal of these opaque nodes until now.
3338 #ifdef ASSERT

3407     // Worst case is a macro node gets expanded into about 200 nodes.
3408     // Allow 50% more for optimization.
3409     if (C->check_node_count(300, "out of nodes before macro expansion")) {
3410       return true;
3411     }
3412 
3413     DEBUG_ONLY(int old_macro_count = C->macro_count();)
3414     switch (n->class_id()) {
3415     case Node::Class_Lock:
3416       expand_lock_node(n->as_Lock());
3417       break;
3418     case Node::Class_Unlock:
3419       expand_unlock_node(n->as_Unlock());
3420       break;
3421     case Node::Class_ArrayCopy:
3422       expand_arraycopy_node(n->as_ArrayCopy());
3423       break;
3424     case Node::Class_SubTypeCheck:
3425       expand_subtypecheck_node(n->as_SubTypeCheck());
3426       break;
3427     case Node::Class_CallStaticJava:
3428       expand_mh_intrinsic_return(n->as_CallStaticJava());
3429       C->remove_macro_node(n);
3430       break;
3431     case Node::Class_FlatArrayCheck:
3432       expand_flatarraycheck_node(n->as_FlatArrayCheck());
3433       break;
3434     default:
3435       switch (n->Opcode()) {
3436       case Op_ModD:
3437       case Op_ModF:
3438       case Op_PowD: {
3439         CallLeafPureNode* call_macro = n->as_CallLeafPure();
3440         CallLeafPureNode* call = call_macro->inline_call_leaf_pure_node();
3441         _igvn.replace_node(call_macro, call);
3442         transform_later(call);
3443         break;
3444       }
3445       default:
3446         assert(false, "unknown node type in macro list");
3447       }
3448     }
3449     assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
3450     if (C->failing())  return true;
3451     C->print_method(PHASE_AFTER_MACRO_EXPANSION_STEP, 5, n);
3452 
3453     // Clean up the graph so we're less likely to hit the maximum node
< prev index next >