/*
 * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "compiler/compileLog.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "libadt/vectset.hpp"
#include "memory/universe.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/node.hpp"
#include "opto/opaquenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/reachability.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/subtypenode.hpp"
#include "opto/type.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/continuation.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1ThreadLocalData.hpp"
#endif // INCLUDE_G1GC


//
// Replace any references to "oldref" in inputs to "use" with "newref".
// Returns the number of replacements made.
//
int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  int nreplacements = 0;
  uint req = use->req();
  for (uint j = 0; j < use->len(); j++) {
    Node *uin = use->in(j);
    if (uin == oldref) {
      if (j < req)
        use->set_req(j, newref);
      else
        use->set_prec(j, newref);
      nreplacements++;
    } else if (j >= req && uin == nullptr) {
      break;
    }
  }
  return nreplacements;
}

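// Redirect all uses of node "old" to "target"; every use is rehashed for IGVN
// and "old" must end up with no remaining uses.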
void PhaseMacroExpand::migrate_outs(Node *old, Node *target) {
  assert(old != nullptr, "sanity");
  for (DUIterator_Fast imax, i = old->fast_outs(imax); i < imax; i++) {
    Node* use = old->fast_out(i);
    _igvn.rehash_node_delayed(use);
    imax -= replace_input(use, old, target);
    // back up iterator
    --i;
  }
  assert(old->outcnt() == 0, "all uses must be deleted");
}

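// Emit a test of "word" against zero. The fast path (bits are clear) is wired
// into "region" at input "edge"; control for the slow path (bits are set) is
// returned to the caller.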
Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word) {
  Node* cmp = word;
  Node* bol = transform_later(new BoolNode(cmp, BoolTest::ne));
  IfNode* iff = new IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
  transform_later(iff);

  // Fast path taken.
  Node *fast_taken = transform_later(new IfFalseNode(iff));

  // Fast path not-taken, i.e. slow path
  Node *slow_taken = transform_later(new IfTrueNode(iff));

  region->init_req(edge, fast_taken); // Capture fast-control
  return slow_taken;
}

//--------------------copy_predefined_input_for_runtime_call--------------------
void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call) {
  // Set fixed predefined input arguments
  call->init_req( TypeFunc::Control, ctrl );
  call->init_req( TypeFunc::I_O    , oldcall->in( TypeFunc::I_O) );
  call->init_req( TypeFunc::Memory , oldcall->in( TypeFunc::Memory ) ); // ?????
  call->init_req( TypeFunc::ReturnAdr, oldcall->in( TypeFunc::ReturnAdr ) );
  call->init_req( TypeFunc::FramePtr, oldcall->in( TypeFunc::FramePtr ) );
}

//------------------------------make_slow_call---------------------------------
CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type,
                                           address slow_call, const char* leaf_name, Node* slow_path,
                                           Node* parm0, Node* parm1, Node* parm2) {

  // Slow-path call
  CallNode *call = leaf_name
    ? (CallNode*)new CallLeafNode      ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
    : (CallNode*)new CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), TypeRawPtr::BOTTOM );

  // Slow path call has no side-effects, uses few values
  copy_predefined_input_for_runtime_call(slow_path, oldcall, call );
  if (parm0 != nullptr)  call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != nullptr)  call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != nullptr)  call->init_req(TypeFunc::Parms+2, parm2);
  call->copy_call_debug_info(&_igvn, oldcall);
  call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  _igvn.replace_node(oldcall, call);
  transform_later(call);

  return call;
}

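// GC barrier elimination for a non-escaping object is GC-specific, so delegate
// it to the active BarrierSetC2 implementation.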
void PhaseMacroExpand::eliminate_gc_barrier(Node* p2x) {
  BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->eliminate_gc_barrier(this, p2x);
#ifndef PRODUCT
  if (PrintOptoStatistics) {
    AtomicAccess::inc(&PhaseMacroExpand::_GC_barriers_removed_counter);
  }
#endif
}

// Search for a memory operation for the specified memory slice.
static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) {
  Node *orig_mem = mem;
  Node *alloc_mem = alloc->as_Allocate()->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
  assert(alloc_mem != nullptr, "Allocation without a memory projection.");
  const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
  while (true) {
    if (mem == alloc_mem || mem == start_mem ) {
      return mem;  // hit one of our sentinels
    } else if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(alias_idx);
    } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) {
      Node *in = mem->in(0);
      // we can safely skip over safepoints, calls, locks and membars because we
      // already know that the object is safe to eliminate.
      if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) {
        return in;
      } else if (in->is_Call()) {
        CallNode *call = in->as_Call();
        if (call->may_modify(tinst, phase)) {
          assert(call->is_ArrayCopy(), "ArrayCopy is the only call node that doesn't make allocation escape");
          if (call->as_ArrayCopy()->modifies(offset, offset, phase, false)) {
            return in;
          }
        }
        mem = in->in(TypeFunc::Memory);
      } else if (in->is_MemBar()) {
        ArrayCopyNode* ac = nullptr;
        if (ArrayCopyNode::may_modify(tinst, in->as_MemBar(), phase, ac)) {
          if (ac != nullptr) {
            assert(ac->is_clonebasic(), "Only basic clone is a non escaping clone");
            return ac;
          }
        }
        mem = in->in(TypeFunc::Memory);
      } else {
#ifdef ASSERT
        in->dump();
        mem->dump();
        assert(false, "unexpected projection");
#endif
      }
    } else if (mem->is_Store()) {
      const TypePtr* atype = mem->as_Store()->adr_type();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        assert(atype->isa_oopptr(), "address type must be oopptr");
        int adr_offset = atype->offset();
        uint adr_iid = atype->is_oopptr()->instance_id();
        // Array element references have the same alias_idx
        // but different offset and different instance_id.
        if (adr_offset == offset && adr_iid == alloc->_idx) {
          return mem;
        }
      } else {
        assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
      }
      mem = mem->in(MemNode::Memory);
    } else if (mem->is_ClearArray()) {
      if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
        // Can not bypass initialization of the instance
        // we are looking for.
        DEBUG_ONLY(intptr_t offset;)
        assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
        InitializeNode* init = alloc->as_Allocate()->initialization();
        // We are looking for stored value, return Initialize node
        // or memory edge from Allocate node.
        if (init != nullptr) {
          return init;
        } else {
          return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers).
        }
      }
      // Otherwise skip it (the call updated 'mem' value).
    } else if (mem->Opcode() == Op_SCMemProj) {
      mem = mem->in(0);
      Node* adr = nullptr;
      if (mem->is_LoadStore()) {
        adr = mem->in(MemNode::Address);
      } else {
        assert(mem->Opcode() == Op_EncodeISOArray ||
               mem->Opcode() == Op_StrCompressedCopy, "sanity");
        adr = mem->in(3); // Destination array
      }
      const TypePtr* atype = adr->bottom_type()->is_ptr();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        DEBUG_ONLY(mem->dump();)
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return nullptr;
      }
      mem = mem->in(MemNode::Memory);
    } else if (mem->Opcode() == Op_StrInflatedCopy) {
      Node* adr = mem->in(3); // Destination array
      const TypePtr* atype = adr->bottom_type()->is_ptr();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        DEBUG_ONLY(mem->dump();)
        assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
        return nullptr;
      }
      mem = mem->in(MemNode::Memory);
    } else {
      return mem;
    }
    assert(mem != orig_mem, "dead memory loop");
  }
}

// Determine if there is an interfering store between a rematerialization load and an arraycopy that is in the process
// of being elided. Starting from the given rematerialization load this method starts a BFS traversal upwards through
// the memory graph towards the provided ArrayCopyNode. For every node encountered on the traversal, check that it is
// independent of the provided rematerialization load. Returns false if every node on the traversal is independent and
// true otherwise.
bool has_interfering_store(const ArrayCopyNode* ac, LoadNode* load, PhaseGVN* phase) {
  assert(ac != nullptr && load != nullptr, "sanity");
  AccessAnalyzer acc(phase, load);
  ResourceMark rm;
  Unique_Node_List to_visit;
  to_visit.push(load->in(MemNode::Memory));

  for (uint worklist_idx = 0; worklist_idx < to_visit.size(); worklist_idx++) {
    Node* mem = to_visit.at(worklist_idx);

    if (mem->is_Proj() && mem->in(0) == ac) {
      // Reached the target, so visit what is left on the worklist.
      continue;
    }

    if (mem->is_Phi()) {
      assert(mem->bottom_type() == Type::MEMORY, "do not leave memory graph");
      // Add all non-control inputs of phis to be visited.
      for (uint phi_in = 1; phi_in < mem->len(); phi_in++) {
        Node* input = mem->in(phi_in);
        if (input != nullptr) {
          to_visit.push(input);
        }
      }
      continue;
    }

    AccessAnalyzer::AccessIndependence ind = acc.detect_access_independence(mem);
    if (ind.independent) {
      to_visit.push(ind.mem);
    } else {
      return true;
    }
  }
  // Did not find modification of source element in memory graph.
  return false;
}

// Generate loads from source of the arraycopy for fields of destination needed at a deoptimization point.
// Returns nullptr if the load cannot be created because the arraycopy is not suitable for elimination
// (e.g. copy inside the array with non-constant offsets) or the inputs do not match our assumptions (e.g.
// the arraycopy does not actually write something at the provided offset).
Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type* ftype, AllocateNode* alloc) {
  assert((ctl == ac->control() && mem == ac->memory()) != (mem != ac->memory() && ctl->is_Proj() && ctl->as_Proj()->is_uncommon_trap_proj()),
    "Either the control and memory are the same as for the arraycopy or they are pinned in an uncommon trap.");
  BasicType bt = ft;
  const Type *type = ftype;
  if (ft == T_NARROWOOP) {
    bt = T_OBJECT;
    type = ftype->make_oopptr();
  }
  Node* base = ac->in(ArrayCopyNode::Src);
  Node* adr = nullptr;
  const TypePtr* adr_type = nullptr;

  if (ac->is_clonebasic()) {
    assert(ac->in(ArrayCopyNode::Src) != ac->in(ArrayCopyNode::Dest), "clone source equals destination");
    adr = _igvn.transform(AddPNode::make_with_base(base, _igvn.MakeConX(offset)));
    adr_type = _igvn.type(base)->is_ptr()->add_offset(offset);
  } else {
    if (!ac->modifies(offset, offset, &_igvn, true)) {
      // If the arraycopy does not copy to this offset, we cannot generate a rematerialization load for it.
      return nullptr;
    }
    assert(ac->in(ArrayCopyNode::Dest) == alloc->result_cast(), "arraycopy destination should be allocation's result");
    uint shift = exact_log2(type2aelembytes(bt));
    Node* src_pos = ac->in(ArrayCopyNode::SrcPos);
    Node* dest_pos = ac->in(ArrayCopyNode::DestPos);
    const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int();
    const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int();

    if (src_pos_t->is_con() && dest_pos_t->is_con()) {
      intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset;
      adr = _igvn.transform(AddPNode::make_with_base(base, _igvn.MakeConX(off)));
      adr_type = _igvn.type(base)->is_ptr()->add_offset(off);
      if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
        // Don't emit a new load from src if src == dst but try to get the value from memory instead
        return value_from_mem(ac, ctl, ft, ftype, adr_type->isa_oopptr(), alloc);
      }
    } else {
      Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
#ifdef _LP64
      diff = _igvn.transform(new ConvI2LNode(diff));
#endif
      diff = _igvn.transform(new LShiftXNode(diff, _igvn.intcon(shift)));

      Node* off = _igvn.transform(new AddXNode(_igvn.MakeConX(offset), diff));
      adr = _igvn.transform(AddPNode::make_with_base(base, off));
      adr_type = _igvn.type(base)->is_ptr()->add_offset(Type::OffsetBot);
      if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
        // Non-constant offset in the array: we can't statically
        // determine the value
        return nullptr;
      }
    }
  }
  assert(adr != nullptr && adr_type != nullptr, "sanity");

  // Create the rematerialization load ...
  MergeMemNode* mergemem = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem();
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  Node* res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemem, adr, adr_type, type, bt);
  assert(res != nullptr, "load should have been created");

  // ... and ensure that pinning the rematerialization load inside the uncommon path is safe.
  if (mem != ac->memory() && ctl->is_Proj() && ctl->as_Proj()->is_uncommon_trap_proj() && res->is_Load() &&
      has_interfering_store(ac, res->as_Load(), &_igvn)) {
    // Not safe: use control and memory from the arraycopy to ensure correct memory state.
    _igvn.remove_dead_node(res, PhaseIterGVN::NodeOrigin::Graph); // Clean up the unusable rematerialization load.
    return make_arraycopy_load(ac, offset, ac->control(), ac->memory(), ft, ftype, alloc);
  }

  if (ftype->isa_narrowoop()) {
    // PhaseMacroExpand::scalar_replacement adds DecodeN nodes
    res = _igvn.transform(new EncodePNode(res, ftype));
  }
  return res;
}

//
// Given a Memory Phi, compute a value Phi containing the values from stores
// on the input paths.
// Note: this function is recursive; its depth is limited by the "level" argument
// Returns the computed Phi, or null if it cannot compute it.
Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level) {
  assert(mem->is_Phi(), "sanity");
  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  int instance_id = adr_t->instance_id();

  // Check if an appropriate value phi already exists.
  Node* region = mem->in(0);
  for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
    Node* phi = region->fast_out(k);
    if (phi->is_Phi() && phi != mem &&
        phi->as_Phi()->is_same_inst_field(phi_type, (int)mem->_idx, instance_id, alias_idx, offset)) {
      return phi;
    }
  }
  // Check if an appropriate new value phi already exists.
  Node* new_phi = value_phis->find(mem->_idx);
  if (new_phi != nullptr)
    return new_phi;

  if (level <= 0) {
    return nullptr; // Give up: phi tree too deep
  }
  Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
  Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
  assert(alloc_mem != nullptr, "Allocation without a memory projection.");

  uint length = mem->req();
  GrowableArray <Node *> values(length, length, nullptr);

  // create a new Phi for the value
  PhiNode *phi = new PhiNode(mem->in(0), phi_type, nullptr, mem->_idx, instance_id, alias_idx, offset);
  transform_later(phi);
  value_phis->push(phi, mem->_idx);

  for (uint j = 1; j < length; j++) {
    Node *in = mem->in(j);
    if (in == nullptr || in->is_top()) {
      values.at_put(j, in);
    } else {
      Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
      if (val == start_mem || val == alloc_mem) {
        // hit a sentinel, return appropriate 0 value
        values.at_put(j, _igvn.zerocon(ft));
        continue;
      }
      if (val->is_Initialize()) {
        val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      }
      if (val == nullptr) {
        return nullptr;  // can't find a value on this path
      }
      if (val == mem) {
        values.at_put(j, mem);
      } else if (val->is_Store()) {
        Node* n = val->in(MemNode::ValueIn);
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        n = bs->step_over_gc_barrier(n);
        if (is_subword_type(ft)) {
          n = Compile::narrow_value(ft, n, phi_type, &_igvn, true);
        }
        values.at_put(j, n);
      } else if (val->is_Proj() && val->in(0) == alloc) {
        values.at_put(j, _igvn.zerocon(ft));
      } else if (val->is_Phi()) {
        val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
        if (val == nullptr) {
          return nullptr;
        }
        values.at_put(j, val);
      } else if (val->Opcode() == Op_SCMemProj) {
        assert(val->in(0)->is_LoadStore() ||
               val->in(0)->Opcode() == Op_EncodeISOArray ||
               val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity");
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return nullptr;
      } else if (val->is_ArrayCopy()) {
        Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), val->in(TypeFunc::Memory), ft, phi_type, alloc);
        if (res == nullptr) {
          return nullptr;
        }
        values.at_put(j, res);
      } else if (val->is_top()) {
        // This indicates that this path into the phi is dead. Top will eventually also propagate into the Region.
        // IGVN will clean this up later.
        values.at_put(j, val);
      } else {
        DEBUG_ONLY( val->dump(); )
        assert(false, "unknown node on this path");
        return nullptr;  // unknown node on this path
      }
    }
  }
  // Set Phi's inputs
  for (uint j = 1; j < length; j++) {
    if (values.at(j) == mem) {
      phi->init_req(j, phi);
    } else {
      phi->init_req(j, values.at(j));
    }
  }
  return phi;
}

// Search for the last value stored into the object's field.
Node* PhaseMacroExpand::value_from_mem(Node* origin, Node* ctl, BasicType ft, const Type* ftype, const TypeOopPtr* adr_t, AllocateNode* alloc) {
  assert(adr_t->is_known_instance_field(), "instance required");
  int instance_id = adr_t->instance_id();
  assert((uint)instance_id == alloc->_idx, "wrong allocation");

  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  Node* orig_mem = origin->in(TypeFunc::Memory);
  Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
  Node *alloc_ctrl = alloc->in(TypeFunc::Control);
  Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
  assert(alloc_mem != nullptr, "Allocation without a memory projection.");
  VectorSet visited;

  bool done = orig_mem == alloc_mem;
  Node *mem = orig_mem;
  while (!done) {
    if (visited.test_set(mem->_idx)) {
      return nullptr;  // found a loop, give up
    }
    mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
    if (mem == start_mem || mem == alloc_mem) {
      done = true;  // hit a sentinel, return appropriate 0 value
    } else if (mem->is_Initialize()) {
      mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      if (mem == nullptr) {
        done = true; // Something went wrong.
      } else if (mem->is_Store()) {
        const TypePtr* atype = mem->as_Store()->adr_type();
        assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
        done = true;
      }
    } else if (mem->is_Store()) {
      const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
      assert(atype != nullptr, "address type must be oopptr");
      assert(C->get_alias_index(atype) == alias_idx &&
             atype->is_known_instance_field() && atype->offset() == offset &&
             atype->instance_id() == instance_id, "store is correct memory slice");
      done = true;
    } else if (mem->is_Phi()) {
      // try to find a phi's unique input
      Node *unique_input = nullptr;
      Node *top = C->top();
      for (uint i = 1; i < mem->req(); i++) {
        Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
        if (n == nullptr || n == top || n == mem) {
          continue;
        } else if (unique_input == nullptr) {
          unique_input = n;
        } else if (unique_input != n) {
          unique_input = top;
          break;
        }
      }
      if (unique_input != nullptr && unique_input != top) {
        mem = unique_input;
      } else {
        done = true;
      }
    } else if (mem->is_ArrayCopy()) {
      done = true;
    } else if (mem->is_top()) {
      // The slice is on a dead path. Returning nullptr would lead to elimination
      // bailout, but we want to prevent that. Just forwarding the top is also legal,
      // and IGVN can just clean things up, and remove whatever receives top.
      return mem;
    } else {
      DEBUG_ONLY( mem->dump(); )
      assert(false, "unexpected node");
    }
  }
  if (mem != nullptr) {
    if (mem == start_mem || mem == alloc_mem) {
      // hit a sentinel, return appropriate 0 value
      return _igvn.zerocon(ft);
    } else if (mem->is_Store()) {
      Node* n = mem->in(MemNode::ValueIn);
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      n = bs->step_over_gc_barrier(n);
      return n;
    } else if (mem->is_Phi()) {
      // attempt to produce a Phi reflecting the values on the input paths of the Phi
      Node_Stack value_phis(8);
      Node* phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
      if (phi != nullptr) {
        return phi;
      } else {
        // Kill all new Phis
        while (value_phis.is_nonempty()) {
          Node* n = value_phis.node();
          _igvn.replace_node(n, C->top());
          value_phis.pop();
        }
      }
    } else if (mem->is_ArrayCopy()) {
      // Rematerialize the scalar-replaced array. If possible, pin the loads to the uncommon path of the uncommon trap.
      // For each element of the source array, check whether it was modified. If not, pin both memory and control to
      // the uncommon path. Otherwise, use the control and memory state of the arraycopy. Control and memory state must
      // come from the same source to prevent anti-dependence problems in the backend.
      ArrayCopyNode* ac = mem->as_ArrayCopy();
      Node* ac_ctl = ac->control();
      Node* ac_mem = ac->memory();
      if (ctl->is_Proj() && ctl->as_Proj()->is_uncommon_trap_proj()) {
        // pin the loads in the uncommon trap path
        ac_ctl = ctl;
        ac_mem = orig_mem;
      }
      return make_arraycopy_load(ac, offset, ac_ctl, ac_mem, ft, ftype, alloc);
    }
  }
  // Something went wrong.
  return nullptr;
}

// Check the possibility of scalar replacement.
bool PhaseMacroExpand::can_eliminate_allocation(PhaseIterGVN* igvn, AllocateNode *alloc, GrowableArray <SafePointNode *>* safepoints) {
  //  Scan the uses of the allocation to check for anything that would
  //  prevent us from eliminating it.
  NOT_PRODUCT( const char* fail_eliminate = nullptr; )
  DEBUG_ONLY( Node* disq_node = nullptr; )
  bool can_eliminate = true;
  bool reduce_merge_precheck = (safepoints == nullptr);

  Node* res = alloc->result_cast();
  const TypeOopPtr* res_type = nullptr;
  if (res == nullptr) {
    // All users were eliminated.
  } else if (!res->is_CheckCastPP()) {
    NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
    can_eliminate = false;
  } else {
    res_type = igvn->type(res)->isa_oopptr();
    if (res_type == nullptr) {
      NOT_PRODUCT(fail_eliminate = "Neither instance nor array allocation";)
      can_eliminate = false;
    } else if (!res_type->klass_is_exact()) {
      NOT_PRODUCT(fail_eliminate = "Not an exact type.";)
      can_eliminate = false;
    } else if (res_type->isa_aryptr()) {
      int length = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      if (length < 0) {
        NOT_PRODUCT(fail_eliminate = "Array's size is not constant";)
        can_eliminate = false;
      }
    }
  }

  if (can_eliminate && res != nullptr) {
    BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
    for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
                               j < jmax && can_eliminate; j++) {
      Node* use = res->fast_out(j);

      if (use->is_AddP()) {
        const TypePtr* addp_type = igvn->type(use)->is_ptr();
        int offset = addp_type->offset();

        if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
          NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
          can_eliminate = false;
          break;
        }
        for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
                                   k < kmax && can_eliminate; k++) {
          Node* n = use->fast_out(k);
          if (n->is_Mem() && n->as_Mem()->is_mismatched_access()) {
            DEBUG_ONLY(disq_node = n);
            NOT_PRODUCT(fail_eliminate = "Mismatched access");
            can_eliminate = false;
          }
          if (!n->is_Store() && n->Opcode() != Op_CastP2X && !bs->is_gc_pre_barrier_node(n) && !reduce_merge_precheck) {
            DEBUG_ONLY(disq_node = n;)
            if (n->is_Load() || n->is_LoadStore()) {
              NOT_PRODUCT(fail_eliminate = "Field load";)
            } else {
              NOT_PRODUCT(fail_eliminate = "Not store field reference";)
            }
            can_eliminate = false;
          }
        }
      } else if (use->is_ArrayCopy() &&
                 (use->as_ArrayCopy()->is_clonebasic() ||
                  use->as_ArrayCopy()->is_arraycopy_validated() ||
                  use->as_ArrayCopy()->is_copyof_validated() ||
                  use->as_ArrayCopy()->is_copyofrange_validated()) &&
                 use->in(ArrayCopyNode::Dest) == res) {
        // ok to eliminate
      } else if (use->is_ReachabilityFence() && OptimizeReachabilityFences) {
        // ok to eliminate
      } else if (use->is_SafePoint()) {
        SafePointNode* sfpt = use->as_SafePoint();
        if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
          // Object is passed as argument.
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
          can_eliminate = false;
        }
        Node* sfptMem = sfpt->memory();
        if (sfptMem == nullptr || sfptMem->is_top()) {
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "null or TOP memory";)
          can_eliminate = false;
        } else if (!reduce_merge_precheck) {
          safepoints->append_if_missing(sfpt);
        }
      } else if (reduce_merge_precheck &&
                 (use->is_Phi() || use->is_EncodeP() ||
                  use->Opcode() == Op_MemBarRelease ||
                  (UseStoreStoreForCtor && use->Opcode() == Op_MemBarStoreStore))) {
        // Nothing to do
      } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
        if (use->is_Phi()) {
          if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
          }
          DEBUG_ONLY(disq_node = use;)
        } else {
          if (use->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
          }
          DEBUG_ONLY(disq_node = use;)
        }
        can_eliminate = false;
      }
    }
  }

#ifndef PRODUCT
  if (PrintEliminateAllocations && safepoints != nullptr) {
    if (can_eliminate) {
      tty->print("Scalar ");
      if (res == nullptr)
        alloc->dump();
      else
        res->dump();
    } else if (alloc->_is_scalar_replaceable) {
      tty->print("NotScalar (%s)", fail_eliminate);
      if (res == nullptr)
        alloc->dump();
      else
        res->dump();
#ifdef ASSERT
      if (disq_node != nullptr) {
          tty->print("  >>>> ");
          disq_node->dump();
      }
#endif /*ASSERT*/
    }
  }

  if (TraceReduceAllocationMerges && !can_eliminate && reduce_merge_precheck) {
    tty->print_cr("\tCan't eliminate allocation because '%s': ", fail_eliminate != nullptr ? fail_eliminate : "");
    DEBUG_ONLY(if (disq_node != nullptr) disq_node->dump();)
  }
#endif
  return can_eliminate;
}

void PhaseMacroExpand::undo_previous_scalarizations(GrowableArray <SafePointNode *> safepoints_done, AllocateNode* alloc) {
  Node* res = alloc->result_cast();
  int nfields = 0;
  assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");

  if (res != nullptr) {
    const TypeOopPtr* res_type = _igvn.type(res)->isa_oopptr();

    if (res_type->isa_instptr()) {
      // find the fields of the class which will be needed for safepoint debug information
      ciInstanceKlass* iklass = res_type->is_instptr()->instance_klass();
      nfields = iklass->nof_nonstatic_fields();
    } else {
      // find the array's elements which will be needed for safepoint debug information
      nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      assert(nfields >= 0, "must be an array klass.");
    }
  }

  // rollback processed safepoints
  while (safepoints_done.length() > 0) {
    SafePointNode* sfpt_done = safepoints_done.pop();

    SafePointNode::NodeEdgeTempStorage non_debug_edges_worklist(igvn());

    sfpt_done->remove_non_debug_edges(non_debug_edges_worklist);

    // remove any extra entries we added to the safepoint
    assert(sfpt_done->jvms()->endoff() == sfpt_done->req(), "no extra edges past debug info allowed");
    uint last = sfpt_done->req() - 1;
    for (int k = 0;  k < nfields; k++) {
      sfpt_done->del_req(last--);
    }
    JVMState *jvms = sfpt_done->jvms();
    jvms->set_endoff(sfpt_done->req());
    // Now make a pass over the debug information replacing any references
    // to SafePointScalarObjectNode with the allocated object.
    int start = jvms->debug_start();
    int end   = jvms->debug_end();
    for (int i = start; i < end; i++) {
      if (sfpt_done->in(i)->is_SafePointScalarObject()) {
        SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
        if (scobj->first_index(jvms) == sfpt_done->req() &&
            scobj->n_fields() == (uint)nfields) {
          assert(scobj->alloc() == alloc, "sanity");
          sfpt_done->set_req(i, res);
        }
      }
    }

    sfpt_done->restore_non_debug_edges(non_debug_edges_worklist);

    _igvn._worklist.push(sfpt_done);
  }
}

#ifdef ASSERT
  // Verify if a value can be written into a field.
  void verify_type_compatibility(const Type* value_type, const Type* field_type) {
    BasicType value_bt = value_type->basic_type();
    BasicType field_bt = field_type->basic_type();

    // Primitive types must match.
    if (is_java_primitive(value_bt) && value_bt == field_bt) { return; }

    // I have been struggling to make a similar assert for non-primitive
    // types. Maybe we can add one in the future. For now, I just let them
    // pass without checks.
    // In particular, I was struggling with a value that came from a call,
    // and had only a non-null check CastPP. There was also a checkcast
    // in the graph to verify the interface, but the corresponding
    // CheckCastPP result was not updated in the stack slot, and so
    // we ended up using the CastPP. That means that the field knows
    // that it should get an oop from an interface, but the value lost
    // that information, and so it is not a subtype.
    // There may be other issues, feel free to investigate further!
    if (!is_java_primitive(value_bt)) { return; }

    tty->print_cr("value not compatible for field: %s vs %s",
                  type2name(value_bt),
                  type2name(field_bt));
    tty->print("value_type: ");
    value_type->dump();
    tty->cr();
    tty->print("field_type: ");
    field_type->dump();
    tty->cr();
    assert(false, "value_type does not fit field_type");
  }
#endif

SafePointScalarObjectNode* PhaseMacroExpand::create_scalarized_object_description(AllocateNode *alloc, SafePointNode* sfpt) {
  assert(sfpt->jvms()->endoff() == sfpt->req(), "no extra edges past debug info allowed");

  // Fields of scalar objs are referenced only at the end
  // of regular debuginfo at the last (youngest) JVMS.
  // Record relative start index.
  ciInstanceKlass* iklass    = nullptr;
  BasicType basic_elem_type  = T_ILLEGAL;
  const Type* field_type     = nullptr;
  const TypeOopPtr* res_type = nullptr;
  int nfields                = 0;
  int array_base             = 0;
  int element_size           = 0;
  uint first_ind             = (sfpt->req() - sfpt->jvms()->scloff());
  Node* res                  = alloc->result_cast();

  assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");
  assert(sfpt->jvms() != nullptr, "missed JVMS");

  if (res != nullptr) { // Could be null when there are no users
    res_type = _igvn.type(res)->isa_oopptr();

    if (res_type->isa_instptr()) {
      // find the fields of the class which will be needed for safepoint debug information
      iklass = res_type->is_instptr()->instance_klass();
      nfields = iklass->nof_nonstatic_fields();
    } else {
      // find the array's elements which will be needed for safepoint debug information
      nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      assert(nfields >= 0, "must be an array klass.");
      basic_elem_type = res_type->is_aryptr()->elem()->array_element_basic_type();
      array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
      element_size = type2aelembytes(basic_elem_type);
      field_type = res_type->is_aryptr()->elem();
    }
  }

  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(res_type, alloc, first_ind, sfpt->jvms()->depth(), nfields);
  sobj->init_req(0, C->root());
  transform_later(sobj);

  // Scan object's fields adding an input to the safepoint for each field.
  for (int j = 0; j < nfields; j++) {
    intptr_t offset;
    ciField* field = nullptr;
    if (iklass != nullptr) {
      field = iklass->nonstatic_field_at(j);
      offset = field->offset_in_bytes();
      ciType* elem_type = field->type();
      basic_elem_type = field->layout_type();

      // The next code is taken from Parse::do_get_xxx().
      if (is_reference_type(basic_elem_type)) {
        if (!elem_type->is_loaded()) {
          field_type = TypeInstPtr::BOTTOM;
        } else if (field != nullptr && field->is_static_constant()) {
          ciObject* con = field->constant_value().as_object();
          // Do not "join" in the previous type; it doesn't add value,
          // and may yield a vacuous result if the field is of interface type.
          field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
          assert(field_type != nullptr, "field singleton type must be consistent");
        } else {
          field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
        }
        if (UseCompressedOops) {
          field_type = field_type->make_narrowoop();
          basic_elem_type = T_NARROWOOP;
        }
      } else {
        field_type = Type::get_const_basic_type(basic_elem_type);
      }
    } else {
      offset = array_base + j * (intptr_t)element_size;
    }

    const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr();

    Node* field_val = value_from_mem(sfpt, sfpt->control(), basic_elem_type, field_type, field_addr_type, alloc);

    // We weren't able to find a value for this field,
    // give up on eliminating this allocation.
    if (field_val == nullptr) {
      uint last = sfpt->req() - 1;
      for (int k = 0;  k < j; k++) {
        sfpt->del_req(last--);
      }
      _igvn._worklist.push(sfpt);

#ifndef PRODUCT
      if (PrintEliminateAllocations) {
        if (field != nullptr) {
          tty->print("=== At SafePoint node %d can't find value of field: ", sfpt->_idx);
          field->print();
          int field_idx = C->get_alias_index(field_addr_type);
          tty->print(" (alias_idx=%d)", field_idx);
        } else { // Array's element
          tty->print("=== At SafePoint node %d can't find value of array element [%d]", sfpt->_idx, j);
        }
        tty->print(", which prevents elimination of: ");
        if (res == nullptr)
          alloc->dump();
        else
          res->dump();
      }
#endif

      return nullptr;
    }

    if (UseCompressedOops && field_type->isa_narrowoop()) {
      // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
      // to be able to scalar replace the allocation.
      if (field_val->is_EncodeP()) {
        field_val = field_val->in(1);
      } else {
        field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
      }
    }
    DEBUG_ONLY(verify_type_compatibility(field_val->bottom_type(), field_type);)
    sfpt->add_req(field_val);
  }

  sfpt->jvms()->set_endoff(sfpt->req());

  return sobj;
}

// Do scalar replacement.
bool PhaseMacroExpand::scalar_replacement(AllocateNode* alloc, GrowableArray<SafePointNode*>& safepoints) {
  GrowableArray<SafePointNode*> safepoints_done;
  Node* res = alloc->result_cast();
  assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");

  // Process the safepoint uses
  while (safepoints.length() > 0) {
    SafePointNode* sfpt = safepoints.pop();

    SafePointNode::NodeEdgeTempStorage non_debug_edges_worklist(igvn());

    // All sfpt inputs are implicitly included into debug info during the scalarization process below.
    // Keep non-debug inputs separately, so they stay non-debug.
    sfpt->remove_non_debug_edges(non_debug_edges_worklist);

    SafePointScalarObjectNode* sobj = create_scalarized_object_description(alloc, sfpt);

    if (sobj == nullptr) {
      sfpt->restore_non_debug_edges(non_debug_edges_worklist);
      undo_previous_scalarizations(safepoints_done, alloc);
      return false;
    }

    // Now make a pass over the debug information replacing any references
    // to the allocated object with "sobj"
    JVMState *jvms = sfpt->jvms();
    sfpt->replace_edges_in_range(res, sobj, jvms->debug_start(), jvms->debug_end(), &_igvn);
    non_debug_edges_worklist.remove_edge_if_present(res); // drop scalarized input from non-debug info
    sfpt->restore_non_debug_edges(non_debug_edges_worklist);
    _igvn._worklist.push(sfpt);

    // keep it for rollback
    safepoints_done.append_if_missing(sfpt);
  }

  return true;
}

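// Bypass node "n": reroute its control and memory projections (when present)
// to n's own control and memory inputs.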
static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
  Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
  Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory);
  if (ctl_proj != nullptr) {
    igvn.replace_node(ctl_proj, n->in(0));
  }
  if (mem_proj != nullptr) {
    igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
  }
}

// Process users of eliminated allocation.
void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
  Node* res = alloc->result_cast();
  if (res != nullptr) {
    for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
      Node *use = res->last_out(j);
      uint oc1 = res->outcnt();

      if (use->is_AddP()) {
        for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
          Node *n = use->last_out(k);
          uint oc2 = use->outcnt();
          if (n->is_Store()) {
#ifdef ASSERT
            // Verify that there are no dependent MemBarVolatile nodes,
            // they should be removed during IGVN, see MemBarNode::Ideal().
            for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
                                       p < pmax; p++) {
              Node* mb = n->fast_out(p);
              assert(mb->is_Initialize() || !mb->is_MemBar() ||
                     mb->req() <= MemBarNode::Precedent ||
                     mb->in(MemBarNode::Precedent) != n,
                     "MemBarVolatile should be eliminated for non-escaping object");
            }
#endif
            _igvn.replace_node(n, n->in(MemNode::Memory));
          } else {
            eliminate_gc_barrier(n);
          }
          k -= (oc2 - use->outcnt());
        }
        _igvn.remove_dead_node(use, PhaseIterGVN::NodeOrigin::Graph);
      } else if (use->is_ArrayCopy()) {
        // Disconnect ArrayCopy node
        ArrayCopyNode* ac = use->as_ArrayCopy();
        if (ac->is_clonebasic()) {
          Node* membar_after = ac->proj_out(TypeFunc::Control)->unique_ctrl_out();
          disconnect_projections(ac, _igvn);
          assert(alloc->in(TypeFunc::Memory)->is_Proj() && alloc->in(TypeFunc::Memory)->in(0)->Opcode() == Op_MemBarCPUOrder, "mem barrier expected before allocation");
          Node* membar_before = alloc->in(TypeFunc::Memory)->in(0);
          disconnect_projections(membar_before->as_MemBar(), _igvn);
          if (membar_after->is_MemBar()) {
            disconnect_projections(membar_after->as_MemBar(), _igvn);
          }
        } else {
          assert(ac->is_arraycopy_validated() ||
                 ac->is_copyof_validated() ||
                 ac->is_copyofrange_validated(), "unsupported");
          CallProjections callprojs;
          ac->extract_projections(&callprojs, true);

          _igvn.replace_node(callprojs.fallthrough_ioproj, ac->in(TypeFunc::I_O));
          _igvn.replace_node(callprojs.fallthrough_memproj, ac->in(TypeFunc::Memory));
          _igvn.replace_node(callprojs.fallthrough_catchproj, ac->in(TypeFunc::Control));

          // Set control to top. IGVN will remove the remaining projections
          ac->set_req(0, top());
          ac->replace_edge(res, top(), &_igvn);

          // Disconnect src right away: it can help find new
          // opportunities for allocation elimination
          Node* src = ac->in(ArrayCopyNode::Src);
          ac->replace_edge(src, top(), &_igvn);
          // src can be top at this point if src and dest of the
          // arraycopy were the same
          if (src->outcnt() == 0 && !src->is_top()) {
            _igvn.remove_dead_node(src, PhaseIterGVN::NodeOrigin::Graph);
          }
        }
        _igvn._worklist.push(ac);
      } else if (use->is_ReachabilityFence() && OptimizeReachabilityFences) {
        use->as_ReachabilityFence()->clear_referent(_igvn); // redundant fence; will be removed during IGVN
      } else {
        eliminate_gc_barrier(use);
      }
      j -= (oc1 - res->outcnt());
    }
    assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
    _igvn.remove_dead_node(res, PhaseIterGVN::NodeOrigin::Graph);
  }

  //
  // Process other users of allocation's projections
  //
  if (_callprojs.resproj != nullptr && _callprojs.resproj->outcnt() != 0) {
    // First disconnect stores captured by Initialize node.
    // If Initialize node is eliminated first in the following code,
    // it will kill such stores and DUIterator_Last will assert.
    for (DUIterator_Fast jmax, j = _callprojs.resproj->fast_outs(jmax);  j < jmax; j++) {
      Node* use = _callprojs.resproj->fast_out(j);
      if (use->is_AddP()) {
        // raw memory addresses used only by the initialization
        _igvn.replace_node(use, C->top());
        --j; --jmax;
      }
    }
    for (DUIterator_Last jmin, j = _callprojs.resproj->last_outs(jmin); j >= jmin; ) {
      Node* use = _callprojs.resproj->last_out(j);
      uint oc1 = _callprojs.resproj->outcnt();
      if (use->is_Initialize()) {
        // Eliminate Initialize node.
        InitializeNode *init = use->as_Initialize();
        Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
        if (ctrl_proj != nullptr) {
          _igvn.replace_node(ctrl_proj, init->in(TypeFunc::Control));
#ifdef ASSERT
          // If the InitializeNode has no memory out, it will die, and tmp will become null
          Node* tmp = init->in(TypeFunc::Control);
          assert(tmp == nullptr || tmp == _callprojs.fallthrough_catchproj, "allocation control projection");
#endif
        }
        Node* mem = init->in(TypeFunc::Memory);
#ifdef ASSERT
        if (init->number_of_projs(TypeFunc::Memory) > 0) {
          if (mem->is_MergeMem()) {
            assert(mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == _callprojs.fallthrough_memproj, "allocation memory projection");
          } else {
            assert(mem == _callprojs.fallthrough_memproj, "allocation memory projection");
          }
        }
#endif
        init->replace_mem_projs_by(mem, &_igvn);
        assert(init->outcnt() == 0, "should only have had a control and some memory projections, and we removed them");
      } else {
        assert(false, "only Initialize or AddP expected");
      }
      j -= (oc1 - _callprojs.resproj->outcnt());
    }
  }
  if (_callprojs.fallthrough_catchproj != nullptr) {
    _igvn.replace_node(_callprojs.fallthrough_catchproj, alloc->in(TypeFunc::Control));
  }
  if (_callprojs.fallthrough_memproj != nullptr) {
    _igvn.replace_node(_callprojs.fallthrough_memproj, alloc->in(TypeFunc::Memory));
  }
  if (_callprojs.catchall_memproj != nullptr) {
    _igvn.replace_node(_callprojs.catchall_memproj, C->top());
  }
  if (_callprojs.fallthrough_ioproj != nullptr) {
    _igvn.replace_node(_callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
  }
  if (_callprojs.catchall_ioproj != nullptr) {
    _igvn.replace_node(_callprojs.catchall_ioproj, C->top());
  }
  if (_callprojs.catchall_catchproj != nullptr) {
    _igvn.replace_node(_callprojs.catchall_catchproj, C->top());
  }
}

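// Try to eliminate a non-escaping allocation: scalar-replace its fields into
// the debug info of all safepoints that reference it, then remove the
// allocation, its users, and its projections.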
bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
  // If reallocation fails during deoptimization we'll pop all
  // interpreter frames for this compiled frame and that won't play
  // nice with JVMTI popframe.
  // We avoid this issue by eager reallocation when the popframe request
  // is received.
  if (!EliminateAllocations || !alloc->_is_non_escaping) {
    return false;
  }
  Node* klass = alloc->in(AllocateNode::KlassNode);
  const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
  Node* res = alloc->result_cast();
  // Eliminate boxing allocations which are not used
  // regardless of their scalar replaceable status.
  bool boxing_alloc = C->eliminate_boxing() &&
                      tklass->isa_instklassptr() &&
                      tklass->is_instklassptr()->instance_klass()->is_box_klass();
  if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != nullptr))) {
    return false;
  }

  alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);

  GrowableArray <SafePointNode *> safepoints;
  if (!can_eliminate_allocation(&_igvn, alloc, &safepoints)) {
    return false;
  }

  if (!alloc->_is_scalar_replaceable) {
    assert(res == nullptr, "sanity");
    // We can only eliminate allocation if all debug info references
    // are already replaced with SafePointScalarObject because
    // we can't search for a field's value without instance_id.
    if (safepoints.length() > 0) {
      return false;
    }
  }

  if (!scalar_replacement(alloc, safepoints)) {
    return false;
  }

  CompileLog* log = C->log();
  if (log != nullptr) {
    log->head("eliminate_allocation type='%d'",
              log->identify(tklass->exact_klass()));
    JVMState* p = alloc->jvms();
    while (p != nullptr) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("eliminate_allocation");
  }

  process_users_of_allocation(alloc);

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (alloc->is_AllocateArray())
      tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
    else
      tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
  }
#endif

  return true;
}

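// Eliminate a boxing (valueOf) call whose boxed result is unused; escape
// analysis has already removed all uses of the non-escaping box.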
1247 bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
1248   // EA should remove all uses of non-escaping boxing node.
1249   if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != nullptr) {
1250     return false;
1251   }
1252 
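  // Illustrative example (hypothetical Java shape): a call such as
  //   Integer.valueOf(x)
  // whose boxed result is provably unused (hence no Parms projection above)
  // can be removed outright; only its non-result projections need rewiring.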
1253   assert(boxing->result_cast() == nullptr, "unexpected boxing node result");
1254 
1255   boxing->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
1256 
1257   const TypeTuple* r = boxing->tf()->range();
1258   assert(r->cnt() > TypeFunc::Parms, "sanity");
1259   const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
1260   assert(t != nullptr, "sanity");
1261 
1262   CompileLog* log = C->log();
1263   if (log != nullptr) {
1264     log->head("eliminate_boxing type='%d'",
1265               log->identify(t->instance_klass()));
1266     JVMState* p = boxing->jvms();
1267     while (p != nullptr) {
1268       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1269       p = p->caller();
1270     }
1271     log->tail("eliminate_boxing");
1272   }
1273 
1274   process_users_of_allocation(boxing);
1275 
1276 #ifndef PRODUCT
1277   if (PrintEliminateAllocations) {
1278     tty->print("++++ Eliminated: %d ", boxing->_idx);
1279     boxing->method()->print_short_name(tty);
1280     tty->cr();
1281   }
1282 #endif
1283 
1284   return true;
1285 }
1286 
1287 
1288 Node* PhaseMacroExpand::make_load_raw(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
1289   Node* adr = off_heap_plus_addr(base, offset);
1290   const TypePtr* adr_type = adr->bottom_type()->is_ptr();
1291   Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt, MemNode::unordered);
1292   transform_later(value);
1293   return value;
1294 }
1295 
1296 
1297 Node* PhaseMacroExpand::make_store_raw(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
1298   Node* adr = off_heap_plus_addr(base, offset);
1299   mem = StoreNode::make(_igvn, ctl, mem, adr, nullptr, value, bt, MemNode::unordered);
1300   transform_later(mem);
1301   return mem;
1302 }
1303 
1304 //=============================================================================
1305 //
1306 //                              A L L O C A T I O N
1307 //
1308 // Allocation attempts to be fast in the case of frequent small objects.
1309 // It breaks down like this:
1310 //
1311 // 1) Size in doublewords is computed.  This is a constant for objects and
1312 // variable for most arrays.  Doubleword units are used to avoid size
1313 // overflow of huge doubleword arrays.  We need doublewords in the end for
1314 // rounding.
1315 //
1316 // 2) Size is checked for being 'too large'.  Too-large allocations will go
1317 // the slow path into the VM.  The slow path can throw any required
1318 // exceptions, and does all the special checks for very large arrays.  The
1319 // size test can constant-fold away for objects.  For objects with
1320 // finalizers it constant-folds the other way: you always go slow with
1321 // finalizers.
1322 //
1323 // 3) If NOT using TLABs, this is the contended loop-back point.
1324 // Load-Locked the heap top.  If using TLABs normal-load the heap top.
1325 //
1326 // 4) Check that heap top + size*8 < max.  If we fail go the slow route.
1327 // NOTE: "top+size*8" cannot wrap the 4Gig line!  Here's why: for largish
1328 // "size*8" we always enter the VM, where "largish" is a constant picked small
1329 // enough that there's always space between the eden max and 4Gig (old space is
1330 // there so it's quite large) and large enough that the cost of entering the VM
1331 // is dwarfed by the cost to initialize the space.
1332 //
1333 // 5) If NOT using TLABs, Store-Conditional the adjusted heap top back
1334 // down.  If contended, repeat at step 3.  If using TLABs normal-store
1335 // adjusted heap top back down; there is no contention.
1336 //
1337 // 6) If !ZeroTLAB then Bulk-clear the object/array.  Fill in klass & mark
1338 // fields.
1339 //
1340 // 7) Merge with the slow-path; cast the raw memory pointer to the correct
1341 // oop flavor.
1342 //
1343 //=============================================================================
1344 // FastAllocateSizeLimit value is in DOUBLEWORDS.
1345 // Allocations bigger than this always go the slow route.
1346 // This value must be small enough that allocation attempts that need to
1347 // trigger exceptions go the slow route.  Also, it must be small enough so
1348 // that heap_top + size_in_bytes does not wrap around the 4Gig limit.
1349 //=============================================================================
1350 // %%% Here is an old comment from parseHelper.cpp; is it outdated?
1351 // The allocator will coalesce int->oop copies away.  See comment in
1352 // coalesce.cpp about how this works.  It depends critically on the exact
1353 // code shape produced here, so if you are changing this code shape
1354 // make sure the GC info for the heap-top is correct in and around the
1355 // slow-path call.
1356 //
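// A rough sketch of the fast path that steps 1)-7) above describe, for the
// common TLAB case (illustrative pseudocode only; the names are not actual
// node names, and the real shape is built node-by-node in
// expand_allocate_common() and initialize_object()):
//
//   top     = thread->tlab_top();             // 3) normal-load the TLAB top
//   new_top = top + size_in_bytes;
//   if (new_top > thread->tlab_end()) {       // 4) does not fit?
//     goto slow_path;                         //    -> runtime call
//   }
//   thread->set_tlab_top(new_top);            // 5) normal-store, no contention
//   store mark word and klass at 'top';       // 6) fill in header fields
//   if (!ZeroTLAB) clear the object body;     // 6) bulk-clear if needed
//   oop = cast 'top' to the correct oop flavor; // 7) merge with the slow path
//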
1357 
1358 void PhaseMacroExpand::expand_allocate_common(
1359             AllocateNode* alloc, // allocation node to be expanded
1360             Node* length,  // array length for an array allocation
1361             const TypeFunc* slow_call_type, // Type of slow call
1362             address slow_call_address,  // Address of slow call
1363             Node* valid_length_test // whether length is valid or not
1364     )
1365 {
1366   Node* ctrl = alloc->in(TypeFunc::Control);
1367   Node* mem  = alloc->in(TypeFunc::Memory);
1368   Node* i_o  = alloc->in(TypeFunc::I_O);
1369   Node* size_in_bytes     = alloc->in(AllocateNode::AllocSize);
1370   Node* klass_node        = alloc->in(AllocateNode::KlassNode);
1371   Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);
1372   assert(ctrl != nullptr, "must have control");
1373 
1374   // We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
1375   // They will not be used if "always_slow" is set.
1376   enum { slow_result_path = 1, fast_result_path = 2 };
1377   Node *result_region = nullptr;
1378   Node *result_phi_rawmem = nullptr;
1379   Node *result_phi_rawoop = nullptr;
1380   Node *result_phi_i_o = nullptr;
1381 
1382   // The initial slow comparison is a size check; the comparison
1383   // we want to do is a BoolTest::gt.
1384   bool expand_fast_path = true;
1385   int tv = _igvn.find_int_con(initial_slow_test, -1);
1386   if (tv >= 0) {
1387     // InitialTest has constant result
1388     //   0 - can fit in TLAB
1389     //   1 - always too big or negative
1390     assert(tv <= 1, "0 or 1 if a constant");
1391     expand_fast_path = (tv == 0);
1392     initial_slow_test = nullptr;
1393   } else {
1394     initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn);
1395   }
1396 
1397   if (!UseTLAB) {
1398     // Force slow-path allocation
1399     expand_fast_path = false;
1400     initial_slow_test = nullptr;
1401   }
1402 
1403   // An ArrayCopyNode right after an allocation operates on the raw result projection of the Allocate node, so it's not
1404   // safe to remove such an allocation even if it has no result cast.
1405   bool allocation_has_use = (alloc->result_cast() != nullptr) || (alloc->initialization() != nullptr && alloc->initialization()->is_complete_with_arraycopy());
1406   if (!allocation_has_use) {
1407     InitializeNode* init = alloc->initialization();
1408     if (init != nullptr) {
1409       init->remove(&_igvn);
1410     }
1411     if (expand_fast_path && (initial_slow_test == nullptr)) {
1412       // Remove allocation node and return.
1413       // Size is a non-negative constant -> no initial check needed -> directly to fast path.
1414       // Also, no usages -> empty fast path -> no fall out to slow path -> nothing left.
1415 #ifndef PRODUCT
1416       if (PrintEliminateAllocations) {
1417         tty->print("NotUsed ");
1418         Node* res = alloc->proj_out_or_null(TypeFunc::Parms);
1419         if (res != nullptr) {
1420           res->dump();
1421         } else {
1422           alloc->dump();
1423         }
1424       }
1425 #endif
1426       yank_alloc_node(alloc);
1427       return;
1428     }
1429   }
1430 
1431   enum { too_big_or_final_path = 1, need_gc_path = 2 };
1432   Node *slow_region = nullptr;
1433   Node *toobig_false = ctrl;
1434 
1435   // generate the initial test if necessary
1436   if (initial_slow_test != nullptr ) {
1437     assert (expand_fast_path, "Only need test if there is a fast path");
1438     slow_region = new RegionNode(3);
1439 
1440     // Now make the initial failure test.  Usually a too-big test but
1441     // might be a TRUE for finalizers.
1442     IfNode *toobig_iff = new IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
1443     transform_later(toobig_iff);
1444     // Plug the failing-too-big test into the slow-path region
1445     Node *toobig_true = new IfTrueNode( toobig_iff );
1446     transform_later(toobig_true);
1447     slow_region    ->init_req( too_big_or_final_path, toobig_true );
1448     toobig_false = new IfFalseNode( toobig_iff );
1449     transform_later(toobig_false);
1450   } else {
1451     // No initial test, just fall into next case
1452     assert(allocation_has_use || !expand_fast_path, "Should already have been handled");
1453     toobig_false = ctrl;
1454     DEBUG_ONLY(slow_region = NodeSentinel);
1455   }
1456 
1457   // If we are here there are several possibilities:
1458   // - expand_fast_path is false - then only a slow path is expanded. That's it.
1459   //   ("no_initial_check" below means a constant allocation, i.e. the initial size test folded away.)
1460   // - If the check always evaluates to false -> expand_fast_path is false (see above).
1461   // - If the check always evaluates to true -> we go directly into the fast path (but may bail out to the slow path).
1462   // - If !allocation_has_use, the fast path is empty.
1463   // - If !allocation_has_use && no_initial_check, then no fast path can fall out
1464   //   to the slow path -> no allocation code at all; that case was already
1465   //   removed by yank_alloc_node above.
1466 
1467   Node *slow_mem = mem;  // save the current memory state for slow path
1468   // generate the fast allocation code unless we know that the initial test will always go slow
1469   if (expand_fast_path) {
1470     // Fast path modifies only raw memory.
1471     if (mem->is_MergeMem()) {
1472       mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1473     }
1474 
1475     // allocate the Region and Phi nodes for the result
1476     result_region = new RegionNode(3);
1477     result_phi_rawmem = new PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
1478     result_phi_i_o    = new PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch
1479 
1480     // Grab regular I/O before optional prefetch may change it.
1481     // Slow-path does no I/O so just set it to the original I/O.
1482     result_phi_i_o->init_req(slow_result_path, i_o);
1483 
1484     // Name successful fast-path variables
1485     Node* fast_oop_ctrl;
1486     Node* fast_oop_rawmem;
1487     if (allocation_has_use) {
1488       Node* needgc_ctrl = nullptr;
1489       result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);
1490 
1491       intx prefetch_lines = length != nullptr ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
1492       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1493       Node* fast_oop = bs->obj_allocate(this, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl,
1494                                         fast_oop_ctrl, fast_oop_rawmem,
1495                                         prefetch_lines);
1496 
1497       if (initial_slow_test != nullptr) {
1498         // This completes all paths into the slow merge point
1499         slow_region->init_req(need_gc_path, needgc_ctrl);
1500         transform_later(slow_region);
1501       } else {
1502         // No initial slow path needed!
1503         // Just fall from the need-GC path straight into the VM call.
1504         slow_region = needgc_ctrl;
1505       }
1506 
1507       InitializeNode* init = alloc->initialization();
1508       fast_oop_rawmem = initialize_object(alloc,
1509                                           fast_oop_ctrl, fast_oop_rawmem, fast_oop,
1510                                           klass_node, length, size_in_bytes);
1511       expand_initialize_membar(alloc, init, fast_oop_ctrl, fast_oop_rawmem);
1512       expand_dtrace_alloc_probe(alloc, fast_oop, fast_oop_ctrl, fast_oop_rawmem);
1513 
1514       result_phi_rawoop->init_req(fast_result_path, fast_oop);
1515     } else {
1516       assert (initial_slow_test != nullptr, "sanity");
1517       fast_oop_ctrl   = toobig_false;
1518       fast_oop_rawmem = mem;
1519       transform_later(slow_region);
1520     }
1521 
1522     // Plug in the successful fast-path into the result merge point
1523     result_region    ->init_req(fast_result_path, fast_oop_ctrl);
1524     result_phi_i_o   ->init_req(fast_result_path, i_o);
1525     result_phi_rawmem->init_req(fast_result_path, fast_oop_rawmem);
1526   } else {
1527     slow_region = ctrl;
1528     result_phi_i_o = i_o; // Rename it to use in the following code.
1529   }
1530 
1531   // Generate slow-path call
1532   CallNode *call = new CallStaticJavaNode(slow_call_type, slow_call_address,
1533                                OptoRuntime::stub_name(slow_call_address),
1534                                TypePtr::BOTTOM);
1535   call->init_req(TypeFunc::Control,   slow_region);
1536   call->init_req(TypeFunc::I_O,       top());    // does no i/o
1537   call->init_req(TypeFunc::Memory,    slow_mem); // may gc ptrs
1538   call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
1539   call->init_req(TypeFunc::FramePtr,  alloc->in(TypeFunc::FramePtr));
1540 
1541   call->init_req(TypeFunc::Parms+0, klass_node);
1542   if (length != nullptr) {
1543     call->init_req(TypeFunc::Parms+1, length);
1544   }
1545 
1546   // Copy debug information and adjust JVMState information, then replace
1547   // allocate node with the call
1548   call->copy_call_debug_info(&_igvn, alloc);
1549   // For array allocations, copy the valid length check to the call node so Compile::final_graph_reshaping() can verify
1550   // that the call has the expected number of CatchProj nodes (in case the allocation always fails and the fallthrough
1551   // path dies).
1552   if (valid_length_test != nullptr) {
1553     call->add_req(valid_length_test);
1554   }
1555   if (expand_fast_path) {
1556     call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
1557   } else {
1558     // Hook i_o projection to avoid its elimination during allocation
1559     // replacement (when only a slow call is generated).
1560     call->set_req(TypeFunc::I_O, result_phi_i_o);
1561   }
1562   _igvn.replace_node(alloc, call);
1563   transform_later(call);
1564 
1565   // Identify the output projections from the allocate node and
1566   // adjust any references to them.
1567   // The control and io projections look like:
1568   //
1569   //        v---Proj(ctrl) <-----+   v---CatchProj(ctrl)
1570   //  Allocate                   Catch
1571   //        ^---Proj(io) <-------+   ^---CatchProj(io)
1572   //
1573   //  We are interested in the CatchProj nodes.
1574   //
1575   call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
1576 
1577   // An allocate node has separate memory projections for the uses on
1578   // the control and i_o paths. Replace the control memory projection with
1579   // result_phi_rawmem (unless we are only generating a slow call when
1580   // both memory projections are combined)
1581   if (expand_fast_path && _callprojs.fallthrough_memproj != nullptr) {
1582     migrate_outs(_callprojs.fallthrough_memproj, result_phi_rawmem);
1583   }
1584   // Now change uses of catchall_memproj to use fallthrough_memproj and delete
1585   // catchall_memproj so we end up with a call that has only 1 memory projection.
1586   if (_callprojs.catchall_memproj != nullptr ) {
1587     if (_callprojs.fallthrough_memproj == nullptr) {
1588       _callprojs.fallthrough_memproj = new ProjNode(call, TypeFunc::Memory);
1589       transform_later(_callprojs.fallthrough_memproj);
1590     }
1591     migrate_outs(_callprojs.catchall_memproj, _callprojs.fallthrough_memproj);
1592     _igvn.remove_dead_node(_callprojs.catchall_memproj, PhaseIterGVN::NodeOrigin::Graph);
1593   }
1594 
1595   // An allocate node has separate i_o projections for the uses on the control
1596   // and i_o paths. Always replace the control i_o projection with result i_o,
1597   // otherwise the incoming i_o becomes dead when only a slow call is generated
1598   // (this differs from the memory projections, where both projections are
1599   // combined in that case).
1600   if (_callprojs.fallthrough_ioproj != nullptr) {
1601     migrate_outs(_callprojs.fallthrough_ioproj, result_phi_i_o);
1602   }
1603   // Now change uses of catchall_ioproj to use fallthrough_ioproj and delete
1604   // catchall_ioproj so we end up with a call that has only 1 i_o projection.
1605   if (_callprojs.catchall_ioproj != nullptr ) {
1606     if (_callprojs.fallthrough_ioproj == nullptr) {
1607       _callprojs.fallthrough_ioproj = new ProjNode(call, TypeFunc::I_O);
1608       transform_later(_callprojs.fallthrough_ioproj);
1609     }
1610     migrate_outs(_callprojs.catchall_ioproj, _callprojs.fallthrough_ioproj);
1611     _igvn.remove_dead_node(_callprojs.catchall_ioproj, PhaseIterGVN::NodeOrigin::Graph);
1612   }
1613 
1614   // If we generated only a slow call, we are done.
1615   if (!expand_fast_path) {
1616     // Now we can unhook i_o.
1617     if (result_phi_i_o->outcnt() > 1) {
1618       call->set_req(TypeFunc::I_O, top());
1619     } else {
1620       assert(result_phi_i_o->unique_ctrl_out() == call, "sanity");
1621       // Case of a new array with a negative size known during compilation.
1622       // The AllocateArrayNode::Ideal() optimization disconnects the unreachable
1623       // code that follows, since the call to the runtime will throw an exception.
1624       // As a result there will be no users of i_o after the call.
1625       // Leave i_o attached to this call to avoid problems in the preceding graph.
1626     }
1627     return;
1628   }
1629 
1630   if (_callprojs.fallthrough_catchproj != nullptr) {
1631     ctrl = _callprojs.fallthrough_catchproj->clone();
1632     transform_later(ctrl);
1633     _igvn.replace_node(_callprojs.fallthrough_catchproj, result_region);
1634   } else {
1635     ctrl = top();
1636   }
1637   Node *slow_result;
1638   if (_callprojs.resproj == nullptr) {
1639     // no uses of the allocation result
1640     slow_result = top();
1641   } else {
1642     slow_result = _callprojs.resproj->clone();
1643     transform_later(slow_result);
1644     _igvn.replace_node(_callprojs.resproj, result_phi_rawoop);
1645   }
1646 
1647   // Plug slow-path into result merge point
1648   result_region->init_req( slow_result_path, ctrl);
1649   transform_later(result_region);
1650   if (allocation_has_use) {
1651     result_phi_rawoop->init_req(slow_result_path, slow_result);
1652     transform_later(result_phi_rawoop);
1653   }
1654   result_phi_rawmem->init_req(slow_result_path, _callprojs.fallthrough_memproj);
1655   transform_later(result_phi_rawmem);
1656   transform_later(result_phi_i_o);
1657   // This completes all paths into the result merge point
1658 }
1659 
1660 // Remove alloc node that has no uses.
1661 void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
1662   Node* ctrl = alloc->in(TypeFunc::Control);
1663   Node* mem  = alloc->in(TypeFunc::Memory);
1664   Node* i_o  = alloc->in(TypeFunc::I_O);
1665 
1666   alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
1667   if (_callprojs.resproj != nullptr) {
1668     for (DUIterator_Fast imax, i = _callprojs.resproj->fast_outs(imax); i < imax; i++) {
1669       Node* use = _callprojs.resproj->fast_out(i);
1670       use->isa_MemBar()->remove(&_igvn);
1671       --imax;
1672       --i; // back up iterator
1673     }
1674     assert(_callprojs.resproj->outcnt() == 0, "all uses must be deleted");
1675     _igvn.remove_dead_node(_callprojs.resproj, PhaseIterGVN::NodeOrigin::Graph);
1676   }
1677   if (_callprojs.fallthrough_catchproj != nullptr) {
1678     migrate_outs(_callprojs.fallthrough_catchproj, ctrl);
1679     _igvn.remove_dead_node(_callprojs.fallthrough_catchproj, PhaseIterGVN::NodeOrigin::Graph);
1680   }
1681   if (_callprojs.catchall_catchproj != nullptr) {
1682     _igvn.rehash_node_delayed(_callprojs.catchall_catchproj);
1683     _callprojs.catchall_catchproj->set_req(0, top());
1684   }
1685   if (_callprojs.fallthrough_proj != nullptr) {
1686     Node* catchnode = _callprojs.fallthrough_proj->unique_ctrl_out();
1687     _igvn.remove_dead_node(catchnode, PhaseIterGVN::NodeOrigin::Graph);
1688     _igvn.remove_dead_node(_callprojs.fallthrough_proj, PhaseIterGVN::NodeOrigin::Graph);
1689   }
1690   if (_callprojs.fallthrough_memproj != nullptr) {
1691     migrate_outs(_callprojs.fallthrough_memproj, mem);
1692     _igvn.remove_dead_node(_callprojs.fallthrough_memproj, PhaseIterGVN::NodeOrigin::Graph);
1693   }
1694   if (_callprojs.fallthrough_ioproj != nullptr) {
1695     migrate_outs(_callprojs.fallthrough_ioproj, i_o);
1696     _igvn.remove_dead_node(_callprojs.fallthrough_ioproj, PhaseIterGVN::NodeOrigin::Graph);
1697   }
1698   if (_callprojs.catchall_memproj != nullptr) {
1699     _igvn.rehash_node_delayed(_callprojs.catchall_memproj);
1700     _callprojs.catchall_memproj->set_req(0, top());
1701   }
1702   if (_callprojs.catchall_ioproj != nullptr) {
1703     _igvn.rehash_node_delayed(_callprojs.catchall_ioproj);
1704     _callprojs.catchall_ioproj->set_req(0, top());
1705   }
1706 #ifndef PRODUCT
1707   if (PrintEliminateAllocations) {
1708     if (alloc->is_AllocateArray()) {
1709       tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
1710     } else {
1711       tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
1712     }
1713   }
1714 #endif
1715   _igvn.remove_dead_node(alloc, PhaseIterGVN::NodeOrigin::Graph);
1716 }
1717 
1718 void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeNode* init,
1719                                                 Node*& fast_oop_ctrl, Node*& fast_oop_rawmem) {
1720   // If initialization is performed by an array copy, any required
1721   // MemBarStoreStore was already added. If the object does not
1722   // escape, there is no need for a MemBarStoreStore. If the object does not
1723   // escape in its initializer and a memory barrier (MemBarStoreStore or
1724   // stronger) is already added at the exit of the initializer, there is also
1725   // no need for a MemBarStoreStore. Otherwise we need a MemBarStoreStore
1726   // so that stores that initialize this object can't be reordered
1727   // with a subsequent store that makes this object accessible to
1728   // other threads.
1729   // Other threads include Java threads and JVM internal threads
1730   // (for example concurrent GC threads). In the current concurrent GC
1731   // implementation, G1 will not scan a newly created object,
1732   // so it's safe to skip the storestore barrier when the allocation does
1733   // not escape.
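  //
  // Illustrative example (hypothetical Java shape) of the reordering this
  // barrier prevents:
  //   obj.f  = 42;     // initializing store
  //   shared = obj;    // publishing store, makes obj visible to other threads
  // Without a StoreStore barrier between the two, a weakly ordered CPU could
  // make 'shared' visible before 'obj.f', letting another thread observe a
  // partially initialized object.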
1734   if (!alloc->does_not_escape_thread() &&
1735     !alloc->is_allocation_MemBar_redundant() &&
1736     (init == nullptr || !init->is_complete_with_arraycopy())) {
1737     if (init == nullptr || init->req() < InitializeNode::RawStores) {
1738       // No InitializeNode or no stores captured by zeroing
1739       // elimination. Simply add the MemBarStoreStore after object
1740       // initialization.
1741       // What we want is to prevent the compiler and the CPU from re-ordering the stores that initialize this object
1742       // with subsequent stores to any slice. As a consequence, this MemBar should capture the entire memory state at
1743       // this point in the IR and produce a new memory state that should cover all slices. However, the Initialize node
1744       // only captures/produces a partial memory state making it complicated to insert such a MemBar. Because
1745       // re-ordering by the compiler can't happen by construction (a later Store that publishes the just allocated
1746       // object reference is indirectly control dependent on the Initialize node), preventing reordering by the CPU is
1747       // sufficient. For that a MemBar on the raw memory slice is good enough.
1748       // If init is null, this allocation does have an InitializeNode but this logic can't locate it (see comment in
1749       // PhaseMacroExpand::initialize_object()).
1750       MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxRaw);
1751       transform_later(mb);
1752 
1753       mb->init_req(TypeFunc::Memory, fast_oop_rawmem);
1754       mb->init_req(TypeFunc::Control, fast_oop_ctrl);
1755       fast_oop_ctrl = new ProjNode(mb, TypeFunc::Control);
1756       transform_later(fast_oop_ctrl);
1757       fast_oop_rawmem = new ProjNode(mb, TypeFunc::Memory);
1758       transform_later(fast_oop_rawmem);
1759     } else {
1760       // Add the MemBarStoreStore after the InitializeNode so that
1761       // all stores performing the initialization that were moved
1762       // before the InitializeNode happen before the storestore
1763       // barrier.
1764 
1765       Node* init_ctrl = init->proj_out_or_null(TypeFunc::Control);
1766 
1767       // See comment above that explains why a raw memory MemBar is good enough.
1768       MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxRaw);
1769       transform_later(mb);
1770 
1771       Node* ctrl = new ProjNode(init, TypeFunc::Control);
1772       transform_later(ctrl);
1773       Node* old_raw_mem_proj = nullptr;
1774       auto find_raw_mem = [&](ProjNode* proj) {
1775         if (C->get_alias_index(proj->adr_type()) == Compile::AliasIdxRaw) {
1776           assert(old_raw_mem_proj == nullptr, "only one expected");
1777           old_raw_mem_proj = proj;
1778         }
1779       };
1780       init->for_each_proj(find_raw_mem, TypeFunc::Memory);
1781       assert(old_raw_mem_proj != nullptr, "should have found raw mem Proj");
1782       Node* raw_mem_proj = new ProjNode(init, TypeFunc::Memory);
1783       transform_later(raw_mem_proj);
1784 
1785       // The MemBarStoreStore depends on control and memory coming
1786       // from the InitializeNode
1787       mb->init_req(TypeFunc::Memory, raw_mem_proj);
1788       mb->init_req(TypeFunc::Control, ctrl);
1789 
1790       ctrl = new ProjNode(mb, TypeFunc::Control);
1791       transform_later(ctrl);
1792       Node* mem = new ProjNode(mb, TypeFunc::Memory);
1793       transform_later(mem);
1794 
1795       // All nodes that depended on the InitializeNode for control
1796       // and memory must now depend on the MemBarNode that itself
1797       // depends on the InitializeNode
1798       if (init_ctrl != nullptr) {
1799         _igvn.replace_node(init_ctrl, ctrl);
1800       }
1801       _igvn.replace_node(old_raw_mem_proj, mem);
1802     }
1803   }
1804 }
1805 
1806 void PhaseMacroExpand::expand_dtrace_alloc_probe(AllocateNode* alloc, Node* oop,
1807                                                 Node*& ctrl, Node*& rawmem) {
1808   if (C->env()->dtrace_alloc_probes()) {
1809     // Slow-path call
1810     int size = TypeFunc::Parms + 2;
1811     CallLeafNode *call = new CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(),
1812                                           CAST_FROM_FN_PTR(address,
1813                                           static_cast<int (*)(JavaThread*, oopDesc*)>(SharedRuntime::dtrace_object_alloc)),
1814                                           "dtrace_object_alloc",
1815                                           TypeRawPtr::BOTTOM);
1816 
1817     // Get base of thread-local storage area
1818     Node* thread = new ThreadLocalNode();
1819     transform_later(thread);
1820 
1821     call->init_req(TypeFunc::Parms + 0, thread);
1822     call->init_req(TypeFunc::Parms + 1, oop);
1823     call->init_req(TypeFunc::Control, ctrl);
1824     call->init_req(TypeFunc::I_O    , top()); // does no i/o
1825     call->init_req(TypeFunc::Memory , rawmem);
1826     call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
1827     call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
1828     transform_later(call);
1829     ctrl = new ProjNode(call, TypeFunc::Control);
1830     transform_later(ctrl);
1831     rawmem = new ProjNode(call, TypeFunc::Memory);
1832     transform_later(rawmem);
1833   }
1834 }
1835 
1836 // Helper for PhaseMacroExpand::expand_allocate_common.
1837 // Initializes the newly-allocated storage.
1838 Node*
1839 PhaseMacroExpand::initialize_object(AllocateNode* alloc,
1840                                     Node* control, Node* rawmem, Node* object,
1841                                     Node* klass_node, Node* length,
1842                                     Node* size_in_bytes) {
1843   InitializeNode* init = alloc->initialization();
1844   // Store the klass & mark bits
1845   Node* mark_node = alloc->make_ideal_mark(&_igvn, control, rawmem);
1846   if (!mark_node->is_Con()) {
1847     transform_later(mark_node);
1848   }
1849   rawmem = make_store_raw(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type());
1850 
1851   if (!UseCompactObjectHeaders) {
1852     rawmem = make_store_raw(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
1853   }
1854   int header_size = alloc->minimum_header_size();  // conservatively small
1855 
1856   // Array length
1857   if (length != nullptr) {         // Arrays need length field
1858     rawmem = make_store_raw(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
1859     // conservatively small header size:
1860     header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1861     if (_igvn.type(klass_node)->isa_aryklassptr()) {   // we know the exact header size in most cases:
1862       BasicType elem = _igvn.type(klass_node)->is_klassptr()->as_instance_type()->isa_aryptr()->elem()->array_element_basic_type();
1863       if (is_reference_type(elem, true)) {
1864         elem = T_OBJECT;
1865       }
1866       header_size = Klass::layout_helper_header_size(Klass::array_layout_helper(elem));
1867     }
1868   }
1869 
1870   // Clear the object body, if necessary.
1871   if (init == nullptr) {
1872     // The init has somehow disappeared; be cautious and clear everything.
1873     //
1874     // This can happen if a node is allocated but an uncommon trap occurs
1875     // immediately.  In this case, the Initialize gets associated with the
1876     // trap, and may be placed in a different (outer) loop, if the Allocate
1877     // is in a loop.  If (this is rare) the inner loop gets unrolled, then
1878     // there can be two Allocates to one Initialize.  The answer in all these
1879     // edge cases is safety first.  It is always safe to clear immediately
1880     // within an Allocate, and then (maybe or maybe not) clear some more later.
1881     if (!(UseTLAB && ZeroTLAB)) {
1882       rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
1883                                             header_size, size_in_bytes,
1884                                             true,
1885                                             &_igvn);
1886     }
1887   } else {
1888     if (!init->is_complete()) {
1889       // Try to win by zeroing only what the init does not store.
1890       // We can also try to do some peephole optimizations,
1891       // such as combining some adjacent subword stores.
1892       rawmem = init->complete_stores(control, rawmem, object,
1893                                      header_size, size_in_bytes, &_igvn);
1894     }
1895     // We have no more use for this link, since the AllocateNode goes away:
1896     init->set_req(InitializeNode::RawAddress, top());
1897     // (If we keep the link, it just confuses the register allocator,
1898     // who thinks he sees a real use of the address by the membar.)
1899   }
1900 
1901   return rawmem;
1902 }
1903 
1904 // Generate prefetch instructions for next allocations.
1905 Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
1906                                         Node*& contended_phi_rawmem,
1907                                         Node* old_eden_top, Node* new_eden_top,
1908                                         intx lines) {
1909    enum { fall_in_path = 1, pf_path = 2 };
1910    if (UseTLAB && AllocatePrefetchStyle == 2) {
1911       // Generate prefetch allocation with watermark check.
1912       // When an allocation hits the watermark, we will prefetch starting
1913       // at a "distance" away from the watermark.
1914 
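      // Rough sketch of the shape generated below (illustrative only):
      //   if (new_eden_top >= tlab_pf_top) {        // crossed the watermark?
      //     tlab_pf_top = old_pf_wm + AllocatePrefetchDistance;
      //     prefetch(tlab_pf_top + 0 * step_size);  // 'lines' prefetches,
      //     prefetch(tlab_pf_top + 1 * step_size);  // one per cache line
      //     ...
      //   }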
1915       Node* pf_region = new RegionNode(3);
1916       Node* pf_phi_rawmem = new PhiNode(pf_region, Type::MEMORY,
1917                                                 TypeRawPtr::BOTTOM);
1918       // I/O is used for Prefetch
1919       Node* pf_phi_abio = new PhiNode(pf_region, Type::ABIO);
1920 
1921       Node* thread = new ThreadLocalNode();
1922       transform_later(thread);
1923 
1924       Node* eden_pf_adr = AddPNode::make_off_heap(thread,
1925                    _igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset())));
1926       transform_later(eden_pf_adr);
1927 
1928       Node* old_pf_wm = new LoadPNode(needgc_false,
1929                                    contended_phi_rawmem, eden_pf_adr,
1930                                    TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,
1931                                    MemNode::unordered);
1932       transform_later(old_pf_wm);
1933 
1934       // check against new_eden_top
1935       Node* need_pf_cmp = new CmpPNode(new_eden_top, old_pf_wm);
1936       transform_later(need_pf_cmp);
1937       Node* need_pf_bol = new BoolNode(need_pf_cmp, BoolTest::ge);
1938       transform_later(need_pf_bol);
1939       IfNode* need_pf_iff = new IfNode(needgc_false, need_pf_bol,
1940                                        PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
1941       transform_later(need_pf_iff);
1942 
1943       // true node, add prefetchdistance
1944       Node* need_pf_true = new IfTrueNode(need_pf_iff);
1945       transform_later(need_pf_true);
1946 
1947       Node* need_pf_false = new IfFalseNode(need_pf_iff);
1948       transform_later(need_pf_false);
1949 
1950       Node* new_pf_wmt = AddPNode::make_off_heap(old_pf_wm,
1951                                                  _igvn.MakeConX(AllocatePrefetchDistance));
1952       transform_later(new_pf_wmt);
1953       new_pf_wmt->set_req(0, need_pf_true);
1954 
1955       Node* store_new_wmt = new StorePNode(need_pf_true,
1956                                        contended_phi_rawmem, eden_pf_adr,
1957                                        TypeRawPtr::BOTTOM, new_pf_wmt,
1958                                        MemNode::unordered);
1959       transform_later(store_new_wmt);
1960 
1961       // adding prefetches
1962       pf_phi_abio->init_req(fall_in_path, i_o);
1963 
1964       Node* prefetch_adr;
1965       Node* prefetch;
1966       uint step_size = AllocatePrefetchStepSize;
1967       uint distance = 0;
1968 
1969       for (intx i = 0; i < lines; i++) {
1970         prefetch_adr = AddPNode::make_off_heap(new_pf_wmt,
1971                                                _igvn.MakeConX(distance));
1972         transform_later(prefetch_adr);
1973         prefetch = new PrefetchAllocationNode(i_o, prefetch_adr);
1974         transform_later(prefetch);
1975         distance += step_size;
1976         i_o = prefetch;
1977       }
1978       pf_phi_abio->set_req(pf_path, i_o);
1979 
1980       pf_region->init_req(fall_in_path, need_pf_false);
1981       pf_region->init_req(pf_path, need_pf_true);
1982 
1983       pf_phi_rawmem->init_req(fall_in_path, contended_phi_rawmem);
1984       pf_phi_rawmem->init_req(pf_path, store_new_wmt);
1985 
1986       transform_later(pf_region);
1987       transform_later(pf_phi_rawmem);
1988       transform_later(pf_phi_abio);
1989 
1990       needgc_false = pf_region;
1991       contended_phi_rawmem = pf_phi_rawmem;
1992       i_o = pf_phi_abio;
1993    } else if (UseTLAB && AllocatePrefetchStyle == 3) {
1994       // Insert a prefetch instruction for each allocation.
1995       // This code is used to generate 1 prefetch instruction per cache line.
1996 
1997       // Generate several prefetch instructions.
1998       uint step_size = AllocatePrefetchStepSize;
1999       uint distance = AllocatePrefetchDistance;
2000 
2001       // Next cache address.
2002       Node* cache_adr = AddPNode::make_off_heap(old_eden_top,
2003                                                 _igvn.MakeConX(step_size + distance));
2004       transform_later(cache_adr);
2005       cache_adr = new CastP2XNode(needgc_false, cache_adr);
2006       transform_later(cache_adr);
2007       // Align the address so the prefetch targets the beginning of a cache line.
2008       Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1));
2009       cache_adr = new AndXNode(cache_adr, mask);
2010       transform_later(cache_adr);
2011       cache_adr = new CastX2PNode(cache_adr);
2012       transform_later(cache_adr);
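      // For example (assuming step_size == 64): mask == ~63, so an address
      // ending in 0x7f becomes one ending in 0x40, i.e. the prefetch address
      // is rounded down to a cache-line boundary.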
2013 
2014       // Prefetch
2015       Node* prefetch = new PrefetchAllocationNode(contended_phi_rawmem, cache_adr);
2016       prefetch->set_req(0, needgc_false);
2017       transform_later(prefetch);
2018       contended_phi_rawmem = prefetch;
2019       Node* prefetch_adr;
2020       distance = step_size;
2021       for (intx i = 1; i < lines; i++) {
2022         prefetch_adr = AddPNode::make_off_heap(cache_adr,
2023                                                _igvn.MakeConX(distance));
2024         transform_later(prefetch_adr);
2025         prefetch = new PrefetchAllocationNode(contended_phi_rawmem, prefetch_adr);
2026         transform_later(prefetch);
2027         distance += step_size;
2028         contended_phi_rawmem = prefetch;
2029       }
2030    } else if (AllocatePrefetchStyle > 0) {
2031       // Insert a prefetch for each allocation only on the fast-path
2032       Node* prefetch_adr;
2033       Node* prefetch;
2034       // Generate several prefetch instructions.
2035       uint step_size = AllocatePrefetchStepSize;
2036       uint distance = AllocatePrefetchDistance;
2037       for (intx i = 0; i < lines; i++) {
2038         prefetch_adr = AddPNode::make_off_heap(new_eden_top,
2039                                                _igvn.MakeConX(distance));
2040         transform_later(prefetch_adr);
2041         prefetch = new PrefetchAllocationNode(i_o, prefetch_adr);
2042         // Do not let it float too high, since if eden_top == eden_end,
2043         // both might be null.
2044         if (i == 0) { // Set control for first prefetch, next follows it
2045           prefetch->init_req(0, needgc_false);
2046         }
2047         transform_later(prefetch);
2048         distance += step_size;
2049         i_o = prefetch;
2050       }
2051    }
2052    return i_o;
2053 }
2054 
2055 
2056 void PhaseMacroExpand::expand_allocate(AllocateNode *alloc) {
2057   expand_allocate_common(alloc, nullptr,
2058                          OptoRuntime::new_instance_Type(),
2059                          OptoRuntime::new_instance_Java(), nullptr);
2060 }
2061 
2062 void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) {
2063   Node* length = alloc->in(AllocateNode::ALength);
2064   Node* valid_length_test = alloc->in(AllocateNode::ValidLengthTest);
2065   InitializeNode* init = alloc->initialization();
2066   Node* klass_node = alloc->in(AllocateNode::KlassNode);
2067   const TypeAryKlassPtr* ary_klass_t = _igvn.type(klass_node)->isa_aryklassptr();
2068   address slow_call_address;  // Address of slow call
2069   if (init != nullptr && init->is_complete_with_arraycopy() &&
2070       ary_klass_t && ary_klass_t->elem()->isa_klassptr() == nullptr) {
2071     // Don't zero the type array during slow allocation in the VM since
2072     // it will be initialized later by an arraycopy in compiled code.
2073     slow_call_address = OptoRuntime::new_array_nozero_Java();
2074   } else {
2075     slow_call_address = OptoRuntime::new_array_Java();
2076   }
2077   expand_allocate_common(alloc, length,
2078                          OptoRuntime::new_array_Type(),
2079                          slow_call_address, valid_length_test);
2080 }
2081 
2082 //-------------------mark_eliminated_box----------------------------------
2083 //
2084 // During EA, obj may point to several objects, but after a few ideal graph
2085 // transformations (CCP) it may point to only one non-escaping object
2086 // (though still through a phi); the corresponding locks and unlocks will then
2087 // be marked for elimination. Later obj could be replaced with a new node
2088 // (a new phi) which does not have escape information. And later, after some
2089 // graph reshaping, other locks and unlocks (which were not marked for
2090 // elimination before) are connected to this new obj (phi) but they still will
2091 // not be marked for elimination since the new obj has no escape information.
2092 // Mark all associated (same box and obj) lock and unlock nodes for
2093 // elimination if some of them marked already.
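//
// Illustrative example (hypothetical Java shape):
//   Object l = new Object();     // proven non-escaping by escape analysis
//   synchronized (l) { ... }     // Lock/Unlock nodes using the same BoxLock
// Once one lock on 'l' is marked for elimination, this routine makes sure
// every other lock/unlock sharing the same box and object is marked as well.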
2094 void PhaseMacroExpand::mark_eliminated_box(Node* box, Node* obj) {
2095   BoxLockNode* oldbox = box->as_BoxLock();
2096   if (oldbox->is_eliminated()) {
2097     return; // This BoxLock node was processed already.
2098   }
2099   assert(!oldbox->is_unbalanced(), "this should not be called for unbalanced region");
2100   // The new implementation (EliminateNestedLocks) has a separate BoxLock
2101   // node for each locked region, so mark all associated locks/unlocks as
2102   // eliminated even if different objects are referenced in one locked region
2103   // (for example, OSR compilation of a nested loop inside a locked scope).
2104   if (EliminateNestedLocks ||
2105       oldbox->as_BoxLock()->is_simple_lock_region(nullptr, obj, nullptr)) {
2106     // Box is used only in one lock region. Mark this box as eliminated.
2107     oldbox->set_local();      // This verifies correct state of BoxLock
2108     _igvn.hash_delete(oldbox);
2109     oldbox->set_eliminated(); // This changes box's hash value
2110     _igvn.hash_insert(oldbox);
2111 
2112     for (uint i = 0; i < oldbox->outcnt(); i++) {
2113       Node* u = oldbox->raw_out(i);
2114       if (u->is_AbstractLock() && !u->as_AbstractLock()->is_non_esc_obj()) {
2115         AbstractLockNode* alock = u->as_AbstractLock();
2116         // Check lock's box since box could be referenced by Lock's debug info.
2117         if (alock->box_node() == oldbox) {
2118           // Mark eliminated all related locks and unlocks.
2119 #ifdef ASSERT
2120           alock->log_lock_optimization(C, "eliminate_lock_set_non_esc4");
2121 #endif
2122           alock->set_non_esc_obj();
2123         }
2124       }
2125     }
2126     return;
2127   }
2128 
2129   // Create new "eliminated" BoxLock node and use it in monitor debug info
2130   // instead of oldbox for the same object.
2131   BoxLockNode* newbox = oldbox->clone()->as_BoxLock();
2132 
2133   // Note: BoxLock node is marked eliminated only here and it is used
2134   // to indicate that all associated lock and unlock nodes are marked
2135   // for elimination.
2136   newbox->set_local(); // This verifies correct state of BoxLock
2137   newbox->set_eliminated();
2138   transform_later(newbox);
2139 
2140   // Replace old box node with new box for all users of the same object.
2141   for (uint i = 0; i < oldbox->outcnt();) {
2142     bool next_edge = true;
2143 
2144     Node* u = oldbox->raw_out(i);
2145     if (u->is_AbstractLock()) {
2146       AbstractLockNode* alock = u->as_AbstractLock();
2147       if (alock->box_node() == oldbox && alock->obj_node()->eqv_uncast(obj)) {
2148         // Replace Box and mark eliminated all related locks and unlocks.
2149 #ifdef ASSERT
2150         alock->log_lock_optimization(C, "eliminate_lock_set_non_esc5");
2151 #endif
2152         alock->set_non_esc_obj();
2153         _igvn.rehash_node_delayed(alock);
2154         alock->set_box_node(newbox);
2155         next_edge = false;
2156       }
2157     }
2158     if (u->is_FastLock() && u->as_FastLock()->obj_node()->eqv_uncast(obj)) {
2159       FastLockNode* flock = u->as_FastLock();
2160       assert(flock->box_node() == oldbox, "sanity");
2161       _igvn.rehash_node_delayed(flock);
2162       flock->set_box_node(newbox);
2163       next_edge = false;
2164     }
2165 
2166     // Replace old box in monitor debug info.
2167     if (u->is_SafePoint() && u->as_SafePoint()->jvms()) {
2168       SafePointNode* sfn = u->as_SafePoint();
2169       JVMState* youngest_jvms = sfn->jvms();
2170       int max_depth = youngest_jvms->depth();
2171       for (int depth = 1; depth <= max_depth; depth++) {
2172         JVMState* jvms = youngest_jvms->of_depth(depth);
2173         int num_mon  = jvms->nof_monitors();
2174         // Loop over monitors
2175         for (int idx = 0; idx < num_mon; idx++) {
2176           Node* obj_node = sfn->monitor_obj(jvms, idx);
2177           Node* box_node = sfn->monitor_box(jvms, idx);
2178           if (box_node == oldbox && obj_node->eqv_uncast(obj)) {
2179             int j = jvms->monitor_box_offset(idx);
2180             _igvn.replace_input_of(u, j, newbox);
2181             next_edge = false;
2182           }
2183         }
2184       }
2185     }
2186     if (next_edge) i++;
2187   }
2188 }
2189 
2190 //-----------------------mark_eliminated_locking_nodes-----------------------
2191 void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) {
2192   if (!alock->is_balanced()) {
2193     return; // Can't do any more elimination for this locking region
2194   }
2195   if (EliminateNestedLocks) {
2196     if (alock->is_nested()) {
2197        assert(alock->box_node()->as_BoxLock()->is_eliminated(), "sanity");
2198        return;
2199     } else if (!alock->is_non_esc_obj()) { // Not eliminated or coarsened
2200       // Only the Lock node has the JVMState needed here.
2201       // (Not that the preceding claim is documented anywhere else.)
2202       if (alock->jvms() != nullptr) {
2203         if (alock->as_Lock()->is_nested_lock_region()) {
2204           // Mark eliminated related nested locks and unlocks.
2205           Node* obj = alock->obj_node();
2206           BoxLockNode* box_node = alock->box_node()->as_BoxLock();
2207           assert(!box_node->is_eliminated(), "should not be marked yet");
2208           // Note: BoxLock node is marked eliminated only here
2209           // and it is used to indicate that all associated lock
2210           // and unlock nodes are marked for elimination.
2211           box_node->set_eliminated(); // Box's hash is always NO_HASH here
2212           for (uint i = 0; i < box_node->outcnt(); i++) {
2213             Node* u = box_node->raw_out(i);
2214             if (u->is_AbstractLock()) {
2215               alock = u->as_AbstractLock();
2216               if (alock->box_node() == box_node) {
2217                 // Verify that this Box is referenced only by related locks.
2218                 assert(alock->obj_node()->eqv_uncast(obj), "");
2219                 // Mark all related locks and unlocks.
2220 #ifdef ASSERT
2221                 alock->log_lock_optimization(C, "eliminate_lock_set_nested");
2222 #endif
2223                 alock->set_nested();
2224               }
2225             }
2226           }
2227         } else {
2228 #ifdef ASSERT
2229           alock->log_lock_optimization(C, "eliminate_lock_NOT_nested_lock_region");
2230           if (C->log() != nullptr)
2231             alock->as_Lock()->is_nested_lock_region(C); // rerun for debugging output
2232 #endif
2233         }
2234       }
2235       return;
2236     }
2237     // Process locks for non escaping object
2238     assert(alock->is_non_esc_obj(), "");
2239   } // EliminateNestedLocks
2240 
2241   if (alock->is_non_esc_obj()) { // Lock is used for non escaping object
2242     // Look for all locks of this object and mark them and
2243     // corresponding BoxLock nodes as eliminated.
2244     Node* obj = alock->obj_node();
2245     for (uint j = 0; j < obj->outcnt(); j++) {
2246       Node* o = obj->raw_out(j);
2247       if (o->is_AbstractLock() &&
2248           o->as_AbstractLock()->obj_node()->eqv_uncast(obj)) {
2249         alock = o->as_AbstractLock();
2250         Node* box = alock->box_node();
2251         // Replace old box node with new eliminated box for all users
2252         // of the same object and mark related locks as eliminated.
2253         mark_eliminated_box(box, obj);
2254       }
2255     }
2256   }
2257 }
2258 
2259 // Once we have determined that this lock/unlock can be eliminated, we simply
2260 // eliminate the node without expanding it.
2261 //
2262 // Note:  The membars associated with the lock/unlock are currently not
2263 //        eliminated.  This should be investigated as a future enhancement.
2264 //
2265 bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
2266 
2267   if (!alock->is_eliminated()) {
2268     return false;
2269   }
2270 #ifdef ASSERT
2271   if (!alock->is_coarsened()) {
2272     // Check that new "eliminated" BoxLock node is created.
2273     BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
2274     assert(oldbox->is_eliminated(), "should be done already");
2275   }
2276 #endif
2277 
2278   alock->log_lock_optimization(C, "eliminate_lock");
2279 
2280 #ifndef PRODUCT
2281   if (PrintEliminateLocks) {
2282     tty->print_cr("++++ Eliminated: %d %s '%s'", alock->_idx, (alock->is_Lock() ? "Lock" : "Unlock"), alock->kind_as_string());
2283   }
2284 #endif
2285 
2286   Node* mem  = alock->in(TypeFunc::Memory);
2287   Node* ctrl = alock->in(TypeFunc::Control);
2288   guarantee(ctrl != nullptr, "missing control projection, cannot replace_node() with null");
2289 
2290   alock->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
2291   // There are 2 projections from the lock.  The lock node will
2292   // be deleted when its last use is subsumed below.
2293   assert(alock->outcnt() == 2 &&
2294          _callprojs.fallthrough_proj != nullptr &&
2295          _callprojs.fallthrough_memproj != nullptr,
2296          "Unexpected projections from Lock/Unlock");
2297 
2298   Node* fallthroughproj = _callprojs.fallthrough_proj;
2299   Node* memproj_fallthrough = _callprojs.fallthrough_memproj;
2300 
2301   // The memory projection from a lock/unlock is RawMem
2302   // The input to a Lock is merged memory, so extract its RawMem input
2303   // (unless the MergeMem has been optimized away.)
2304   if (alock->is_Lock()) {
2305     // Search for MemBarAcquireLock node and delete it also.
2306     MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
2307     assert(membar != nullptr && membar->Opcode() == Op_MemBarAcquireLock, "");
2308     Node* ctrlproj = membar->proj_out(TypeFunc::Control);
2309     Node* memproj = membar->proj_out(TypeFunc::Memory);
2310     _igvn.replace_node(ctrlproj, fallthroughproj);
2311     _igvn.replace_node(memproj, memproj_fallthrough);
2312 
2313     // Also delete the FastLock node if this Lock node is its unique user
2314     // (loop peeling may clone a Lock node).
2315     Node* flock = alock->as_Lock()->fastlock_node();
2316     if (flock->outcnt() == 1) {
2317       assert(flock->unique_out() == alock, "sanity");
2318       _igvn.replace_node(flock, top());
2319     }
2320   }
2321 
2322   // Search for MemBarReleaseLock node and delete it also.
2323   if (alock->is_Unlock() && ctrl->is_Proj() && ctrl->in(0)->is_MemBar()) {
2324     MemBarNode* membar = ctrl->in(0)->as_MemBar();
2325     assert(membar->Opcode() == Op_MemBarReleaseLock &&
2326            mem->is_Proj() && membar == mem->in(0), "");
2327     _igvn.replace_node(fallthroughproj, ctrl);
2328     _igvn.replace_node(memproj_fallthrough, mem);
2329     fallthroughproj = ctrl;
2330     memproj_fallthrough = mem;
2331     ctrl = membar->in(TypeFunc::Control);
2332     mem  = membar->in(TypeFunc::Memory);
2333   }
2334 
2335   _igvn.replace_node(fallthroughproj, ctrl);
2336   _igvn.replace_node(memproj_fallthrough, mem);
2337   return true;
2338 }
2339 
2340 
2341 //------------------------------expand_lock_node----------------------
2342 void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
2343 
2344   Node* ctrl = lock->in(TypeFunc::Control);
2345   Node* mem = lock->in(TypeFunc::Memory);
2346   Node* obj = lock->obj_node();
2347   Node* box = lock->box_node();
2348   Node* flock = lock->fastlock_node();
2349 
2350   assert(!box->as_BoxLock()->is_eliminated(), "sanity");
2351 
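  // Shape being built (sketch): the FastLock result selects between the fast
  // path and a slow path that calls complete_monitor_locking_Java; control and
  // raw memory are merged by a Region/Phi with the fast path in slot 2 and the
  // slow-call fall-through in slot 1.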
2352   // Make the merge point
2353   Node *region;
2354   Node *mem_phi;
2355   Node *slow_path;
2356 
2357   region  = new RegionNode(3);
2358   // create a Phi for the memory state
2359   mem_phi = new PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
2360 
2361   // Optimize test; set region slot 2
2362   slow_path = opt_bits_test(ctrl, region, 2, flock);
2363   mem_phi->init_req(2, mem);
2364 
2365   // Make slow path call
2366   CallNode* call = make_slow_call(lock, OptoRuntime::complete_monitor_enter_Type(),
2367                                   OptoRuntime::complete_monitor_locking_Java(), nullptr, slow_path,
2368                                   obj, box, nullptr);
2369 
2370   call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
2371 
2372   // Slow path can only throw asynchronous exceptions, which are always
2373   // de-opted.  So the compiler thinks the slow-call can never throw an
2374   // exception.  If it DOES throw an exception we would need the debug
2375   // info removed first (since if it throws there is no monitor).
2376   assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr &&
2377          _callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock");
2378 
2379   // Capture slow path
2380   // disconnect fall-through projection from call and create a new one
2381   // hook up users of fall-through projection to region
2382   Node *slow_ctrl = _callprojs.fallthrough_proj->clone();
2383   transform_later(slow_ctrl);
2384   _igvn.hash_delete(_callprojs.fallthrough_proj);
2385   _callprojs.fallthrough_proj->disconnect_inputs(C);
2386   region->init_req(1, slow_ctrl);
2387   // region inputs are now complete
2388   transform_later(region);
2389   _igvn.replace_node(_callprojs.fallthrough_proj, region);
2390 
2391   Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
2392 
2393   mem_phi->init_req(1, memproj);
2394 
2395   transform_later(mem_phi);
2396 
2397   _igvn.replace_node(_callprojs.fallthrough_memproj, mem_phi);
2398 }
2399 
2400 //------------------------------expand_unlock_node----------------------
2401 void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
2402 
2403   Node* ctrl = unlock->in(TypeFunc::Control);
2404   Node* mem = unlock->in(TypeFunc::Memory);
2405   Node* obj = unlock->obj_node();
2406   Node* box = unlock->box_node();
2407 
2408   assert(!box->as_BoxLock()->is_eliminated(), "sanity");
2409 
2410   // No need for a null check on unlock
2411 
2412   // Make the merge point
2413   Node* region = new RegionNode(3);
2414 
2415   FastUnlockNode *funlock = new FastUnlockNode( ctrl, obj, box );
2416   funlock = transform_later( funlock )->as_FastUnlock();
2417   // Optimize test; set region slot 2
2418   Node *slow_path = opt_bits_test(ctrl, region, 2, funlock);
2419   Node *thread = transform_later(new ThreadLocalNode());
2420 
2421   CallNode *call = make_slow_call((CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(),
2422                                   CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C),
2423                                   "complete_monitor_unlocking_C", slow_path, obj, box, thread);
2424 
2425   call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
2426   assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr &&
2427          _callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock");
2428 
2429   // No exceptions for unlocking
2430   // Capture slow path
2431   // disconnect fall-through projection from call and create a new one
2432   // hook up users of fall-through projection to region
2433   Node *slow_ctrl = _callprojs.fallthrough_proj->clone();
2434   transform_later(slow_ctrl);
2435   _igvn.hash_delete(_callprojs.fallthrough_proj);
2436   _callprojs.fallthrough_proj->disconnect_inputs(C);
2437   region->init_req(1, slow_ctrl);
2438   // region inputs are now complete
2439   transform_later(region);
2440   _igvn.replace_node(_callprojs.fallthrough_proj, region);
2441 
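       // Merge slow-path and fast-path memory only if the Unlock's memory projection is still in use.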
2442   if (_callprojs.fallthrough_memproj != nullptr) {
2443     // create a Phi for the memory state
2444     Node* mem_phi = new PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
2445     Node* memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
2446     mem_phi->init_req(1, memproj);
2447     mem_phi->init_req(2, mem);
2448     transform_later(mem_phi);
2449     _igvn.replace_node(_callprojs.fallthrough_memproj, mem_phi);
2450   }
2451 }
2452 
2453 void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) {
2454   assert(check->in(SubTypeCheckNode::Control) == nullptr, "should be pinned");
2455   Node* bol = check->unique_out();
2456   Node* obj_or_subklass = check->in(SubTypeCheckNode::ObjOrSubKlass);
2457   Node* superklass = check->in(SubTypeCheckNode::SuperKlass);
2458   assert(bol->is_Bool() && bol->as_Bool()->_test._test == BoolTest::ne, "unexpected bool node");
2459 
2460   for (DUIterator_Last imin, i = bol->last_outs(imin); i >= imin; --i) {
2461     Node* iff = bol->last_out(i);
2462     assert(iff->is_If(), "where's the if?");
2463 
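         // An If whose control is already dead only needs its test input cut off; IGVN will fold it away.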
2464     if (iff->in(0)->is_top()) {
2465       _igvn.replace_input_of(iff, 1, C->top());
2466       continue;
2467     }
2468 
2469     IfTrueNode* iftrue = iff->as_If()->true_proj();
2470     IfFalseNode* iffalse = iff->as_If()->false_proj();
2471     Node* ctrl = iff->in(0);
2472 
2473     Node* subklass = nullptr;
2474     if (_igvn.type(obj_or_subklass)->isa_klassptr()) {
2475       subklass = obj_or_subklass;
2476     } else {
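           // The input is an object rather than a klass: load its klass to feed the subtype check.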
2477       Node* k_adr = basic_plus_adr(obj_or_subklass, oopDesc::klass_offset_in_bytes());
2478       subklass = _igvn.transform(LoadKlassNode::make(_igvn, C->immutable_memory(), k_adr, TypeInstPtr::KLASS));
2479     }
2480 
2481     Node* not_subtype_ctrl = Phase::gen_subtype_check(subklass, superklass, &ctrl, nullptr, _igvn, check->method(), check->bci());
2482 
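         // gen_subtype_check leaves ctrl as the path where the check succeeds and returns the path
         // where it fails. Rewire the If's projections to these paths and let the If itself go dead.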
2483     _igvn.replace_input_of(iff, 0, C->top());
2484     _igvn.replace_node(iftrue, not_subtype_ctrl);
2485     _igvn.replace_node(iffalse, ctrl);
2486   }
2487   _igvn.replace_node(check, C->top());
2488 }
2489 
2490 // Perform refining of strip mined loop nodes in the macro nodes list.
2491 void PhaseMacroExpand::refine_strip_mined_loop_macro_nodes() {
2492   for (int i = C->macro_count(); i > 0; i--) {
2493     Node* n = C->macro_node(i - 1);
2494     if (n->is_OuterStripMinedLoop()) {
2495       n->as_OuterStripMinedLoop()->adjust_strip_mined_loop(&_igvn);
2496     }
2497   }
2498 }
2499 
2500 //---------------------------eliminate_macro_nodes----------------------
2501 // Eliminate scalar replaced allocations and associated locks.
2502 void PhaseMacroExpand::eliminate_macro_nodes() {
2503   if (C->macro_count() == 0)
2504     return;
2505 
2506   if (StressMacroElimination) {
2507     C->shuffle_macro_nodes();
2508   }
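       // Snapshot the MemBar count (statistics only) so we can report how many barriers were removed.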
2509   NOT_PRODUCT(int membar_before = count_MemBar(C);)
2510 
2511   // Before elimination, we may re-mark (change to Nested or NonEscObj)
2512   // all associated (same box and obj) lock and unlock nodes.
2513   int cnt = C->macro_count();
2514   for (int i=0; i < cnt; i++) {
2515     Node *n = C->macro_node(i);
2516     if (n->is_AbstractLock()) { // Lock and Unlock nodes
2517       mark_eliminated_locking_nodes(n->as_AbstractLock());
2518     }
2519   }
2520   // Re-marking may break consistency of Coarsened locks.
2521   if (!C->coarsened_locks_consistent()) {
2522     return; // recompile without Coarsened locks if broken
2523   } else {
2524     // After coarsened locks are eliminated, locking regions
2525     // become unbalanced. We should not run any further
2526     // lock elimination optimizations on them.
2527     C->mark_unbalanced_boxes();
2528   }
2529 
2530   // First, attempt to eliminate locks
2531   bool progress = true;
2532   while (progress) {
2533     progress = false;
2534     for (int i = C->macro_count(); i > 0; i = MIN2(i - 1, C->macro_count())) { // more than 1 element can be eliminated at once
2535       Node* n = C->macro_node(i - 1);
2536       bool success = false;
2537       DEBUG_ONLY(int old_macro_count = C->macro_count();)
2538       if (n->is_AbstractLock()) {
2539         success = eliminate_locking_node(n->as_AbstractLock());
2540 #ifndef PRODUCT
2541         if (success && PrintOptoStatistics) {
2542           AtomicAccess::inc(&PhaseMacroExpand::_monitor_objects_removed_counter);
2543         }
2544 #endif
2545       }
2546       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2547       progress = progress || success;
2548       if (success) {
2549         C->print_method(PHASE_AFTER_MACRO_ELIMINATION_STEP, 5, n);
2550       }
2551     }
2552   }
2553   // Next, attempt to eliminate allocations
2554   progress = true;
2555   while (progress) {
2556     progress = false;
2557     for (int i = C->macro_count(); i > 0; i = MIN2(i - 1, C->macro_count())) { // more than 1 element can be eliminated at once
2558       Node* n = C->macro_node(i - 1);
2559       bool success = false;
2560       DEBUG_ONLY(int old_macro_count = C->macro_count();)
2561       switch (n->class_id()) {
2562       case Node::Class_Allocate:
2563       case Node::Class_AllocateArray:
2564         success = eliminate_allocate_node(n->as_Allocate());
2565 #ifndef PRODUCT
2566         if (success && PrintOptoStatistics) {
2567           AtomicAccess::inc(&PhaseMacroExpand::_objs_scalar_replaced_counter);
2568         }
2569 #endif
2570         break;
2571       case Node::Class_CallStaticJava:
2572         success = eliminate_boxing_node(n->as_CallStaticJava());
2573         break;
2574       case Node::Class_Lock:
2575       case Node::Class_Unlock:
2576         assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
2577         break;
2578       case Node::Class_ArrayCopy:
2579         break;
2580       case Node::Class_OuterStripMinedLoop:
2581         break;
2582       case Node::Class_SubTypeCheck:
2583         break;
2584       case Node::Class_Opaque1:
2585         break;
2586       default:
2587         assert(n->Opcode() == Op_LoopLimit ||
2588                n->Opcode() == Op_ModD ||
2589                n->Opcode() == Op_ModF ||
2590                n->Opcode() == Op_PowD ||
2591                n->is_OpaqueConstantBool()    ||
2592                n->is_OpaqueInitializedAssertionPredicate() ||
2593                n->Opcode() == Op_MaxL      ||
2594                n->Opcode() == Op_MinL      ||
2595                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(n),
2596                "unknown node type in macro list");
2597       }
2598       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2599       progress = progress || success;
2600       if (success) {
2601         C->print_method(PHASE_AFTER_MACRO_ELIMINATION_STEP, 5, n);
2602       }
2603     }
2604   }
2605 #ifndef PRODUCT
2606   if (PrintOptoStatistics) {
2607     int membar_after = count_MemBar(C);
2608     AtomicAccess::add(&PhaseMacroExpand::_memory_barriers_removed_counter, membar_before - membar_after);
2609   }
2610 #endif
2611 }
2612 
2613 void PhaseMacroExpand::eliminate_opaque_looplimit_macro_nodes() {
2614   if (C->macro_count() == 0) {
2615     return;
2616   }
2617   refine_strip_mined_loop_macro_nodes();
2618   // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
2619   bool progress = true;
2620   while (progress) {
2621     progress = false;
2622     for (int i = C->macro_count(); i > 0; i--) {
2623       Node* n = C->macro_node(i-1);
2624       bool success = false;
2625       DEBUG_ONLY(int old_macro_count = C->macro_count();)
2626       if (n->Opcode() == Op_LoopLimit) {
2627         // Remove it from macro list and put on IGVN worklist to optimize.
2628         C->remove_macro_node(n);
2629         _igvn._worklist.push(n);
2630         success = true;
2631       } else if (n->Opcode() == Op_CallStaticJava) {
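             // CallStaticJava nodes on the macro list are boxing calls kept around for elimination;
             // any that are still here were not eliminated.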
2632         // Remove it from macro list and put on IGVN worklist to optimize.
2633         C->remove_macro_node(n);
2634         _igvn._worklist.push(n);
2635         success = true;
2636       } else if (n->is_Opaque1()) {
2637         _igvn.replace_node(n, n->in(1));
2638         success = true;
2639       } else if (n->is_OpaqueConstantBool()) {
2640         // Tests with OpaqueConstantBool nodes are implicitly known. Replace the node with true/false. In debug builds,
2641         // we leave the test in the graph to have an additional sanity check at runtime. If the test fails (i.e. a bug),
2642         // we will execute a Halt node.
2643 #ifdef ASSERT
2644         _igvn.replace_node(n, n->in(1));
2645 #else
2646         _igvn.replace_node(n, _igvn.intcon(n->as_OpaqueConstantBool()->constant()));
2647 #endif
2648         success = true;
2649       } else if (n->is_OpaqueInitializedAssertionPredicate()) {
2650         // Initialized Assertion Predicates must always evaluate to true. Therefore, we get rid of them in product
2651         // builds as they are useless. In debug builds we keep them as additional verification code. Even though
2652         // loop opts are already over, we want to keep Initialized Assertion Predicates alive as long as possible to
2653         // enable folding of dead control paths within which cast nodes become top due to impossible types, even
2654         // after loop opts are over. Therefore, we delay the removal of these opaque nodes until now.
2655 #ifdef ASSERT
2656         _igvn.replace_node(n, n->in(1));
2657 #else
2658         _igvn.replace_node(n, _igvn.intcon(1));
2659 #endif // ASSERT
             success = true;
2660       } else if (n->Opcode() == Op_OuterStripMinedLoop) {
2661         C->remove_macro_node(n);
2662         success = true;
2663       } else if (n->Opcode() == Op_MaxL) {
2664         // Since MaxL and MinL are not implemented in the backend, we expand them to
2665         // a CMoveL construct now. Up to this point, the type could be computed
2666         // precisely. CMoveL is not so smart, but we can give it at least the best
2667         // type we know about n now.
2668         Node* repl = MinMaxNode::signed_max(n->in(1), n->in(2), _igvn.type(n), _igvn);
2669         _igvn.replace_node(n, repl);
2670         success = true;
2671       } else if (n->Opcode() == Op_MinL) {
2672         Node* repl = MinMaxNode::signed_min(n->in(1), n->in(2), _igvn.type(n), _igvn);
2673         _igvn.replace_node(n, repl);
2674         success = true;
2675       }
2676       assert(!success || (C->macro_count() == (old_macro_count - 1)), "elimination must have deleted one node from macro list");
2677       progress = progress || success;
2678       if (success) {
2679         C->print_method(PHASE_AFTER_MACRO_ELIMINATION_STEP, 5, n);
2680       }
2681     }
2682   }
2683 }
2684 
2685 //------------------------------expand_macro_nodes----------------------
2686 //  Returns true if a failure occurred.
2687 bool PhaseMacroExpand::expand_macro_nodes() {
2688   if (StressMacroExpansion) {
2689     C->shuffle_macro_nodes();
2690   }
2691 
2692   // Clean up the graph so we're less likely to hit the maximum node
2693   // limit
2694   _igvn.set_delay_transform(false);
2695   _igvn.optimize();
2696   if (C->failing())  return true;
2697   _igvn.set_delay_transform(true);
2698 
2699 
2700   // Because we run IGVN after each expansion, some macro nodes may go
2701   // dead and be removed from the list as we iterate over it. Move
2702   // Allocate nodes (processed in a second pass) at the beginning of
2703   // the list and then iterate from the last element of the list until
2704   // an Allocate node is seen. This is robust to random deletion in
2705   // the list due to nodes going dead.
2706   C->sort_macro_nodes();
2707 
2708   // expand arraycopy "macro" nodes first
2709   // For ReduceBulkZeroing, we must first process all arraycopy nodes
2710   // before the allocate nodes are expanded.
2711   while (C->macro_count() > 0) {
2712     int macro_count = C->macro_count();
2713     Node * n = C->macro_node(macro_count-1);
2714     assert(n->is_macro(), "only macro nodes expected here");
2715     if (_igvn.type(n) == Type::TOP || (n->in(0) != nullptr && n->in(0)->is_top())) {
2716       // node is unreachable, so don't try to expand it
2717       C->remove_macro_node(n);
2718       continue;
2719     }
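         // Allocate nodes were sorted to the front of the macro list and are expanded in the second loop below.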
2720     if (n->is_Allocate()) {
2721       break;
2722     }
2723     // Make sure expansion will not cause node limit to be exceeded.
2724     // Worst case is a macro node gets expanded into about 200 nodes.
2725     // Allow 50% more for optimization.
2726     if (C->check_node_count(300, "out of nodes before macro expansion")) {
2727       return true;
2728     }
2729 
2730     DEBUG_ONLY(int old_macro_count = C->macro_count();)
2731     switch (n->class_id()) {
2732     case Node::Class_Lock:
2733       expand_lock_node(n->as_Lock());
2734       break;
2735     case Node::Class_Unlock:
2736       expand_unlock_node(n->as_Unlock());
2737       break;
2738     case Node::Class_ArrayCopy:
2739       expand_arraycopy_node(n->as_ArrayCopy());
2740       break;
2741     case Node::Class_SubTypeCheck:
2742       expand_subtypecheck_node(n->as_SubTypeCheck());
2743       break;
2744     default:
2745       switch (n->Opcode()) {
2746       case Op_ModD:
2747       case Op_ModF:
2748       case Op_PowD: {
2749         CallLeafPureNode* call_macro = n->as_CallLeafPure();
2750         CallLeafPureNode* call = call_macro->inline_call_leaf_pure_node();
2751         _igvn.replace_node(call_macro, call);
2752         transform_later(call);
2753         break;
2754       }
2755       default:
2756         assert(false, "unknown node type in macro list");
2757       }
2758     }
2759     assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
2760     if (C->failing())  return true;
2761     C->print_method(PHASE_AFTER_MACRO_EXPANSION_STEP, 5, n);
2762 
2763     // Clean up the graph so we're less likely to hit the maximum node
2764     // limit
2765     _igvn.set_delay_transform(false);
2766     _igvn.optimize();
2767     if (C->failing())  return true;
2768     _igvn.set_delay_transform(true);
2769   }
2770 
2771   // All nodes except Allocate nodes are expanded now. There could be
2772   // new optimization opportunities (such as folding newly created
2773   // load from a just allocated object). Run IGVN.
2774 
2775   // expand "macro" nodes
2776   // nodes are removed from the macro list as they are processed
2777   while (C->macro_count() > 0) {
2778     int macro_count = C->macro_count();
2779     Node * n = C->macro_node(macro_count-1);
2780     assert(n->is_macro(), "only macro nodes expected here");
2781     if (_igvn.type(n) == Type::TOP || (n->in(0) != nullptr && n->in(0)->is_top())) {
2782       // node is unreachable, so don't try to expand it
2783       C->remove_macro_node(n);
2784       continue;
2785     }
2786     // Make sure expansion will not cause node limit to be exceeded.
2787     // Worst case is a macro node gets expanded into about 200 nodes.
2788     // Allow 50% more for optimization.
2789     if (C->check_node_count(300, "out of nodes before macro expansion")) {
2790       return true;
2791     }
2792     switch (n->class_id()) {
2793     case Node::Class_Allocate:
2794       expand_allocate(n->as_Allocate());
2795       break;
2796     case Node::Class_AllocateArray:
2797       expand_allocate_array(n->as_AllocateArray());
2798       break;
2799     default:
2800       assert(false, "unknown node type in macro list");
2801     }
2802     assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
2803     if (C->failing())  return true;
2804     C->print_method(PHASE_AFTER_MACRO_EXPANSION_STEP, 5, n);
2805 
2806     // Clean up the graph so we're less likely to hit the maximum node
2807     // limit
2808     _igvn.set_delay_transform(false);
2809     _igvn.optimize();
2810     if (C->failing())  return true;
2811     _igvn.set_delay_transform(true);
2812   }
2813 
2814   _igvn.set_delay_transform(false);
2815   return false;
2816 }
2817 
2818 #ifndef PRODUCT
2819 int PhaseMacroExpand::_objs_scalar_replaced_counter = 0;
2820 int PhaseMacroExpand::_monitor_objects_removed_counter = 0;
2821 int PhaseMacroExpand::_GC_barriers_removed_counter = 0;
2822 int PhaseMacroExpand::_memory_barriers_removed_counter = 0;
2823 
2824 void PhaseMacroExpand::print_statistics() {
2825   tty->print("Objects scalar replaced = %d, ", AtomicAccess::load(&_objs_scalar_replaced_counter));
2826   tty->print("Monitor objects removed = %d, ", AtomicAccess::load(&_monitor_objects_removed_counter));
2827   tty->print("GC barriers removed = %d, ", AtomicAccess::load(&_GC_barriers_removed_counter));
2828   tty->print_cr("Memory barriers removed = %d", AtomicAccess::load(&_memory_barriers_removed_counter));
2829 }
2830 
2831 int PhaseMacroExpand::count_MemBar(Compile *C) {
2832   if (!PrintOptoStatistics) {
2833     return 0;
2834   }
2835   Unique_Node_List ideal_nodes;
2836   int total = 0;
2837   ideal_nodes.map(C->live_nodes(), nullptr);
2838   ideal_nodes.push(C->root());
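       // Walk every node reachable from the root via def->use edges and count the MemBars.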
2839   for (uint next = 0; next < ideal_nodes.size(); ++next) {
2840     Node* n = ideal_nodes.at(next);
2841     if (n->is_MemBar()) {
2842       total++;
2843     }
2844     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2845       Node* m = n->fast_out(i);
2846       ideal_nodes.push(m);
2847     }
2848   }
2849   return total;
2850 }
2851 #endif