/*
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "compiler/compileLog.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "libadt/vectset.hpp"
#include "memory/universe.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/node.hpp"
#include "opto/opaquenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/subtypenode.hpp"
#include "opto/type.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/continuation.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1ThreadLocalData.hpp"
#endif // INCLUDE_G1GC


//
// Replace any references to "oldref" in inputs to "use" with "newref".
// Returns the number of replacements made.
//
int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  int nreplacements = 0;
  uint req = use->req();
  for (uint j = 0; j < use->len(); j++) {
    Node *uin = use->in(j);
    if (uin == oldref) {
      if (j < req)
        use->set_req(j, newref);
      else
        use->set_prec(j, newref);
      nreplacements++;
    } else if (j >= req && uin == nullptr) {
      break;
    }
  }
  return nreplacements;
}

void PhaseMacroExpand::migrate_outs(Node *old, Node *target) {
  assert(old != nullptr, "sanity");
  for (DUIterator_Fast imax, i = old->fast_outs(imax); i < imax; i++) {
    Node* use = old->fast_out(i);
    _igvn.rehash_node_delayed(use);
    imax -= replace_input(use, old, target);
    // back up iterator
    --i;
  }
  assert(old->outcnt() == 0, "all uses must be deleted");
}

Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path) {
  Node* cmp;
  if (mask != 0) {
    Node* and_node = transform_later(new AndXNode(word, MakeConX(mask)));
    cmp = transform_later(new CmpXNode(and_node, MakeConX(bits)));
  } else {
    cmp = word;
  }
  Node* bol = transform_later(new BoolNode(cmp, BoolTest::ne));
  IfNode* iff = new IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
  transform_later(iff);

  // Fast path taken.
  Node *fast_taken = transform_later(new IfFalseNode(iff));

  // Fast path not-taken, i.e. slow path
  Node *slow_taken = transform_later(new IfTrueNode(iff));

  if (return_fast_path) {
    region->init_req(edge, slow_taken); // Capture slow-control
    return fast_taken;
  } else {
    region->init_req(edge, fast_taken); // Capture fast-control
    return slow_taken;
  }
}
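
// Illustrative sketch (documentation only, not part of the VM): for
// mask != 0, opt_bits_test() above emits the following IR shape, where the
// "ne" test sends the masked-bits-set case down the slow path:
//
//   word --> AndX(word, mask) --> CmpX(., bits) --> Bool(ne) --> If
//                                                               /    \
//                                                    IfFalse(fast)  IfTrue(slow)
//
// One projection is wired into "region" at input "edge"; the other is
// returned to the caller, selected by "return_fast_path".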
//--------------------copy_predefined_input_for_runtime_call--------------------
void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call) {
  // Set fixed predefined input arguments
  call->init_req( TypeFunc::Control, ctrl );
  call->init_req( TypeFunc::I_O    , oldcall->in( TypeFunc::I_O) );
  call->init_req( TypeFunc::Memory , oldcall->in( TypeFunc::Memory ) ); // ?????
  call->init_req( TypeFunc::ReturnAdr, oldcall->in( TypeFunc::ReturnAdr ) );
  call->init_req( TypeFunc::FramePtr, oldcall->in( TypeFunc::FramePtr ) );
}

//------------------------------make_slow_call---------------------------------
CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type,
                                           address slow_call, const char* leaf_name, Node* slow_path,
                                           Node* parm0, Node* parm1, Node* parm2) {

  // Slow-path call
  CallNode *call = leaf_name
    ? (CallNode*)new CallLeafNode      ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
    : (CallNode*)new CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), TypeRawPtr::BOTTOM );

  // Slow path call has no side-effects, uses few values
  copy_predefined_input_for_runtime_call(slow_path, oldcall, call );
  if (parm0 != nullptr) call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != nullptr) call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != nullptr) call->init_req(TypeFunc::Parms+2, parm2);
  call->copy_call_debug_info(&_igvn, oldcall);
  call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  _igvn.replace_node(oldcall, call);
  transform_later(call);

  return call;
}

void PhaseMacroExpand::eliminate_gc_barrier(Node* p2x) {
  BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->eliminate_gc_barrier(this, p2x);
#ifndef PRODUCT
  if (PrintOptoStatistics) {
    Atomic::inc(&PhaseMacroExpand::_GC_barriers_removed_counter);
  }
#endif
}
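
// Illustrative sketch (documentation only): scan_mem_chain() below walks one
// memory slice backwards from a use towards its definition, e.g.
//
//   sfpt_mem --> MergeMem --(slice)--> StoreB (raw slice, skipped)
//                                        |
//                                   Proj(Memory) of Initialize(alloc)   <- returned
//
// It stops at one of the sentinels (start memory or the allocation's memory
// projection), at a store to the matching offset/instance, or at an
// Initialize/ArrayCopy node that captures the value.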
// Search for a memory operation for the specified memory slice.
static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) {
  Node *orig_mem = mem;
  Node *alloc_mem = alloc->as_Allocate()->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
  assert(alloc_mem != nullptr, "Allocation without a memory projection.");
  const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
  while (true) {
    if (mem == alloc_mem || mem == start_mem) {
      return mem;  // hit one of our sentinels
    } else if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(alias_idx);
    } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) {
      Node *in = mem->in(0);
      // we can safely skip over safepoints, calls, locks and membars because we
      // already know that the object is safe to eliminate.
      if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) {
        return in;
      } else if (in->is_Call()) {
        CallNode *call = in->as_Call();
        if (call->may_modify(tinst, phase)) {
          assert(call->is_ArrayCopy(), "ArrayCopy is the only call node that doesn't make allocation escape");
          if (call->as_ArrayCopy()->modifies(offset, offset, phase, false)) {
            return in;
          }
        }
        mem = in->in(TypeFunc::Memory);
      } else if (in->is_MemBar()) {
        ArrayCopyNode* ac = nullptr;
        if (ArrayCopyNode::may_modify(tinst, in->as_MemBar(), phase, ac)) {
          if (ac != nullptr) {
            assert(ac->is_clonebasic(), "Only basic clone is a non escaping clone");
            return ac;
          }
        }
        mem = in->in(TypeFunc::Memory);
      } else {
#ifdef ASSERT
        in->dump();
        mem->dump();
        assert(false, "unexpected projection");
#endif
      }
    } else if (mem->is_Store()) {
      const TypePtr* atype = mem->as_Store()->adr_type();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        assert(atype->isa_oopptr(), "address type must be oopptr");
        int adr_offset = atype->offset();
        uint adr_iid = atype->is_oopptr()->instance_id();
        // Array element references have the same alias_idx
        // but different offset and different instance_id.
        if (adr_offset == offset && adr_iid == alloc->_idx) {
          return mem;
        }
      } else {
        assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
      }
      mem = mem->in(MemNode::Memory);
    } else if (mem->is_ClearArray()) {
      if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
        // Cannot bypass initialization of the instance
        // we are looking for.
        debug_only(intptr_t offset;)
        assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
        InitializeNode* init = alloc->as_Allocate()->initialization();
        // We are looking for the stored value, return the Initialize node
        // or the memory edge from the Allocate node.
        if (init != nullptr) {
          return init;
        } else {
          return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers).
        }
      }
      // Otherwise skip it (the call updated 'mem' value).
    } else if (mem->Opcode() == Op_SCMemProj) {
      mem = mem->in(0);
      Node* adr = nullptr;
      if (mem->is_LoadStore()) {
        adr = mem->in(MemNode::Address);
      } else {
        assert(mem->Opcode() == Op_EncodeISOArray ||
               mem->Opcode() == Op_StrCompressedCopy, "sanity");
        adr = mem->in(3); // Destination array
      }
      const TypePtr* atype = adr->bottom_type()->is_ptr();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        DEBUG_ONLY(mem->dump();)
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return nullptr;
      }
      mem = mem->in(MemNode::Memory);
    } else if (mem->Opcode() == Op_StrInflatedCopy) {
      Node* adr = mem->in(3); // Destination array
      const TypePtr* atype = adr->bottom_type()->is_ptr();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        DEBUG_ONLY(mem->dump();)
        assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
        return nullptr;
      }
      mem = mem->in(MemNode::Memory);
    } else {
      return mem;
    }
    assert(mem != orig_mem, "dead memory loop");
  }
}
// Generate loads from source of the arraycopy for fields of
// destination needed at a deoptimization point
Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type *ftype, AllocateNode *alloc) {
  BasicType bt = ft;
  const Type *type = ftype;
  if (ft == T_NARROWOOP) {
    bt = T_OBJECT;
    type = ftype->make_oopptr();
  }
  Node* res = nullptr;
  if (ac->is_clonebasic()) {
    assert(ac->in(ArrayCopyNode::Src) != ac->in(ArrayCopyNode::Dest), "clone source equals destination");
    Node* base = ac->in(ArrayCopyNode::Src);
    Node* adr = _igvn.transform(new AddPNode(base, base, _igvn.MakeConX(offset)));
    const TypePtr* adr_type = _igvn.type(base)->is_ptr()->add_offset(offset);
    MergeMemNode* mergemen = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem();
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt);
  } else {
    if (ac->modifies(offset, offset, &_igvn, true)) {
      assert(ac->in(ArrayCopyNode::Dest) == alloc->result_cast(), "arraycopy destination should be allocation's result");
      uint shift = exact_log2(type2aelembytes(bt));
      Node* src_pos = ac->in(ArrayCopyNode::SrcPos);
      Node* dest_pos = ac->in(ArrayCopyNode::DestPos);
      const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int();
      const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int();

      Node* adr = nullptr;
      const TypePtr* adr_type = nullptr;
      if (src_pos_t->is_con() && dest_pos_t->is_con()) {
        intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset;
        Node* base = ac->in(ArrayCopyNode::Src);
        adr = _igvn.transform(new AddPNode(base, base, _igvn.MakeConX(off)));
        adr_type = _igvn.type(base)->is_ptr()->add_offset(off);
        if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
          // Don't emit a new load from src if src == dst but try to get the value from memory instead
          return value_from_mem(ac->in(TypeFunc::Memory), ctl, ft, ftype, adr_type->isa_oopptr(), alloc);
        }
      } else {
        Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
#ifdef _LP64
        diff = _igvn.transform(new ConvI2LNode(diff));
#endif
        diff = _igvn.transform(new LShiftXNode(diff, _igvn.intcon(shift)));

        Node* off = _igvn.transform(new AddXNode(_igvn.MakeConX(offset), diff));
        Node* base = ac->in(ArrayCopyNode::Src);
        adr = _igvn.transform(new AddPNode(base, base, off));
        adr_type = _igvn.type(base)->is_ptr()->add_offset(Type::OffsetBot);
        if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
          // Non constant offset in the array: we can't statically
          // determine the value
          return nullptr;
        }
      }
      MergeMemNode* mergemen = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem();
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt);
    }
  }
  if (res != nullptr) {
    if (ftype->isa_narrowoop()) {
      // PhaseMacroExpand::scalar_replacement adds DecodeN nodes
      res = _igvn.transform(new EncodePNode(res, ftype));
    }
    return res;
  }
  return nullptr;
}
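
// Illustrative example (documentation only): make_arraycopy_load() above
// covers shapes like the following Java code, where 'a' never escapes and is
// eliminated, but a deoptimization point still needs the value of a[0]:
//
//   int[] a = new int[1];
//   System.arraycopy(src, i, a, 0, 1);   // validated arraycopy into 'a'
//   ...                                  // safepoint here needs a[0]
//
// Rather than materializing 'a', the value is re-loaded from 'src' at the
// corresponding (possibly shifted) offset.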
//
// Given a Memory Phi, compute a value Phi containing the values from stores
// on the input paths.
// Note: this function is recursive, its depth is limited by the "level" argument
// Returns the computed Phi, or null if it cannot compute it.
Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level) {
  assert(mem->is_Phi(), "sanity");
  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  int instance_id = adr_t->instance_id();

  // Check if an appropriate value phi already exists.
  Node* region = mem->in(0);
  for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
    Node* phi = region->fast_out(k);
    if (phi->is_Phi() && phi != mem &&
        phi->as_Phi()->is_same_inst_field(phi_type, (int)mem->_idx, instance_id, alias_idx, offset)) {
      return phi;
    }
  }
  // Check if an appropriate new value phi already exists.
  Node* new_phi = value_phis->find(mem->_idx);
  if (new_phi != nullptr)
    return new_phi;

  if (level <= 0) {
    return nullptr; // Give up: phi tree too deep
  }
  Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
  Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
  assert(alloc_mem != nullptr, "Allocation without a memory projection.");

  uint length = mem->req();
  GrowableArray <Node *> values(length, length, nullptr);

  // create a new Phi for the value
  PhiNode *phi = new PhiNode(mem->in(0), phi_type, nullptr, mem->_idx, instance_id, alias_idx, offset);
  transform_later(phi);
  value_phis->push(phi, mem->_idx);

  for (uint j = 1; j < length; j++) {
    Node *in = mem->in(j);
    if (in == nullptr || in->is_top()) {
      values.at_put(j, in);
    } else {
      Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
      if (val == start_mem || val == alloc_mem) {
        // hit a sentinel, return appropriate 0 value
        values.at_put(j, _igvn.zerocon(ft));
        continue;
      }
      if (val->is_Initialize()) {
        val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      }
      if (val == nullptr) {
        return nullptr;  // can't find a value on this path
      }
      if (val == mem) {
        values.at_put(j, mem);
      } else if (val->is_Store()) {
        Node* n = val->in(MemNode::ValueIn);
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        n = bs->step_over_gc_barrier(n);
        if (is_subword_type(ft)) {
          n = Compile::narrow_value(ft, n, phi_type, &_igvn, true);
        }
        values.at_put(j, n);
      } else if(val->is_Proj() && val->in(0) == alloc) {
        values.at_put(j, _igvn.zerocon(ft));
      } else if (val->is_Phi()) {
        val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
        if (val == nullptr) {
          return nullptr;
        }
        values.at_put(j, val);
      } else if (val->Opcode() == Op_SCMemProj) {
        assert(val->in(0)->is_LoadStore() ||
               val->in(0)->Opcode() == Op_EncodeISOArray ||
               val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity");
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return nullptr;
      } else if (val->is_ArrayCopy()) {
        Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), val->in(TypeFunc::Memory), ft, phi_type, alloc);
        if (res == nullptr) {
          return nullptr;
        }
        values.at_put(j, res);
      } else if (val->is_top()) {
        // This indicates that this path into the phi is dead. Top will eventually also propagate into the Region.
        // IGVN will clean this up later.
        values.at_put(j, val);
      } else {
        DEBUG_ONLY( val->dump(); )
        assert(false, "unknown node on this path");
        return nullptr;  // unknown node on this path
      }
    }
  }
  // Set Phi's inputs
  for (uint j = 1; j < length; j++) {
    if (values.at(j) == mem) {
      phi->init_req(j, phi);
    } else {
      phi->init_req(j, values.at(j));
    }
  }
  return phi;
}
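
// Illustrative example (documentation only): for a non-escaping object whose
// field is assigned on both sides of a branch,
//
//   Point p = new Point();
//   if (c) { p.x = 1; } else { p.x = 2; }
//   ...   // safepoint needs p.x
//
// the memory Phi merging the two stores is translated by value_from_mem_phi()
// above into a value Phi(1, 2) on the same Region, which then describes p.x
// in the debug info.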
// Search for the last value stored into the object's field.
Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc) {
  assert(adr_t->is_known_instance_field(), "instance required");
  int instance_id = adr_t->instance_id();
  assert((uint)instance_id == alloc->_idx, "wrong allocation");

  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
  Node *alloc_ctrl = alloc->in(TypeFunc::Control);
  Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
  assert(alloc_mem != nullptr, "Allocation without a memory projection.");
  VectorSet visited;

  bool done = sfpt_mem == alloc_mem;
  Node *mem = sfpt_mem;
  while (!done) {
    if (visited.test_set(mem->_idx)) {
      return nullptr;  // found a loop, give up
    }
    mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
    if (mem == start_mem || mem == alloc_mem) {
      done = true;  // hit a sentinel, return appropriate 0 value
    } else if (mem->is_Initialize()) {
      mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      if (mem == nullptr) {
        done = true; // Something went wrong.
      } else if (mem->is_Store()) {
        const TypePtr* atype = mem->as_Store()->adr_type();
        assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
        done = true;
      }
    } else if (mem->is_Store()) {
      const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
      assert(atype != nullptr, "address type must be oopptr");
      assert(C->get_alias_index(atype) == alias_idx &&
             atype->is_known_instance_field() && atype->offset() == offset &&
             atype->instance_id() == instance_id, "store is correct memory slice");
      done = true;
    } else if (mem->is_Phi()) {
      // try to find a phi's unique input
      Node *unique_input = nullptr;
      Node *top = C->top();
      for (uint i = 1; i < mem->req(); i++) {
        Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
        if (n == nullptr || n == top || n == mem) {
          continue;
        } else if (unique_input == nullptr) {
          unique_input = n;
        } else if (unique_input != n) {
          unique_input = top;
          break;
        }
      }
      if (unique_input != nullptr && unique_input != top) {
        mem = unique_input;
      } else {
        done = true;
      }
    } else if (mem->is_ArrayCopy()) {
      done = true;
    } else {
      DEBUG_ONLY( mem->dump(); )
      assert(false, "unexpected node");
    }
  }
  if (mem != nullptr) {
    if (mem == start_mem || mem == alloc_mem) {
      // hit a sentinel, return appropriate 0 value
      return _igvn.zerocon(ft);
    } else if (mem->is_Store()) {
      Node* n = mem->in(MemNode::ValueIn);
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      n = bs->step_over_gc_barrier(n);
      return n;
    } else if (mem->is_Phi()) {
      // attempt to produce a Phi reflecting the values on the input paths of the Phi
      Node_Stack value_phis(8);
      Node* phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
      if (phi != nullptr) {
        return phi;
      } else {
        // Kill all new Phis
        while(value_phis.is_nonempty()) {
          Node* n = value_phis.node();
          _igvn.replace_node(n, C->top());
          value_phis.pop();
        }
      }
    } else if (mem->is_ArrayCopy()) {
      Node* ctl = mem->in(0);
      Node* m = mem->in(TypeFunc::Memory);
      if (sfpt_ctl->is_Proj() && sfpt_ctl->as_Proj()->is_uncommon_trap_proj()) {
        // pin the loads in the uncommon trap path
        ctl = sfpt_ctl;
        m = sfpt_mem;
      }
      return make_arraycopy_load(mem->as_ArrayCopy(), offset, ctl, m, ft, ftype, alloc);
    }
  }
  // Something went wrong.
  return nullptr;
}
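
// Illustrative note (documentation only): value_from_mem() above resolves to
// one of three results: a zero constant (the field was never written after
// allocation), the stored value itself, or a value Phi built by
// value_from_mem_phi() when different paths store different values. A null
// result disqualifies the allocation from scalar replacement at this
// safepoint.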
// Check the possibility of scalar replacement.
bool PhaseMacroExpand::can_eliminate_allocation(PhaseIterGVN* igvn, AllocateNode *alloc, GrowableArray <SafePointNode *>* safepoints) {
  // Scan the uses of the allocation to check for anything that would
  // prevent us from eliminating it.
  NOT_PRODUCT( const char* fail_eliminate = nullptr; )
  DEBUG_ONLY( Node* disq_node = nullptr; )
  bool can_eliminate = true;
  bool reduce_merge_precheck = (safepoints == nullptr);

  Node* res = alloc->result_cast();
  const TypeOopPtr* res_type = nullptr;
  if (res == nullptr) {
    // All users were eliminated.
  } else if (!res->is_CheckCastPP()) {
    NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
    can_eliminate = false;
  } else {
    res_type = igvn->type(res)->isa_oopptr();
    if (res_type == nullptr) {
      NOT_PRODUCT(fail_eliminate = "Neither instance nor array allocation";)
      can_eliminate = false;
    } else if (!res_type->klass_is_exact()) {
      NOT_PRODUCT(fail_eliminate = "Not an exact type.";)
      can_eliminate = false;
    } else if (res_type->isa_aryptr()) {
      int length = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      if (length < 0) {
        NOT_PRODUCT(fail_eliminate = "Array's size is not constant";)
        can_eliminate = false;
      }
    }
  }

  if (can_eliminate && res != nullptr) {
    BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
    for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
                               j < jmax && can_eliminate; j++) {
      Node* use = res->fast_out(j);

      if (use->is_AddP()) {
        const TypePtr* addp_type = igvn->type(use)->is_ptr();
        int offset = addp_type->offset();

        if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
          NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
          can_eliminate = false;
          break;
        }
        for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
                                   k < kmax && can_eliminate; k++) {
          Node* n = use->fast_out(k);
          if (!n->is_Store() && n->Opcode() != Op_CastP2X && !bs->is_gc_pre_barrier_node(n) && !reduce_merge_precheck) {
            DEBUG_ONLY(disq_node = n;)
            if (n->is_Load() || n->is_LoadStore()) {
              NOT_PRODUCT(fail_eliminate = "Field load";)
            } else {
              NOT_PRODUCT(fail_eliminate = "Not store field reference";)
            }
            can_eliminate = false;
          }
        }
      } else if (use->is_ArrayCopy() &&
                 (use->as_ArrayCopy()->is_clonebasic() ||
                  use->as_ArrayCopy()->is_arraycopy_validated() ||
                  use->as_ArrayCopy()->is_copyof_validated() ||
                  use->as_ArrayCopy()->is_copyofrange_validated()) &&
                 use->in(ArrayCopyNode::Dest) == res) {
        // ok to eliminate
      } else if (use->is_SafePoint()) {
        SafePointNode* sfpt = use->as_SafePoint();
        if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
          // Object is passed as argument.
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
          can_eliminate = false;
        }
        Node* sfptMem = sfpt->memory();
        if (sfptMem == nullptr || sfptMem->is_top()) {
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "null or TOP memory";)
          can_eliminate = false;
        } else if (!reduce_merge_precheck) {
          safepoints->append_if_missing(sfpt);
        }
      } else if (reduce_merge_precheck &&
                 (use->is_Phi() || use->is_EncodeP() ||
                  use->Opcode() == Op_MemBarRelease ||
                  (UseStoreStoreForCtor && use->Opcode() == Op_MemBarStoreStore))) {
        // Nothing to do
      } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
        if (use->is_Phi()) {
          if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
          }
          DEBUG_ONLY(disq_node = use;)
        } else {
          if (use->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
          }
          DEBUG_ONLY(disq_node = use;)
        }
        can_eliminate = false;
      }
    }
  }

#ifndef PRODUCT
  if (PrintEliminateAllocations && safepoints != nullptr) {
    if (can_eliminate) {
      tty->print("Scalar ");
      if (res == nullptr)
        alloc->dump();
      else
        res->dump();
    } else if (alloc->_is_scalar_replaceable) {
      tty->print("NotScalar (%s)", fail_eliminate);
      if (res == nullptr)
        alloc->dump();
      else
        res->dump();
#ifdef ASSERT
      if (disq_node != nullptr) {
        tty->print("  >>>> ");
        disq_node->dump();
      }
#endif /*ASSERT*/
    }
  }

  if (TraceReduceAllocationMerges && !can_eliminate && reduce_merge_precheck) {
    tty->print_cr("\tCan't eliminate allocation because '%s': ", fail_eliminate != nullptr ? fail_eliminate : "");
    DEBUG_ONLY(if (disq_node != nullptr) disq_node->dump();)
  }
#endif
  return can_eliminate;
}
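
// Illustrative example (documentation only): the checks above accept a shape
// like
//
//   Point p = new Point();   // exact type, unique CheckCastPP
//   p.x = 1;                 // stores and debug (safepoint) uses only
//   use(p.x);
//
// but reject, e.g., "return p;" (object is the return value) or passing 'p'
// as a call argument, since the object would then escape the compiled method.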
void PhaseMacroExpand::undo_previous_scalarizations(GrowableArray <SafePointNode *> safepoints_done, AllocateNode* alloc) {
  Node* res = alloc->result_cast();
  int nfields = 0;
  assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");

  if (res != nullptr) {
    const TypeOopPtr* res_type = _igvn.type(res)->isa_oopptr();

    if (res_type->isa_instptr()) {
      // find the fields of the class which will be needed for safepoint debug information
      ciInstanceKlass* iklass = res_type->is_instptr()->instance_klass();
      nfields = iklass->nof_nonstatic_fields();
    } else {
      // find the array's elements which will be needed for safepoint debug information
      nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      assert(nfields >= 0, "must be an array klass.");
    }
  }

  // rollback processed safepoints
  while (safepoints_done.length() > 0) {
    SafePointNode* sfpt_done = safepoints_done.pop();
    // remove any extra entries we added to the safepoint
    uint last = sfpt_done->req() - 1;
    for (int k = 0; k < nfields; k++) {
      sfpt_done->del_req(last--);
    }
    JVMState *jvms = sfpt_done->jvms();
    jvms->set_endoff(sfpt_done->req());
    // Now make a pass over the debug information replacing any references
    // to SafePointScalarObjectNode with the allocated object.
    int start = jvms->debug_start();
    int end   = jvms->debug_end();
    for (int i = start; i < end; i++) {
      if (sfpt_done->in(i)->is_SafePointScalarObject()) {
        SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
        if (scobj->first_index(jvms) == sfpt_done->req() &&
            scobj->n_fields() == (uint)nfields) {
          assert(scobj->alloc() == alloc, "sanity");
          sfpt_done->set_req(i, res);
        }
      }
    }
    _igvn._worklist.push(sfpt_done);
  }
}
SafePointScalarObjectNode* PhaseMacroExpand::create_scalarized_object_description(AllocateNode *alloc, SafePointNode* sfpt) {
  // Fields of scalar objs are referenced only at the end
  // of regular debuginfo at the last (youngest) JVMS.
  // Record relative start index.
  ciInstanceKlass* iklass    = nullptr;
  BasicType basic_elem_type  = T_ILLEGAL;
  const Type* field_type     = nullptr;
  const TypeOopPtr* res_type = nullptr;
  int nfields                = 0;
  int array_base             = 0;
  int element_size           = 0;
  uint first_ind             = (sfpt->req() - sfpt->jvms()->scloff());
  Node* res                  = alloc->result_cast();

  assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");
  assert(sfpt->jvms() != nullptr, "missed JVMS");

  if (res != nullptr) { // Could be null when there are no users
    res_type = _igvn.type(res)->isa_oopptr();

    if (res_type->isa_instptr()) {
      // find the fields of the class which will be needed for safepoint debug information
      iklass = res_type->is_instptr()->instance_klass();
      nfields = iklass->nof_nonstatic_fields();
    } else {
      // find the array's elements which will be needed for safepoint debug information
      nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      assert(nfields >= 0, "must be an array klass.");
      basic_elem_type = res_type->is_aryptr()->elem()->array_element_basic_type();
      array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
      element_size = type2aelembytes(basic_elem_type);
      field_type = res_type->is_aryptr()->elem();
    }
  }

  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(res_type, alloc, first_ind, sfpt->jvms()->depth(), nfields);
  sobj->init_req(0, C->root());
  transform_later(sobj);

  // Scan object's fields adding an input to the safepoint for each field.
  for (int j = 0; j < nfields; j++) {
    intptr_t offset;
    ciField* field = nullptr;
    if (iklass != nullptr) {
      field = iklass->nonstatic_field_at(j);
      offset = field->offset_in_bytes();
      ciType* elem_type = field->type();
      basic_elem_type = field->layout_type();

      // The next code is taken from Parse::do_get_xxx().
      if (is_reference_type(basic_elem_type)) {
        if (!elem_type->is_loaded()) {
          field_type = TypeInstPtr::BOTTOM;
        } else if (field != nullptr && field->is_static_constant()) {
          ciObject* con = field->constant_value().as_object();
          // Do not "join" in the previous type; it doesn't add value,
          // and may yield a vacuous result if the field is of interface type.
          field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
          assert(field_type != nullptr, "field singleton type must be consistent");
        } else {
          field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
        }
        if (UseCompressedOops) {
          field_type = field_type->make_narrowoop();
          basic_elem_type = T_NARROWOOP;
        }
      } else {
        field_type = Type::get_const_basic_type(basic_elem_type);
      }
    } else {
      offset = array_base + j * (intptr_t)element_size;
    }

    const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr();

    Node *field_val = value_from_mem(sfpt->memory(), sfpt->control(), basic_elem_type, field_type, field_addr_type, alloc);

    // We weren't able to find a value for this field,
    // give up on eliminating this allocation.
    if (field_val == nullptr) {
      uint last = sfpt->req() - 1;
      for (int k = 0; k < j; k++) {
        sfpt->del_req(last--);
      }
      _igvn._worklist.push(sfpt);

#ifndef PRODUCT
      if (PrintEliminateAllocations) {
        if (field != nullptr) {
          tty->print("=== At SafePoint node %d can't find value of field: ", sfpt->_idx);
          field->print();
          int field_idx = C->get_alias_index(field_addr_type);
          tty->print(" (alias_idx=%d)", field_idx);
        } else { // Array's element
          tty->print("=== At SafePoint node %d can't find value of array element [%d]", sfpt->_idx, j);
        }
        tty->print(", which prevents elimination of: ");
        if (res == nullptr)
          alloc->dump();
        else
          res->dump();
      }
#endif

      return nullptr;
    }

    if (UseCompressedOops && field_type->isa_narrowoop()) {
      // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
      // to be able to scalar replace the allocation.
      if (field_val->is_EncodeP()) {
        field_val = field_val->in(1);
      } else {
        field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
      }
    }
    sfpt->add_req(field_val);
  }

  sfpt->jvms()->set_endoff(sfpt->req());

  return sobj;
}
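
// Illustrative sketch (documentation only) of the safepoint layout after the
// routine above runs: the field values are appended after the existing debug
// info, and the SafePointScalarObjectNode records where they start:
//
//   SafePoint inputs: [ ... locals/stack/monitors ... | f0 f1 ... f(n-1) ]
//                                                       ^
//                              SafePointScalarObjectNode::first_index
//
// At deoptimization the runtime reallocates the object and fills its fields
// from these inputs.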
// Do scalar replacement.
bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
  GrowableArray <SafePointNode *> safepoints_done;
  Node* res = alloc->result_cast();
  assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");

  // Process the safepoint uses
  while (safepoints.length() > 0) {
    SafePointNode* sfpt = safepoints.pop();
    SafePointScalarObjectNode* sobj = create_scalarized_object_description(alloc, sfpt);

    if (sobj == nullptr) {
      undo_previous_scalarizations(safepoints_done, alloc);
      return false;
    }

    // Now make a pass over the debug information replacing any references
    // to the allocated object with "sobj"
    JVMState *jvms = sfpt->jvms();
    sfpt->replace_edges_in_range(res, sobj, jvms->debug_start(), jvms->debug_end(), &_igvn);
    _igvn._worklist.push(sfpt);

    // keep it for rollback
    safepoints_done.append_if_missing(sfpt);
  }

  return true;
}

static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
  Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
  Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory);
  if (ctl_proj != nullptr) {
    igvn.replace_node(ctl_proj, n->in(0));
  }
  if (mem_proj != nullptr) {
    igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
  }
}
// Process users of eliminated allocation.
void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
  Node* res = alloc->result_cast();
  if (res != nullptr) {
    for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
      Node *use = res->last_out(j);
      uint oc1 = res->outcnt();

      if (use->is_AddP()) {
        for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
          Node *n = use->last_out(k);
          uint oc2 = use->outcnt();
          if (n->is_Store()) {
#ifdef ASSERT
            // Verify that there are no dependent MemBarVolatile nodes;
            // they should be removed during IGVN, see MemBarNode::Ideal().
            for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
                                       p < pmax; p++) {
              Node* mb = n->fast_out(p);
              assert(mb->is_Initialize() || !mb->is_MemBar() ||
                     mb->req() <= MemBarNode::Precedent ||
                     mb->in(MemBarNode::Precedent) != n,
                     "MemBarVolatile should be eliminated for non-escaping object");
            }
#endif
            _igvn.replace_node(n, n->in(MemNode::Memory));
          } else {
            eliminate_gc_barrier(n);
          }
          k -= (oc2 - use->outcnt());
        }
        _igvn.remove_dead_node(use);
      } else if (use->is_ArrayCopy()) {
        // Disconnect ArrayCopy node
        ArrayCopyNode* ac = use->as_ArrayCopy();
        if (ac->is_clonebasic()) {
          Node* membar_after = ac->proj_out(TypeFunc::Control)->unique_ctrl_out();
          disconnect_projections(ac, _igvn);
          assert(alloc->in(TypeFunc::Memory)->is_Proj() && alloc->in(TypeFunc::Memory)->in(0)->Opcode() == Op_MemBarCPUOrder, "mem barrier expected before allocation");
          Node* membar_before = alloc->in(TypeFunc::Memory)->in(0);
          disconnect_projections(membar_before->as_MemBar(), _igvn);
          if (membar_after->is_MemBar()) {
            disconnect_projections(membar_after->as_MemBar(), _igvn);
          }
        } else {
          assert(ac->is_arraycopy_validated() ||
                 ac->is_copyof_validated() ||
                 ac->is_copyofrange_validated(), "unsupported");
          CallProjections callprojs;
          ac->extract_projections(&callprojs, true);

          _igvn.replace_node(callprojs.fallthrough_ioproj, ac->in(TypeFunc::I_O));
          _igvn.replace_node(callprojs.fallthrough_memproj, ac->in(TypeFunc::Memory));
          _igvn.replace_node(callprojs.fallthrough_catchproj, ac->in(TypeFunc::Control));

          // Set control to top. IGVN will remove the remaining projections
          ac->set_req(0, top());
          ac->replace_edge(res, top(), &_igvn);

          // Disconnect src right away: it can help find new
          // opportunities for allocation elimination
          Node* src = ac->in(ArrayCopyNode::Src);
          ac->replace_edge(src, top(), &_igvn);
          // src can be top at this point if src and dest of the
          // arraycopy were the same
          if (src->outcnt() == 0 && !src->is_top()) {
            _igvn.remove_dead_node(src);
          }
        }
        _igvn._worklist.push(ac);
      } else {
        eliminate_gc_barrier(use);
      }
      j -= (oc1 - res->outcnt());
    }
    assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
    _igvn.remove_dead_node(res);
  }

  //
  // Process other users of allocation's projections
  //
  if (_callprojs.resproj != nullptr && _callprojs.resproj->outcnt() != 0) {
    // First disconnect stores captured by Initialize node.
    // If Initialize node is eliminated first in the following code,
    // it will kill such stores and DUIterator_Last will assert.
    for (DUIterator_Fast jmax, j = _callprojs.resproj->fast_outs(jmax); j < jmax; j++) {
      Node* use = _callprojs.resproj->fast_out(j);
      if (use->is_AddP()) {
        // raw memory addresses used only by the initialization
        _igvn.replace_node(use, C->top());
        --j; --jmax;
      }
    }
    for (DUIterator_Last jmin, j = _callprojs.resproj->last_outs(jmin); j >= jmin; ) {
      Node* use = _callprojs.resproj->last_out(j);
      uint oc1 = _callprojs.resproj->outcnt();
      if (use->is_Initialize()) {
        // Eliminate Initialize node.
        InitializeNode *init = use->as_Initialize();
        assert(init->outcnt() <= 2, "only a control and memory projection expected");
        Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
        if (ctrl_proj != nullptr) {
          _igvn.replace_node(ctrl_proj, init->in(TypeFunc::Control));
#ifdef ASSERT
          // If the InitializeNode has no memory out, it will die, and tmp will become null
          Node* tmp = init->in(TypeFunc::Control);
          assert(tmp == nullptr || tmp == _callprojs.fallthrough_catchproj, "allocation control projection");
#endif
        }
        Node *mem_proj = init->proj_out_or_null(TypeFunc::Memory);
        if (mem_proj != nullptr) {
          Node *mem = init->in(TypeFunc::Memory);
#ifdef ASSERT
          if (mem->is_MergeMem()) {
            assert(mem->in(TypeFunc::Memory) == _callprojs.fallthrough_memproj, "allocation memory projection");
          } else {
            assert(mem == _callprojs.fallthrough_memproj, "allocation memory projection");
          }
#endif
          _igvn.replace_node(mem_proj, mem);
        }
      } else {
        assert(false, "only Initialize or AddP expected");
      }
      j -= (oc1 - _callprojs.resproj->outcnt());
    }
  }
  if (_callprojs.fallthrough_catchproj != nullptr) {
    _igvn.replace_node(_callprojs.fallthrough_catchproj, alloc->in(TypeFunc::Control));
  }
  if (_callprojs.fallthrough_memproj != nullptr) {
    _igvn.replace_node(_callprojs.fallthrough_memproj, alloc->in(TypeFunc::Memory));
  }
  if (_callprojs.catchall_memproj != nullptr) {
    _igvn.replace_node(_callprojs.catchall_memproj, C->top());
  }
  if (_callprojs.fallthrough_ioproj != nullptr) {
    _igvn.replace_node(_callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
  }
  if (_callprojs.catchall_ioproj != nullptr) {
    _igvn.replace_node(_callprojs.catchall_ioproj, C->top());
  }
  if (_callprojs.catchall_catchproj != nullptr) {
    _igvn.replace_node(_callprojs.catchall_catchproj, C->top());
  }
}
bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
  // If reallocation fails during deoptimization we'll pop all
  // interpreter frames for this compiled frame and that won't play
  // nice with JVMTI popframe.
  // We avoid this issue by eager reallocation when the popframe request
  // is received.
  if (!EliminateAllocations || !alloc->_is_non_escaping) {
    return false;
  }
  Node* klass = alloc->in(AllocateNode::KlassNode);
  const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
  Node* res = alloc->result_cast();
  // Eliminate boxing allocations which are not used
  // regardless of scalar replaceable status.
  bool boxing_alloc = C->eliminate_boxing() &&
                      tklass->isa_instklassptr() &&
                      tklass->is_instklassptr()->instance_klass()->is_box_klass();
  if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != nullptr))) {
    return false;
  }

  alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);

  GrowableArray <SafePointNode *> safepoints;
  if (!can_eliminate_allocation(&_igvn, alloc, &safepoints)) {
    return false;
  }

  if (!alloc->_is_scalar_replaceable) {
    assert(res == nullptr, "sanity");
    // We can only eliminate allocation if all debug info references
    // are already replaced with SafePointScalarObject because
    // we can't search for a field's value without instance_id.
    if (safepoints.length() > 0) {
      return false;
    }
  }

  if (!scalar_replacement(alloc, safepoints)) {
    return false;
  }

  CompileLog* log = C->log();
  if (log != nullptr) {
    log->head("eliminate_allocation type='%d'",
              log->identify(tklass->exact_klass()));
    JVMState* p = alloc->jvms();
    while (p != nullptr) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("eliminate_allocation");
  }

  process_users_of_allocation(alloc);

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (alloc->is_AllocateArray())
      tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
    else
      tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
  }
#endif

  return true;
}
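
// Illustrative example (documentation only): eliminate_boxing_node() below
// targets non-escaping autobox allocations whose result is never used as an
// object, e.g.
//
//   Integer boxed = Integer.valueOf(x);  // box never escapes
//   ...                                  // only debug (deopt) references remain
//
// Once EA has proven the box non-escaping and all real uses are gone, the
// boxing call itself can be removed.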
bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
  // EA should remove all uses of non-escaping boxing node.
  if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != nullptr) {
    return false;
  }

  assert(boxing->result_cast() == nullptr, "unexpected boxing node result");

  boxing->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);

  const TypeTuple* r = boxing->tf()->range();
  assert(r->cnt() > TypeFunc::Parms, "sanity");
  const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
  assert(t != nullptr, "sanity");

  CompileLog* log = C->log();
  if (log != nullptr) {
    log->head("eliminate_boxing type='%d'",
              log->identify(t->instance_klass()));
    JVMState* p = boxing->jvms();
    while (p != nullptr) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("eliminate_boxing");
  }

  process_users_of_allocation(boxing);

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    tty->print("++++ Eliminated: %d ", boxing->_idx);
    boxing->method()->print_short_name(tty);
    tty->cr();
  }
#endif

  return true;
}

Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  const TypePtr* adr_type = adr->bottom_type()->is_ptr();
  Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt, MemNode::unordered);
  transform_later(value);
  return value;
}


Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  mem = StoreNode::make(_igvn, ctl, mem, adr, nullptr, value, bt, MemNode::unordered);
  transform_later(mem);
  return mem;
}

//=============================================================================
//
//                              A L L O C A T I O N
//
// Allocation attempts to be fast in the case of frequent small objects.
// It breaks down like this:
//
// 1) Size in doublewords is computed.  This is a constant for objects and
// variable for most arrays.  Doubleword units are used to avoid size
// overflow of huge doubleword arrays.  We need doublewords in the end for
// rounding.
//
// 2) Size is checked for being 'too large'.  Too-large allocations will go
// the slow path into the VM.  The slow path can throw any required
// exceptions, and does all the special checks for very large arrays.  The
// size test can constant-fold away for objects.  For objects with
// finalizers it constant-folds the other way: you always go slow with
// finalizers.
//
// 3) If NOT using TLABs, this is the contended loop-back point.
// Load-Locked the heap top.  If using TLABs normal-load the heap top.
//
// 4) Check that heap top + size*8 < max.  If we fail go the slow route.
// NOTE: "top+size*8" cannot wrap the 4Gig line!  Here's why: for largish
// "size*8" we always enter the VM, where "largish" is a constant picked small
// enough that there's always space between the eden max and 4Gig (old space is
// there so it's quite large) and large enough that the cost of entering the VM
// is dwarfed by the cost to initialize the space.
//
// 5) If NOT using TLABs, Store-Conditional the adjusted heap top back
// down.  If contended, repeat at step 3.  If using TLABs normal-store
// adjusted heap top back down; there is no contention.
//
// 6) If !ZeroTLAB then Bulk-clear the object/array.  Fill in klass & mark
// fields.
//
// 7) Merge with the slow-path; cast the raw memory pointer to the correct
// oop flavor.
//
//=============================================================================
// FastAllocateSizeLimit value is in DOUBLEWORDS.
// Allocations bigger than this always go the slow route.
// This value must be small enough that allocation attempts that need to
// trigger exceptions go the slow route.  Also, it must be small enough so
// that heap_top + size_in_bytes does not wrap around the 4Gig limit.
//=============================================================================
//
// %%% Here is an old comment from parseHelper.cpp; is it outdated?
// The allocator will coalesce int->oop copies away.  See comment in
// coalesce.cpp about how this works.  It depends critically on the exact
// code shape produced here, so if you are changing this code shape
// make sure the GC info for the heap-top is correct in and around the
// slow-path call.
//
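// Illustrative pseudo-code (documentation only, assuming the TLAB path
// described in steps 1-7 above; the names are hypothetical, not VM APIs):
//
//   top = thread->tlab_top();            // 3) normal-load the (TLAB) heap top
//   new_top = top + size_in_bytes;
//   if (size too large || new_top > thread->tlab_end())
//     goto slow_path;                    // 2)/4) runtime call into the VM
//   thread->set_tlab_top(new_top);       // 5) normal-store; no contention
//   clear body, fill in mark & klass;    // 6) unless ZeroTLAB pre-zeroed it
//   oop = (cast) top;                    // 7) merge with the slow-path result
//
// The actual fast-path IR is produced by BarrierSetC2::obj_allocate() inside
// expand_allocate_common() below.
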
void PhaseMacroExpand::expand_allocate_common(
            AllocateNode* alloc, // allocation node to be expanded
            Node* length,  // array length for an array allocation
            const TypeFunc* slow_call_type, // Type of slow call
            address slow_call_address,  // Address of slow call
            Node* valid_length_test // whether length is valid or not
    )
{
  Node* ctrl = alloc->in(TypeFunc::Control);
  Node* mem  = alloc->in(TypeFunc::Memory);
  Node* i_o  = alloc->in(TypeFunc::I_O);
  Node* size_in_bytes     = alloc->in(AllocateNode::AllocSize);
  Node* klass_node        = alloc->in(AllocateNode::KlassNode);
  Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);
  assert(ctrl != nullptr, "must have control");

  // We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
  // they will not be used if "always_slow" is set
  enum { slow_result_path = 1, fast_result_path = 2 };
  Node *result_region = nullptr;
  Node *result_phi_rawmem = nullptr;
  Node *result_phi_rawoop = nullptr;
  Node *result_phi_i_o = nullptr;

  // The initial slow comparison is a size check, the comparison
  // we want to do is a BoolTest::gt
  bool expand_fast_path = true;
  int tv = _igvn.find_int_con(initial_slow_test, -1);
  if (tv >= 0) {
    // InitialTest has constant result
    //   0 - can fit in TLAB
    //   1 - always too big or negative
    assert(tv <= 1, "0 or 1 if a constant");
    expand_fast_path = (tv == 0);
    initial_slow_test = nullptr;
  } else {
    initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn);
  }

  if (!UseTLAB) {
    // Force slow-path allocation
    expand_fast_path = false;
    initial_slow_test = nullptr;
  }

  bool allocation_has_use = (alloc->result_cast() != nullptr);
  if (!allocation_has_use) {
    InitializeNode* init = alloc->initialization();
    if (init != nullptr) {
      init->remove(&_igvn);
    }
    if (expand_fast_path && (initial_slow_test == nullptr)) {
      // Remove allocation node and return.
      // Size is a non-negative constant -> no initial check needed -> directly to fast path.
      // Also, no usages -> empty fast path -> no fall out to slow path -> nothing left.
#ifndef PRODUCT
      if (PrintEliminateAllocations) {
        tty->print("NotUsed ");
        Node* res = alloc->proj_out_or_null(TypeFunc::Parms);
        if (res != nullptr) {
          res->dump();
        } else {
          alloc->dump();
        }
      }
#endif
      yank_alloc_node(alloc);
      return;
    }
  }

  enum { too_big_or_final_path = 1, need_gc_path = 2 };
  Node *slow_region = nullptr;
  Node *toobig_false = ctrl;

  // generate the initial test if necessary
  if (initial_slow_test != nullptr ) {
    assert (expand_fast_path, "Only need test if there is a fast path");
    slow_region = new RegionNode(3);

    // Now make the initial failure test.  Usually a too-big test but
    // might be a TRUE for finalizers.
    IfNode *toobig_iff = new IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
    transform_later(toobig_iff);
    // Plug the failing-too-big test into the slow-path region
    Node *toobig_true = new IfTrueNode( toobig_iff );
    transform_later(toobig_true);
    slow_region    ->init_req( too_big_or_final_path, toobig_true );
    toobig_false = new IfFalseNode( toobig_iff );
    transform_later(toobig_false);
  } else {
    // No initial test, just fall into next case
    assert(allocation_has_use || !expand_fast_path, "Should already have been handled");
    toobig_false = ctrl;
    debug_only(slow_region = NodeSentinel);
  }

  // If we are here there are several possibilities
  // - expand_fast_path is false - then only a slow path is expanded. That's it.
  // no_initial_check means a constant allocation.
  // - If check always evaluates to false -> expand_fast_path is false (see above)
  // - If check always evaluates to true -> directly into fast path (but may bailout to slowpath)
  // if !allocation_has_use the fast path is empty
  // if !allocation_has_use && no_initial_check
  //   - Then there is no fast path that can fall out to the slow path -> no allocation code at all.
  //     removed by yank_alloc_node above.

  Node *slow_mem = mem;  // save the current memory state for slow path
  // generate the fast allocation code unless we know that the initial test will always go slow
  if (expand_fast_path) {
    // Fast path modifies only raw memory.
    if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
    }

    // allocate the Region and Phi nodes for the result
    result_region = new RegionNode(3);
    result_phi_rawmem = new PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
    result_phi_i_o    = new PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch

    // Grab regular I/O before optional prefetch may change it.
    // Slow-path does no I/O so just set it to the original I/O.
    result_phi_i_o->init_req(slow_result_path, i_o);

    // Name successful fast-path variables
    Node* fast_oop_ctrl;
    Node* fast_oop_rawmem;
    if (allocation_has_use) {
      Node* needgc_ctrl = nullptr;
      result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);

      intx prefetch_lines = length != nullptr ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* fast_oop = bs->obj_allocate(this, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl,
                                        fast_oop_ctrl, fast_oop_rawmem,
                                        prefetch_lines);

      if (initial_slow_test != nullptr) {
        // This completes all paths into the slow merge point
        slow_region->init_req(need_gc_path, needgc_ctrl);
        transform_later(slow_region);
      } else {
        // No initial slow path needed!
        // Just fall from the need-GC path straight into the VM call.
        slow_region = needgc_ctrl;
      }

      InitializeNode* init = alloc->initialization();
      fast_oop_rawmem = initialize_object(alloc,
                                          fast_oop_ctrl, fast_oop_rawmem, fast_oop,
                                          klass_node, length, size_in_bytes);
      expand_initialize_membar(alloc, init, fast_oop_ctrl, fast_oop_rawmem);
      expand_dtrace_alloc_probe(alloc, fast_oop, fast_oop_ctrl, fast_oop_rawmem);

      result_phi_rawoop->init_req(fast_result_path, fast_oop);
    } else {
      assert (initial_slow_test != nullptr, "sanity");
      fast_oop_ctrl   = toobig_false;
      fast_oop_rawmem = mem;
      transform_later(slow_region);
    }

    // Plug in the successful fast-path into the result merge point
    result_region    ->init_req(fast_result_path, fast_oop_ctrl);
    result_phi_i_o   ->init_req(fast_result_path, i_o);
    result_phi_rawmem->init_req(fast_result_path, fast_oop_rawmem);
  } else {
    slow_region = ctrl;
    result_phi_i_o = i_o; // Rename it to use in the following code.
  }

  // Generate slow-path call
  CallNode *call = new CallStaticJavaNode(slow_call_type, slow_call_address,
                                          OptoRuntime::stub_name(slow_call_address),
                                          TypePtr::BOTTOM);
  call->init_req(TypeFunc::Control,   slow_region);
  call->init_req(TypeFunc::I_O,       top());    // does no i/o
  call->init_req(TypeFunc::Memory,    slow_mem); // may gc ptrs
  call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
  call->init_req(TypeFunc::FramePtr,  alloc->in(TypeFunc::FramePtr));

  call->init_req(TypeFunc::Parms+0, klass_node);
  if (length != nullptr) {
    call->init_req(TypeFunc::Parms+1, length);
  }

  // Copy debug information and adjust JVMState information, then replace
  // allocate node with the call
  call->copy_call_debug_info(&_igvn, alloc);
  // For array allocations, copy the valid length check to the call node so Compile::final_graph_reshaping() can verify
  // that the call has the expected number of CatchProj nodes (in case the allocation always fails and the fallthrough
  // path dies).
  if (valid_length_test != nullptr) {
    call->add_req(valid_length_test);
  }
  if (expand_fast_path) {
    call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  } else {
    // Hook i_o projection to avoid its elimination during allocation
    // replacement (when only a slow call is generated).
    call->set_req(TypeFunc::I_O, result_phi_i_o);
  }
  _igvn.replace_node(alloc, call);
  transform_later(call);

  // Identify the output projections from the allocate node and
  // adjust any references to them.
  // The control and io projections look like:
  //
  //        v---Proj(ctrl) <-----+   v---CatchProj(ctrl)
  //  Allocate                   Catch
  //        ^---Proj(io)    <----+    ^---CatchProj(io)
  //
  //  We are interested in the CatchProj nodes.
  call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);

  // An allocate node has separate memory projections for the uses on
  // the control and i_o paths. Replace the control memory projection with
  // result_phi_rawmem (unless we are only generating a slow call when
  // both memory projections are combined).
  if (expand_fast_path && _callprojs.fallthrough_memproj != nullptr) {
    migrate_outs(_callprojs.fallthrough_memproj, result_phi_rawmem);
  }
  // Now change uses of catchall_memproj to use fallthrough_memproj and delete
  // catchall_memproj so we end up with a call that has only 1 memory projection.
  if (_callprojs.catchall_memproj != nullptr) {
    if (_callprojs.fallthrough_memproj == nullptr) {
      _callprojs.fallthrough_memproj = new ProjNode(call, TypeFunc::Memory);
      transform_later(_callprojs.fallthrough_memproj);
    }
    migrate_outs(_callprojs.catchall_memproj, _callprojs.fallthrough_memproj);
    _igvn.remove_dead_node(_callprojs.catchall_memproj);
  }

  // An allocate node has separate i_o projections for the uses on the control
  // and i_o paths. Always replace the control i_o projection with the result
  // i_o, otherwise the incoming i_o becomes dead when only a slow call is
  // generated (this differs from the memory projections, where both
  // projections are combined in that case).
  if (_callprojs.fallthrough_ioproj != nullptr) {
    migrate_outs(_callprojs.fallthrough_ioproj, result_phi_i_o);
  }
  // Now change uses of catchall_ioproj to use fallthrough_ioproj and delete
  // catchall_ioproj so we end up with a call that has only 1 i_o projection.
  if (_callprojs.catchall_ioproj != nullptr) {
    if (_callprojs.fallthrough_ioproj == nullptr) {
      _callprojs.fallthrough_ioproj = new ProjNode(call, TypeFunc::I_O);
      transform_later(_callprojs.fallthrough_ioproj);
    }
    migrate_outs(_callprojs.catchall_ioproj, _callprojs.fallthrough_ioproj);
    _igvn.remove_dead_node(_callprojs.catchall_ioproj);
  }

  // If we generated only a slow call, we are done.
  if (!expand_fast_path) {
    // Now we can unhook i_o.
    if (result_phi_i_o->outcnt() > 1) {
      call->set_req(TypeFunc::I_O, top());
    } else {
      assert(result_phi_i_o->unique_ctrl_out() == call, "sanity");
      // Case of a new array with a negative size known during compilation:
      // the AllocateArrayNode::Ideal() optimization disconnects the
      // unreachable following code, since the call to the runtime will throw
      // an exception. As a result there are no users of i_o after the call.
      // Leave i_o attached to this call to avoid problems in the preceding
      // graph.
    }
    return;
  }

  if (_callprojs.fallthrough_catchproj != nullptr) {
    ctrl = _callprojs.fallthrough_catchproj->clone();
    transform_later(ctrl);
    _igvn.replace_node(_callprojs.fallthrough_catchproj, result_region);
  } else {
    ctrl = top();
  }
  Node* slow_result;
  if (_callprojs.resproj == nullptr) {
    // no uses of the allocation result
    slow_result = top();
  } else {
    slow_result = _callprojs.resproj->clone();
    transform_later(slow_result);
    _igvn.replace_node(_callprojs.resproj, result_phi_rawoop);
  }

  // Plug slow-path into result merge point
  result_region->init_req(slow_result_path, ctrl);
  transform_later(result_region);
  if (allocation_has_use) {
    result_phi_rawoop->init_req(slow_result_path, slow_result);
    transform_later(result_phi_rawoop);
  }
  result_phi_rawmem->init_req(slow_result_path, _callprojs.fallthrough_memproj);
  transform_later(result_phi_rawmem);
  transform_later(result_phi_i_o);
  // This completes all paths into the result merge point
}

// Remove alloc node that has no uses.
void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
  Node* ctrl = alloc->in(TypeFunc::Control);
  Node* mem  = alloc->in(TypeFunc::Memory);
  Node* i_o  = alloc->in(TypeFunc::I_O);

  alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
  if (_callprojs.resproj != nullptr) {
    for (DUIterator_Fast imax, i = _callprojs.resproj->fast_outs(imax); i < imax; i++) {
      Node* use = _callprojs.resproj->fast_out(i);
      use->isa_MemBar()->remove(&_igvn);
      --imax;
      --i; // back up iterator
    }
    assert(_callprojs.resproj->outcnt() == 0, "all uses must be deleted");
    _igvn.remove_dead_node(_callprojs.resproj);
  }
  if (_callprojs.fallthrough_catchproj != nullptr) {
    migrate_outs(_callprojs.fallthrough_catchproj, ctrl);
    _igvn.remove_dead_node(_callprojs.fallthrough_catchproj);
  }
  if (_callprojs.catchall_catchproj != nullptr) {
    _igvn.rehash_node_delayed(_callprojs.catchall_catchproj);
    _callprojs.catchall_catchproj->set_req(0, top());
  }
  if (_callprojs.fallthrough_proj != nullptr) {
    Node* catchnode = _callprojs.fallthrough_proj->unique_ctrl_out();
    _igvn.remove_dead_node(catchnode);
    _igvn.remove_dead_node(_callprojs.fallthrough_proj);
  }
  if (_callprojs.fallthrough_memproj != nullptr) {
    migrate_outs(_callprojs.fallthrough_memproj, mem);
    _igvn.remove_dead_node(_callprojs.fallthrough_memproj);
  }
  if (_callprojs.fallthrough_ioproj != nullptr) {
    migrate_outs(_callprojs.fallthrough_ioproj, i_o);
    _igvn.remove_dead_node(_callprojs.fallthrough_ioproj);
  }
  if (_callprojs.catchall_memproj != nullptr) {
    _igvn.rehash_node_delayed(_callprojs.catchall_memproj);
    _callprojs.catchall_memproj->set_req(0, top());
  }
  if (_callprojs.catchall_ioproj != nullptr) {
    _igvn.rehash_node_delayed(_callprojs.catchall_ioproj);
    _callprojs.catchall_ioproj->set_req(0, top());
  }
#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (alloc->is_AllocateArray()) {
      tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
    } else {
      tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
    }
  }
#endif
  _igvn.remove_dead_node(alloc);
}
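
// Illustrative sketch (not code from this file) of the reordering hazard
// that the MemBarStoreStore inserted below prevents:
//
//   obj.field = 42;     // initializing store
//   sharedRef = obj;    // publishing store
//
// Without a StoreStore barrier between the two, the publishing store could
// become visible to another thread before the initializing store, letting
// that thread observe a partially-initialized object.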
void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeNode* init,
                                                Node*& fast_oop_ctrl, Node*& fast_oop_rawmem) {
  // If initialization is performed by an array copy, any required
  // MemBarStoreStore was already added. If the object does not escape,
  // there is no need for a MemBarStoreStore. If the object does not
  // escape in its initializer and a memory barrier (MemBarStoreStore or
  // stronger) is already added at the exit of the initializer, there is
  // also no need for a MemBarStoreStore. Otherwise we need a
  // MemBarStoreStore so that stores initializing this object cannot be
  // reordered with a subsequent store that makes this object accessible
  // to other threads.
  // Other threads include Java threads and JVM internal threads (for
  // example, concurrent GC threads). In the current concurrent GC
  // implementation, G1 will not scan a newly created object, so it is
  // safe to skip the storestore barrier when the allocation does not
  // escape.
  if (!alloc->does_not_escape_thread() &&
      !alloc->is_allocation_MemBar_redundant() &&
      (init == nullptr || !init->is_complete_with_arraycopy())) {
    if (init == nullptr || init->req() < InitializeNode::RawStores) {
      // No InitializeNode or no stores captured by zeroing
      // elimination. Simply add the MemBarStoreStore after object
      // initialization.
      MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
      transform_later(mb);

      mb->init_req(TypeFunc::Memory, fast_oop_rawmem);
      mb->init_req(TypeFunc::Control, fast_oop_ctrl);
      fast_oop_ctrl = new ProjNode(mb, TypeFunc::Control);
      transform_later(fast_oop_ctrl);
      fast_oop_rawmem = new ProjNode(mb, TypeFunc::Memory);
      transform_later(fast_oop_rawmem);
    } else {
      // Add the MemBarStoreStore after the InitializeNode so that
      // all stores performing the initialization that were moved
      // before the InitializeNode happen before the storestore
      // barrier.
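      //
      // Roughly, the rewiring below is:
      //   before:  Init --(ctrl/mem projs)--> users
      //   after:   Init --(new projs)--> MemBarStoreStore --(projs)--> users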

      Node* init_ctrl = init->proj_out_or_null(TypeFunc::Control);
      Node* init_mem  = init->proj_out_or_null(TypeFunc::Memory);

      MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
      transform_later(mb);

      Node* ctrl = new ProjNode(init, TypeFunc::Control);
      transform_later(ctrl);
      Node* mem = new ProjNode(init, TypeFunc::Memory);
      transform_later(mem);

      // The MemBarStoreStore depends on control and memory coming
      // from the InitializeNode
      mb->init_req(TypeFunc::Memory, mem);
      mb->init_req(TypeFunc::Control, ctrl);

      ctrl = new ProjNode(mb, TypeFunc::Control);
      transform_later(ctrl);
      mem = new ProjNode(mb, TypeFunc::Memory);
      transform_later(mem);

      // All nodes that depended on the InitializeNode for control
      // and memory must now depend on the MemBarNode that itself
      // depends on the InitializeNode
      if (init_ctrl != nullptr) {
        _igvn.replace_node(init_ctrl, ctrl);
      }
      if (init_mem != nullptr) {
        _igvn.replace_node(init_mem, mem);
      }
    }
  }
}

void PhaseMacroExpand::expand_dtrace_alloc_probe(AllocateNode* alloc, Node* oop,
                                                 Node*& ctrl, Node*& rawmem) {
  if (C->env()->dtrace_alloc_probes()) {
    // Slow-path call
    int size = TypeFunc::Parms + 2;
    CallLeafNode* call = new CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(),
                                          CAST_FROM_FN_PTR(address,
                                          static_cast<int (*)(JavaThread*, oopDesc*)>(SharedRuntime::dtrace_object_alloc)),
                                          "dtrace_object_alloc",
                                          TypeRawPtr::BOTTOM);

    // Get base of thread-local storage area
    Node* thread = new ThreadLocalNode();
    transform_later(thread);

    call->init_req(TypeFunc::Parms + 0, thread);
    call->init_req(TypeFunc::Parms + 1, oop);
    call->init_req(TypeFunc::Control,   ctrl);
    call->init_req(TypeFunc::I_O,       top()); // does no i/o
    call->init_req(TypeFunc::Memory,    rawmem);
    call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
    call->init_req(TypeFunc::FramePtr,  alloc->in(TypeFunc::FramePtr));
    transform_later(call);
    ctrl = new ProjNode(call, TypeFunc::Control);
    transform_later(ctrl);
    rawmem = new ProjNode(call, TypeFunc::Memory);
    transform_later(rawmem);
  }
}

// Helper for PhaseMacroExpand::expand_allocate_common.
// Initializes the newly-allocated storage.
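// In outline (a summary of the code below):
//   1. store the mark word;
//   2. store the klass pointer (skipped with UseCompactObjectHeaders,
//      where the klass is encoded in the mark word);
//   3. for arrays, store the length field;
//   4. zero the rest of the object, unless captured initializing stores
//      or pre-zeroed TLABs (ZeroTLAB) make that unnecessary.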
Node*
PhaseMacroExpand::initialize_object(AllocateNode* alloc,
                                    Node* control, Node* rawmem, Node* object,
                                    Node* klass_node, Node* length,
                                    Node* size_in_bytes) {
  InitializeNode* init = alloc->initialization();
  // Store the klass & mark bits
  Node* mark_node = alloc->make_ideal_mark(&_igvn, object, control, rawmem);
  if (!mark_node->is_Con()) {
    transform_later(mark_node);
  }
  rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type());

  if (!UseCompactObjectHeaders) {
    rawmem = make_store(control, rawmem, object, Type::klass_offset(), klass_node, T_METADATA);
  }
  int header_size = alloc->minimum_header_size(); // conservatively small

  // Array length
  if (length != nullptr) { // Arrays need length field
    rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
    // conservatively small header size:
    header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
    if (_igvn.type(klass_node)->isa_aryklassptr()) { // we know the exact header size in most cases:
      BasicType elem = _igvn.type(klass_node)->is_klassptr()->as_instance_type()->isa_aryptr()->elem()->array_element_basic_type();
      if (is_reference_type(elem, true)) {
        elem = T_OBJECT;
      }
      header_size = Klass::layout_helper_header_size(Klass::array_layout_helper(elem));
    }
  }

  // Clear the object body, if necessary.
  if (init == nullptr) {
    // The init has somehow disappeared; be cautious and clear everything.
    //
    // This can happen if a node is allocated but an uncommon trap occurs
    // immediately. In this case, the Initialize gets associated with the
    // trap, and may be placed in a different (outer) loop, if the Allocate
    // is in a loop. If (this is rare) the inner loop gets unrolled, then
    // there can be two Allocates to one Initialize. The answer in all these
    // edge cases is safety first. It is always safe to clear immediately
    // within an Allocate, and then (maybe or maybe not) clear some more later.
    if (!(UseTLAB && ZeroTLAB)) {
      rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
                                            header_size, size_in_bytes,
                                            &_igvn);
    }
  } else {
    if (!init->is_complete()) {
      // Try to win by zeroing only what the init does not store.
      // We can also try to do some peephole optimizations,
      // such as combining some adjacent subword stores.
      rawmem = init->complete_stores(control, rawmem, object,
                                     header_size, size_in_bytes, &_igvn);
    }
    // We have no more use for this link, since the AllocateNode goes away:
    init->set_req(InitializeNode::RawAddress, top());
    // (If we keep the link, it just confuses the register allocator,
    //  who thinks he sees a real use of the address by the membar.)
  }

  return rawmem;
}

// Generate prefetch instructions for next allocations.
Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
                                            Node*& contended_phi_rawmem,
                                            Node* old_eden_top, Node* new_eden_top,
                                            intx lines) {
  enum { fall_in_path = 1, pf_path = 2 };
  if (UseTLAB && AllocatePrefetchStyle == 2) {
    // Generate prefetch allocation with watermark check.
    // As an allocation hits the watermark, we will prefetch starting
    // at a "distance" away from watermark.
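    // (The watermark is a per-thread field at JavaThread::tlab_pf_top_offset():
    // when new_eden_top reaches it, it is bumped by AllocatePrefetchDistance
    // and the cache lines starting at the new watermark are prefetched.)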

    Node* pf_region = new RegionNode(3);
    Node* pf_phi_rawmem = new PhiNode(pf_region, Type::MEMORY,
                                      TypeRawPtr::BOTTOM);
    // I/O is used for Prefetch
    Node* pf_phi_abio = new PhiNode(pf_region, Type::ABIO);

    Node* thread = new ThreadLocalNode();
    transform_later(thread);

    Node* eden_pf_adr = new AddPNode(top()/*not oop*/, thread,
                                     _igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset())));
    transform_later(eden_pf_adr);

    Node* old_pf_wm = new LoadPNode(needgc_false,
                                    contended_phi_rawmem, eden_pf_adr,
                                    TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,
                                    MemNode::unordered);
    transform_later(old_pf_wm);

    // check against new_eden_top
    Node* need_pf_cmp = new CmpPNode(new_eden_top, old_pf_wm);
    transform_later(need_pf_cmp);
    Node* need_pf_bol = new BoolNode(need_pf_cmp, BoolTest::ge);
    transform_later(need_pf_bol);
    IfNode* need_pf_iff = new IfNode(needgc_false, need_pf_bol,
                                     PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
    transform_later(need_pf_iff);

    // true node, add prefetch distance
    Node* need_pf_true = new IfTrueNode(need_pf_iff);
    transform_later(need_pf_true);

    Node* need_pf_false = new IfFalseNode(need_pf_iff);
    transform_later(need_pf_false);

    Node* new_pf_wmt = new AddPNode(top(), old_pf_wm,
                                    _igvn.MakeConX(AllocatePrefetchDistance));
    transform_later(new_pf_wmt);
    new_pf_wmt->set_req(0, need_pf_true);

    Node* store_new_wmt = new StorePNode(need_pf_true,
                                         contended_phi_rawmem, eden_pf_adr,
                                         TypeRawPtr::BOTTOM, new_pf_wmt,
                                         MemNode::unordered);
    transform_later(store_new_wmt);

    // adding prefetches
    pf_phi_abio->init_req(fall_in_path, i_o);

    Node* prefetch_adr;
    Node* prefetch;
    uint step_size = AllocatePrefetchStepSize;
    uint distance = 0;

    for (intx i = 0; i < lines; i++) {
      prefetch_adr = new AddPNode(old_pf_wm, new_pf_wmt,
                                  _igvn.MakeConX(distance));
      transform_later(prefetch_adr);
      prefetch = new PrefetchAllocationNode(i_o, prefetch_adr);
      transform_later(prefetch);
      distance += step_size;
      i_o = prefetch;
    }
    pf_phi_abio->set_req(pf_path, i_o);

    pf_region->init_req(fall_in_path, need_pf_false);
    pf_region->init_req(pf_path, need_pf_true);

    pf_phi_rawmem->init_req(fall_in_path, contended_phi_rawmem);
    pf_phi_rawmem->init_req(pf_path, store_new_wmt);

    transform_later(pf_region);
    transform_later(pf_phi_rawmem);
    transform_later(pf_phi_abio);

    needgc_false = pf_region;
    contended_phi_rawmem = pf_phi_rawmem;
    i_o = pf_phi_abio;
  } else if (UseTLAB && AllocatePrefetchStyle == 3) {
    // Insert a prefetch instruction for each allocation.
    // This code is used to generate 1 prefetch instruction per cache line.

    // Generate several prefetch instructions.
    uint step_size = AllocatePrefetchStepSize;
    uint distance = AllocatePrefetchDistance;

    // Next cache address.
    Node* cache_adr = new AddPNode(old_eden_top, old_eden_top,
                                   _igvn.MakeConX(step_size + distance));
    transform_later(cache_adr);
    cache_adr = new CastP2XNode(needgc_false, cache_adr);
    transform_later(cache_adr);
    // The address is aligned so that the prefetch targets the beginning of a
    // cache line (this is important when a BIS instruction is used as the
    // prefetch, as on SPARC).
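    // For example, with AllocatePrefetchStepSize == 64 the mask below is
    // ~63, so cache_adr & mask rounds the address down to a 64-byte
    // cache-line boundary.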
    Node* mask = _igvn.MakeConX(~(intptr_t)(step_size - 1));
    cache_adr = new AndXNode(cache_adr, mask);
    transform_later(cache_adr);
    cache_adr = new CastX2PNode(cache_adr);
    transform_later(cache_adr);

    // Prefetch
    Node* prefetch = new PrefetchAllocationNode(contended_phi_rawmem, cache_adr);
    prefetch->set_req(0, needgc_false);
    transform_later(prefetch);
    contended_phi_rawmem = prefetch;
    Node* prefetch_adr;
    distance = step_size;
    for (intx i = 1; i < lines; i++) {
      prefetch_adr = new AddPNode(cache_adr, cache_adr,
                                  _igvn.MakeConX(distance));
      transform_later(prefetch_adr);
      prefetch = new PrefetchAllocationNode(contended_phi_rawmem, prefetch_adr);
      transform_later(prefetch);
      distance += step_size;
      contended_phi_rawmem = prefetch;
    }
  } else if (AllocatePrefetchStyle > 0) {
    // Insert a prefetch for each allocation only on the fast-path.
    Node* prefetch_adr;
    Node* prefetch;
    // Generate several prefetch instructions.
    uint step_size = AllocatePrefetchStepSize;
    uint distance = AllocatePrefetchDistance;
    for (intx i = 0; i < lines; i++) {
      prefetch_adr = new AddPNode(old_eden_top, new_eden_top,
                                  _igvn.MakeConX(distance));
      transform_later(prefetch_adr);
      prefetch = new PrefetchAllocationNode(i_o, prefetch_adr);
      // Do not let it float too high, since if eden_top == eden_end,
      // both might be null.
      if (i == 0) { // Set control for first prefetch, next follows it
        prefetch->init_req(0, needgc_false);
      }
      transform_later(prefetch);
      distance += step_size;
      i_o = prefetch;
    }
  }
  return i_o;
}


void PhaseMacroExpand::expand_allocate(AllocateNode* alloc) {
  expand_allocate_common(alloc, nullptr,
                         OptoRuntime::new_instance_Type(),
                         OptoRuntime::new_instance_Java(), nullptr);
}

void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode* alloc) {
  Node* length = alloc->in(AllocateNode::ALength);
  Node* valid_length_test = alloc->in(AllocateNode::ValidLengthTest);
  InitializeNode* init = alloc->initialization();
  Node* klass_node = alloc->in(AllocateNode::KlassNode);
  const TypeAryKlassPtr* ary_klass_t = _igvn.type(klass_node)->isa_aryklassptr();
  address slow_call_address; // Address of slow call
  if (init != nullptr && init->is_complete_with_arraycopy() &&
      ary_klass_t && ary_klass_t->elem()->isa_klassptr() == nullptr) {
    // Don't zero a type array during slow allocation in the VM since
    // it will be initialized later by arraycopy in compiled code.
    slow_call_address = OptoRuntime::new_array_nozero_Java();
  } else {
    slow_call_address = OptoRuntime::new_array_Java();
  }
  expand_allocate_common(alloc, length,
                         OptoRuntime::new_array_Type(),
                         slow_call_address, valid_length_test);
}

//-------------------mark_eliminated_box----------------------------------
//
// During EA, obj may point to several objects, but after a few ideal graph
// transformations (CCP) it may point to only one non-escaping object
// (though still through a phi), and the corresponding locks and unlocks
// will then be marked for elimination. Later obj could be replaced with a
// new node (a new phi) which does not carry escape information.
// After some further graph reshaping, other locks and unlocks (which were
// not marked for elimination before) may be connected to this new obj
// (phi), but they still will not be marked for elimination since the new
// obj has no escape information. So mark all associated (same box and obj)
// lock and unlock nodes for elimination if some of them are marked already.
void PhaseMacroExpand::mark_eliminated_box(Node* box, Node* obj) {
  BoxLockNode* oldbox = box->as_BoxLock();
  if (oldbox->is_eliminated()) {
    return; // This BoxLock node was processed already.
  }
  assert(!oldbox->is_unbalanced(), "this should not be called for unbalanced region");
  // New implementation (EliminateNestedLocks) has separate BoxLock
  // node for each locked region so mark all associated locks/unlocks as
  // eliminated even if different objects are referenced in one locked region
  // (for example, OSR compilation of nested loop inside locked scope).
  if (EliminateNestedLocks ||
      oldbox->as_BoxLock()->is_simple_lock_region(nullptr, obj, nullptr)) {
    // Box is used only in one lock region. Mark this box as eliminated.
    oldbox->set_local();      // This verifies correct state of BoxLock
    _igvn.hash_delete(oldbox);
    oldbox->set_eliminated(); // This changes box's hash value
    _igvn.hash_insert(oldbox);

    for (uint i = 0; i < oldbox->outcnt(); i++) {
      Node* u = oldbox->raw_out(i);
      if (u->is_AbstractLock() && !u->as_AbstractLock()->is_non_esc_obj()) {
        AbstractLockNode* alock = u->as_AbstractLock();
        // Check lock's box since box could be referenced by Lock's debug info.
        if (alock->box_node() == oldbox) {
          // Mark eliminated all related locks and unlocks.
#ifdef ASSERT
          alock->log_lock_optimization(C, "eliminate_lock_set_non_esc4");
#endif
          alock->set_non_esc_obj();
        }
      }
    }
    return;
  }

  // Create new "eliminated" BoxLock node and use it in monitor debug info
  // instead of oldbox for the same object.
  BoxLockNode* newbox = oldbox->clone()->as_BoxLock();

  // Note: BoxLock node is marked eliminated only here and it is used
  // to indicate that all associated lock and unlock nodes are marked
  // for elimination.
  newbox->set_local(); // This verifies correct state of BoxLock
  newbox->set_eliminated();
  transform_later(newbox);

  // Replace old box node with new box for all users of the same object.
  for (uint i = 0; i < oldbox->outcnt();) {
    bool next_edge = true;

    Node* u = oldbox->raw_out(i);
    if (u->is_AbstractLock()) {
      AbstractLockNode* alock = u->as_AbstractLock();
      if (alock->box_node() == oldbox && alock->obj_node()->eqv_uncast(obj)) {
        // Replace Box and mark eliminated all related locks and unlocks.
#ifdef ASSERT
        alock->log_lock_optimization(C, "eliminate_lock_set_non_esc5");
#endif
        alock->set_non_esc_obj();
        _igvn.rehash_node_delayed(alock);
        alock->set_box_node(newbox);
        next_edge = false;
      }
    }
    if (u->is_FastLock() && u->as_FastLock()->obj_node()->eqv_uncast(obj)) {
      FastLockNode* flock = u->as_FastLock();
      assert(flock->box_node() == oldbox, "sanity");
      _igvn.rehash_node_delayed(flock);
      flock->set_box_node(newbox);
      next_edge = false;
    }

    // Replace old box in monitor debug info.
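    // (Safepoint debug info records each monitor as an (obj, box) pair;
    // the loop below walks every frame of the JVMState chain and swaps
    // in newbox wherever this monitor appears.)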
    if (u->is_SafePoint() && u->as_SafePoint()->jvms()) {
      SafePointNode* sfn = u->as_SafePoint();
      JVMState* youngest_jvms = sfn->jvms();
      int max_depth = youngest_jvms->depth();
      for (int depth = 1; depth <= max_depth; depth++) {
        JVMState* jvms = youngest_jvms->of_depth(depth);
        int num_mon = jvms->nof_monitors();
        // Loop over monitors
        for (int idx = 0; idx < num_mon; idx++) {
          Node* obj_node = sfn->monitor_obj(jvms, idx);
          Node* box_node = sfn->monitor_box(jvms, idx);
          if (box_node == oldbox && obj_node->eqv_uncast(obj)) {
            int j = jvms->monitor_box_offset(idx);
            _igvn.replace_input_of(u, j, newbox);
            next_edge = false;
          }
        }
      }
    }
    if (next_edge) i++;
  }
}

//-----------------------mark_eliminated_locking_nodes-----------------------
void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode* alock) {
  if (!alock->is_balanced()) {
    return; // Can't do any more elimination for this locking region
  }
  if (EliminateNestedLocks) {
    if (alock->is_nested()) {
      assert(alock->box_node()->as_BoxLock()->is_eliminated(), "sanity");
      return;
    } else if (!alock->is_non_esc_obj()) { // Not eliminated or coarsened
      // Only a Lock node has the JVMState needed here.
      // (Not that the preceding claim is documented anywhere else.)
      if (alock->jvms() != nullptr) {
        if (alock->as_Lock()->is_nested_lock_region()) {
          // Mark eliminated related nested locks and unlocks.
          Node* obj = alock->obj_node();
          BoxLockNode* box_node = alock->box_node()->as_BoxLock();
          assert(!box_node->is_eliminated(), "should not be marked yet");
          // Note: BoxLock node is marked eliminated only here
          // and it is used to indicate that all associated lock
          // and unlock nodes are marked for elimination.
          box_node->set_eliminated(); // Box's hash is always NO_HASH here
          for (uint i = 0; i < box_node->outcnt(); i++) {
            Node* u = box_node->raw_out(i);
            if (u->is_AbstractLock()) {
              alock = u->as_AbstractLock();
              if (alock->box_node() == box_node) {
                // Verify that this Box is referenced only by related locks.
                assert(alock->obj_node()->eqv_uncast(obj), "");
                // Mark all related locks and unlocks.
#ifdef ASSERT
                alock->log_lock_optimization(C, "eliminate_lock_set_nested");
#endif
                alock->set_nested();
              }
            }
          }
        } else {
#ifdef ASSERT
          alock->log_lock_optimization(C, "eliminate_lock_NOT_nested_lock_region");
          if (C->log() != nullptr) {
            alock->as_Lock()->is_nested_lock_region(C); // rerun for debugging output
          }
#endif
        }
      }
      return;
    }
    // Process locks for non escaping object
    assert(alock->is_non_esc_obj(), "");
  } // EliminateNestedLocks

  if (alock->is_non_esc_obj()) { // Lock is used for non escaping object
    // Look for all locks of this object and mark them and
    // corresponding BoxLock nodes as eliminated.
    Node* obj = alock->obj_node();
    for (uint j = 0; j < obj->outcnt(); j++) {
      Node* o = obj->raw_out(j);
      if (o->is_AbstractLock() &&
          o->as_AbstractLock()->obj_node()->eqv_uncast(obj)) {
        alock = o->as_AbstractLock();
        Node* box = alock->box_node();
        // Replace old box node with new eliminated box for all users
        // of the same object and mark related locks as eliminated.
        mark_eliminated_box(box, obj);
      }
    }
  }
}

// We have determined that this lock/unlock can be eliminated; we simply
// eliminate the node without expanding it.
//
// Note: The membars associated with the lock/unlock are currently not
//       eliminated. This should be investigated as a future enhancement.
//
bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode* alock) {
  if (!alock->is_eliminated()) {
    return false;
  }
#ifdef ASSERT
  if (!alock->is_coarsened()) {
    // Check that new "eliminated" BoxLock node is created.
    BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
    assert(oldbox->is_eliminated(), "should be done already");
  }
#endif

  alock->log_lock_optimization(C, "eliminate_lock");

#ifndef PRODUCT
  if (PrintEliminateLocks) {
    tty->print_cr("++++ Eliminated: %d %s '%s'", alock->_idx, (alock->is_Lock() ? "Lock" : "Unlock"), alock->kind_as_string());
  }
#endif

  Node* mem  = alock->in(TypeFunc::Memory);
  Node* ctrl = alock->in(TypeFunc::Control);
  guarantee(ctrl != nullptr, "missing control projection, cannot replace_node() with null");

  alock->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
  // There are 2 projections from the lock. The lock node will
  // be deleted when its last use is subsumed below.
  assert(alock->outcnt() == 2 &&
         _callprojs.fallthrough_proj != nullptr &&
         _callprojs.fallthrough_memproj != nullptr,
         "Unexpected projections from Lock/Unlock");

  Node* fallthroughproj = _callprojs.fallthrough_proj;
  Node* memproj_fallthrough = _callprojs.fallthrough_memproj;

  // The memory projection from a lock/unlock is RawMem
  // The input to a Lock is merged memory, so extract its RawMem input
  // (unless the MergeMem has been optimized away.)
  if (alock->is_Lock()) {
    // Search for MemBarAcquireLock node and delete it also.
    MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
    assert(membar != nullptr && membar->Opcode() == Op_MemBarAcquireLock, "");
    Node* ctrlproj = membar->proj_out(TypeFunc::Control);
    Node* memproj  = membar->proj_out(TypeFunc::Memory);
    _igvn.replace_node(ctrlproj, fallthroughproj);
    _igvn.replace_node(memproj, memproj_fallthrough);

    // Delete FastLock node also if this Lock node is unique user
    // (a loop peeling may clone a Lock node).
    Node* flock = alock->as_Lock()->fastlock_node();
    if (flock->outcnt() == 1) {
      assert(flock->unique_out() == alock, "sanity");
      _igvn.replace_node(flock, top());
    }
  }

  // Search for MemBarReleaseLock node and delete it also.
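  // (A Lock is followed by its MemBarAcquireLock, handled above, while an
  // Unlock is preceded by its MemBarReleaseLock; both barriers become
  // redundant once the locking itself has been eliminated.)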
  if (alock->is_Unlock() && ctrl->is_Proj() && ctrl->in(0)->is_MemBar()) {
    MemBarNode* membar = ctrl->in(0)->as_MemBar();
    assert(membar->Opcode() == Op_MemBarReleaseLock &&
           mem->is_Proj() && membar == mem->in(0), "");
    _igvn.replace_node(fallthroughproj, ctrl);
    _igvn.replace_node(memproj_fallthrough, mem);
    fallthroughproj = ctrl;
    memproj_fallthrough = mem;
    ctrl = membar->in(TypeFunc::Control);
    mem  = membar->in(TypeFunc::Memory);
  }

  _igvn.replace_node(fallthroughproj, ctrl);
  _igvn.replace_node(memproj_fallthrough, mem);
  return true;
}


//------------------------------expand_lock_node----------------------
void PhaseMacroExpand::expand_lock_node(LockNode* lock) {

  Node* ctrl  = lock->in(TypeFunc::Control);
  Node* mem   = lock->in(TypeFunc::Memory);
  Node* obj   = lock->obj_node();
  Node* box   = lock->box_node();
  Node* flock = lock->fastlock_node();

  assert(!box->as_BoxLock()->is_eliminated(), "sanity");

  // Make the merge point
  Node* region;
  Node* mem_phi;
  Node* slow_path;

  region = new RegionNode(3);
  // create a Phi for the memory state
  mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);

  // Optimize test; set region slot 2
  slow_path = opt_bits_test(ctrl, region, 2, flock, 0, 0);
  mem_phi->init_req(2, mem);

  // Make slow path call
  CallNode* call = make_slow_call(lock, OptoRuntime::complete_monitor_enter_Type(),
                                  OptoRuntime::complete_monitor_locking_Java(), nullptr, slow_path,
                                  obj, box, nullptr);

  call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);

  // Slow path can only throw asynchronous exceptions, which are always
  // de-opted. So the compiler thinks the slow-call can never throw an
  // exception. If it DOES throw an exception we would need the debug
  // info removed first (since if it throws there is no monitor).
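  // Hence only the two fall-through projections (control and memory) are
  // expected on the slow call; the asserts below check that no io or
  // catch-all projections were created.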
  assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr &&
         _callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock");

  // Capture slow path
  // disconnect fall-through projection from call and create a new one
  // hook up users of fall-through projection to region
  Node* slow_ctrl = _callprojs.fallthrough_proj->clone();
  transform_later(slow_ctrl);
  _igvn.hash_delete(_callprojs.fallthrough_proj);
  _callprojs.fallthrough_proj->disconnect_inputs(C);
  region->init_req(1, slow_ctrl);
  // region inputs are now complete
  transform_later(region);
  _igvn.replace_node(_callprojs.fallthrough_proj, region);

  Node* memproj = transform_later(new ProjNode(call, TypeFunc::Memory));

  mem_phi->init_req(1, memproj);

  transform_later(mem_phi);

  _igvn.replace_node(_callprojs.fallthrough_memproj, mem_phi);
}

//------------------------------expand_unlock_node----------------------
void PhaseMacroExpand::expand_unlock_node(UnlockNode* unlock) {

  Node* ctrl = unlock->in(TypeFunc::Control);
  Node* mem  = unlock->in(TypeFunc::Memory);
  Node* obj  = unlock->obj_node();
  Node* box  = unlock->box_node();

  assert(!box->as_BoxLock()->is_eliminated(), "sanity");

  // No need for a null check on unlock

  // Make the merge point
  Node* region;
  Node* mem_phi;

  region = new RegionNode(3);
  // create a Phi for the memory state
  mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);

  FastUnlockNode* funlock = new FastUnlockNode(ctrl, obj, box);
  funlock = transform_later(funlock)->as_FastUnlock();
  // Optimize test; set region slot 2
  Node* slow_path = opt_bits_test(ctrl, region, 2, funlock, 0, 0);
  Node* thread = transform_later(new ThreadLocalNode());

  CallNode* call = make_slow_call((CallNode*)unlock, OptoRuntime::complete_monitor_exit_Type(),
                                  CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C),
                                  "complete_monitor_unlocking_C", slow_path, obj, box, thread);

  call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
  assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr &&
         _callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock");

  // No exceptions for unlocking
  // Capture slow path
  // disconnect fall-through projection from call and create a new one
  // hook up users of fall-through projection to region
  Node* slow_ctrl = _callprojs.fallthrough_proj->clone();
  transform_later(slow_ctrl);
  _igvn.hash_delete(_callprojs.fallthrough_proj);
  _callprojs.fallthrough_proj->disconnect_inputs(C);
  region->init_req(1, slow_ctrl);
  // region inputs are now complete
  transform_later(region);
  _igvn.replace_node(_callprojs.fallthrough_proj, region);

  Node* memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
  mem_phi->init_req(1, memproj);
  mem_phi->init_req(2, mem);
  transform_later(mem_phi);

  _igvn.replace_node(_callprojs.fallthrough_memproj, mem_phi);
}

void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode* check) {
  assert(check->in(SubTypeCheckNode::Control) == nullptr, "should be pinned");
  Node* bol = check->unique_out();
  Node* obj_or_subklass = check->in(SubTypeCheckNode::ObjOrSubKlass);
  Node* superklass = check->in(SubTypeCheckNode::SuperKlass);
  assert(bol->is_Bool() && bol->as_Bool()->_test._test == BoolTest::ne, "unexpected bool node");

  for (DUIterator_Last imin, i = bol->last_outs(imin); i >= imin; --i) {
    Node* iff = bol->last_out(i);
    assert(iff->is_If(), "where's the if?");

    if (iff->in(0)->is_top()) {
      _igvn.replace_input_of(iff, 1, C->top());
      continue;
    }

    Node* iftrue = iff->as_If()->proj_out(1);
    Node* iffalse = iff->as_If()->proj_out(0);
    Node* ctrl = iff->in(0);

    Node* subklass = nullptr;
    if (_igvn.type(obj_or_subklass)->isa_klassptr()) {
      subklass = obj_or_subklass;
    } else {
      Node* k_adr = basic_plus_adr(obj_or_subklass, Type::klass_offset());
      subklass = _igvn.transform(LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), k_adr, TypeInstPtr::KLASS));
    }

    Node* not_subtype_ctrl = Phase::gen_subtype_check(subklass, superklass, &ctrl, nullptr, _igvn, check->method(), check->bci());

    _igvn.replace_input_of(iff, 0, C->top());
    _igvn.replace_node(iftrue, not_subtype_ctrl);
    _igvn.replace_node(iffalse, ctrl);
  }
  _igvn.replace_node(check, C->top());
}

//---------------------------eliminate_macro_nodes----------------------
// Eliminate scalar replaced allocations and associated locks.
void PhaseMacroExpand::eliminate_macro_nodes() {
  if (C->macro_count() == 0) {
    return;
  }
  NOT_PRODUCT(int membar_before = count_MemBar(C);)

  // Before elimination, we may re-mark (change to Nested or NonEscObj)
  // all associated (same box and obj) lock and unlock nodes.
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node* n = C->macro_node(i);
    if (n->is_AbstractLock()) { // Lock and Unlock nodes
      mark_eliminated_locking_nodes(n->as_AbstractLock());
    }
  }
  // Re-marking may break consistency of Coarsened locks.
  if (!C->coarsened_locks_consistent()) {
    return; // recompile without Coarsened locks if broken
  } else {
    // After coarsened locks are eliminated, locking regions
    // become unbalanced. We should not run any more lock
    // elimination optimizations on them.
    C->mark_unbalanced_boxes();
  }

  // First, attempt to eliminate locks
  bool progress = true;
  while (progress) {
    progress = false;
    for (int i = C->macro_count(); i > 0; i = MIN2(i - 1, C->macro_count())) { // more than 1 element can be eliminated at once
      Node* n = C->macro_node(i - 1);
      bool success = false;
      DEBUG_ONLY(int old_macro_count = C->macro_count();)
      if (n->is_AbstractLock()) {
        success = eliminate_locking_node(n->as_AbstractLock());
#ifndef PRODUCT
        if (success && PrintOptoStatistics) {
          Atomic::inc(&PhaseMacroExpand::_monitor_objects_removed_counter);
        }
#endif
      }
      assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
      progress = progress || success;
    }
  }
  // Next, attempt to eliminate allocations
  _has_locks = false;
  progress = true;
  while (progress) {
    progress = false;
    for (int i = C->macro_count(); i > 0; i = MIN2(i - 1, C->macro_count())) { // more than 1 element can be eliminated at once
      Node* n = C->macro_node(i - 1);
      bool success = false;
      DEBUG_ONLY(int old_macro_count = C->macro_count();)
      switch (n->class_id()) {
      case Node::Class_Allocate:
      case Node::Class_AllocateArray:
        success = eliminate_allocate_node(n->as_Allocate());
#ifndef PRODUCT
        if (success && PrintOptoStatistics) {
          Atomic::inc(&PhaseMacroExpand::_objs_scalar_replaced_counter);
        }
#endif
        break;
      case Node::Class_CallStaticJava:
        success = eliminate_boxing_node(n->as_CallStaticJava());
        break;
      case Node::Class_Lock:
      case Node::Class_Unlock:
        assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
        _has_locks = true;
        break;
      case Node::Class_ArrayCopy:
        break;
      case Node::Class_OuterStripMinedLoop:
        break;
      case Node::Class_SubTypeCheck:
        break;
      case Node::Class_Opaque1:
        break;
      default:
        assert(n->Opcode() == Op_LoopLimit ||
               n->Opcode() == Op_ModD ||
               n->Opcode() == Op_ModF ||
               n->is_OpaqueNotNull() ||
               n->is_OpaqueInitializedAssertionPredicate() ||
               n->Opcode() == Op_MaxL ||
               n->Opcode() == Op_MinL ||
               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(n),
               "unknown node type in macro list");
      }
      assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
      progress = progress || success;
    }
  }
#ifndef PRODUCT
  if (PrintOptoStatistics) {
    int membar_after = count_MemBar(C);
    Atomic::add(&PhaseMacroExpand::_memory_barriers_removed_counter, membar_before - membar_after);
  }
#endif
}

//------------------------------expand_macro_nodes----------------------
// Returns true if a failure occurred.
bool PhaseMacroExpand::expand_macro_nodes() {
  // Do not allow new macro nodes once we started to expand
  C->reset_allow_macro_nodes();
  if (StressMacroExpansion) {
    C->shuffle_macro_nodes();
  }
  // Last attempt to eliminate macro nodes.
  eliminate_macro_nodes();
  if (C->failing()) {
    return true;
  }

  // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
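  // (A quick map of the branches below: LoopLimit and CallStaticJava nodes
  // are handed back to IGVN; Opaque* nodes collapse to their input in debug
  // builds or to a constant in product builds; MaxL/MinL are rewritten to
  // CMoveL constructs since the backend does not implement them.)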
  bool progress = true;
  while (progress) {
    progress = false;
    for (int i = C->macro_count(); i > 0; i--) {
      Node* n = C->macro_node(i - 1);
      bool success = false;
      DEBUG_ONLY(int old_macro_count = C->macro_count();)
      if (n->Opcode() == Op_LoopLimit) {
        // Remove it from macro list and put on IGVN worklist to optimize.
        C->remove_macro_node(n);
        _igvn._worklist.push(n);
        success = true;
      } else if (n->Opcode() == Op_CallStaticJava) {
        // Remove it from macro list and put on IGVN worklist to optimize.
        C->remove_macro_node(n);
        _igvn._worklist.push(n);
        success = true;
      } else if (n->is_Opaque1()) {
        _igvn.replace_node(n, n->in(1));
        success = true;
      } else if (n->is_OpaqueNotNull()) {
        // Tests with OpaqueNotNull nodes are implicitly known to be true. Replace the node with true. In debug builds,
        // we leave the test in the graph to have an additional sanity check at runtime. If the test fails (i.e. a bug),
        // we will execute a Halt node.
#ifdef ASSERT
        _igvn.replace_node(n, n->in(1));
#else
        _igvn.replace_node(n, _igvn.intcon(1));
#endif
        success = true;
      } else if (n->is_OpaqueInitializedAssertionPredicate()) {
        // Initialized Assertion Predicates must always evaluate to true. Therefore, we get rid of them in product
        // builds as they are useless. In debug builds we keep them as additional verification code. Even though
        // loop opts are already over, we want to keep Initialized Assertion Predicates alive as long as possible to
        // enable folding of dead control paths within which cast nodes become top due to impossible types - even
        // after loop opts are over. Therefore, we delay the removal of these opaque nodes until now.
#ifdef ASSERT
        _igvn.replace_node(n, n->in(1));
#else
        _igvn.replace_node(n, _igvn.intcon(1));
#endif // ASSERT
        success = true;
      } else if (n->Opcode() == Op_OuterStripMinedLoop) {
        n->as_OuterStripMinedLoop()->adjust_strip_mined_loop(&_igvn);
        C->remove_macro_node(n);
        success = true;
      } else if (n->Opcode() == Op_MaxL) {
        // Since MaxL and MinL are not implemented in the backend, we expand them to
        // a CMoveL construct now. At least until here, the type could be computed
        // precisely. CMoveL is not so smart, but we can give it at least the best
        // type we know about n now.
        Node* repl = MaxNode::signed_max(n->in(1), n->in(2), _igvn.type(n), _igvn);
        _igvn.replace_node(n, repl);
        success = true;
      } else if (n->Opcode() == Op_MinL) {
        Node* repl = MaxNode::signed_min(n->in(1), n->in(2), _igvn.type(n), _igvn);
        _igvn.replace_node(n, repl);
        success = true;
      }
      assert(!success || (C->macro_count() == (old_macro_count - 1)), "elimination must have deleted one node from macro list");
      progress = progress || success;
      if (success) {
        C->print_method(PHASE_AFTER_MACRO_EXPANSION_STEP, 5, n);
      }
    }
  }

  // Clean up the graph so we're less likely to hit the maximum node
  // limit
  _igvn.set_delay_transform(false);
  _igvn.optimize();
  if (C->failing()) {
    return true;
  }
  _igvn.set_delay_transform(true);


  // Because we run IGVN after each expansion, some macro nodes may go
  // dead and be removed from the list as we iterate over it.
Move 2546 // Allocate nodes (processed in a second pass) at the beginning of 2547 // the list and then iterate from the last element of the list until 2548 // an Allocate node is seen. This is robust to random deletion in 2549 // the list due to nodes going dead. 2550 C->sort_macro_nodes(); 2551 2552 // expand arraycopy "macro" nodes first 2553 // For ReduceBulkZeroing, we must first process all arraycopy nodes 2554 // before the allocate nodes are expanded. 2555 while (C->macro_count() > 0) { 2556 int macro_count = C->macro_count(); 2557 Node * n = C->macro_node(macro_count-1); 2558 assert(n->is_macro(), "only macro nodes expected here"); 2559 if (_igvn.type(n) == Type::TOP || (n->in(0) != nullptr && n->in(0)->is_top())) { 2560 // node is unreachable, so don't try to expand it 2561 C->remove_macro_node(n); 2562 continue; 2563 } 2564 if (n->is_Allocate()) { 2565 break; 2566 } 2567 // Make sure expansion will not cause node limit to be exceeded. 2568 // Worst case is a macro node gets expanded into about 200 nodes. 2569 // Allow 50% more for optimization. 2570 if (C->check_node_count(300, "out of nodes before macro expansion")) { 2571 return true; 2572 } 2573 2574 DEBUG_ONLY(int old_macro_count = C->macro_count();) 2575 switch (n->class_id()) { 2576 case Node::Class_Lock: 2577 expand_lock_node(n->as_Lock()); 2578 break; 2579 case Node::Class_Unlock: 2580 expand_unlock_node(n->as_Unlock()); 2581 break; 2582 case Node::Class_ArrayCopy: 2583 expand_arraycopy_node(n->as_ArrayCopy()); 2584 break; 2585 case Node::Class_SubTypeCheck: 2586 expand_subtypecheck_node(n->as_SubTypeCheck()); 2587 break; 2588 default: 2589 switch (n->Opcode()) { 2590 case Op_ModD: 2591 case Op_ModF: { 2592 bool is_drem = n->Opcode() == Op_ModD; 2593 CallNode* mod_macro = n->as_Call(); 2594 CallNode* call = new CallLeafNode(mod_macro->tf(), 2595 is_drem ? CAST_FROM_FN_PTR(address, SharedRuntime::drem) 2596 : CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2597 is_drem ? "drem" : "frem", TypeRawPtr::BOTTOM); 2598 call->init_req(TypeFunc::Control, mod_macro->in(TypeFunc::Control)); 2599 call->init_req(TypeFunc::I_O, mod_macro->in(TypeFunc::I_O)); 2600 call->init_req(TypeFunc::Memory, mod_macro->in(TypeFunc::Memory)); 2601 call->init_req(TypeFunc::ReturnAdr, mod_macro->in(TypeFunc::ReturnAdr)); 2602 call->init_req(TypeFunc::FramePtr, mod_macro->in(TypeFunc::FramePtr)); 2603 for (unsigned int i = 0; i < mod_macro->tf()->domain()->cnt() - TypeFunc::Parms; i++) { 2604 call->init_req(TypeFunc::Parms + i, mod_macro->in(TypeFunc::Parms + i)); 2605 } 2606 _igvn.replace_node(mod_macro, call); 2607 transform_later(call); 2608 break; 2609 } 2610 default: 2611 assert(false, "unknown node type in macro list"); 2612 } 2613 } 2614 assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list"); 2615 if (C->failing()) return true; 2616 C->print_method(PHASE_AFTER_MACRO_EXPANSION_STEP, 5, n); 2617 2618 // Clean up the graph so we're less likely to hit the maximum node 2619 // limit 2620 _igvn.set_delay_transform(false); 2621 _igvn.optimize(); 2622 if (C->failing()) return true; 2623 _igvn.set_delay_transform(true); 2624 } 2625 2626 // All nodes except Allocate nodes are expanded now. There could be 2627 // new optimization opportunities (such as folding newly created 2628 // load from a just allocated object). Run IGVN. 

  // Expand the remaining Allocate "macro" nodes;
  // nodes are removed from the macro list as they are processed.
  while (C->macro_count() > 0) {
    int macro_count = C->macro_count();
    Node* n = C->macro_node(macro_count - 1);
    assert(n->is_macro(), "only macro nodes expected here");
    if (_igvn.type(n) == Type::TOP || (n->in(0) != nullptr && n->in(0)->is_top())) {
      // node is unreachable, so don't try to expand it
      C->remove_macro_node(n);
      continue;
    }
    // Make sure expansion will not cause node limit to be exceeded.
    // Worst case is a macro node gets expanded into about 200 nodes.
    // Allow 50% more for optimization.
    if (C->check_node_count(300, "out of nodes before macro expansion")) {
      return true;
    }
    switch (n->class_id()) {
    case Node::Class_Allocate:
      expand_allocate(n->as_Allocate());
      break;
    case Node::Class_AllocateArray:
      expand_allocate_array(n->as_AllocateArray());
      break;
    default:
      assert(false, "unknown node type in macro list");
    }
    assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
    if (C->failing()) {
      return true;
    }
    C->print_method(PHASE_AFTER_MACRO_EXPANSION_STEP, 5, n);

    // Clean up the graph so we're less likely to hit the maximum node
    // limit
    _igvn.set_delay_transform(false);
    _igvn.optimize();
    if (C->failing()) {
      return true;
    }
    _igvn.set_delay_transform(true);
  }

  _igvn.set_delay_transform(false);
  return false;
}

#ifndef PRODUCT
int PhaseMacroExpand::_objs_scalar_replaced_counter = 0;
int PhaseMacroExpand::_monitor_objects_removed_counter = 0;
int PhaseMacroExpand::_GC_barriers_removed_counter = 0;
int PhaseMacroExpand::_memory_barriers_removed_counter = 0;

void PhaseMacroExpand::print_statistics() {
  tty->print("Objects scalar replaced = %d, ", Atomic::load(&_objs_scalar_replaced_counter));
  tty->print("Monitor objects removed = %d, ", Atomic::load(&_monitor_objects_removed_counter));
  tty->print("GC barriers removed = %d, ", Atomic::load(&_GC_barriers_removed_counter));
  tty->print_cr("Memory barriers removed = %d", Atomic::load(&_memory_barriers_removed_counter));
}

int PhaseMacroExpand::count_MemBar(Compile* C) {
  if (!PrintOptoStatistics) {
    return 0;
  }
  Unique_Node_List ideal_nodes;
  int total = 0;
  ideal_nodes.map(C->live_nodes(), nullptr);
  ideal_nodes.push(C->root());
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    if (n->is_MemBar()) {
      total++;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);
      ideal_nodes.push(m);
    }
  }
  return total;
}
#endif