1 /* 2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "ci/bcEscapeAnalyzer.hpp" 26 #include "ci/ciFlatArrayKlass.hpp" 27 #include "ci/ciSymbols.hpp" 28 #include "code/vmreg.hpp" 29 #include "compiler/compileLog.hpp" 30 #include "compiler/oopMap.hpp" 31 #include "gc/shared/barrierSet.hpp" 32 #include "gc/shared/c2/barrierSetC2.hpp" 33 #include "interpreter/interpreter.hpp" 34 #include "opto/callGenerator.hpp" 35 #include "opto/callnode.hpp" 36 #include "opto/castnode.hpp" 37 #include "opto/convertnode.hpp" 38 #include "opto/escape.hpp" 39 #include "opto/inlinetypenode.hpp" 40 #include "opto/locknode.hpp" 41 #include "opto/machnode.hpp" 42 #include "opto/matcher.hpp" 43 #include "opto/memnode.hpp" 44 #include "opto/movenode.hpp" 45 #include "opto/parse.hpp" 46 #include "opto/regalloc.hpp" 47 #include "opto/regmask.hpp" 48 #include "opto/rootnode.hpp" 49 #include "opto/runtime.hpp" 50 #include "opto/type.hpp" 51 #include "runtime/arguments.hpp" 52 #include "runtime/sharedRuntime.hpp" 53 #include "runtime/stubRoutines.hpp" 54 #include "utilities/powerOfTwo.hpp" 55 56 // Portions of code courtesy of Clifford Click 57 58 // Optimization - Graph Style 59 60 //============================================================================= 61 uint StartNode::size_of() const { return sizeof(*this); } 62 bool StartNode::cmp( const Node &n ) const 63 { return _domain == ((StartNode&)n)._domain; } 64 const Type *StartNode::bottom_type() const { return _domain; } 65 const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; } 66 #ifndef PRODUCT 67 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);} 68 void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ } 69 #endif 70 71 //------------------------------Ideal------------------------------------------ 72 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){ 73 return remove_dead_region(phase, can_reshape) ? this : nullptr; 74 } 75 76 //------------------------------calling_convention----------------------------- 77 void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const { 78 SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt); 79 } 80 81 //------------------------------Registers-------------------------------------- 82 const RegMask &StartNode::in_RegMask(uint) const { 83 return RegMask::EMPTY; 84 } 85 86 //------------------------------match------------------------------------------ 87 // Construct projections for incoming parameters, and their RegMask info 88 Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) { 89 switch (proj->_con) { 90 case TypeFunc::Control: 91 case TypeFunc::I_O: 92 case TypeFunc::Memory: 93 return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj); 94 case TypeFunc::FramePtr: 95 return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP); 96 case TypeFunc::ReturnAdr: 97 return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP); 98 case TypeFunc::Parms: 99 default: { 100 uint parm_num = proj->_con - TypeFunc::Parms; 101 const Type *t = _domain->field_at(proj->_con); 102 if (t->base() == Type::Half) // 2nd half of Longs and Doubles 103 return new ConNode(Type::TOP); 104 uint ideal_reg = t->ideal_reg(); 105 RegMask &rm = match->_calling_convention_mask[parm_num]; 106 return new MachProjNode(this,proj->_con,rm,ideal_reg); 107 } 108 } 109 return nullptr; 110 } 111 112 //============================================================================= 113 const char * const ParmNode::names[TypeFunc::Parms+1] = { 114 "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms" 115 }; 116 117 #ifndef PRODUCT 118 void ParmNode::dump_spec(outputStream *st) const { 119 if( _con < TypeFunc::Parms ) { 120 st->print("%s", names[_con]); 121 } else { 122 st->print("Parm%d: ",_con-TypeFunc::Parms); 123 // Verbose and WizardMode dump bottom_type for all nodes 124 if( !Verbose && !WizardMode ) bottom_type()->dump_on(st); 125 } 126 } 127 128 void ParmNode::dump_compact_spec(outputStream *st) const { 129 if (_con < TypeFunc::Parms) { 130 st->print("%s", names[_con]); 131 } else { 132 st->print("%d:", _con-TypeFunc::Parms); 133 // unconditionally dump bottom_type 134 bottom_type()->dump_on(st); 135 } 136 } 137 #endif 138 139 uint ParmNode::ideal_reg() const { 140 switch( _con ) { 141 case TypeFunc::Control : // fall through 142 case TypeFunc::I_O : // fall through 143 case TypeFunc::Memory : return 0; 144 case TypeFunc::FramePtr : // fall through 145 case TypeFunc::ReturnAdr: return Op_RegP; 146 default : assert( _con > TypeFunc::Parms, "" ); 147 // fall through 148 case TypeFunc::Parms : { 149 // Type of argument being passed 150 const Type *t = in(0)->as_Start()->_domain->field_at(_con); 151 return t->ideal_reg(); 152 } 153 } 154 ShouldNotReachHere(); 155 return 0; 156 } 157 158 //============================================================================= 159 ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) { 160 init_req(TypeFunc::Control,cntrl); 161 init_req(TypeFunc::I_O,i_o); 162 init_req(TypeFunc::Memory,memory); 163 init_req(TypeFunc::FramePtr,frameptr); 164 init_req(TypeFunc::ReturnAdr,retadr); 165 } 166 167 Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){ 168 return remove_dead_region(phase, can_reshape) ? this : nullptr; 169 } 170 171 const Type* ReturnNode::Value(PhaseGVN* phase) const { 172 return ( phase->type(in(TypeFunc::Control)) == Type::TOP) 173 ? Type::TOP 174 : Type::BOTTOM; 175 } 176 177 // Do we Match on this edge index or not? No edges on return nodes 178 uint ReturnNode::match_edge(uint idx) const { 179 return 0; 180 } 181 182 183 #ifndef PRODUCT 184 void ReturnNode::dump_req(outputStream *st, DumpConfig* dc) const { 185 // Dump the required inputs, after printing "returns" 186 uint i; // Exit value of loop 187 for (i = 0; i < req(); i++) { // For all required inputs 188 if (i == TypeFunc::Parms) st->print("returns "); 189 Node* p = in(i); 190 if (p != nullptr) { 191 p->dump_idx(false, st, dc); 192 st->print(" "); 193 } else { 194 st->print("_ "); 195 } 196 } 197 } 198 #endif 199 200 //============================================================================= 201 RethrowNode::RethrowNode( 202 Node* cntrl, 203 Node* i_o, 204 Node* memory, 205 Node* frameptr, 206 Node* ret_adr, 207 Node* exception 208 ) : Node(TypeFunc::Parms + 1) { 209 init_req(TypeFunc::Control , cntrl ); 210 init_req(TypeFunc::I_O , i_o ); 211 init_req(TypeFunc::Memory , memory ); 212 init_req(TypeFunc::FramePtr , frameptr ); 213 init_req(TypeFunc::ReturnAdr, ret_adr); 214 init_req(TypeFunc::Parms , exception); 215 } 216 217 Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){ 218 return remove_dead_region(phase, can_reshape) ? this : nullptr; 219 } 220 221 const Type* RethrowNode::Value(PhaseGVN* phase) const { 222 return (phase->type(in(TypeFunc::Control)) == Type::TOP) 223 ? Type::TOP 224 : Type::BOTTOM; 225 } 226 227 uint RethrowNode::match_edge(uint idx) const { 228 return 0; 229 } 230 231 #ifndef PRODUCT 232 void RethrowNode::dump_req(outputStream *st, DumpConfig* dc) const { 233 // Dump the required inputs, after printing "exception" 234 uint i; // Exit value of loop 235 for (i = 0; i < req(); i++) { // For all required inputs 236 if (i == TypeFunc::Parms) st->print("exception "); 237 Node* p = in(i); 238 if (p != nullptr) { 239 p->dump_idx(false, st, dc); 240 st->print(" "); 241 } else { 242 st->print("_ "); 243 } 244 } 245 } 246 #endif 247 248 //============================================================================= 249 // Do we Match on this edge index or not? Match only target address & method 250 uint TailCallNode::match_edge(uint idx) const { 251 return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1; 252 } 253 254 //============================================================================= 255 // Do we Match on this edge index or not? Match only target address & oop 256 uint TailJumpNode::match_edge(uint idx) const { 257 return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1; 258 } 259 260 //============================================================================= 261 JVMState::JVMState(ciMethod* method, JVMState* caller) : 262 _method(method), 263 _receiver_info(nullptr) { 264 assert(method != nullptr, "must be valid call site"); 265 _bci = InvocationEntryBci; 266 _reexecute = Reexecute_Undefined; 267 DEBUG_ONLY(_bci = -99); // random garbage value 268 DEBUG_ONLY(_map = (SafePointNode*)-1); 269 _caller = caller; 270 _depth = 1 + (caller == nullptr ? 0 : caller->depth()); 271 _locoff = TypeFunc::Parms; 272 _stkoff = _locoff + _method->max_locals(); 273 _monoff = _stkoff + _method->max_stack(); 274 _scloff = _monoff; 275 _endoff = _monoff; 276 _sp = 0; 277 } 278 JVMState::JVMState(int stack_size) : 279 _method(nullptr), 280 _receiver_info(nullptr) { 281 _bci = InvocationEntryBci; 282 _reexecute = Reexecute_Undefined; 283 DEBUG_ONLY(_map = (SafePointNode*)-1); 284 _caller = nullptr; 285 _depth = 1; 286 _locoff = TypeFunc::Parms; 287 _stkoff = _locoff; 288 _monoff = _stkoff + stack_size; 289 _scloff = _monoff; 290 _endoff = _monoff; 291 _sp = 0; 292 } 293 294 //--------------------------------of_depth------------------------------------- 295 JVMState* JVMState::of_depth(int d) const { 296 const JVMState* jvmp = this; 297 assert(0 < d && (uint)d <= depth(), "oob"); 298 for (int skip = depth() - d; skip > 0; skip--) { 299 jvmp = jvmp->caller(); 300 } 301 assert(jvmp->depth() == (uint)d, "found the right one"); 302 return (JVMState*)jvmp; 303 } 304 305 //-----------------------------same_calls_as----------------------------------- 306 bool JVMState::same_calls_as(const JVMState* that) const { 307 if (this == that) return true; 308 if (this->depth() != that->depth()) return false; 309 const JVMState* p = this; 310 const JVMState* q = that; 311 for (;;) { 312 if (p->_method != q->_method) return false; 313 if (p->_method == nullptr) return true; // bci is irrelevant 314 if (p->_bci != q->_bci) return false; 315 if (p->_reexecute != q->_reexecute) return false; 316 p = p->caller(); 317 q = q->caller(); 318 if (p == q) return true; 319 assert(p != nullptr && q != nullptr, "depth check ensures we don't run off end"); 320 } 321 } 322 323 //------------------------------debug_start------------------------------------ 324 uint JVMState::debug_start() const { 325 DEBUG_ONLY(JVMState* jvmroot = of_depth(1)); 326 assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last"); 327 return of_depth(1)->locoff(); 328 } 329 330 //-------------------------------debug_end------------------------------------- 331 uint JVMState::debug_end() const { 332 DEBUG_ONLY(JVMState* jvmroot = of_depth(1)); 333 assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last"); 334 return endoff(); 335 } 336 337 //------------------------------debug_depth------------------------------------ 338 uint JVMState::debug_depth() const { 339 uint total = 0; 340 for (const JVMState* jvmp = this; jvmp != nullptr; jvmp = jvmp->caller()) { 341 total += jvmp->debug_size(); 342 } 343 return total; 344 } 345 346 #ifndef PRODUCT 347 348 //------------------------------format_helper---------------------------------- 349 // Given an allocation (a Chaitin object) and a Node decide if the Node carries 350 // any defined value or not. If it does, print out the register or constant. 351 static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) { 352 if (n == nullptr) { st->print(" null"); return; } 353 if (n->is_SafePointScalarObject()) { 354 // Scalar replacement. 355 SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject(); 356 scobjs->append_if_missing(spobj); 357 int sco_n = scobjs->find(spobj); 358 assert(sco_n >= 0, ""); 359 st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n); 360 return; 361 } 362 if (regalloc->node_regs_max_index() > 0 && 363 OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined 364 char buf[50]; 365 regalloc->dump_register(n,buf,sizeof(buf)); 366 st->print(" %s%d]=%s",msg,i,buf); 367 } else { // No register, but might be constant 368 const Type *t = n->bottom_type(); 369 switch (t->base()) { 370 case Type::Int: 371 st->print(" %s%d]=#" INT32_FORMAT,msg,i,t->is_int()->get_con()); 372 break; 373 case Type::AnyPtr: 374 assert( t == TypePtr::NULL_PTR || n->in_dump(), "" ); 375 st->print(" %s%d]=#null",msg,i); 376 break; 377 case Type::AryPtr: 378 case Type::InstPtr: 379 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop())); 380 break; 381 case Type::KlassPtr: 382 case Type::AryKlassPtr: 383 case Type::InstKlassPtr: 384 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->exact_klass())); 385 break; 386 case Type::MetadataPtr: 387 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata())); 388 break; 389 case Type::NarrowOop: 390 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop())); 391 break; 392 case Type::RawPtr: 393 st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr())); 394 break; 395 case Type::DoubleCon: 396 st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d); 397 break; 398 case Type::FloatCon: 399 st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f); 400 break; 401 case Type::Long: 402 st->print(" %s%d]=#" INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con())); 403 break; 404 case Type::Half: 405 case Type::Top: 406 st->print(" %s%d]=_",msg,i); 407 break; 408 default: ShouldNotReachHere(); 409 } 410 } 411 } 412 413 //---------------------print_method_with_lineno-------------------------------- 414 void JVMState::print_method_with_lineno(outputStream* st, bool show_name) const { 415 if (show_name) _method->print_short_name(st); 416 417 int lineno = _method->line_number_from_bci(_bci); 418 if (lineno != -1) { 419 st->print(" @ bci:%d (line %d)", _bci, lineno); 420 } else { 421 st->print(" @ bci:%d", _bci); 422 } 423 } 424 425 //------------------------------format----------------------------------------- 426 void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const { 427 st->print(" #"); 428 if (_method) { 429 print_method_with_lineno(st, true); 430 } else { 431 st->print_cr(" runtime stub "); 432 return; 433 } 434 if (n->is_MachSafePoint()) { 435 GrowableArray<SafePointScalarObjectNode*> scobjs; 436 MachSafePointNode *mcall = n->as_MachSafePoint(); 437 uint i; 438 // Print locals 439 for (i = 0; i < (uint)loc_size(); i++) 440 format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs); 441 // Print stack 442 for (i = 0; i < (uint)stk_size(); i++) { 443 if ((uint)(_stkoff + i) >= mcall->len()) 444 st->print(" oob "); 445 else 446 format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs); 447 } 448 for (i = 0; (int)i < nof_monitors(); i++) { 449 Node *box = mcall->monitor_box(this, i); 450 Node *obj = mcall->monitor_obj(this, i); 451 if (regalloc->node_regs_max_index() > 0 && 452 OptoReg::is_valid(regalloc->get_reg_first(box))) { 453 box = BoxLockNode::box_node(box); 454 format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs); 455 } else { 456 OptoReg::Name box_reg = BoxLockNode::reg(box); 457 st->print(" MON-BOX%d=%s+%d", 458 i, 459 OptoReg::regname(OptoReg::c_frame_pointer), 460 regalloc->reg2offset(box_reg)); 461 } 462 const char* obj_msg = "MON-OBJ["; 463 if (EliminateLocks) { 464 if (BoxLockNode::box_node(box)->is_eliminated()) 465 obj_msg = "MON-OBJ(LOCK ELIMINATED)["; 466 } 467 format_helper(regalloc, st, obj, obj_msg, i, &scobjs); 468 } 469 470 for (i = 0; i < (uint)scobjs.length(); i++) { 471 // Scalar replaced objects. 472 st->cr(); 473 st->print(" # ScObj" INT32_FORMAT " ", i); 474 SafePointScalarObjectNode* spobj = scobjs.at(i); 475 ciKlass* cik = spobj->bottom_type()->is_oopptr()->exact_klass(); 476 assert(cik->is_instance_klass() || 477 cik->is_array_klass(), "Not supported allocation."); 478 ciInstanceKlass *iklass = nullptr; 479 if (cik->is_instance_klass()) { 480 cik->print_name_on(st); 481 iklass = cik->as_instance_klass(); 482 } else if (cik->is_type_array_klass()) { 483 cik->as_array_klass()->base_element_type()->print_name_on(st); 484 st->print("[%d]", spobj->n_fields()); 485 } else if (cik->is_obj_array_klass()) { 486 ciKlass* cie = cik->as_obj_array_klass()->base_element_klass(); 487 if (cie->is_instance_klass()) { 488 cie->print_name_on(st); 489 } else if (cie->is_type_array_klass()) { 490 cie->as_array_klass()->base_element_type()->print_name_on(st); 491 } else { 492 ShouldNotReachHere(); 493 } 494 st->print("[%d]", spobj->n_fields()); 495 int ndim = cik->as_array_klass()->dimension() - 1; 496 while (ndim-- > 0) { 497 st->print("[]"); 498 } 499 } else { 500 assert(false, "unexpected type %s", cik->name()->as_utf8()); 501 } 502 st->print("={"); 503 uint nf = spobj->n_fields(); 504 if (nf > 0) { 505 uint first_ind = spobj->first_index(mcall->jvms()); 506 if (iklass != nullptr && iklass->is_inlinetype()) { 507 Node* null_marker = mcall->in(first_ind++); 508 if (!null_marker->is_top()) { 509 st->print(" [null marker"); 510 format_helper(regalloc, st, null_marker, ":", -1, nullptr); 511 } 512 } 513 Node* fld_node = mcall->in(first_ind); 514 if (iklass != nullptr) { 515 st->print(" ["); 516 iklass->nonstatic_field_at(0)->print_name_on(st); 517 format_helper(regalloc, st, fld_node, ":", 0, &scobjs); 518 } else { 519 format_helper(regalloc, st, fld_node, "[", 0, &scobjs); 520 } 521 for (uint j = 1; j < nf; j++) { 522 fld_node = mcall->in(first_ind+j); 523 if (iklass != nullptr) { 524 st->print(", ["); 525 iklass->nonstatic_field_at(j)->print_name_on(st); 526 format_helper(regalloc, st, fld_node, ":", j, &scobjs); 527 } else { 528 format_helper(regalloc, st, fld_node, ", [", j, &scobjs); 529 } 530 } 531 } 532 st->print(" }"); 533 } 534 } 535 st->cr(); 536 if (caller() != nullptr) caller()->format(regalloc, n, st); 537 } 538 539 540 void JVMState::dump_spec(outputStream *st) const { 541 if (_method != nullptr) { 542 bool printed = false; 543 if (!Verbose) { 544 // The JVMS dumps make really, really long lines. 545 // Take out the most boring parts, which are the package prefixes. 546 char buf[500]; 547 stringStream namest(buf, sizeof(buf)); 548 _method->print_short_name(&namest); 549 if (namest.count() < sizeof(buf)) { 550 const char* name = namest.base(); 551 if (name[0] == ' ') ++name; 552 const char* endcn = strchr(name, ':'); // end of class name 553 if (endcn == nullptr) endcn = strchr(name, '('); 554 if (endcn == nullptr) endcn = name + strlen(name); 555 while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/') 556 --endcn; 557 st->print(" %s", endcn); 558 printed = true; 559 } 560 } 561 print_method_with_lineno(st, !printed); 562 if(_reexecute == Reexecute_True) 563 st->print(" reexecute"); 564 } else { 565 st->print(" runtime stub"); 566 } 567 if (caller() != nullptr) caller()->dump_spec(st); 568 } 569 570 571 void JVMState::dump_on(outputStream* st) const { 572 bool print_map = _map && !((uintptr_t)_map & 1) && 573 ((caller() == nullptr) || (caller()->map() != _map)); 574 if (print_map) { 575 if (_map->len() > _map->req()) { // _map->has_exceptions() 576 Node* ex = _map->in(_map->req()); // _map->next_exception() 577 // skip the first one; it's already being printed 578 while (ex != nullptr && ex->len() > ex->req()) { 579 ex = ex->in(ex->req()); // ex->next_exception() 580 ex->dump(1); 581 } 582 } 583 _map->dump(Verbose ? 2 : 1); 584 } 585 if (caller() != nullptr) { 586 caller()->dump_on(st); 587 } 588 st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=", 589 depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false"); 590 if (_method == nullptr) { 591 st->print_cr("(none)"); 592 } else { 593 _method->print_name(st); 594 st->cr(); 595 if (bci() >= 0 && bci() < _method->code_size()) { 596 st->print(" bc: "); 597 _method->print_codes_on(bci(), bci()+1, st); 598 } 599 } 600 } 601 602 // Extra way to dump a jvms from the debugger, 603 // to avoid a bug with C++ member function calls. 604 void dump_jvms(JVMState* jvms) { 605 jvms->dump(); 606 } 607 #endif 608 609 //--------------------------clone_shallow-------------------------------------- 610 JVMState* JVMState::clone_shallow(Compile* C) const { 611 JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0); 612 n->set_bci(_bci); 613 n->_reexecute = _reexecute; 614 n->set_locoff(_locoff); 615 n->set_stkoff(_stkoff); 616 n->set_monoff(_monoff); 617 n->set_scloff(_scloff); 618 n->set_endoff(_endoff); 619 n->set_sp(_sp); 620 n->set_map(_map); 621 n->set_receiver_info(_receiver_info); 622 return n; 623 } 624 625 //---------------------------clone_deep---------------------------------------- 626 JVMState* JVMState::clone_deep(Compile* C) const { 627 JVMState* n = clone_shallow(C); 628 for (JVMState* p = n; p->_caller != nullptr; p = p->_caller) { 629 p->_caller = p->_caller->clone_shallow(C); 630 } 631 assert(n->depth() == depth(), "sanity"); 632 assert(n->debug_depth() == debug_depth(), "sanity"); 633 return n; 634 } 635 636 /** 637 * Reset map for all callers 638 */ 639 void JVMState::set_map_deep(SafePointNode* map) { 640 for (JVMState* p = this; p != nullptr; p = p->_caller) { 641 p->set_map(map); 642 } 643 } 644 645 // unlike set_map(), this is two-way setting. 646 void JVMState::bind_map(SafePointNode* map) { 647 set_map(map); 648 _map->set_jvms(this); 649 } 650 651 // Adapt offsets in in-array after adding or removing an edge. 652 // Prerequisite is that the JVMState is used by only one node. 653 void JVMState::adapt_position(int delta) { 654 for (JVMState* jvms = this; jvms != nullptr; jvms = jvms->caller()) { 655 jvms->set_locoff(jvms->locoff() + delta); 656 jvms->set_stkoff(jvms->stkoff() + delta); 657 jvms->set_monoff(jvms->monoff() + delta); 658 jvms->set_scloff(jvms->scloff() + delta); 659 jvms->set_endoff(jvms->endoff() + delta); 660 } 661 } 662 663 // Mirror the stack size calculation in the deopt code 664 // How much stack space would we need at this point in the program in 665 // case of deoptimization? 666 int JVMState::interpreter_frame_size() const { 667 const JVMState* jvms = this; 668 int size = 0; 669 int callee_parameters = 0; 670 int callee_locals = 0; 671 int extra_args = method()->max_stack() - stk_size(); 672 673 while (jvms != nullptr) { 674 int locks = jvms->nof_monitors(); 675 int temps = jvms->stk_size(); 676 bool is_top_frame = (jvms == this); 677 ciMethod* method = jvms->method(); 678 679 int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(), 680 temps + callee_parameters, 681 extra_args, 682 locks, 683 callee_parameters, 684 callee_locals, 685 is_top_frame); 686 size += frame_size; 687 688 callee_parameters = method->size_of_parameters(); 689 callee_locals = method->max_locals(); 690 extra_args = 0; 691 jvms = jvms->caller(); 692 } 693 return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord; 694 } 695 696 // Compute receiver info for a compiled lambda form at call site. 697 ciInstance* JVMState::compute_receiver_info(ciMethod* callee) const { 698 assert(callee != nullptr && callee->is_compiled_lambda_form(), ""); 699 if (has_method() && method()->is_compiled_lambda_form()) { // callee is not a MH invoker 700 Node* recv = map()->argument(this, 0); 701 assert(recv != nullptr, ""); 702 const TypeOopPtr* recv_toop = recv->bottom_type()->isa_oopptr(); 703 if (recv_toop != nullptr && recv_toop->const_oop() != nullptr) { 704 return recv_toop->const_oop()->as_instance(); 705 } 706 } 707 return nullptr; 708 } 709 710 //============================================================================= 711 bool CallNode::cmp( const Node &n ) const 712 { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; } 713 #ifndef PRODUCT 714 void CallNode::dump_req(outputStream *st, DumpConfig* dc) const { 715 // Dump the required inputs, enclosed in '(' and ')' 716 uint i; // Exit value of loop 717 for (i = 0; i < req(); i++) { // For all required inputs 718 if (i == TypeFunc::Parms) st->print("("); 719 Node* p = in(i); 720 if (p != nullptr) { 721 p->dump_idx(false, st, dc); 722 st->print(" "); 723 } else { 724 st->print("_ "); 725 } 726 } 727 st->print(")"); 728 } 729 730 void CallNode::dump_spec(outputStream *st) const { 731 st->print(" "); 732 if (tf() != nullptr) tf()->dump_on(st); 733 if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt); 734 if (jvms() != nullptr) jvms()->dump_spec(st); 735 } 736 737 void AllocateNode::dump_spec(outputStream* st) const { 738 st->print(" "); 739 if (tf() != nullptr) { 740 tf()->dump_on(st); 741 } 742 if (_cnt != COUNT_UNKNOWN) { 743 st->print(" C=%f", _cnt); 744 } 745 const Node* const klass_node = in(KlassNode); 746 if (klass_node != nullptr) { 747 const TypeKlassPtr* const klass_ptr = klass_node->bottom_type()->isa_klassptr(); 748 749 if (klass_ptr != nullptr && klass_ptr->klass_is_exact()) { 750 st->print(" allocationKlass:"); 751 klass_ptr->exact_klass()->print_name_on(st); 752 } 753 } 754 if (jvms() != nullptr) { 755 jvms()->dump_spec(st); 756 } 757 } 758 #endif 759 760 const Type *CallNode::bottom_type() const { return tf()->range_cc(); } 761 const Type* CallNode::Value(PhaseGVN* phase) const { 762 if (in(0) == nullptr || phase->type(in(0)) == Type::TOP) { 763 return Type::TOP; 764 } 765 return tf()->range_cc(); 766 } 767 768 //------------------------------calling_convention----------------------------- 769 void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const { 770 if (_entry_point == StubRoutines::store_inline_type_fields_to_buf()) { 771 // The call to that stub is a special case: its inputs are 772 // multiple values returned from a call and so it should follow 773 // the return convention. 774 SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt); 775 return; 776 } 777 // Use the standard compiler calling convention 778 SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt); 779 } 780 781 782 //------------------------------match------------------------------------------ 783 // Construct projections for control, I/O, memory-fields, ..., and 784 // return result(s) along with their RegMask info 785 Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) { 786 uint con = proj->_con; 787 const TypeTuple* range_cc = tf()->range_cc(); 788 if (con >= TypeFunc::Parms) { 789 if (tf()->returns_inline_type_as_fields()) { 790 // The call returns multiple values (inline type fields): we 791 // create one projection per returned value. 792 assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return"); 793 uint ideal_reg = range_cc->field_at(con)->ideal_reg(); 794 return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg); 795 } else { 796 if (con == TypeFunc::Parms) { 797 uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg(); 798 OptoRegPair regs = Opcode() == Op_CallLeafVector 799 ? match->vector_return_value(ideal_reg) // Calls into assembly vector routine 800 : match->c_return_value(ideal_reg); 801 RegMask rm = RegMask(regs.first()); 802 803 if (Opcode() == Op_CallLeafVector) { 804 // If the return is in vector, compute appropriate regmask taking into account the whole range 805 if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) { 806 if(OptoReg::is_valid(regs.second())) { 807 for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) { 808 rm.insert(r); 809 } 810 } 811 } 812 } 813 814 if (OptoReg::is_valid(regs.second())) { 815 rm.insert(regs.second()); 816 } 817 return new MachProjNode(this,con,rm,ideal_reg); 818 } else { 819 assert(con == TypeFunc::Parms+1, "only one return value"); 820 assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, ""); 821 return new MachProjNode(this,con, RegMask::EMPTY, (uint)OptoReg::Bad); 822 } 823 } 824 } 825 826 switch (con) { 827 case TypeFunc::Control: 828 case TypeFunc::I_O: 829 case TypeFunc::Memory: 830 return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj); 831 832 case TypeFunc::ReturnAdr: 833 case TypeFunc::FramePtr: 834 default: 835 ShouldNotReachHere(); 836 } 837 return nullptr; 838 } 839 840 // Do we Match on this edge index or not? Match no edges 841 uint CallNode::match_edge(uint idx) const { 842 return 0; 843 } 844 845 // 846 // Determine whether the call could modify the field of the specified 847 // instance at the specified offset. 848 // 849 bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const { 850 assert((t_oop != nullptr), "sanity"); 851 if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) { 852 const TypeTuple* args = _tf->domain_sig(); 853 Node* dest = nullptr; 854 // Stubs that can be called once an ArrayCopyNode is expanded have 855 // different signatures. Look for the second pointer argument, 856 // that is the destination of the copy. 857 for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) { 858 if (args->field_at(i)->isa_ptr()) { 859 j++; 860 if (j == 2) { 861 dest = in(i); 862 break; 863 } 864 } 865 } 866 guarantee(dest != nullptr, "Call had only one ptr in, broken IR!"); 867 if (phase->type(dest)->isa_rawptr()) { 868 // may happen for an arraycopy that initializes a newly allocated object. Conservatively return true; 869 return true; 870 } 871 if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) { 872 return true; 873 } 874 return false; 875 } 876 if (t_oop->is_known_instance()) { 877 // The instance_id is set only for scalar-replaceable allocations which 878 // are not passed as arguments according to Escape Analysis. 879 return false; 880 } 881 if (t_oop->is_ptr_to_boxed_value()) { 882 ciKlass* boxing_klass = t_oop->is_instptr()->instance_klass(); 883 if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) { 884 // Skip unrelated boxing methods. 885 Node* proj = proj_out_or_null(TypeFunc::Parms); 886 if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) { 887 return false; 888 } 889 } 890 if (is_CallJava() && as_CallJava()->method() != nullptr) { 891 ciMethod* meth = as_CallJava()->method(); 892 if (meth->is_getter()) { 893 return false; 894 } 895 // May modify (by reflection) if an boxing object is passed 896 // as argument or returned. 897 Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr; 898 if (proj != nullptr) { 899 const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr(); 900 if ((inst_t != nullptr) && (!inst_t->klass_is_exact() || 901 (inst_t->instance_klass() == boxing_klass))) { 902 return true; 903 } 904 } 905 const TypeTuple* d = tf()->domain_cc(); 906 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 907 const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr(); 908 if ((inst_t != nullptr) && (!inst_t->klass_is_exact() || 909 (inst_t->instance_klass() == boxing_klass))) { 910 return true; 911 } 912 } 913 return false; 914 } 915 } 916 return true; 917 } 918 919 // Does this call have a direct reference to n other than debug information? 920 bool CallNode::has_non_debug_use(const Node* n) { 921 const TypeTuple* d = tf()->domain_cc(); 922 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 923 if (in(i) == n) { 924 return true; 925 } 926 } 927 return false; 928 } 929 930 bool CallNode::has_debug_use(const Node* n) const { 931 if (jvms() != nullptr) { 932 for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) { 933 if (in(i) == n) { 934 return true; 935 } 936 } 937 } 938 return false; 939 } 940 941 // Returns the unique CheckCastPP of a call 942 // or 'this' if there are several CheckCastPP or unexpected uses 943 // or returns null if there is no one. 944 Node *CallNode::result_cast() { 945 Node *cast = nullptr; 946 947 Node *p = proj_out_or_null(TypeFunc::Parms); 948 if (p == nullptr) 949 return nullptr; 950 951 for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) { 952 Node *use = p->fast_out(i); 953 if (use->is_CheckCastPP()) { 954 if (cast != nullptr) { 955 return this; // more than 1 CheckCastPP 956 } 957 cast = use; 958 } else if (!use->is_Initialize() && 959 !use->is_AddP() && 960 use->Opcode() != Op_MemBarStoreStore) { 961 // Expected uses are restricted to a CheckCastPP, an Initialize 962 // node, a MemBarStoreStore (clone) and AddP nodes. If we 963 // encounter any other use (a Phi node can be seen in rare 964 // cases) return this to prevent incorrect optimizations. 965 return this; 966 } 967 } 968 return cast; 969 } 970 971 972 CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts, bool allow_handlers) const { 973 uint max_res = TypeFunc::Parms-1; 974 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { 975 ProjNode *pn = fast_out(i)->as_Proj(); 976 max_res = MAX2(max_res, pn->_con); 977 } 978 979 assert(max_res < _tf->range_cc()->cnt(), "result out of bounds"); 980 981 uint projs_size = sizeof(CallProjections); 982 if (max_res > TypeFunc::Parms) { 983 projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*); 984 } 985 char* projs_storage = resource_allocate_bytes(projs_size); 986 CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1); 987 988 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { 989 ProjNode *pn = fast_out(i)->as_Proj(); 990 if (pn->outcnt() == 0) continue; 991 switch (pn->_con) { 992 case TypeFunc::Control: 993 { 994 // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj 995 projs->fallthrough_proj = pn; 996 const Node* cn = pn->unique_ctrl_out_or_null(); 997 if (cn != nullptr && cn->is_Catch()) { 998 for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) { 999 CatchProjNode* cpn = cn->fast_out(k)->as_CatchProj(); 1000 assert(allow_handlers || !cpn->is_handler_proj(), "not allowed"); 1001 if (cpn->_con == CatchProjNode::fall_through_index) { 1002 assert(cpn->handler_bci() == CatchProjNode::no_handler_bci, ""); 1003 projs->fallthrough_catchproj = cpn; 1004 } else if (!cpn->is_handler_proj()) { 1005 projs->catchall_catchproj = cpn; 1006 } 1007 } 1008 } 1009 break; 1010 } 1011 case TypeFunc::I_O: 1012 if (pn->_is_io_use) { 1013 projs->catchall_ioproj = pn; 1014 } else { 1015 projs->fallthrough_ioproj = pn; 1016 } 1017 for (DUIterator j = pn->outs(); pn->has_out(j); j++) { 1018 Node* e = pn->out(j); 1019 if (e->Opcode() == Op_CreateEx && e->outcnt() > 0) { 1020 CatchProjNode* ecpn = e->in(0)->isa_CatchProj(); 1021 assert(allow_handlers || ecpn == nullptr || !ecpn->is_handler_proj(), "not allowed"); 1022 if (ecpn != nullptr && ecpn->_con != CatchProjNode::fall_through_index && !ecpn->is_handler_proj()) { 1023 assert(projs->exobj == nullptr, "only one"); 1024 projs->exobj = e; 1025 } 1026 } 1027 } 1028 break; 1029 case TypeFunc::Memory: 1030 if (pn->_is_io_use) 1031 projs->catchall_memproj = pn; 1032 else 1033 projs->fallthrough_memproj = pn; 1034 break; 1035 case TypeFunc::Parms: 1036 projs->resproj[0] = pn; 1037 break; 1038 default: 1039 assert(pn->_con <= max_res, "unexpected projection from allocation node."); 1040 projs->resproj[pn->_con-TypeFunc::Parms] = pn; 1041 break; 1042 } 1043 } 1044 1045 // The resproj may not exist because the result could be ignored 1046 // and the exception object may not exist if an exception handler 1047 // swallows the exception but all the other must exist and be found. 1048 do_asserts = do_asserts && !Compile::current()->inlining_incrementally(); 1049 assert(!do_asserts || projs->fallthrough_proj != nullptr, "must be found"); 1050 assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found"); 1051 assert(!do_asserts || projs->fallthrough_memproj != nullptr, "must be found"); 1052 assert(!do_asserts || projs->fallthrough_ioproj != nullptr, "must be found"); 1053 assert(!do_asserts || projs->catchall_catchproj != nullptr, "must be found"); 1054 if (separate_io_proj) { 1055 assert(!do_asserts || projs->catchall_memproj != nullptr, "must be found"); 1056 assert(!do_asserts || projs->catchall_ioproj != nullptr, "must be found"); 1057 } 1058 return projs; 1059 } 1060 1061 Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) { 1062 #ifdef ASSERT 1063 // Validate attached generator 1064 CallGenerator* cg = generator(); 1065 if (cg != nullptr) { 1066 assert((is_CallStaticJava() && cg->is_mh_late_inline()) || 1067 (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch"); 1068 } 1069 #endif // ASSERT 1070 return SafePointNode::Ideal(phase, can_reshape); 1071 } 1072 1073 bool CallNode::is_call_to_arraycopystub() const { 1074 if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) { 1075 return true; 1076 } 1077 return false; 1078 } 1079 1080 bool CallNode::is_call_to_multianewarray_stub() const { 1081 if (_name != nullptr && 1082 strstr(_name, "multianewarray") != nullptr && 1083 strstr(_name, "C2 runtime") != nullptr) { 1084 return true; 1085 } 1086 return false; 1087 } 1088 1089 //============================================================================= 1090 uint CallJavaNode::size_of() const { return sizeof(*this); } 1091 bool CallJavaNode::cmp( const Node &n ) const { 1092 CallJavaNode &call = (CallJavaNode&)n; 1093 return CallNode::cmp(call) && _method == call._method && 1094 _override_symbolic_info == call._override_symbolic_info; 1095 } 1096 1097 void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) { 1098 // Copy debug information and adjust JVMState information 1099 uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain_sig()->cnt() : (uint)TypeFunc::Parms+1; 1100 uint new_dbg_start = tf()->domain_sig()->cnt(); 1101 int jvms_adj = new_dbg_start - old_dbg_start; 1102 assert (new_dbg_start == req(), "argument count mismatch"); 1103 Compile* C = phase->C; 1104 1105 // SafePointScalarObject node could be referenced several times in debug info. 1106 // Use Dict to record cloned nodes. 1107 Dict* sosn_map = new Dict(cmpkey,hashkey); 1108 for (uint i = old_dbg_start; i < sfpt->req(); i++) { 1109 Node* old_in = sfpt->in(i); 1110 // Clone old SafePointScalarObjectNodes, adjusting their field contents. 1111 if (old_in != nullptr && old_in->is_SafePointScalarObject()) { 1112 SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject(); 1113 bool new_node; 1114 Node* new_in = old_sosn->clone(sosn_map, new_node); 1115 if (new_node) { // New node? 1116 new_in->set_req(0, C->root()); // reset control edge 1117 new_in = phase->transform(new_in); // Register new node. 1118 } 1119 old_in = new_in; 1120 } 1121 add_req(old_in); 1122 } 1123 1124 // JVMS may be shared so clone it before we modify it 1125 set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr); 1126 for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) { 1127 jvms->set_map(this); 1128 jvms->set_locoff(jvms->locoff()+jvms_adj); 1129 jvms->set_stkoff(jvms->stkoff()+jvms_adj); 1130 jvms->set_monoff(jvms->monoff()+jvms_adj); 1131 jvms->set_scloff(jvms->scloff()+jvms_adj); 1132 jvms->set_endoff(jvms->endoff()+jvms_adj); 1133 } 1134 } 1135 1136 #ifdef ASSERT 1137 bool CallJavaNode::validate_symbolic_info() const { 1138 if (method() == nullptr) { 1139 return true; // call into runtime or uncommon trap 1140 } 1141 Bytecodes::Code bc = jvms()->method()->java_code_at_bci(jvms()->bci()); 1142 if (Arguments::is_valhalla_enabled() && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) { 1143 return true; 1144 } 1145 ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci()); 1146 ciMethod* callee = method(); 1147 if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) { 1148 assert(override_symbolic_info(), "should be set"); 1149 } 1150 assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info"); 1151 return true; 1152 } 1153 #endif 1154 1155 #ifndef PRODUCT 1156 void CallJavaNode::dump_spec(outputStream* st) const { 1157 if( _method ) _method->print_short_name(st); 1158 CallNode::dump_spec(st); 1159 } 1160 1161 void CallJavaNode::dump_compact_spec(outputStream* st) const { 1162 if (_method) { 1163 _method->print_short_name(st); 1164 } else { 1165 st->print("<?>"); 1166 } 1167 } 1168 #endif 1169 1170 void CallJavaNode::register_for_late_inline() { 1171 if (generator() != nullptr) { 1172 Compile::current()->prepend_late_inline(generator()); 1173 set_generator(nullptr); 1174 } else { 1175 assert(false, "repeated inline attempt"); 1176 } 1177 } 1178 1179 //============================================================================= 1180 uint CallStaticJavaNode::size_of() const { return sizeof(*this); } 1181 bool CallStaticJavaNode::cmp( const Node &n ) const { 1182 CallStaticJavaNode &call = (CallStaticJavaNode&)n; 1183 return CallJavaNode::cmp(call); 1184 } 1185 1186 Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) { 1187 if (can_reshape && uncommon_trap_request() != 0) { 1188 PhaseIterGVN* igvn = phase->is_IterGVN(); 1189 if (remove_unknown_flat_array_load(igvn, control(), memory(), in(TypeFunc::Parms))) { 1190 if (!control()->is_Region()) { 1191 igvn->replace_input_of(this, 0, phase->C->top()); 1192 } 1193 return this; 1194 } 1195 } 1196 1197 // Try to replace the runtime call to the substitutability test emitted by acmp if we can reason 1198 // about the operands 1199 if (can_reshape && !control()->is_top() && method() != nullptr && 1200 method()->holder() == phase->C->env()->ValueObjectMethods_klass() && 1201 method()->name() == ciSymbols::isSubstitutable_name()) { 1202 Node* res = replace_is_substitutable(phase->is_IterGVN()); 1203 if (res != nullptr) { 1204 return res; 1205 } 1206 } 1207 1208 CallGenerator* cg = generator(); 1209 if (can_reshape && cg != nullptr) { 1210 if (cg->is_mh_late_inline()) { 1211 assert(IncrementalInlineMH, "required"); 1212 assert(cg->call_node() == this, "mismatch"); 1213 assert(cg->method()->is_method_handle_intrinsic(), "required"); 1214 1215 // Check whether this MH handle call becomes a candidate for inlining. 1216 ciMethod* callee = cg->method(); 1217 vmIntrinsics::ID iid = callee->intrinsic_id(); 1218 if (iid == vmIntrinsics::_invokeBasic) { 1219 if (in(TypeFunc::Parms)->Opcode() == Op_ConP) { 1220 register_for_late_inline(); 1221 } 1222 } else if (iid == vmIntrinsics::_linkToNative) { 1223 // never retry 1224 } else { 1225 assert(callee->has_member_arg(), "wrong type of call?"); 1226 if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) { 1227 register_for_late_inline(); 1228 } 1229 } 1230 } else { 1231 assert(IncrementalInline, "required"); 1232 assert(!cg->method()->is_method_handle_intrinsic(), "required"); 1233 if (phase->C->print_inlining()) { 1234 phase->C->inline_printer()->record(cg->method(), cg->call_node()->jvms(), InliningResult::FAILURE, 1235 "static call node changed: trying again"); 1236 } 1237 register_for_late_inline(); 1238 } 1239 } 1240 return CallNode::Ideal(phase, can_reshape); 1241 } 1242 1243 //----------------------------is_uncommon_trap---------------------------- 1244 // Returns true if this is an uncommon trap. 1245 bool CallStaticJavaNode::is_uncommon_trap() const { 1246 return (_name != nullptr && !strcmp(_name, "uncommon_trap")); 1247 } 1248 1249 //----------------------------uncommon_trap_request---------------------------- 1250 // If this is an uncommon trap, return the request code, else zero. 1251 int CallStaticJavaNode::uncommon_trap_request() const { 1252 return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0; 1253 } 1254 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) { 1255 #ifndef PRODUCT 1256 if (!(call->req() > TypeFunc::Parms && 1257 call->in(TypeFunc::Parms) != nullptr && 1258 call->in(TypeFunc::Parms)->is_Con() && 1259 call->in(TypeFunc::Parms)->bottom_type()->isa_int())) { 1260 assert(in_dump() != 0, "OK if dumping"); 1261 tty->print("[bad uncommon trap]"); 1262 return 0; 1263 } 1264 #endif 1265 return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con(); 1266 } 1267 1268 // Split if can cause the flat array branch of an array load with unknown type (see 1269 // Parse::array_load) to end in an uncommon trap. In that case, the call to 1270 // 'load_unknown_inline' is useless. Replace it with an uncommon trap with the same JVMState. 1271 bool CallStaticJavaNode::remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg) { 1272 if (ctl == nullptr || ctl->is_top() || mem == nullptr || mem->is_top() || !mem->is_MergeMem()) { 1273 return false; 1274 } 1275 if (ctl->is_Region()) { 1276 bool res = false; 1277 for (uint i = 1; i < ctl->req(); i++) { 1278 MergeMemNode* mm = mem->clone()->as_MergeMem(); 1279 for (MergeMemStream mms(mm); mms.next_non_empty(); ) { 1280 Node* m = mms.memory(); 1281 if (m->is_Phi() && m->in(0) == ctl) { 1282 mms.set_memory(m->in(i)); 1283 } 1284 } 1285 if (remove_unknown_flat_array_load(igvn, ctl->in(i), mm, unc_arg)) { 1286 res = true; 1287 if (!ctl->in(i)->is_Region()) { 1288 igvn->replace_input_of(ctl, i, igvn->C->top()); 1289 } 1290 } 1291 igvn->remove_dead_node(mm, PhaseIterGVN::NodeOrigin::Speculative); 1292 } 1293 return res; 1294 } 1295 // Verify the control flow is ok 1296 Node* call = ctl; 1297 MemBarNode* membar = nullptr; 1298 for (;;) { 1299 if (call == nullptr || call->is_top()) { 1300 return false; 1301 } 1302 if (call->is_Proj() || call->is_Catch() || call->is_MemBar()) { 1303 call = call->in(0); 1304 } else if (call->Opcode() == Op_CallStaticJava && !call->in(0)->is_top() && 1305 call->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) { 1306 // If there is no explicit flat array accesses in the compilation unit, there would be no 1307 // membar here 1308 if (call->in(0)->is_Proj() && call->in(0)->in(0)->is_MemBar()) { 1309 membar = call->in(0)->in(0)->as_MemBar(); 1310 } 1311 break; 1312 } else { 1313 return false; 1314 } 1315 } 1316 1317 JVMState* jvms = call->jvms(); 1318 if (igvn->C->too_many_traps(jvms->method(), jvms->bci(), Deoptimization::trap_request_reason(uncommon_trap_request()))) { 1319 return false; 1320 } 1321 1322 Node* call_mem = call->in(TypeFunc::Memory); 1323 if (call_mem == nullptr || call_mem->is_top()) { 1324 return false; 1325 } 1326 if (!call_mem->is_MergeMem()) { 1327 call_mem = MergeMemNode::make(call_mem); 1328 igvn->register_new_node_with_optimizer(call_mem); 1329 } 1330 1331 // Verify that there's no unexpected side effect 1332 for (MergeMemStream mms2(mem->as_MergeMem(), call_mem->as_MergeMem()); mms2.next_non_empty2(); ) { 1333 Node* m1 = mms2.is_empty() ? mms2.base_memory() : mms2.memory(); 1334 Node* m2 = mms2.memory2(); 1335 1336 for (uint i = 0; i < 100; i++) { 1337 if (m1 == m2) { 1338 break; 1339 } else if (m1->is_Proj()) { 1340 m1 = m1->in(0); 1341 } else if (m1->is_MemBar()) { 1342 m1 = m1->in(TypeFunc::Memory); 1343 } else if (m1->Opcode() == Op_CallStaticJava && 1344 m1->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) { 1345 if (m1 != call) { 1346 if (call_mem->outcnt() == 0) { 1347 igvn->remove_dead_node(call_mem, PhaseIterGVN::NodeOrigin::Speculative); 1348 } 1349 return false; 1350 } 1351 break; 1352 } else if (m1->is_MergeMem()) { 1353 MergeMemNode* mm = m1->as_MergeMem(); 1354 int idx = mms2.alias_idx(); 1355 if (idx == Compile::AliasIdxBot) { 1356 m1 = mm->base_memory(); 1357 } else { 1358 m1 = mm->memory_at(idx); 1359 } 1360 } else { 1361 if (call_mem->outcnt() == 0) { 1362 igvn->remove_dead_node(call_mem, PhaseIterGVN::NodeOrigin::Speculative); 1363 } 1364 return false; 1365 } 1366 } 1367 } 1368 if (call_mem->outcnt() == 0) { 1369 igvn->remove_dead_node(call_mem, PhaseIterGVN::NodeOrigin::Speculative); 1370 } 1371 1372 // Remove membar preceding the call 1373 if (membar != nullptr) { 1374 membar->remove(igvn); 1375 } 1376 1377 address call_addr = OptoRuntime::uncommon_trap_blob()->entry_point(); 1378 CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap", nullptr); 1379 unc->init_req(TypeFunc::Control, call->in(0)); 1380 unc->init_req(TypeFunc::I_O, call->in(TypeFunc::I_O)); 1381 unc->init_req(TypeFunc::Memory, call->in(TypeFunc::Memory)); 1382 unc->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr)); 1383 unc->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr)); 1384 unc->init_req(TypeFunc::Parms+0, unc_arg); 1385 unc->set_cnt(PROB_UNLIKELY_MAG(4)); 1386 unc->copy_call_debug_info(igvn, call->as_CallStaticJava()); 1387 1388 // Replace the call with an uncommon trap 1389 igvn->replace_input_of(call, 0, igvn->C->top()); 1390 1391 igvn->register_new_node_with_optimizer(unc); 1392 1393 Node* ctrl = igvn->transform(new ProjNode(unc, TypeFunc::Control)); 1394 Node* halt = igvn->transform(new HaltNode(ctrl, call->in(TypeFunc::FramePtr), "uncommon trap returned which should never happen")); 1395 igvn->add_input_to(igvn->C->root(), halt); 1396 1397 return true; 1398 } 1399 1400 // Try to replace a runtime call to the substitutability test by either a simple pointer comparison 1401 // if either operand is not a value object, or comparing their fields if either operand is an 1402 // object of a known value type 1403 Node* CallStaticJavaNode::replace_is_substitutable(PhaseIterGVN* igvn) { 1404 Node* left = in(TypeFunc::Parms); 1405 Node* right = in(TypeFunc::Parms + 1); 1406 if (!InlineTypeNode::can_emit_substitutability_check(left, right)) { 1407 return nullptr; 1408 } 1409 1410 // Delay IGVN during macro expansion 1411 assert(!igvn->delay_transform(), "must not delay during Ideal"); 1412 igvn->set_delay_transform(true); 1413 GraphKit kit(this, *igvn); 1414 1415 Node* replace = InlineTypeNode::emit_substitutability_check(&kit, left, right); 1416 igvn->set_delay_transform(false); 1417 assert(replace != nullptr, "must succeed"); 1418 1419 if (UseAcmpFastPath) { 1420 // Sabotage the fast acmp path 1421 IfNode* fast_path_if = Parse::acmp_fast_path_if_from_substitutable_call(igvn, this); 1422 if (fast_path_if != nullptr) { 1423 fast_path_if->set_req(1, igvn->intcon(1)); 1424 igvn->_worklist.push(fast_path_if); 1425 } 1426 } 1427 1428 // Kill exception projections and return a tuple that will replace the call 1429 CallProjections* projs = extract_projections(false /*separate_io_proj*/); 1430 if (projs->fallthrough_catchproj != nullptr) { 1431 igvn->replace_node(projs->fallthrough_catchproj, kit.control()); 1432 } 1433 if (projs->catchall_memproj != nullptr) { 1434 igvn->replace_node(projs->catchall_memproj, igvn->C->top()); 1435 } 1436 if (projs->catchall_ioproj != nullptr) { 1437 igvn->replace_node(projs->catchall_ioproj, igvn->C->top()); 1438 } 1439 if (projs->catchall_catchproj != nullptr) { 1440 igvn->replace_node(projs->catchall_catchproj, igvn->C->top()); 1441 } 1442 Node* new_mem = kit.reset_memory(); 1443 assert(in(TypeFunc::Memory) == new_mem, "must not modify memory"); 1444 return TupleNode::make(tf()->range_cc(), igvn->C->top(), kit.i_o(), new_mem, kit.frameptr(), kit.returnadr(), replace); 1445 } 1446 1447 #ifndef PRODUCT 1448 void CallStaticJavaNode::dump_spec(outputStream *st) const { 1449 st->print("# Static "); 1450 if (_name != nullptr) { 1451 st->print("%s", _name); 1452 int trap_req = uncommon_trap_request(); 1453 if (trap_req != 0) { 1454 char buf[100]; 1455 st->print("(%s)", 1456 Deoptimization::format_trap_request(buf, sizeof(buf), 1457 trap_req)); 1458 } 1459 st->print(" "); 1460 } 1461 CallJavaNode::dump_spec(st); 1462 } 1463 1464 void CallStaticJavaNode::dump_compact_spec(outputStream* st) const { 1465 if (_method) { 1466 _method->print_short_name(st); 1467 } else if (_name) { 1468 st->print("%s", _name); 1469 } else { 1470 st->print("<?>"); 1471 } 1472 } 1473 #endif 1474 1475 //============================================================================= 1476 uint CallDynamicJavaNode::size_of() const { return sizeof(*this); } 1477 bool CallDynamicJavaNode::cmp( const Node &n ) const { 1478 CallDynamicJavaNode &call = (CallDynamicJavaNode&)n; 1479 return CallJavaNode::cmp(call); 1480 } 1481 1482 Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) { 1483 CallGenerator* cg = generator(); 1484 if (can_reshape && cg != nullptr) { 1485 if (cg->is_virtual_late_inline()) { 1486 assert(IncrementalInlineVirtual, "required"); 1487 assert(cg->call_node() == this, "mismatch"); 1488 1489 if (cg->callee_method() == nullptr) { 1490 // Recover symbolic info for method resolution. 1491 ciMethod* caller = jvms()->method(); 1492 ciBytecodeStream iter(caller); 1493 iter.force_bci(jvms()->bci()); 1494 1495 bool not_used1; 1496 ciSignature* not_used2; 1497 ciMethod* orig_callee = iter.get_method(not_used1, ¬_used2); // callee in the bytecode 1498 ciKlass* holder = iter.get_declared_method_holder(); 1499 if (orig_callee->is_method_handle_intrinsic()) { 1500 assert(_override_symbolic_info, "required"); 1501 orig_callee = method(); 1502 holder = method()->holder(); 1503 } 1504 1505 ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder); 1506 1507 Node* receiver_node = in(TypeFunc::Parms); 1508 const TypeOopPtr* receiver_type = phase->type(receiver_node)->isa_oopptr(); 1509 1510 int not_used3; 1511 bool call_does_dispatch; 1512 ciMethod* callee = phase->C->optimize_virtual_call(caller, klass, holder, orig_callee, receiver_type, true /*is_virtual*/, 1513 call_does_dispatch, not_used3); // out-parameters 1514 if (!call_does_dispatch) { 1515 cg->set_callee_method(callee); 1516 } 1517 } 1518 if (cg->callee_method() != nullptr) { 1519 // Register for late inlining. 1520 register_for_late_inline(); // MH late inlining prepends to the list, so do the same 1521 } 1522 } else { 1523 assert(IncrementalInline, "required"); 1524 if (phase->C->print_inlining()) { 1525 phase->C->inline_printer()->record(cg->method(), cg->call_node()->jvms(), InliningResult::FAILURE, 1526 "dynamic call node changed: trying again"); 1527 } 1528 register_for_late_inline(); 1529 } 1530 } 1531 return CallNode::Ideal(phase, can_reshape); 1532 } 1533 1534 #ifndef PRODUCT 1535 void CallDynamicJavaNode::dump_spec(outputStream *st) const { 1536 st->print("# Dynamic "); 1537 CallJavaNode::dump_spec(st); 1538 } 1539 #endif 1540 1541 //============================================================================= 1542 uint CallRuntimeNode::size_of() const { return sizeof(*this); } 1543 bool CallRuntimeNode::cmp( const Node &n ) const { 1544 CallRuntimeNode &call = (CallRuntimeNode&)n; 1545 return CallNode::cmp(call) && !strcmp(_name,call._name); 1546 } 1547 #ifndef PRODUCT 1548 void CallRuntimeNode::dump_spec(outputStream *st) const { 1549 st->print("# "); 1550 st->print("%s", _name); 1551 CallNode::dump_spec(st); 1552 } 1553 #endif 1554 uint CallLeafVectorNode::size_of() const { return sizeof(*this); } 1555 bool CallLeafVectorNode::cmp( const Node &n ) const { 1556 CallLeafVectorNode &call = (CallLeafVectorNode&)n; 1557 return CallLeafNode::cmp(call) && _num_bits == call._num_bits; 1558 } 1559 1560 //------------------------------calling_convention----------------------------- 1561 void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const { 1562 if (_entry_point == nullptr) { 1563 // The call to that stub is a special case: its inputs are 1564 // multiple values returned from a call and so it should follow 1565 // the return convention. 1566 SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt); 1567 return; 1568 } 1569 SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt); 1570 } 1571 1572 void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { 1573 #ifdef ASSERT 1574 assert(tf()->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits, 1575 "return vector size must match"); 1576 const TypeTuple* d = tf()->domain_sig(); 1577 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1578 Node* arg = in(i); 1579 assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits, 1580 "vector argument size must match"); 1581 } 1582 #endif 1583 1584 SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt); 1585 } 1586 1587 //============================================================================= 1588 //------------------------------calling_convention----------------------------- 1589 1590 1591 //============================================================================= 1592 bool CallLeafPureNode::is_unused() const { 1593 return proj_out_or_null(TypeFunc::Parms) == nullptr; 1594 } 1595 1596 bool CallLeafPureNode::is_dead() const { 1597 return proj_out_or_null(TypeFunc::Control) == nullptr; 1598 } 1599 1600 /* We make a tuple of the global input state + TOP for the output values. 1601 * We use this to delete a pure function that is not used: by replacing the call with 1602 * such a tuple, we let output Proj's idealization pick the corresponding input of the 1603 * pure call, so jumping over it, and effectively, removing the call from the graph. 1604 * This avoids doing the graph surgery manually, but leaves that to IGVN 1605 * that is specialized for doing that right. We need also tuple components for output 1606 * values of the function to respect the return arity, and in case there is a projection 1607 * that would pick an output (which shouldn't happen at the moment). 1608 */ 1609 TupleNode* CallLeafPureNode::make_tuple_of_input_state_and_top_return_values(const Compile* C) const { 1610 // Transparently propagate input state but parameters 1611 TupleNode* tuple = TupleNode::make( 1612 tf()->range_cc(), 1613 in(TypeFunc::Control), 1614 in(TypeFunc::I_O), 1615 in(TypeFunc::Memory), 1616 in(TypeFunc::FramePtr), 1617 in(TypeFunc::ReturnAdr)); 1618 1619 // And add TOPs for the return values 1620 for (uint i = TypeFunc::Parms; i < tf()->range_cc()->cnt(); i++) { 1621 tuple->set_req(i, C->top()); 1622 } 1623 1624 return tuple; 1625 } 1626 1627 CallLeafPureNode* CallLeafPureNode::inline_call_leaf_pure_node(Node* control) const { 1628 Node* top = Compile::current()->top(); 1629 if (control == nullptr) { 1630 control = in(TypeFunc::Control); 1631 } 1632 1633 CallLeafPureNode* call = new CallLeafPureNode(tf(), entry_point(), _name); 1634 call->init_req(TypeFunc::Control, control); 1635 call->init_req(TypeFunc::I_O, top); 1636 call->init_req(TypeFunc::Memory, top); 1637 call->init_req(TypeFunc::ReturnAdr, top); 1638 call->init_req(TypeFunc::FramePtr, top); 1639 for (unsigned int i = 0; i < tf()->domain_cc()->cnt() - TypeFunc::Parms; i++) { 1640 call->init_req(TypeFunc::Parms + i, in(TypeFunc::Parms + i)); 1641 } 1642 1643 return call; 1644 } 1645 1646 Node* CallLeafPureNode::Ideal(PhaseGVN* phase, bool can_reshape) { 1647 if (is_dead()) { 1648 return nullptr; 1649 } 1650 1651 // We need to wait until IGVN because during parsing, usages might still be missing 1652 // and we would remove the call immediately. 1653 if (can_reshape && is_unused()) { 1654 // The result is not used. We remove the call by replacing it with a tuple, that 1655 // is later disintegrated by the projections. 1656 return make_tuple_of_input_state_and_top_return_values(phase->C); 1657 } 1658 1659 return CallRuntimeNode::Ideal(phase, can_reshape); 1660 } 1661 1662 #ifndef PRODUCT 1663 void CallLeafNode::dump_spec(outputStream *st) const { 1664 st->print("# "); 1665 st->print("%s", _name); 1666 CallNode::dump_spec(st); 1667 } 1668 #endif 1669 1670 uint CallLeafNoFPNode::match_edge(uint idx) const { 1671 // Null entry point is a special case for which the target is in a 1672 // register. Need to match that edge. 1673 return entry_point() == nullptr && idx == TypeFunc::Parms; 1674 } 1675 1676 //============================================================================= 1677 1678 void SafePointNode::set_local(const JVMState* jvms, uint idx, Node *c) { 1679 assert(verify_jvms(jvms), "jvms must match"); 1680 int loc = jvms->locoff() + idx; 1681 if (in(loc)->is_top() && idx > 0 && !c->is_top() ) { 1682 // If current local idx is top then local idx - 1 could 1683 // be a long/double that needs to be killed since top could 1684 // represent the 2nd half of the long/double. 1685 uint ideal = in(loc -1)->ideal_reg(); 1686 if (ideal == Op_RegD || ideal == Op_RegL) { 1687 // set other (low index) half to top 1688 set_req(loc - 1, in(loc)); 1689 } 1690 } 1691 set_req(loc, c); 1692 } 1693 1694 uint SafePointNode::size_of() const { return sizeof(*this); } 1695 bool SafePointNode::cmp( const Node &n ) const { 1696 return (&n == this); // Always fail except on self 1697 } 1698 1699 //-------------------------set_next_exception---------------------------------- 1700 void SafePointNode::set_next_exception(SafePointNode* n) { 1701 assert(n == nullptr || n->Opcode() == Op_SafePoint, "correct value for next_exception"); 1702 if (len() == req()) { 1703 if (n != nullptr) add_prec(n); 1704 } else { 1705 set_prec(req(), n); 1706 } 1707 } 1708 1709 1710 //----------------------------next_exception----------------------------------- 1711 SafePointNode* SafePointNode::next_exception() const { 1712 if (len() == req()) { 1713 return nullptr; 1714 } else { 1715 Node* n = in(req()); 1716 assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges"); 1717 return (SafePointNode*) n; 1718 } 1719 } 1720 1721 1722 //------------------------------Ideal------------------------------------------ 1723 // Skip over any collapsed Regions 1724 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) { 1725 assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState"); 1726 if (remove_dead_region(phase, can_reshape)) { 1727 return this; 1728 } 1729 // Scalarize inline types in safepoint debug info. 1730 // Delay this until all inlining is over to avoid getting inconsistent debug info. 1731 if (phase->C->scalarize_in_safepoints() && can_reshape && jvms() != nullptr) { 1732 for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) { 1733 Node* n = in(i)->uncast(); 1734 if (n->is_InlineType()) { 1735 n->as_InlineType()->make_scalar_in_safepoints(phase->is_IterGVN()); 1736 } 1737 } 1738 } 1739 return nullptr; 1740 } 1741 1742 //------------------------------Identity--------------------------------------- 1743 // Remove obviously duplicate safepoints 1744 Node* SafePointNode::Identity(PhaseGVN* phase) { 1745 1746 // If you have back to back safepoints, remove one 1747 if (in(TypeFunc::Control)->is_SafePoint()) { 1748 Node* out_c = unique_ctrl_out_or_null(); 1749 // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the 1750 // outer loop's safepoint could confuse removal of the outer loop. 1751 if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) { 1752 return in(TypeFunc::Control); 1753 } 1754 } 1755 1756 // Transforming long counted loops requires a safepoint node. Do not 1757 // eliminate a safepoint until loop opts are over. 1758 if (in(0)->is_Proj() && !phase->C->major_progress()) { 1759 Node *n0 = in(0)->in(0); 1760 // Check if he is a call projection (except Leaf Call) 1761 if( n0->is_Catch() ) { 1762 n0 = n0->in(0)->in(0); 1763 assert( n0->is_Call(), "expect a call here" ); 1764 } 1765 if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) { 1766 // Don't remove a safepoint belonging to an OuterStripMinedLoopEndNode. 1767 // If the loop dies, they will be removed together. 1768 if (has_out_with(Op_OuterStripMinedLoopEnd)) { 1769 return this; 1770 } 1771 // Useless Safepoint, so remove it 1772 return in(TypeFunc::Control); 1773 } 1774 } 1775 1776 return this; 1777 } 1778 1779 //------------------------------Value------------------------------------------ 1780 const Type* SafePointNode::Value(PhaseGVN* phase) const { 1781 if (phase->type(in(0)) == Type::TOP) { 1782 return Type::TOP; 1783 } 1784 if (in(0) == this) { 1785 return Type::TOP; // Dead infinite loop 1786 } 1787 return Type::CONTROL; 1788 } 1789 1790 #ifndef PRODUCT 1791 void SafePointNode::dump_spec(outputStream *st) const { 1792 st->print(" SafePoint "); 1793 _replaced_nodes.dump(st); 1794 } 1795 #endif 1796 1797 const RegMask &SafePointNode::in_RegMask(uint idx) const { 1798 if (idx < TypeFunc::Parms) { 1799 return RegMask::EMPTY; 1800 } 1801 // Values outside the domain represent debug info 1802 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]); 1803 } 1804 const RegMask &SafePointNode::out_RegMask() const { 1805 return RegMask::EMPTY; 1806 } 1807 1808 1809 void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) { 1810 assert((int)grow_by > 0, "sanity"); 1811 int monoff = jvms->monoff(); 1812 int scloff = jvms->scloff(); 1813 int endoff = jvms->endoff(); 1814 assert(endoff == (int)req(), "no other states or debug info after me"); 1815 Node* top = Compile::current()->top(); 1816 for (uint i = 0; i < grow_by; i++) { 1817 ins_req(monoff, top); 1818 } 1819 jvms->set_monoff(monoff + grow_by); 1820 jvms->set_scloff(scloff + grow_by); 1821 jvms->set_endoff(endoff + grow_by); 1822 } 1823 1824 void SafePointNode::push_monitor(const FastLockNode *lock) { 1825 // Add a LockNode, which points to both the original BoxLockNode (the 1826 // stack space for the monitor) and the Object being locked. 1827 const int MonitorEdges = 2; 1828 assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges"); 1829 assert(req() == jvms()->endoff(), "correct sizing"); 1830 int nextmon = jvms()->scloff(); 1831 ins_req(nextmon, lock->box_node()); 1832 ins_req(nextmon+1, lock->obj_node()); 1833 jvms()->set_scloff(nextmon + MonitorEdges); 1834 jvms()->set_endoff(req()); 1835 } 1836 1837 void SafePointNode::pop_monitor() { 1838 // Delete last monitor from debug info 1839 DEBUG_ONLY(int num_before_pop = jvms()->nof_monitors()); 1840 const int MonitorEdges = 2; 1841 assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges"); 1842 int scloff = jvms()->scloff(); 1843 int endoff = jvms()->endoff(); 1844 int new_scloff = scloff - MonitorEdges; 1845 int new_endoff = endoff - MonitorEdges; 1846 jvms()->set_scloff(new_scloff); 1847 jvms()->set_endoff(new_endoff); 1848 while (scloff > new_scloff) del_req_ordered(--scloff); 1849 assert(jvms()->nof_monitors() == num_before_pop-1, ""); 1850 } 1851 1852 Node *SafePointNode::peek_monitor_box() const { 1853 int mon = jvms()->nof_monitors() - 1; 1854 assert(mon >= 0, "must have a monitor"); 1855 return monitor_box(jvms(), mon); 1856 } 1857 1858 Node *SafePointNode::peek_monitor_obj() const { 1859 int mon = jvms()->nof_monitors() - 1; 1860 assert(mon >= 0, "must have a monitor"); 1861 return monitor_obj(jvms(), mon); 1862 } 1863 1864 Node* SafePointNode::peek_operand(uint off) const { 1865 assert(jvms()->sp() > 0, "must have an operand"); 1866 assert(off < jvms()->sp(), "off is out-of-range"); 1867 return stack(jvms(), jvms()->sp() - off - 1); 1868 } 1869 1870 // Do we Match on this edge index or not? Match no edges 1871 uint SafePointNode::match_edge(uint idx) const { 1872 return (TypeFunc::Parms == idx); 1873 } 1874 1875 void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) { 1876 assert(Opcode() == Op_SafePoint, "only value for safepoint in loops"); 1877 int nb = igvn->C->root()->find_prec_edge(this); 1878 if (nb != -1) { 1879 igvn->delete_precedence_of(igvn->C->root(), nb); 1880 } 1881 } 1882 1883 void SafePointNode::remove_non_debug_edges(NodeEdgeTempStorage& non_debug_edges) { 1884 assert(non_debug_edges._state == NodeEdgeTempStorage::state_initial, "not processed"); 1885 assert(non_debug_edges.is_empty(), "edges not processed"); 1886 1887 while (req() > jvms()->endoff()) { 1888 uint last = req() - 1; 1889 non_debug_edges.push(in(last)); 1890 del_req(last); 1891 } 1892 1893 assert(jvms()->endoff() == req(), "no extra edges past debug info allowed"); 1894 DEBUG_ONLY(non_debug_edges._state = NodeEdgeTempStorage::state_populated); 1895 } 1896 1897 void SafePointNode::restore_non_debug_edges(NodeEdgeTempStorage& non_debug_edges) { 1898 assert(non_debug_edges._state == NodeEdgeTempStorage::state_populated, "not populated"); 1899 assert(jvms()->endoff() == req(), "no extra edges past debug info allowed"); 1900 1901 while (!non_debug_edges.is_empty()) { 1902 Node* non_debug_edge = non_debug_edges.pop(); 1903 add_req(non_debug_edge); 1904 } 1905 1906 assert(non_debug_edges.is_empty(), "edges not processed"); 1907 DEBUG_ONLY(non_debug_edges._state = NodeEdgeTempStorage::state_processed); 1908 } 1909 1910 //============== SafePointScalarObjectNode ============== 1911 1912 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) : 1913 TypeNode(tp, 1), // 1 control input -- seems required. Get from root. 1914 _first_index(first_index), 1915 _depth(depth), 1916 _n_fields(n_fields), 1917 _alloc(alloc) 1918 { 1919 #ifdef ASSERT 1920 if (alloc != nullptr && !alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) { 1921 alloc->dump(); 1922 assert(false, "unexpected call node"); 1923 } 1924 #endif 1925 init_class_id(Class_SafePointScalarObject); 1926 } 1927 1928 // Do not allow value-numbering for SafePointScalarObject node. 1929 uint SafePointScalarObjectNode::hash() const { return NO_HASH; } 1930 bool SafePointScalarObjectNode::cmp( const Node &n ) const { 1931 return (&n == this); // Always fail except on self 1932 } 1933 1934 uint SafePointScalarObjectNode::ideal_reg() const { 1935 return 0; // No matching to machine instruction 1936 } 1937 1938 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const { 1939 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]); 1940 } 1941 1942 const RegMask &SafePointScalarObjectNode::out_RegMask() const { 1943 return RegMask::EMPTY; 1944 } 1945 1946 uint SafePointScalarObjectNode::match_edge(uint idx) const { 1947 return 0; 1948 } 1949 1950 SafePointScalarObjectNode* 1951 SafePointScalarObjectNode::clone(Dict* sosn_map, bool& new_node) const { 1952 void* cached = (*sosn_map)[(void*)this]; 1953 if (cached != nullptr) { 1954 new_node = false; 1955 return (SafePointScalarObjectNode*)cached; 1956 } 1957 new_node = true; 1958 SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone(); 1959 sosn_map->Insert((void*)this, (void*)res); 1960 return res; 1961 } 1962 1963 1964 #ifndef PRODUCT 1965 void SafePointScalarObjectNode::dump_spec(outputStream *st) const { 1966 st->print(" # fields@[%d..%d]", first_index(), first_index() + n_fields() - 1); 1967 } 1968 #endif 1969 1970 //============== SafePointScalarMergeNode ============== 1971 1972 SafePointScalarMergeNode::SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx) : 1973 TypeNode(tp, 1), // 1 control input -- seems required. Get from root. 1974 _merge_pointer_idx(merge_pointer_idx) 1975 { 1976 init_class_id(Class_SafePointScalarMerge); 1977 } 1978 1979 // Do not allow value-numbering for SafePointScalarMerge node. 1980 uint SafePointScalarMergeNode::hash() const { return NO_HASH; } 1981 bool SafePointScalarMergeNode::cmp( const Node &n ) const { 1982 return (&n == this); // Always fail except on self 1983 } 1984 1985 uint SafePointScalarMergeNode::ideal_reg() const { 1986 return 0; // No matching to machine instruction 1987 } 1988 1989 const RegMask &SafePointScalarMergeNode::in_RegMask(uint idx) const { 1990 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]); 1991 } 1992 1993 const RegMask &SafePointScalarMergeNode::out_RegMask() const { 1994 return RegMask::EMPTY; 1995 } 1996 1997 uint SafePointScalarMergeNode::match_edge(uint idx) const { 1998 return 0; 1999 } 2000 2001 SafePointScalarMergeNode* 2002 SafePointScalarMergeNode::clone(Dict* sosn_map, bool& new_node) const { 2003 void* cached = (*sosn_map)[(void*)this]; 2004 if (cached != nullptr) { 2005 new_node = false; 2006 return (SafePointScalarMergeNode*)cached; 2007 } 2008 new_node = true; 2009 SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone(); 2010 sosn_map->Insert((void*)this, (void*)res); 2011 return res; 2012 } 2013 2014 #ifndef PRODUCT 2015 void SafePointScalarMergeNode::dump_spec(outputStream *st) const { 2016 st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1); 2017 } 2018 #endif 2019 2020 //============================================================================= 2021 uint AllocateNode::size_of() const { return sizeof(*this); } 2022 2023 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype, 2024 Node *ctrl, Node *mem, Node *abio, 2025 Node *size, Node *klass_node, 2026 Node* initial_test, 2027 InlineTypeNode* inline_type_node) 2028 : CallNode(atype, nullptr, TypeRawPtr::BOTTOM) 2029 { 2030 init_class_id(Class_Allocate); 2031 init_flags(Flag_is_macro); 2032 _is_scalar_replaceable = false; 2033 _is_non_escaping = false; 2034 _is_allocation_MemBar_redundant = false; 2035 Node *topnode = C->top(); 2036 2037 init_req( TypeFunc::Control , ctrl ); 2038 init_req( TypeFunc::I_O , abio ); 2039 init_req( TypeFunc::Memory , mem ); 2040 init_req( TypeFunc::ReturnAdr, topnode ); 2041 init_req( TypeFunc::FramePtr , topnode ); 2042 init_req( AllocSize , size); 2043 init_req( KlassNode , klass_node); 2044 init_req( InitialTest , initial_test); 2045 init_req( ALength , topnode); 2046 init_req( ValidLengthTest , topnode); 2047 init_req( InlineType , inline_type_node); 2048 // DefaultValue defaults to nullptr 2049 // RawDefaultValue defaults to nullptr 2050 C->add_macro_node(this); 2051 } 2052 2053 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer) 2054 { 2055 assert(initializer != nullptr && 2056 (initializer->is_object_constructor() || initializer->is_class_initializer()), 2057 "unexpected initializer method"); 2058 BCEscapeAnalyzer* analyzer = initializer->get_bcea(); 2059 if (analyzer == nullptr) { 2060 return; 2061 } 2062 2063 // Allocation node is first parameter in its initializer 2064 if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) { 2065 _is_allocation_MemBar_redundant = true; 2066 } 2067 } 2068 2069 Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) { 2070 Node* mark_node = nullptr; 2071 if (UseCompactObjectHeaders || Arguments::is_valhalla_enabled()) { 2072 Node* klass_node = in(AllocateNode::KlassNode); 2073 Node* proto_adr = phase->transform(AddPNode::make_with_base(phase->C->top(), klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset())))); 2074 mark_node = LoadNode::make(*phase, control, mem, proto_adr, phase->type(proto_adr)->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered); 2075 } else { 2076 // For now only enable fast locking for non-array types 2077 mark_node = phase->MakeConX(markWord::prototype().value()); 2078 } 2079 return mark_node; 2080 } 2081 2082 // Retrieve the length from the AllocateArrayNode. Narrow the type with a 2083 // CastII, if appropriate. If we are not allowed to create new nodes, and 2084 // a CastII is appropriate, return null. 2085 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) { 2086 Node *length = in(AllocateNode::ALength); 2087 assert(length != nullptr, "length is not null"); 2088 2089 const TypeInt* length_type = phase->find_int_type(length); 2090 const TypeAryPtr* ary_type = oop_type->isa_aryptr(); 2091 2092 if (ary_type != nullptr && length_type != nullptr) { 2093 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type); 2094 if (narrow_length_type != length_type) { 2095 // Assert one of: 2096 // - the narrow_length is 0 2097 // - the narrow_length is not wider than length 2098 assert(narrow_length_type == TypeInt::ZERO || 2099 (length_type->is_con() && narrow_length_type->is_con() && 2100 (narrow_length_type->_hi <= length_type->_lo)) || 2101 (narrow_length_type->_hi <= length_type->_hi && 2102 narrow_length_type->_lo >= length_type->_lo), 2103 "narrow type must be narrower than length type"); 2104 2105 // Return null if new nodes are not allowed 2106 if (!allow_new_nodes) { 2107 return nullptr; 2108 } 2109 // Create a cast which is control dependent on the initialization to 2110 // propagate the fact that the array length must be positive. 2111 InitializeNode* init = initialization(); 2112 if (init != nullptr) { 2113 length = new CastIINode(init->proj_out_or_null(TypeFunc::Control), length, narrow_length_type); 2114 } 2115 } 2116 } 2117 2118 return length; 2119 } 2120 2121 //============================================================================= 2122 const TypeFunc* LockNode::_lock_type_Type = nullptr; 2123 2124 uint LockNode::size_of() const { return sizeof(*this); } 2125 2126 // Redundant lock elimination 2127 // 2128 // There are various patterns of locking where we release and 2129 // immediately reacquire a lock in a piece of code where no operations 2130 // occur in between that would be observable. In those cases we can 2131 // skip releasing and reacquiring the lock without violating any 2132 // fairness requirements. Doing this around a loop could cause a lock 2133 // to be held for a very long time so we concentrate on non-looping 2134 // control flow. We also require that the operations are fully 2135 // redundant meaning that we don't introduce new lock operations on 2136 // some paths so to be able to eliminate it on others ala PRE. This 2137 // would probably require some more extensive graph manipulation to 2138 // guarantee that the memory edges were all handled correctly. 2139 // 2140 // Assuming p is a simple predicate which can't trap in any way and s 2141 // is a synchronized method consider this code: 2142 // 2143 // s(); 2144 // if (p) 2145 // s(); 2146 // else 2147 // s(); 2148 // s(); 2149 // 2150 // 1. The unlocks of the first call to s can be eliminated if the 2151 // locks inside the then and else branches are eliminated. 2152 // 2153 // 2. The unlocks of the then and else branches can be eliminated if 2154 // the lock of the final call to s is eliminated. 2155 // 2156 // Either of these cases subsumes the simple case of sequential control flow 2157 // 2158 // Additionally we can eliminate versions without the else case: 2159 // 2160 // s(); 2161 // if (p) 2162 // s(); 2163 // s(); 2164 // 2165 // 3. In this case we eliminate the unlock of the first s, the lock 2166 // and unlock in the then case and the lock in the final s. 2167 // 2168 // Note also that in all these cases the then/else pieces don't have 2169 // to be trivial as long as they begin and end with synchronization 2170 // operations. 2171 // 2172 // s(); 2173 // if (p) 2174 // s(); 2175 // f(); 2176 // s(); 2177 // s(); 2178 // 2179 // The code will work properly for this case, leaving in the unlock 2180 // before the call to f and the relock after it. 2181 // 2182 // A potentially interesting case which isn't handled here is when the 2183 // locking is partially redundant. 2184 // 2185 // s(); 2186 // if (p) 2187 // s(); 2188 // 2189 // This could be eliminated putting unlocking on the else case and 2190 // eliminating the first unlock and the lock in the then side. 2191 // Alternatively the unlock could be moved out of the then side so it 2192 // was after the merge and the first unlock and second lock 2193 // eliminated. This might require less manipulation of the memory 2194 // state to get correct. 2195 // 2196 // Additionally we might allow work between a unlock and lock before 2197 // giving up eliminating the locks. The current code disallows any 2198 // conditional control flow between these operations. A formulation 2199 // similar to partial redundancy elimination computing the 2200 // availability of unlocking and the anticipatability of locking at a 2201 // program point would allow detection of fully redundant locking with 2202 // some amount of work in between. I'm not sure how often I really 2203 // think that would occur though. Most of the cases I've seen 2204 // indicate it's likely non-trivial work would occur in between. 2205 // There may be other more complicated constructs where we could 2206 // eliminate locking but I haven't seen any others appear as hot or 2207 // interesting. 2208 // 2209 // Locking and unlocking have a canonical form in ideal that looks 2210 // roughly like this: 2211 // 2212 // <obj> 2213 // | \\------+ 2214 // | \ \ 2215 // | BoxLock \ 2216 // | | | \ 2217 // | | \ \ 2218 // | | FastLock 2219 // | | / 2220 // | | / 2221 // | | | 2222 // 2223 // Lock 2224 // | 2225 // Proj #0 2226 // | 2227 // MembarAcquire 2228 // | 2229 // Proj #0 2230 // 2231 // MembarRelease 2232 // | 2233 // Proj #0 2234 // | 2235 // Unlock 2236 // | 2237 // Proj #0 2238 // 2239 // 2240 // This code proceeds by processing Lock nodes during PhaseIterGVN 2241 // and searching back through its control for the proper code 2242 // patterns. Once it finds a set of lock and unlock operations to 2243 // eliminate they are marked as eliminatable which causes the 2244 // expansion of the Lock and Unlock macro nodes to make the operation a NOP 2245 // 2246 //============================================================================= 2247 2248 // 2249 // Utility function to skip over uninteresting control nodes. Nodes skipped are: 2250 // - copy regions. (These may not have been optimized away yet.) 2251 // - eliminated locking nodes 2252 // 2253 static Node *next_control(Node *ctrl) { 2254 if (ctrl == nullptr) 2255 return nullptr; 2256 while (1) { 2257 if (ctrl->is_Region()) { 2258 RegionNode *r = ctrl->as_Region(); 2259 Node *n = r->is_copy(); 2260 if (n == nullptr) 2261 break; // hit a region, return it 2262 else 2263 ctrl = n; 2264 } else if (ctrl->is_Proj()) { 2265 Node *in0 = ctrl->in(0); 2266 if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) { 2267 ctrl = in0->in(0); 2268 } else { 2269 break; 2270 } 2271 } else { 2272 break; // found an interesting control 2273 } 2274 } 2275 return ctrl; 2276 } 2277 // 2278 // Given a control, see if it's the control projection of an Unlock which 2279 // operating on the same object as lock. 2280 // 2281 bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock, 2282 GrowableArray<AbstractLockNode*> &lock_ops) { 2283 ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : nullptr; 2284 if (ctrl_proj != nullptr && ctrl_proj->_con == TypeFunc::Control) { 2285 Node *n = ctrl_proj->in(0); 2286 if (n != nullptr && n->is_Unlock()) { 2287 UnlockNode *unlock = n->as_Unlock(); 2288 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 2289 Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node()); 2290 Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node()); 2291 if (lock_obj->eqv_uncast(unlock_obj) && 2292 BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) && 2293 !unlock->is_eliminated()) { 2294 lock_ops.append(unlock); 2295 return true; 2296 } 2297 } 2298 } 2299 return false; 2300 } 2301 2302 // 2303 // Find the lock matching an unlock. Returns null if a safepoint 2304 // or complicated control is encountered first. 2305 LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) { 2306 LockNode *lock_result = nullptr; 2307 // find the matching lock, or an intervening safepoint 2308 Node *ctrl = next_control(unlock->in(0)); 2309 while (1) { 2310 assert(ctrl != nullptr, "invalid control graph"); 2311 assert(!ctrl->is_Start(), "missing lock for unlock"); 2312 if (ctrl->is_top()) break; // dead control path 2313 if (ctrl->is_Proj()) ctrl = ctrl->in(0); 2314 if (ctrl->is_SafePoint()) { 2315 break; // found a safepoint (may be the lock we are searching for) 2316 } else if (ctrl->is_Region()) { 2317 // Check for a simple diamond pattern. Punt on anything more complicated 2318 if (ctrl->req() == 3 && ctrl->in(1) != nullptr && ctrl->in(2) != nullptr) { 2319 Node *in1 = next_control(ctrl->in(1)); 2320 Node *in2 = next_control(ctrl->in(2)); 2321 if (((in1->is_IfTrue() && in2->is_IfFalse()) || 2322 (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) { 2323 ctrl = next_control(in1->in(0)->in(0)); 2324 } else { 2325 break; 2326 } 2327 } else { 2328 break; 2329 } 2330 } else { 2331 ctrl = next_control(ctrl->in(0)); // keep searching 2332 } 2333 } 2334 if (ctrl->is_Lock()) { 2335 LockNode *lock = ctrl->as_Lock(); 2336 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 2337 Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node()); 2338 Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node()); 2339 if (lock_obj->eqv_uncast(unlock_obj) && 2340 BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) { 2341 lock_result = lock; 2342 } 2343 } 2344 return lock_result; 2345 } 2346 2347 // This code corresponds to case 3 above. 2348 2349 bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock, 2350 GrowableArray<AbstractLockNode*> &lock_ops) { 2351 Node* if_node = node->in(0); 2352 bool if_true = node->is_IfTrue(); 2353 2354 if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) { 2355 Node *lock_ctrl = next_control(if_node->in(0)); 2356 if (find_matching_unlock(lock_ctrl, lock, lock_ops)) { 2357 Node* lock1_node = nullptr; 2358 ProjNode* proj = if_node->as_If()->proj_out(!if_true); 2359 if (if_true) { 2360 if (proj->is_IfFalse() && proj->outcnt() == 1) { 2361 lock1_node = proj->unique_out(); 2362 } 2363 } else { 2364 if (proj->is_IfTrue() && proj->outcnt() == 1) { 2365 lock1_node = proj->unique_out(); 2366 } 2367 } 2368 if (lock1_node != nullptr && lock1_node->is_Lock()) { 2369 LockNode *lock1 = lock1_node->as_Lock(); 2370 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 2371 Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node()); 2372 Node* lock1_obj = bs->step_over_gc_barrier(lock1->obj_node()); 2373 if (lock_obj->eqv_uncast(lock1_obj) && 2374 BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) && 2375 !lock1->is_eliminated()) { 2376 lock_ops.append(lock1); 2377 return true; 2378 } 2379 } 2380 } 2381 } 2382 2383 lock_ops.trunc_to(0); 2384 return false; 2385 } 2386 2387 bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock, 2388 GrowableArray<AbstractLockNode*> &lock_ops) { 2389 // check each control merging at this point for a matching unlock. 2390 // in(0) should be self edge so skip it. 2391 for (int i = 1; i < (int)region->req(); i++) { 2392 Node *in_node = next_control(region->in(i)); 2393 if (in_node != nullptr) { 2394 if (find_matching_unlock(in_node, lock, lock_ops)) { 2395 // found a match so keep on checking. 2396 continue; 2397 } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) { 2398 continue; 2399 } 2400 2401 // If we fall through to here then it was some kind of node we 2402 // don't understand or there wasn't a matching unlock, so give 2403 // up trying to merge locks. 2404 lock_ops.trunc_to(0); 2405 return false; 2406 } 2407 } 2408 return true; 2409 2410 } 2411 2412 // Check that all locks/unlocks associated with object come from balanced regions. 2413 bool AbstractLockNode::is_balanced() { 2414 Node* obj = obj_node(); 2415 for (uint j = 0; j < obj->outcnt(); j++) { 2416 Node* n = obj->raw_out(j); 2417 if (n->is_AbstractLock() && 2418 n->as_AbstractLock()->obj_node()->eqv_uncast(obj)) { 2419 BoxLockNode* n_box = n->as_AbstractLock()->box_node()->as_BoxLock(); 2420 if (n_box->is_unbalanced()) { 2421 return false; 2422 } 2423 } 2424 } 2425 return true; 2426 } 2427 2428 const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"}; 2429 2430 const char * AbstractLockNode::kind_as_string() const { 2431 return _kind_names[_kind]; 2432 } 2433 2434 #ifndef PRODUCT 2435 // 2436 // Create a counter which counts the number of times this lock is acquired 2437 // 2438 void AbstractLockNode::create_lock_counter(JVMState* state) { 2439 _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter); 2440 } 2441 2442 void AbstractLockNode::set_eliminated_lock_counter() { 2443 if (_counter) { 2444 // Update the counter to indicate that this lock was eliminated. 2445 // The counter update code will stay around even though the 2446 // optimizer will eliminate the lock operation itself. 2447 _counter->set_tag(NamedCounter::EliminatedLockCounter); 2448 } 2449 } 2450 2451 void AbstractLockNode::dump_spec(outputStream* st) const { 2452 st->print("%s ", _kind_names[_kind]); 2453 CallNode::dump_spec(st); 2454 } 2455 2456 void AbstractLockNode::dump_compact_spec(outputStream* st) const { 2457 st->print("%s", _kind_names[_kind]); 2458 } 2459 #endif 2460 2461 //============================================================================= 2462 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) { 2463 2464 // perform any generic optimizations first (returns 'this' or null) 2465 Node *result = SafePointNode::Ideal(phase, can_reshape); 2466 if (result != nullptr) return result; 2467 // Don't bother trying to transform a dead node 2468 if (in(0) && in(0)->is_top()) return nullptr; 2469 2470 // Now see if we can optimize away this lock. We don't actually 2471 // remove the locking here, we simply set the _eliminate flag which 2472 // prevents macro expansion from expanding the lock. Since we don't 2473 // modify the graph, the value returned from this function is the 2474 // one computed above. 2475 const Type* obj_type = phase->type(obj_node()); 2476 if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) { 2477 // 2478 // If we are locking an non-escaped object, the lock/unlock is unnecessary 2479 // 2480 ConnectionGraph *cgr = phase->C->congraph(); 2481 if (cgr != nullptr && cgr->can_eliminate_lock(this)) { 2482 assert(!is_eliminated() || is_coarsened(), "sanity"); 2483 // The lock could be marked eliminated by lock coarsening 2484 // code during first IGVN before EA. Replace coarsened flag 2485 // to eliminate all associated locks/unlocks. 2486 #ifdef ASSERT 2487 this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1"); 2488 #endif 2489 this->set_non_esc_obj(); 2490 return result; 2491 } 2492 2493 if (!phase->C->do_locks_coarsening()) { 2494 return result; // Compiling without locks coarsening 2495 } 2496 // 2497 // Try lock coarsening 2498 // 2499 PhaseIterGVN* iter = phase->is_IterGVN(); 2500 if (iter != nullptr && !is_eliminated()) { 2501 2502 GrowableArray<AbstractLockNode*> lock_ops; 2503 2504 Node *ctrl = next_control(in(0)); 2505 2506 // now search back for a matching Unlock 2507 if (find_matching_unlock(ctrl, this, lock_ops)) { 2508 // found an unlock directly preceding this lock. This is the 2509 // case of single unlock directly control dependent on a 2510 // single lock which is the trivial version of case 1 or 2. 2511 } else if (ctrl->is_Region() ) { 2512 if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) { 2513 // found lock preceded by multiple unlocks along all paths 2514 // joining at this point which is case 3 in description above. 2515 } 2516 } else { 2517 // see if this lock comes from either half of an if and the 2518 // predecessors merges unlocks and the other half of the if 2519 // performs a lock. 2520 if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) { 2521 // found unlock splitting to an if with locks on both branches. 2522 } 2523 } 2524 2525 if (lock_ops.length() > 0) { 2526 // add ourselves to the list of locks to be eliminated. 2527 lock_ops.append(this); 2528 2529 #ifndef PRODUCT 2530 if (PrintEliminateLocks) { 2531 int locks = 0; 2532 int unlocks = 0; 2533 if (Verbose) { 2534 tty->print_cr("=== Locks coarsening ==="); 2535 tty->print("Obj: "); 2536 obj_node()->dump(); 2537 } 2538 for (int i = 0; i < lock_ops.length(); i++) { 2539 AbstractLockNode* lock = lock_ops.at(i); 2540 if (lock->Opcode() == Op_Lock) 2541 locks++; 2542 else 2543 unlocks++; 2544 if (Verbose) { 2545 tty->print("Box %d: ", i); 2546 box_node()->dump(); 2547 tty->print(" %d: ", i); 2548 lock->dump(); 2549 } 2550 } 2551 tty->print_cr("=== Coarsened %d unlocks and %d locks", unlocks, locks); 2552 } 2553 #endif 2554 2555 // for each of the identified locks, mark them 2556 // as eliminatable 2557 for (int i = 0; i < lock_ops.length(); i++) { 2558 AbstractLockNode* lock = lock_ops.at(i); 2559 2560 // Mark it eliminated by coarsening and update any counters 2561 #ifdef ASSERT 2562 lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened"); 2563 #endif 2564 lock->set_coarsened(); 2565 } 2566 // Record this coarsened group. 2567 phase->C->add_coarsened_locks(lock_ops); 2568 } else if (ctrl->is_Region() && 2569 iter->_worklist.member(ctrl)) { 2570 // We weren't able to find any opportunities but the region this 2571 // lock is control dependent on hasn't been processed yet so put 2572 // this lock back on the worklist so we can check again once any 2573 // region simplification has occurred. 2574 iter->_worklist.push(this); 2575 } 2576 } 2577 } 2578 2579 return result; 2580 } 2581 2582 //============================================================================= 2583 bool LockNode::is_nested_lock_region() { 2584 return is_nested_lock_region(nullptr); 2585 } 2586 2587 // p is used for access to compilation log; no logging if null 2588 bool LockNode::is_nested_lock_region(Compile * c) { 2589 BoxLockNode* box = box_node()->as_BoxLock(); 2590 int stk_slot = box->stack_slot(); 2591 if (stk_slot <= 0) { 2592 #ifdef ASSERT 2593 this->log_lock_optimization(c, "eliminate_lock_INLR_1"); 2594 #endif 2595 return false; // External lock or it is not Box (Phi node). 2596 } 2597 2598 // Ignore complex cases: merged locks or multiple locks. 2599 Node* obj = obj_node(); 2600 LockNode* unique_lock = nullptr; 2601 Node* bad_lock = nullptr; 2602 if (!box->is_simple_lock_region(&unique_lock, obj, &bad_lock)) { 2603 #ifdef ASSERT 2604 this->log_lock_optimization(c, "eliminate_lock_INLR_2a", bad_lock); 2605 #endif 2606 return false; 2607 } 2608 if (unique_lock != this) { 2609 #ifdef ASSERT 2610 this->log_lock_optimization(c, "eliminate_lock_INLR_2b", (unique_lock != nullptr ? unique_lock : bad_lock)); 2611 if (PrintEliminateLocks && Verbose) { 2612 tty->print_cr("=============== unique_lock != this ============"); 2613 tty->print(" this: "); 2614 this->dump(); 2615 tty->print(" box: "); 2616 box->dump(); 2617 tty->print(" obj: "); 2618 obj->dump(); 2619 if (unique_lock != nullptr) { 2620 tty->print(" unique_lock: "); 2621 unique_lock->dump(); 2622 } 2623 if (bad_lock != nullptr) { 2624 tty->print(" bad_lock: "); 2625 bad_lock->dump(); 2626 } 2627 tty->print_cr("==============="); 2628 } 2629 #endif 2630 return false; 2631 } 2632 2633 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 2634 obj = bs->step_over_gc_barrier(obj); 2635 // Look for external lock for the same object. 2636 SafePointNode* sfn = this->as_SafePoint(); 2637 JVMState* youngest_jvms = sfn->jvms(); 2638 int max_depth = youngest_jvms->depth(); 2639 for (int depth = 1; depth <= max_depth; depth++) { 2640 JVMState* jvms = youngest_jvms->of_depth(depth); 2641 int num_mon = jvms->nof_monitors(); 2642 // Loop over monitors 2643 for (int idx = 0; idx < num_mon; idx++) { 2644 Node* obj_node = sfn->monitor_obj(jvms, idx); 2645 obj_node = bs->step_over_gc_barrier(obj_node); 2646 BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock(); 2647 if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) { 2648 box->set_nested(); 2649 return true; 2650 } 2651 } 2652 } 2653 #ifdef ASSERT 2654 this->log_lock_optimization(c, "eliminate_lock_INLR_3"); 2655 #endif 2656 return false; 2657 } 2658 2659 //============================================================================= 2660 uint UnlockNode::size_of() const { return sizeof(*this); } 2661 2662 //============================================================================= 2663 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) { 2664 2665 // perform any generic optimizations first (returns 'this' or null) 2666 Node *result = SafePointNode::Ideal(phase, can_reshape); 2667 if (result != nullptr) return result; 2668 // Don't bother trying to transform a dead node 2669 if (in(0) && in(0)->is_top()) return nullptr; 2670 2671 // Now see if we can optimize away this unlock. We don't actually 2672 // remove the unlocking here, we simply set the _eliminate flag which 2673 // prevents macro expansion from expanding the unlock. Since we don't 2674 // modify the graph, the value returned from this function is the 2675 // one computed above. 2676 // Escape state is defined after Parse phase. 2677 const Type* obj_type = phase->type(obj_node()); 2678 if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) { 2679 // 2680 // If we are unlocking an non-escaped object, the lock/unlock is unnecessary. 2681 // 2682 ConnectionGraph *cgr = phase->C->congraph(); 2683 if (cgr != nullptr && cgr->can_eliminate_lock(this)) { 2684 assert(!is_eliminated() || is_coarsened(), "sanity"); 2685 // The lock could be marked eliminated by lock coarsening 2686 // code during first IGVN before EA. Replace coarsened flag 2687 // to eliminate all associated locks/unlocks. 2688 #ifdef ASSERT 2689 this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2"); 2690 #endif 2691 this->set_non_esc_obj(); 2692 } 2693 } 2694 return result; 2695 } 2696 2697 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const { 2698 if (C == nullptr) { 2699 return; 2700 } 2701 CompileLog* log = C->log(); 2702 if (log != nullptr) { 2703 Node* box = box_node(); 2704 Node* obj = obj_node(); 2705 int box_id = box != nullptr ? box->_idx : -1; 2706 int obj_id = obj != nullptr ? obj->_idx : -1; 2707 2708 log->begin_head("%s compile_id='%d' lock_id='%d' class='%s' kind='%s' box_id='%d' obj_id='%d' bad_id='%d'", 2709 tag, C->compile_id(), this->_idx, 2710 is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?", 2711 kind_as_string(), box_id, obj_id, (bad_lock != nullptr ? bad_lock->_idx : -1)); 2712 log->stamp(); 2713 log->end_head(); 2714 JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms(); 2715 while (p != nullptr) { 2716 log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method())); 2717 p = p->caller(); 2718 } 2719 log->tail(tag); 2720 } 2721 } 2722 2723 bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) const { 2724 if (dest_t->is_known_instance() && t_oop->is_known_instance()) { 2725 return dest_t->instance_id() == t_oop->instance_id(); 2726 } 2727 2728 if (dest_t->isa_instptr() && !dest_t->is_instptr()->instance_klass()->equals(phase->C->env()->Object_klass())) { 2729 // clone 2730 if (t_oop->isa_aryptr()) { 2731 return false; 2732 } 2733 if (!t_oop->isa_instptr()) { 2734 return true; 2735 } 2736 if (dest_t->maybe_java_subtype_of(t_oop) || t_oop->maybe_java_subtype_of(dest_t)) { 2737 return true; 2738 } 2739 // unrelated 2740 return false; 2741 } 2742 2743 if (dest_t->isa_aryptr()) { 2744 // arraycopy or array clone 2745 if (t_oop->isa_instptr()) { 2746 return false; 2747 } 2748 if (!t_oop->isa_aryptr()) { 2749 return true; 2750 } 2751 2752 const Type* elem = dest_t->is_aryptr()->elem(); 2753 if (elem == Type::BOTTOM) { 2754 // An array but we don't know what elements are 2755 return true; 2756 } 2757 2758 dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr(); 2759 t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot); 2760 uint dest_alias = phase->C->get_alias_index(dest_t); 2761 uint t_oop_alias = phase->C->get_alias_index(t_oop); 2762 2763 return dest_alias == t_oop_alias; 2764 } 2765 2766 return true; 2767 } 2768 2769 PowDNode::PowDNode(Compile* C, Node* base, Node* exp) 2770 : CallLeafPureNode( 2771 OptoRuntime::Math_DD_D_Type(), 2772 StubRoutines::dpow() != nullptr ? StubRoutines::dpow() : CAST_FROM_FN_PTR(address, SharedRuntime::dpow), 2773 "pow") { 2774 add_flag(Flag_is_macro); 2775 C->add_macro_node(this); 2776 2777 init_req(TypeFunc::Parms + 0, base); 2778 init_req(TypeFunc::Parms + 1, C->top()); // double slot padding 2779 init_req(TypeFunc::Parms + 2, exp); 2780 init_req(TypeFunc::Parms + 3, C->top()); // double slot padding 2781 } 2782 2783 const Type* PowDNode::Value(PhaseGVN* phase) const { 2784 const Type* t_base = phase->type(base()); 2785 const Type* t_exp = phase->type(exp()); 2786 2787 if (t_base == Type::TOP || t_exp == Type::TOP) { 2788 return Type::TOP; 2789 } 2790 2791 const TypeD* base_con = t_base->isa_double_constant(); 2792 const TypeD* exp_con = t_exp->isa_double_constant(); 2793 const TypeD* result_t = nullptr; 2794 2795 // constant folding: both inputs are constants 2796 if (base_con != nullptr && exp_con != nullptr) { 2797 result_t = TypeD::make(SharedRuntime::dpow(base_con->getd(), exp_con->getd())); 2798 } 2799 2800 // Special cases when only the exponent is known: 2801 if (exp_con != nullptr) { 2802 double e = exp_con->getd(); 2803 2804 // If the second argument is positive or negative zero, then the result is 1.0. 2805 // i.e., pow(x, +/-0.0D) => 1.0 2806 if (e == 0.0) { // true for both -0.0 and +0.0 2807 result_t = TypeD::ONE; 2808 } 2809 2810 // If the second argument is NaN, then the result is NaN. 2811 // i.e., pow(x, NaN) => NaN 2812 if (g_isnan(e)) { 2813 result_t = TypeD::make(NAN); 2814 } 2815 } 2816 2817 if (result_t != nullptr) { 2818 // We can't simply return a TypeD here, it must be a tuple type to be compatible with call nodes. 2819 const Type** fields = TypeTuple::fields(2); 2820 fields[TypeFunc::Parms + 0] = result_t; 2821 fields[TypeFunc::Parms + 1] = Type::HALF; 2822 return TypeTuple::make(TypeFunc::Parms + 2, fields); 2823 } 2824 2825 return tf()->range_cc(); 2826 } 2827 2828 Node* PowDNode::Ideal(PhaseGVN* phase, bool can_reshape) { 2829 if (!can_reshape) { 2830 return nullptr; // wait for igvn 2831 } 2832 2833 PhaseIterGVN* igvn = phase->is_IterGVN(); 2834 Node* base = this->base(); 2835 Node* exp = this->exp(); 2836 2837 const Type* t_exp = phase->type(exp); 2838 const TypeD* exp_con = t_exp->isa_double_constant(); 2839 2840 // Special cases when only the exponent is known: 2841 if (exp_con != nullptr) { 2842 double e = exp_con->getd(); 2843 2844 // If the second argument is 1.0, then the result is the same as the first argument. 2845 // i.e., pow(x, 1.0) => x 2846 if (e == 1.0) { 2847 return make_tuple_of_input_state_and_result(igvn, base); 2848 } 2849 2850 // If the second argument is 2.0, then strength reduce to multiplications. 2851 // i.e., pow(x, 2.0) => x * x 2852 if (e == 2.0) { 2853 Node* mul = igvn->transform(new MulDNode(base, base)); 2854 return make_tuple_of_input_state_and_result(igvn, mul); 2855 } 2856 2857 // If the second argument is 0.5, the strength reduce to square roots. 2858 // i.e., pow(x, 0.5) => sqrt(x) iff x > 0 2859 if (e == 0.5 && Matcher::match_rule_supported(Op_SqrtD)) { 2860 Node* ctrl = in(TypeFunc::Control); 2861 Node* zero = igvn->zerocon(T_DOUBLE); 2862 2863 // According to the API specs, pow(-0.0, 0.5) = 0.0 and sqrt(-0.0) = -0.0. 2864 // So pow(-0.0, 0.5) shouldn't be replaced with sqrt(-0.0). 2865 // -0.0/+0.0 are both excluded since floating-point comparison doesn't distinguish -0.0 from +0.0. 2866 Node* cmp = igvn->register_new_node_with_optimizer(new CmpDNode(base, zero)); 2867 Node* test = igvn->register_new_node_with_optimizer(new BoolNode(cmp, BoolTest::le)); 2868 2869 IfNode* iff = new IfNode(ctrl, test, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN); 2870 igvn->register_new_node_with_optimizer(iff); 2871 Node* if_slow = igvn->register_new_node_with_optimizer(new IfTrueNode(iff)); // x <= 0 2872 Node* if_fast = igvn->register_new_node_with_optimizer(new IfFalseNode(iff)); // x > 0 2873 2874 // slow path: call pow(x, 0.5) 2875 Node* call = igvn->register_new_node_with_optimizer(inline_call_leaf_pure_node(if_slow)); 2876 Node* call_ctrl = igvn->register_new_node_with_optimizer(new ProjNode(call, TypeFunc::Control)); 2877 Node* call_result = igvn->register_new_node_with_optimizer(new ProjNode(call, TypeFunc::Parms + 0)); 2878 2879 // fast path: sqrt(x) 2880 Node* sqrt = igvn->register_new_node_with_optimizer(new SqrtDNode(igvn->C, if_fast, base)); 2881 2882 // merge paths 2883 RegionNode* region = new RegionNode(3); 2884 igvn->register_new_node_with_optimizer(region); 2885 region->init_req(1, call_ctrl); // slow path 2886 region->init_req(2, if_fast); // fast path 2887 2888 PhiNode* phi = new PhiNode(region, Type::DOUBLE); 2889 igvn->register_new_node_with_optimizer(phi); 2890 phi->init_req(1, call_result); // slow: pow() result 2891 phi->init_req(2, sqrt); // fast: sqrt() result 2892 2893 igvn->C->set_has_split_ifs(true); // Has chance for split-if optimization 2894 2895 return make_tuple_of_input_state_and_result(igvn, phi, region); 2896 } 2897 } 2898 2899 return CallLeafPureNode::Ideal(phase, can_reshape); 2900 } 2901 2902 // We can't simply have Ideal() returning a Con or MulNode since the users are still expecting a Call node, but we could 2903 // produce a tuple that follows the same pattern so users can still get control, io, memory, etc.. 2904 TupleNode* PowDNode::make_tuple_of_input_state_and_result(PhaseIterGVN* phase, Node* result, Node* control) { 2905 if (control == nullptr) { 2906 control = in(TypeFunc::Control); 2907 } 2908 2909 Compile* C = phase->C; 2910 C->remove_macro_node(this); 2911 TupleNode* tuple = TupleNode::make( 2912 tf()->range_cc(), 2913 control, 2914 in(TypeFunc::I_O), 2915 in(TypeFunc::Memory), 2916 in(TypeFunc::FramePtr), 2917 in(TypeFunc::ReturnAdr), 2918 result, 2919 C->top()); 2920 return tuple; 2921 } --- EOF ---