/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "code/vmreg.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/escape.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
bool StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

//------------------------------calling_convention-----------------------------
void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return nullptr;
}

//------------------------------StartOSRNode----------------------------------
// The method start node for an on stack replacement adapter

//------------------------------osr_domain-----------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}

void ParmNode::dump_compact_spec(outputStream *st) const {
  if (_con < TypeFunc::Parms) {
    st->print("%s", names[_con]);
  } else {
    st->print("%d:", _con-TypeFunc::Parms);
    // unconditionally dump bottom_type
    bottom_type()->dump_on(st);
  }
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return t->ideal_reg();
  }
  }
  ShouldNotReachHere();
  return 0;
}
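
// Illustrative note (not in the original sources): for parameter edges the
// result above comes from Type::ideal_reg(), e.g. an int parameter yields
// Op_RegI, a long Op_RegL, and an object reference Op_RegP.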

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

const Type* ReturnNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void ReturnNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "returns"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("returns ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr  );
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

const Type* RethrowNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "exception"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("exception ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
JVMState::JVMState(ciMethod* method, JVMState* caller) :
  _method(method) {
  assert(method != nullptr, "must be valid call site");
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_bci = -99);  // random garbage value
  debug_only(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == nullptr ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}

JVMState::JVMState(int stack_size) :
  _method(nullptr) {
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_map = (SafePointNode*)-1);
  _caller = nullptr;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}

//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)    return false;
    if (p->_method == nullptr)       return true;   // bci is irrelevant
    if (p->_bci    != q->_bci)       return false;
    if (p->_reexecute != q->_reexecute)  return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                      return true;
    assert(p != nullptr && q != nullptr, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start()  const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != nullptr; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}
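
// Illustrative sketch (not in the original sources): for a single JVMState
// built by the constructors above, the debug-info edges of the owning
// SafePointNode are laid out as
//
//   [locoff, stkoff)  locals                  (method->max_locals() slots)
//   [stkoff, monoff)  expression stack        (method->max_stack() slots)
//   [monoff, scloff)  monitors                (box/obj pairs)
//   [scloff, endoff)  scalar-replaced objects
//
// For inlined calls, the outermost caller's frame comes first (lowest
// offsets; see the "youngest JVMState must be last" asserts above) and each
// inlined callee's frame follows at higher offsets.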

#ifndef PRODUCT

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
  if (n == nullptr) { st->print(" null"); return; }
  if (n->is_SafePointScalarObject()) {
    // Scalar replacement.
    SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
    scobjs->append_if_missing(spobj);
    int sco_n = scobjs->find(spobj);
    assert(sco_n >= 0, "");
    st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
    return;
  }
  if (regalloc->node_regs_max_index() > 0 &&
      OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf,sizeof(buf));
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#" INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
      st->print(" %s%d]=#null",msg,i);
      break;
    case Type::AryPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop()));
      break;
    case Type::KlassPtr:
    case Type::AryKlassPtr:
    case Type::InstKlassPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->exact_klass()));
      break;
    case Type::MetadataPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata()));
      break;
    case Type::NarrowOop:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop()));
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr()));
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#" INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con()));
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}

//---------------------print_method_with_lineno--------------------------------
void JVMState::print_method_with_lineno(outputStream* st, bool show_name) const {
  if (show_name) _method->print_short_name(st);

  int lineno = _method->line_number_from_bci(_bci);
  if (lineno != -1) {
    st->print(" @ bci:%d (line %d)", _bci, lineno);
  } else {
    st->print(" @ bci:%d", _bci);
  }
}

//------------------------------format-----------------------------------------
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print(" #");
  if (_method) {
    print_method_with_lineno(st, true);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    GrowableArray<SafePointScalarObjectNode*> scobjs;
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for (i = 0; i < (uint)loc_size(); i++)
      format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
        format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if (regalloc->node_regs_max_index() > 0 &&
          OptoReg::is_valid(regalloc->get_reg_first(box))) {
        box = BoxLockNode::box_node(box);
        format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
      } else {
        OptoReg::Name box_reg = BoxLockNode::reg(box);
        st->print(" MON-BOX%d=%s+%d",
                  i,
                  OptoReg::regname(OptoReg::c_frame_pointer),
                  regalloc->reg2offset(box_reg));
      }
      const char* obj_msg = "MON-OBJ[";
      if (EliminateLocks) {
        if (BoxLockNode::box_node(box)->is_eliminated())
          obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
      }
      format_helper(regalloc, st, obj, obj_msg, i, &scobjs);
    }

    for (i = 0; i < (uint)scobjs.length(); i++) {
      // Scalar replaced objects.
      st->cr();
      st->print(" # ScObj" INT32_FORMAT " ", i);
      SafePointScalarObjectNode* spobj = scobjs.at(i);
      ciKlass* cik = spobj->bottom_type()->is_oopptr()->exact_klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      ciInstanceKlass *iklass = nullptr;
      if (cik->is_instance_klass()) {
        cik->print_name_on(st);
        iklass = cik->as_instance_klass();
      } else if (cik->is_type_array_klass()) {
        cik->as_array_klass()->base_element_type()->print_name_on(st);
        st->print("[%d]", spobj->n_fields());
      } else if (cik->is_obj_array_klass()) {
        ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
        if (cie->is_instance_klass()) {
          cie->print_name_on(st);
        } else if (cie->is_type_array_klass()) {
          cie->as_array_klass()->base_element_type()->print_name_on(st);
        } else {
          ShouldNotReachHere();
        }
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
      }
      st->print("={");
      uint nf = spobj->n_fields();
      if (nf > 0) {
        uint first_ind = spobj->first_index(mcall->jvms());
        Node* fld_node = mcall->in(first_ind);
        ciField* cifield;
        if (iklass != nullptr) {
          st->print(" [");
          cifield = iklass->nonstatic_field_at(0);
          cifield->print_name_on(st);
          format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
        } else {
          format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
        }
        for (uint j = 1; j < nf; j++) {
          fld_node = mcall->in(first_ind+j);
          if (iklass != nullptr) {
            st->print(", [");
            cifield = iklass->nonstatic_field_at(j);
            cifield->print_name_on(st);
            format_helper(regalloc, st, fld_node, ":", j, &scobjs);
          } else {
            format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
          }
        }
      }
      st->print(" }");
    }
  }
  st->cr();
  if (caller() != nullptr) caller()->format(regalloc, n, st);
}
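
// Example (illustrative only; exact method, registers and values depend on
// the compilation): for a MachSafePoint with one live local and one stack
// slot, format() above prints something like
//
//    # java.lang.String::hashCode @ bci:8 (line 1503) L[0]=RAX STK[0]=#0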

void JVMState::dump_spec(outputStream *st) const {
  if (_method != nullptr) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == nullptr)  endcn = strchr(name, '(');
        if (endcn == nullptr)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    print_method_with_lineno(st, !printed);
    if(_reexecute == Reexecute_True)
      st->print(" reexecute");
  } else {
    st->print(" runtime stub");
  }
  if (caller() != nullptr)  caller()->dump_spec(st);
}


void JVMState::dump_on(outputStream* st) const {
  bool print_map = _map && !((uintptr_t)_map & 1) &&
                   ((caller() == nullptr) || (caller()->map() != _map));
  if (print_map) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != nullptr && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(Verbose ? 2 : 1);
  }
  if (caller() != nullptr) {
    caller()->dump_on(st);
  }
  st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
            depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
  if (_method == nullptr) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print(" bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->_reexecute = _reexecute;
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_scloff(_scloff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  n->_alloc_state = _alloc_state;
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != nullptr; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

/**
 * Reset map for all callers
 */
void JVMState::set_map_deep(SafePointNode* map) {
  for (JVMState* p = this; p != nullptr; p = p->_caller) {
    p->set_map(map);
  }
}

// Unlike set_map(), this is two-way setting.
void JVMState::bind_map(SafePointNode* map) {
  set_map(map);
  _map->set_jvms(this);
}

// Adapt offsets in in-array after adding or removing an edge.
// Prerequisite is that the JVMState is used by only one node.
void JVMState::adapt_position(int delta) {
  for (JVMState* jvms = this; jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_locoff(jvms->locoff() + delta);
    jvms->set_stkoff(jvms->stkoff() + delta);
    jvms->set_monoff(jvms->monoff() + delta);
    jvms->set_scloff(jvms->scloff() + delta);
    jvms->set_endoff(jvms->endoff() + delta);
  }
}

// Mirror the stack size calculation in the deopt code
// How much stack space would we need at this point in the program in
// case of deoptimization?
int JVMState::interpreter_frame_size() const {
  const JVMState* jvms = this;
  int size = 0;
  int callee_parameters = 0;
  int callee_locals = 0;
  int extra_args = method()->max_stack() - stk_size();

  while (jvms != nullptr) {
    int locks = jvms->nof_monitors();
    int temps = jvms->stk_size();
    bool is_top_frame = (jvms == this);
    ciMethod* method = jvms->method();

    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
                                                                 temps + callee_parameters,
                                                                 extra_args,
                                                                 locks,
                                                                 callee_parameters,
                                                                 callee_locals,
                                                                 is_top_frame);
    size += frame_size;

    callee_parameters = method->size_of_parameters();
    callee_locals = method->max_locals();
    extra_args = 0;
    jvms = jvms->caller();
  }
  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
}
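
// Illustrative note (not in the original sources): for an inlining chain
// A -> B where this JVMState belongs to B, the loop above first sizes B's
// interpreter frame (is_top_frame == true), then walks to A's JVMState and
// sizes A's frame, feeding B's parameter and local counts back in as
// callee_parameters/callee_locals so the overlap between caller and callee
// frames is accounted for.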

//=============================================================================
bool CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("(");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
  st->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  if (tf() != nullptr)  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != nullptr)  jvms()->dump_spec(st);
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
  return tf()->range();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  // Use the standard compiler calling convention
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
    OptoRegPair regs = Opcode() == Op_CallLeafVector
      ? match->vector_return_value(ideal_reg)  // Calls into assembly vector routine
      : is_CallRuntime()
        ? match->c_return_value(ideal_reg)     // Calls into C runtime
        : match->  return_value(ideal_reg);    // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());

    if (Opcode() == Op_CallLeafVector) {
      // If the return is in vector, compute appropriate regmask taking into account the whole range
      if(ideal_reg >= Op_VecS && ideal_reg <= Op_VecZ) {
        if(OptoReg::is_valid(regs.second())) {
          for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
            rm.Insert(r);
          }
        }
      }
    }

    if( OptoReg::is_valid(regs.second()) )
      rm.Insert( regs.second() );
    return new MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return nullptr;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//
// Determine whether the call could modify the field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  assert((t_oop != nullptr), "sanity");
  if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
    const TypeTuple* args = _tf->domain();
    Node* dest = nullptr;
    // Stubs that can be called once an ArrayCopyNode is expanded have
    // different signatures. Look for the second pointer argument,
    // that is the destination of the copy.
    for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
      if (args->field_at(i)->isa_ptr()) {
        j++;
        if (j == 2) {
          dest = in(i);
          break;
        }
      }
    }
    guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
    if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
      return true;
    }
    return false;
  }
  if (t_oop->is_known_instance()) {
    // The instance_id is set only for scalar-replaceable allocations which
    // are not passed as arguments according to Escape Analysis.
    return false;
  }
  if (t_oop->is_ptr_to_boxed_value()) {
    ciKlass* boxing_klass = t_oop->is_instptr()->instance_klass();
    if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
      // Skip unrelated boxing methods.
      Node* proj = proj_out_or_null(TypeFunc::Parms);
      if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
        return false;
      }
    }
    if (is_CallJava() && as_CallJava()->method() != nullptr) {
      ciMethod* meth = as_CallJava()->method();
      if (meth->is_getter()) {
        return false;
      }
      // May modify (by reflection) if a boxing object is passed
      // as argument or returned.
      Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
      if (proj != nullptr) {
        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      const TypeTuple* d = tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      return false;
    }
  }
  return true;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(Node *n) {
  const TypeTuple * d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node *arg = in(i);
    if (arg == n) {
      return true;
    }
  }
  return false;
}

// Returns the unique CheckCastPP of a call
// or 'this' if there are several CheckCastPP or unexpected uses
// or returns null if there is none.
Node *CallNode::result_cast() {
  Node *cast = nullptr;

  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == nullptr)
    return nullptr;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != nullptr) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}


void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
  projs->fallthrough_proj      = nullptr;
  projs->fallthrough_catchproj = nullptr;
  projs->fallthrough_ioproj    = nullptr;
  projs->catchall_ioproj       = nullptr;
  projs->catchall_catchproj    = nullptr;
  projs->fallthrough_memproj   = nullptr;
  projs->catchall_memproj      = nullptr;
  projs->resproj               = nullptr;
  projs->exobj                 = nullptr;

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        projs->fallthrough_proj = pn;
        const Node* cn = pn->unique_ctrl_out_or_null();
        if (cn != nullptr && cn->is_Catch()) {
          ProjNode *cpn = nullptr;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              projs->fallthrough_catchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              projs->catchall_catchproj = cpn;
            }
          }
        }
        break;
      }
    case TypeFunc::I_O:
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == nullptr, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj = pn;
      break;
    default:
      assert(false, "unexpected projection from allocation node.");
    }
  }

  // The resproj may not exist because the result could be ignored
  // and the exception object may not exist if an exception handler
  // swallows the exception but all the others must exist and be found.
  assert(projs->fallthrough_proj      != nullptr, "must be found");
  do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
  assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_memproj   != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_ioproj    != nullptr, "must be found");
  assert(!do_asserts || projs->catchall_catchproj    != nullptr, "must be found");
  if (separate_io_proj) {
    assert(!do_asserts || projs->catchall_memproj    != nullptr, "must be found");
    assert(!do_asserts || projs->catchall_ioproj     != nullptr, "must be found");
  }
}
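
// Usage sketch (illustrative, not in the original sources): callers that
// rewire a call typically collect its projections first, e.g.
//
//   CallProjections projs;
//   call->extract_projections(&projs, true /*separate_io_proj*/, false /*do_asserts*/);
//   // ... reroute projs.fallthrough_memproj / projs.fallthrough_ioproj ...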

Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
  // Validate attached generator
  CallGenerator* cg = generator();
  if (cg != nullptr) {
    assert((is_CallStaticJava()  && cg->is_mh_late_inline()) ||
           (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
  }
#endif // ASSERT
  return SafePointNode::Ideal(phase, can_reshape);
}

bool CallNode::is_call_to_arraycopystub() const {
  if (_name != nullptr && strstr(_name, "arraycopy") != 0) {
    return true;
  }
  return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
bool CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}

void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain()->cnt() : (uint)TypeFunc::Parms+1;
  uint new_dbg_start = tf()->domain()->cnt();
  int jvms_adj  = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == req(), "argument count mismatch");
  Compile* C = phase->C;

  // SafePointScalarObject node could be referenced several times in debug info.
  // Use Dict to record cloned nodes.
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < sfpt->req(); i++) {
    Node* old_in = sfpt->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      bool new_node;
      Node* new_in = old_sosn->clone(sosn_map, new_node);
      if (new_node) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = phase->transform(new_in); // Register new node.
      }
      old_in = new_in;
    }
    add_req(old_in);
  }

  // JVMS may be shared so clone it before we modify it
  set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
  for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_map(this);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}

#ifdef ASSERT
bool CallJavaNode::validate_symbolic_info() const {
  if (method() == nullptr) {
    return true; // call into runtime or uncommon trap
  }
  ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
  ciMethod* callee = method();
  if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
    assert(override_symbolic_info(), "should be set");
  }
  assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
  return true;
}
#endif

#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream* st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}

void CallJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
bool CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    assert(IncrementalInlineMH, "required");
    assert(cg->call_node() == this, "mismatch");
    assert(cg->is_mh_late_inline(), "not virtual");

    // Check whether this MH handle call becomes a candidate for inlining.
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(nullptr);
      }
    } else if (iid == vmIntrinsics::_linkToNative) {
      // never retry
    } else {
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(nullptr);
      }
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}

//----------------------------is_uncommon_trap----------------------------
// Returns true if this is an uncommon trap.
bool CallStaticJavaNode::is_uncommon_trap() const {
  return (_name != nullptr && !strcmp(_name, "uncommon_trap"));
}
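
// Note (illustrative, not in the original sources): an uncommon trap is a
// CallStaticJava to the runtime's "uncommon_trap" blob whose first argument
// (in(TypeFunc::Parms)) is an integer constant encoding the deoptimization
// reason and action; extract_uncommon_trap_request() below simply reads
// that constant back out of the IR.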

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != nullptr &&
        call->in(TypeFunc::Parms)->is_Con() &&
        call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}

#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != nullptr) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                Deoptimization::format_trap_request(buf, sizeof(buf),
                                                    trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}

void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else if (_name) {
    st->print("%s", _name);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
bool CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    assert(IncrementalInlineVirtual, "required");
    assert(cg->call_node() == this, "mismatch");
    assert(cg->is_virtual_late_inline(), "not virtual");

    // Recover symbolic info for method resolution.
    ciMethod* caller = jvms()->method();
    ciBytecodeStream iter(caller);
    iter.force_bci(jvms()->bci());

    bool             not_used1;
    ciSignature*     not_used2;
    ciMethod*        orig_callee = iter.get_method(not_used1, &not_used2);  // callee in the bytecode
    ciKlass*         holder      = iter.get_declared_method_holder();
    if (orig_callee->is_method_handle_intrinsic()) {
      assert(_override_symbolic_info, "required");
      orig_callee = method();
      holder = method()->holder();
    }

    ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

    Node* receiver_node = in(TypeFunc::Parms);
    const TypeOopPtr* receiver_type = phase->type(receiver_node)->isa_oopptr();

    int  not_used3;
    bool call_does_dispatch;
    ciMethod* callee = phase->C->optimize_virtual_call(caller, klass, holder, orig_callee, receiver_type, true /*is_virtual*/,
                                                       call_does_dispatch, not_used3);  // out-parameters
    if (!call_does_dispatch) {
      // Register for late inlining.
      cg->set_callee_method(callee);
      phase->C->prepend_late_inline(cg); // MH late inlining prepends to the list, so do the same
      set_generator(nullptr);
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}

#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
bool CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif
uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
bool CallLeafVectorNode::cmp( const Node &n ) const {
  CallLeafVectorNode &call = (CallLeafVectorNode&)n;
  return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
}

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
}

void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
#ifdef ASSERT
  assert(tf()->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
         "return vector size must match");
  const TypeTuple* d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node* arg = in(i);
    assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
           "vector argument size must match");
  }
#endif

  SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
}

//=============================================================================
//------------------------------calling_convention-----------------------------


//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================

void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}
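
// Illustrative example (not in the original sources): if local 3 holds a
// long, local 4 is its second half and is kept as top. Writing a fresh
// value into slot 4 alone would leave slot 3 describing a broken long, so
// set_local() above also resets slot 3 to top in that case.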

uint SafePointNode::size_of() const { return sizeof(*this); }
bool SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == nullptr || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != nullptr)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}


//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return nullptr;
  } else {
    Node* n = in(req());
    assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}


//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node* SafePointNode::Identity(PhaseGVN* phase) {

  // If you have back to back safepoints, remove one
  if (in(TypeFunc::Control)->is_SafePoint()) {
    Node* out_c = unique_ctrl_out_or_null();
    // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
    // outer loop's safepoint could confuse removal of the outer loop.
    if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
      return in(TypeFunc::Control);
    }
  }

  // Transforming long counted loops requires a safepoint node. Do not
  // eliminate a safepoint until loop opts are over.
  if (in(0)->is_Proj() && !phase->C->major_progress()) {
    Node *n0 = in(0)->in(0);
    // Check if it is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Don't remove a safepoint belonging to an OuterStripMinedLoopEndNode.
      // If the loop dies, they will be removed together.
      if (has_out_with(Op_OuterStripMinedLoopEnd)) {
        return this;
      }
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type* SafePointNode::Value(PhaseGVN* phase) const {
  if (phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  if (in(0) == this) {
    return Type::TOP; // Dead infinite loop
  }
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
  _replaced_nodes.dump(st);
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms ) return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}


void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}
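
// Illustrative note (not in the original sources): monitors occupy the
// [monoff, scloff) range of the debug info as consecutive (box, obj) pairs,
// i.e. MonitorEdges == 2 edges per monitor:
//
//   monoff: box0, obj0, box1, obj1, ... :scloff
//
// push_monitor()/pop_monitor() below grow and shrink this range.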

void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  if (GenerateSynchronizationCode) {
    ins_req(nextmon,   lock->box_node());
    ins_req(nextmon+1, lock->obj_node());
  } else {
    Node* top = Compile::current()->top();
    ins_req(nextmon, top);
    ins_req(nextmon, top);
  }
  jvms()->set_scloff(nextmon + MonitorEdges);
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff)  del_req_ordered(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

Node* SafePointNode::peek_operand(uint off) const {
  assert(jvms()->sp() > 0, "must have an operand");
  assert(off < jvms()->sp(), "off is out-of-range");
  return stack(jvms(), jvms()->sp() - off - 1);
}

// Do we Match on this edge index or not? Match no edges
uint SafePointNode::match_edge(uint idx) const {
  return (TypeFunc::Parms == idx);
}

void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
  assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
  int nb = igvn->C->root()->find_prec_edge(this);
  if (nb != -1) {
    igvn->delete_precedence_of(igvn->C->root(), nb);
  }
}

//==============  SafePointScalarObjectNode  ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
  _first_index(first_index),
  _n_fields(n_fields),
  _alloc(alloc)
{
#ifdef ASSERT
  if (!alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) {
    alloc->dump();
    assert(false, "unexpected call node");
  }
#endif
  init_class_id(Class_SafePointScalarObject);
}

// Do not allow value-numbering for SafePointScalarObject node.
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
bool SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(Dict* sosn_map, bool& new_node) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != nullptr) {
    new_node = false;
    return (SafePointScalarObjectNode*)cached;
  }
  new_node = true;
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}


#ifndef PRODUCT
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
  st->print(" # fields@[%d..%d]", first_index(), first_index() + n_fields() - 1);
}
#endif

//==============  SafePointScalarMergeNode  ==============

SafePointScalarMergeNode::SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
  _merge_pointer_idx(merge_pointer_idx)
{
  init_class_id(Class_SafePointScalarMerge);
}

// Do not allow value-numbering for SafePointScalarMerge node.
uint SafePointScalarMergeNode::hash() const { return NO_HASH; }
bool SafePointScalarMergeNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarMergeNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarMergeNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarMergeNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarMergeNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarMergeNode*
SafePointScalarMergeNode::clone(Dict* sosn_map, bool& new_node) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != nullptr) {
    new_node = false;
    return (SafePointScalarMergeNode*)cached;
  }
  new_node = true;
  SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}

#ifndef PRODUCT
void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
  st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
}
#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, nullptr, TypeRawPtr::BOTTOM), _materialized(0)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  _is_allocation_MemBar_redundant = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  init_req( ValidLengthTest    , topnode);
  C->add_macro_node(this);
}

void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
  assert(initializer != nullptr &&
         initializer->is_initializer() &&
         !initializer->is_static(),
         "unexpected initializer method");
  BCEscapeAnalyzer* analyzer = initializer->get_bcea();
  if (analyzer == nullptr) {
    return;
  }

  // Allocation node is first parameter in its initializer
  if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
    _is_allocation_MemBar_redundant = true;
  }
}
Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
  Node* mark_node = nullptr;
  // For now only enable fast locking for non-array types
  mark_node = phase->MakeConX(markWord::prototype().value());
  return mark_node;
}
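
// Summary sketch (illustrative, not in the original sources): beyond the
// standard CallNode inputs, an AllocateNode carries AllocSize, KlassNode,
// InitialTest, ALength and ValidLengthTest edges (see the constructor
// above); AllocateArrayNode uses the ALength/ValidLengthTest edges for the
// array length and its validity check.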
//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, nullptr, TypeRawPtr::BOTTOM), _materialized(0)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  _is_allocation_MemBar_redundant = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  init_req( ValidLengthTest    , topnode);
  C->add_macro_node(this);
}

void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
  assert(initializer != nullptr &&
         initializer->is_initializer() &&
         !initializer->is_static(),
         "unexpected initializer method");
  BCEscapeAnalyzer* analyzer = initializer->get_bcea();
  if (analyzer == nullptr) {
    return;
  }

  // Allocation node is first parameter in its initializer
  if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
    _is_allocation_MemBar_redundant = true;
  }
}

Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
  Node* mark_node = nullptr;
  // For now only enable fast locking for non-array types
  mark_node = phase->MakeConX(markWord::prototype().value());
  return mark_node;
}

// This is a precise notnull oop of the klass.
// (Actually, it need not be precise if this is a reflective allocation.)
// It's what we cast the result to.
const TypeOopPtr* AllocateNode::oop_type(const PhaseValues& phase) const {
  Node* klass_node = in(KlassNode);
  const TypeKlassPtr* tklass = phase.type(klass_node)->isa_klassptr();
  if (!tklass) tklass = TypeInstKlassPtr::OBJECT;
  return tklass->as_instance_type();
}

// Retrieve the length from the AllocateArrayNode. Narrow the type with a
// CastII, if appropriate. If we are not allowed to create new nodes, and
// a CastII is appropriate, return null.
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
  Node *length = in(AllocateNode::ALength);
  assert(length != nullptr, "length is not null");

  const TypeInt* length_type = phase->find_int_type(length);
  const TypeAryPtr* ary_type = oop_type->isa_aryptr();

  if (ary_type != nullptr && length_type != nullptr) {
    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
    if (narrow_length_type != length_type) {
      // Assert one of:
      //   - the narrow_length is 0
      //   - the narrow_length is not wider than length
      assert(narrow_length_type == TypeInt::ZERO ||
             (length_type->is_con() && narrow_length_type->is_con() &&
              (narrow_length_type->_hi <= length_type->_lo)) ||
             (narrow_length_type->_hi <= length_type->_hi &&
              narrow_length_type->_lo >= length_type->_lo),
             "narrow type must be narrower than length type");

      // Return null if new nodes are not allowed
      if (!allow_new_nodes) {
        return nullptr;
      }
      // Create a cast which is control dependent on the initialization to
      // propagate the fact that the array length must be positive.
      InitializeNode* init = initialization();
      if (init != nullptr) {
        length = new CastIINode(init->proj_out_or_null(TypeFunc::Control), length, narrow_length_type);
      }
    }
  }

  return length;
}
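
// For illustration (simplified Java-level view): the narrowing performed
// above lets downstream code see that a length which survived allocation
// must lie in the feasible range of array sizes, via a CastII pinned to
// the initialization control.
//
//   int[] a = new int[n];  // before: n has the plain int range
//   ...                    // after: a.length is known non-negative and
//   int m = a.length;      // bounded by the maximum feasible array size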
//=============================================================================
uint LockNode::size_of() const { return sizeof(*this); }

// Redundant lock elimination
//
// There are various patterns of locking where we release and
// immediately reacquire a lock in a piece of code where no operations
// occur in between that would be observable.  In those cases we can
// skip releasing and reacquiring the lock without violating any
// fairness requirements.  Doing this around a loop could cause a lock
// to be held for a very long time, so we concentrate on non-looping
// control flow.  We also require that the operations are fully
// redundant, meaning that we don't introduce new lock operations on
// some paths just to be able to eliminate them on others, a la PRE;
// that would probably require more extensive graph manipulation to
// guarantee that the memory edges were all handled correctly.
//
// Assuming p is a simple predicate which can't trap in any way, and s
// is a synchronized method, consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
//    locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
//    the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow.
//
// Additionally, we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
//    and unlock in the then case, and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//   f();
//   s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting an unlock on the else path and
// eliminating the first unlock and the lock in the then side.
// Alternatively, the unlock could be moved out of the then side so that
// it comes after the merge, and the first unlock and the second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally, we might allow work between an unlock and a lock before
// giving up on eliminating the locks.  The current code disallows any
// conditional control flow between these operations.  A formulation
// similar to partial redundancy elimination, computing the
// availability of unlocking and the anticipatability of locking at a
// program point, would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking, but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//       <obj>
//       |  \\------+
//       |   \       \
//       |  BoxLock   \
//       |  |  |       \
//       |  |   \       \
//       |  |  FastLock
//       |  |   /
//       |  |  /
//       |  | |
//
//       Lock
//       |
//       Proj #0
//       |
//       MembarAcquire
//       |
//       Proj #0
//
//       MembarRelease
//       |
//       Proj #0
//       |
//       Unlock
//       |
//       Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through their control inputs for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate, they are marked as eliminatable, which causes the macro
// expansion of the Lock and Unlock nodes to turn the operations into NOPs.
//
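// For illustration (simplified Java-level view, with placeholder methods
// a() and b()): after inlining, back-to-back synchronized regions on the
// same object produce exactly the unlock-then-lock pattern described above;
// marking the inner pair eliminated coarsens them into one region.
//
//   synchronized (obj) { a(); }  // unlock of obj ...
//   synchronized (obj) { b(); }  // ... immediately followed by lock of obj
//
// behaves, after coarsening, like:
//
//   synchronized (obj) { a(); b(); }
//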
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//  - copy regions (these may not have been optimized away yet)
//  - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == nullptr)
    return nullptr;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == nullptr)
        break;  // hit a region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break;  // found an interesting control
    }
  }
  return ctrl;
}

//
// Given a control, see if it's the control projection of an Unlock which
// is operating on the same object as lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : nullptr;
  if (ctrl_proj != nullptr && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != nullptr && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
      Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
      if (lock_obj->eqv_uncast(unlock_obj) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = nullptr;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != nullptr, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top()) break;  // dead control path
    if (ctrl->is_Proj()) ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated.
      if (ctrl->req() == 3 && ctrl->in(1) != nullptr && ctrl->in(2) != nullptr) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
    Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
    if (lock_obj->eqv_uncast(unlock_obj) &&
        BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}
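
// The simple diamond accepted by the Region case above, with each input
// reached through next_control(), looks roughly like:
//
//              If
//             /  \
//        IfTrue  IfFalse
//           |       |
//          ...     ...
//            \     /
//            Region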

// This code corresponds to case 3 above.

bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = nullptr;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != nullptr && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
        Node* lock1_obj = bs->step_over_gc_barrier(lock1->obj_node());
        if (lock_obj->eqv_uncast(lock1_obj) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // Check each control merging at this point for a matching unlock.
  // in(0) should be the self edge, so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != nullptr) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;
}

const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"};

const char * AbstractLockNode::kind_as_string() const {
  return _kind_names[_kind];
}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}

void AbstractLockNode::set_eliminated_lock_counter() {
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
}

void AbstractLockNode::dump_spec(outputStream* st) const {
  st->print("%s ", _kind_names[_kind]);
  CallNode::dump_spec(st);
}

void AbstractLockNode::dump_compact_spec(outputStream* st) const {
  st->print("%s", _kind_names[_kind]);
}
#endif

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or nullptr)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr) return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top()) return nullptr;

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are locking a non-escaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace the coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc1");
#endif
      this->set_non_esc_obj();
      return result;
    }

    if (!phase->C->do_locks_coarsening()) {
      return result; // Compiling without locks coarsening
    }
    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != nullptr && !is_eliminated()) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of a single unlock directly control dependent on a
        // single lock, which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region()) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found lock preceded by multiple unlocks along all paths
          // joining at this point, which is case 3 in the description above.
        }
      } else {
        // see if this lock comes from either half of an if whose
        // predecessor merges unlocks, while the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          if (Verbose) {
            tty->print_cr("=== Locks coarsening ===");
          }
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock)
              locks++;
            else
              unlocks++;
            if (Verbose) {
              tty->print(" %d: ", i);
              lock->dump();
            }
          }
          tty->print_cr("=== Coarsened %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated by coarsening and update any counters
#ifdef ASSERT
          lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
#endif
          lock->set_coarsened();
        }
        // Record this coarsened group.
        phase->C->add_coarsened_locks(lock_ops);
      } else if (ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities, but the region this
        // lock is control dependent on hasn't been processed yet, so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
bool LockNode::is_nested_lock_region() {
  return is_nested_lock_region(nullptr);
}

// c is used for access to the compilation log; no logging if null.
bool LockNode::is_nested_lock_region(Compile * c) {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_1");
#endif
    return false; // External lock, or it is not a Box (e.g. a Phi node).
  }

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = nullptr;
  Node* bad_lock = nullptr;
  if (!box->is_simple_lock_region(&unique_lock, obj, &bad_lock)) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2a", bad_lock);
#endif
    return false;
  }
  if (unique_lock != this) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2b", (unique_lock != nullptr ? unique_lock : bad_lock));
    if (PrintEliminateLocks && Verbose) {
      tty->print_cr("=============== unique_lock != this ============");
      tty->print(" this: ");
      this->dump();
      tty->print(" box: ");
      box->dump();
      tty->print(" obj: ");
      obj->dump();
      if (unique_lock != nullptr) {
        tty->print(" unique_lock: ");
        unique_lock->dump();
      }
      if (bad_lock != nullptr) {
        tty->print(" bad_lock: ");
        bad_lock->dump();
      }
      tty->print_cr("===============");
    }
#endif
    return false;
  }

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  obj = bs->step_over_gc_barrier(obj);
  // Look for an external lock for the same object.
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      obj_node = bs->step_over_gc_barrier(obj_node);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        return true;
      }
    }
  }
#ifdef ASSERT
  this->log_lock_optimization(c, "eliminate_lock_INLR_3");
#endif
  return false;
}
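
// For illustration (simplified Java-level view): the nested-lock shape
// recognized by is_nested_lock_region() above is a monitor already held
// by an enclosing scope on the same object, at a lower BoxLock stack slot:
//
//   synchronized (obj) {    // outer lock: lower stack slot in JVM state
//     ...
//     synchronized (obj) {  // nested lock: redundant, may be eliminated
//       ...
//     }
//   }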
//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or nullptr)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr) return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top()) return nullptr;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace the coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}

void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
  if (C == nullptr) {
    return;
  }
  CompileLog* log = C->log();
  if (log != nullptr) {
    Node* box = box_node();
    Node* obj = obj_node();
    int box_id = box != nullptr ? box->_idx : -1;
    int obj_id = obj != nullptr ? obj->_idx : -1;

    log->begin_head("%s compile_id='%d' lock_id='%d' class='%s' kind='%s' box_id='%d' obj_id='%d' bad_id='%d'",
                    tag, C->compile_id(), this->_idx,
                    is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
                    kind_as_string(), box_id, obj_id, (bad_lock != nullptr ? bad_lock->_idx : -1));
    log->stamp();
    log->end_head();
    JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
    while (p != nullptr) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail(tag);
  }
}
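
// For illustration (simplified Java-level view): may_modify_arraycopy_helper()
// below answers whether a clone/arraycopy with destination type dest_t can
// modify the memory slice t_oop.  Roughly:
//
//   A a2 = (A) a1.clone();                 // instance clone: touches only
//                                          // A's fields, never array slices
//   System.arraycopy(src, 0, dst, 0, n);   // array copy: touches only the
//                                          // element slice of dst's type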

bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) {
  if (dest_t->is_known_instance() && t_oop->is_known_instance()) {
    return dest_t->instance_id() == t_oop->instance_id();
  }

  if (dest_t->isa_instptr() && !dest_t->is_instptr()->instance_klass()->equals(phase->C->env()->Object_klass())) {
    // clone
    if (t_oop->isa_aryptr()) {
      return false;
    }
    if (!t_oop->isa_instptr()) {
      return true;
    }
    if (dest_t->maybe_java_subtype_of(t_oop) || t_oop->maybe_java_subtype_of(dest_t)) {
      return true;
    }
    // unrelated
    return false;
  }

  if (dest_t->isa_aryptr()) {
    // arraycopy or array clone
    if (t_oop->isa_instptr()) {
      return false;
    }
    if (!t_oop->isa_aryptr()) {
      return true;
    }

    const Type* elem = dest_t->is_aryptr()->elem();
    if (elem == Type::BOTTOM) {
      // An array, but we don't know what the elements are
      return true;
    }

    dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
    uint dest_alias  = phase->C->get_alias_index(dest_t);
    uint t_oop_alias = phase->C->get_alias_index(t_oop);

    return dest_alias == t_oop_alias;
  }

  return true;
}