/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/bcEscapeAnalyzer.hpp"
#include "code/vmreg.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/escape.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
bool StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
//------------------------------calling_convention-----------------------------
void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return nullptr;
}

//------------------------------StartOSRNode-----------------------------------
// The method start node for an on stack replacement adapter

//------------------------------osr_domain-------------------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}

void ParmNode::dump_compact_spec(outputStream *st) const {
  if (_con < TypeFunc::Parms) {
    st->print("%s", names[_con]);
  } else {
    st->print("%d:", _con-TypeFunc::Parms);
    // unconditionally dump bottom_type
    bottom_type()->dump_on(st);
  }
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return t->ideal_reg();
  }
  }
  ShouldNotReachHere();
  return 0;
}
//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

const Type* ReturnNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}


#ifndef PRODUCT
void ReturnNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "returns"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("returns ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr  );
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

const Type* RethrowNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "exception"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("exception ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}
//=============================================================================
JVMState::JVMState(ciMethod* method, JVMState* caller) :
  _method(method) {
  assert(method != nullptr, "must be valid call site");
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_bci = -99);  // random garbage value
  debug_only(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == nullptr ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
JVMState::JVMState(int stack_size) :
  _method(nullptr) {
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_map = (SafePointNode*)-1);
  _caller = nullptr;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
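// An illustrative sketch (not normative) of the debug-info layout these
// offsets describe, for a depth-1 JVMState attached to a SafePointNode:
//
//   [0      .. TypeFunc::Parms)  fixed edges (control, i/o, memory, ...)
//   [locoff .. stkoff)           locals            (max_locals slots)
//   [stkoff .. monoff)           expression stack  (max_stack slots)
//   [monoff .. scloff)           monitors          (two edges each: box, obj)
//   [scloff .. endoff)           scalar-replaced object descriptions
//
// With inlining, each caller's ranges precede its callee's, so the youngest
// JVMState's ranges come last (see debug_start()/debug_end() below).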
//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)      return false;
    if (p->_method == nullptr)         return true;   // bci is irrelevant
    if (p->_bci    != q->_bci)         return false;
    if (p->_reexecute != q->_reexecute) return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                        return true;
    assert(p != nullptr && q != nullptr, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != nullptr; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}

#ifndef PRODUCT

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
  if (n == nullptr) { st->print(" null"); return; }
  if (n->is_SafePointScalarObject()) {
    // Scalar replacement.
    SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
    scobjs->append_if_missing(spobj);
    int sco_n = scobjs->find(spobj);
    assert(sco_n >= 0, "");
    st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
    return;
  }
  if (regalloc->node_regs_max_index() > 0 &&
      OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf,sizeof(buf));
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#" INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
      st->print(" %s%d]=#null",msg,i);
      break;
    case Type::AryPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop()));
      break;
    case Type::KlassPtr:
    case Type::AryKlassPtr:
    case Type::InstKlassPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->exact_klass()));
      break;
    case Type::MetadataPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata()));
      break;
    case Type::NarrowOop:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop()));
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr()));
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#" INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con()));
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}

//---------------------print_method_with_lineno--------------------------------
void JVMState::print_method_with_lineno(outputStream* st, bool show_name) const {
  if (show_name) _method->print_short_name(st);

  int lineno = _method->line_number_from_bci(_bci);
  if (lineno != -1) {
    st->print(" @ bci:%d (line %d)", _bci, lineno);
  } else {
    st->print(" @ bci:%d", _bci);
  }
}
//------------------------------format-----------------------------------------
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print(" #");
  if (_method) {
    print_method_with_lineno(st, true);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    GrowableArray<SafePointScalarObjectNode*> scobjs;
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for (i = 0; i < (uint)loc_size(); i++)
      format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
        format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if (regalloc->node_regs_max_index() > 0 &&
          OptoReg::is_valid(regalloc->get_reg_first(box))) {
        box = BoxLockNode::box_node(box);
        format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
      } else {
        OptoReg::Name box_reg = BoxLockNode::reg(box);
        st->print(" MON-BOX%d=%s+%d",
                  i,
                  OptoReg::regname(OptoReg::c_frame_pointer),
                  regalloc->reg2offset(box_reg));
      }
      const char* obj_msg = "MON-OBJ[";
      if (EliminateLocks) {
        if (BoxLockNode::box_node(box)->is_eliminated())
          obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
      }
      format_helper(regalloc, st, obj, obj_msg, i, &scobjs);
    }

    for (i = 0; i < (uint)scobjs.length(); i++) {
      // Scalar replaced objects.
      st->cr();
      st->print(" # ScObj" INT32_FORMAT " ", i);
      SafePointScalarObjectNode* spobj = scobjs.at(i);
      ciKlass* cik = spobj->bottom_type()->is_oopptr()->exact_klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      ciInstanceKlass *iklass = nullptr;
      if (cik->is_instance_klass()) {
        cik->print_name_on(st);
        iklass = cik->as_instance_klass();
      } else if (cik->is_type_array_klass()) {
        cik->as_array_klass()->base_element_type()->print_name_on(st);
        st->print("[%d]", spobj->n_fields());
      } else if (cik->is_obj_array_klass()) {
        ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
        if (cie->is_instance_klass()) {
          cie->print_name_on(st);
        } else if (cie->is_type_array_klass()) {
          cie->as_array_klass()->base_element_type()->print_name_on(st);
        } else {
          ShouldNotReachHere();
        }
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
      }
      st->print("={");
      uint nf = spobj->n_fields();
      if (nf > 0) {
        uint first_ind = spobj->first_index(mcall->jvms());
        Node* fld_node = mcall->in(first_ind);
        ciField* cifield;
        if (iklass != nullptr) {
          st->print(" [");
          cifield = iklass->nonstatic_field_at(0);
          cifield->print_name_on(st);
          format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
        } else {
          format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
        }
        for (uint j = 1; j < nf; j++) {
          fld_node = mcall->in(first_ind+j);
          if (iklass != nullptr) {
            st->print(", [");
            cifield = iklass->nonstatic_field_at(j);
            cifield->print_name_on(st);
            format_helper(regalloc, st, fld_node, ":", j, &scobjs);
          } else {
            format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
          }
        }
      }
      st->print(" }");
    }
  }
  st->cr();
  if (caller() != nullptr) caller()->format(regalloc, n, st);
}
void JVMState::dump_spec(outputStream *st) const {
  if (_method != nullptr) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == nullptr)  endcn = strchr(name, '(');
        if (endcn == nullptr)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    print_method_with_lineno(st, !printed);
    if(_reexecute == Reexecute_True)
      st->print(" reexecute");
  } else {
    st->print(" runtime stub");
  }
  if (caller() != nullptr)  caller()->dump_spec(st);
}


void JVMState::dump_on(outputStream* st) const {
  bool print_map = _map && !((uintptr_t)_map & 1) &&
                   ((caller() == nullptr) || (caller()->map() != _map));
  if (print_map) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != nullptr && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(Verbose ? 2 : 1);
  }
  if (caller() != nullptr) {
    caller()->dump_on(st);
  }
  st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
            depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
  if (_method == nullptr) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print(" bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->_reexecute = _reexecute;
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_scloff(_scloff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != nullptr; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

/**
 * Reset map for all callers
 */
void JVMState::set_map_deep(SafePointNode* map) {
  for (JVMState* p = this; p != nullptr; p = p->_caller) {
    p->set_map(map);
  }
}

// unlike set_map(), this is two-way setting.
void JVMState::bind_map(SafePointNode* map) {
  set_map(map);
  _map->set_jvms(this);
}
// Adapt offsets in in-array after adding or removing an edge.
// Prerequisite is that the JVMState is used by only one node.
void JVMState::adapt_position(int delta) {
  for (JVMState* jvms = this; jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_locoff(jvms->locoff() + delta);
    jvms->set_stkoff(jvms->stkoff() + delta);
    jvms->set_monoff(jvms->monoff() + delta);
    jvms->set_scloff(jvms->scloff() + delta);
    jvms->set_endoff(jvms->endoff() + delta);
  }
}

// Mirror the stack size calculation in the deopt code
// How much stack space would we need at this point in the program in
// case of deoptimization?
int JVMState::interpreter_frame_size() const {
  const JVMState* jvms = this;
  int size = 0;
  int callee_parameters = 0;
  int callee_locals = 0;
  int extra_args = method()->max_stack() - stk_size();

  while (jvms != nullptr) {
    int locks = jvms->nof_monitors();
    int temps = jvms->stk_size();
    bool is_top_frame = (jvms == this);
    ciMethod* method = jvms->method();

    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
                                                                 temps + callee_parameters,
                                                                 extra_args,
                                                                 locks,
                                                                 callee_parameters,
                                                                 callee_locals,
                                                                 is_top_frame);
    size += frame_size;

    callee_parameters = method->size_of_parameters();
    callee_locals = method->max_locals();
    extra_args = 0;
    jvms = jvms->caller();
  }
  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
}
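// Note (informal): the loop above walks from the youngest frame outward.
// Each iteration sizes one interpreter activation; the callee's parameter
// and local counts are carried into the caller's iteration because an
// interpreted callee's parameter area overlaps the caller's expression stack.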
//=============================================================================
bool CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("(");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
  st->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  if (tf() != nullptr)  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != nullptr)  jvms()->dump_spec(st);
}

void AllocateNode::dump_spec(outputStream* st) const {
  st->print(" ");
  if (tf() != nullptr) {
    tf()->dump_on(st);
  }
  if (_cnt != COUNT_UNKNOWN) {
    st->print(" C=%f", _cnt);
  }
  const Node* const klass_node = in(KlassNode);
  if (klass_node != nullptr) {
    const TypeKlassPtr* const klass_ptr = klass_node->bottom_type()->isa_klassptr();

    if (klass_ptr != nullptr && klass_ptr->klass_is_exact()) {
      st->print(" allocationKlass:");
      klass_ptr->exact_klass()->print_name_on(st);
    }
  }
  if (jvms() != nullptr) {
    jvms()->dump_spec(st);
  }
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (in(0) == nullptr || phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  return tf()->range();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  // Use the standard compiler calling convention
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
    OptoRegPair regs = Opcode() == Op_CallLeafVector
      ? match->vector_return_value(ideal_reg)  // Calls into assembly vector routine
      : is_CallRuntime()
        ? match->c_return_value(ideal_reg)  // Calls into C runtime
        : match->  return_value(ideal_reg); // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());

    if (Opcode() == Op_CallLeafVector) {
      // If the return is in vector, compute appropriate regmask taking into account the whole range
      if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
        if(OptoReg::is_valid(regs.second())) {
          for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
            rm.Insert(r);
          }
        }
      }
    }

    if( OptoReg::is_valid(regs.second()) )
      rm.Insert( regs.second() );
    return new MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return nullptr;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}
//
// Determine whether the call could modify the field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  assert((t_oop != nullptr), "sanity");
  if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
    const TypeTuple* args = _tf->domain();
    Node* dest = nullptr;
    // Stubs that can be called once an ArrayCopyNode is expanded have
    // different signatures. Look for the second pointer argument,
    // that is the destination of the copy.
    for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
      if (args->field_at(i)->isa_ptr()) {
        j++;
        if (j == 2) {
          dest = in(i);
          break;
        }
      }
    }
    guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
    if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
      return true;
    }
    return false;
  }
  if (t_oop->is_known_instance()) {
    // The instance_id is set only for scalar-replaceable allocations which
    // are not passed as arguments according to Escape Analysis.
    return false;
  }
  if (t_oop->is_ptr_to_boxed_value()) {
    ciKlass* boxing_klass = t_oop->is_instptr()->instance_klass();
    if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
      // Skip unrelated boxing methods.
      Node* proj = proj_out_or_null(TypeFunc::Parms);
      if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
        return false;
      }
    }
    if (is_CallJava() && as_CallJava()->method() != nullptr) {
      ciMethod* meth = as_CallJava()->method();
      if (meth->is_getter()) {
        return false;
      }
      // May modify (by reflection) if a boxing object is passed
      // as argument or returned.
      Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
      if (proj != nullptr) {
        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      const TypeTuple* d = tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      return false;
    }
  }
  return true;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(Node *n) {
  const TypeTuple * d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node *arg = in(i);
    if (arg == n) {
      return true;
    }
  }
  return false;
}
// Returns the unique CheckCastPP of a call
// or 'this' if there are several CheckCastPP or unexpected uses
// or null if there is none.
Node *CallNode::result_cast() {
  Node *cast = nullptr;

  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == nullptr)
    return nullptr;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != nullptr) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}


void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
  projs->fallthrough_proj      = nullptr;
  projs->fallthrough_catchproj = nullptr;
  projs->fallthrough_ioproj    = nullptr;
  projs->catchall_ioproj       = nullptr;
  projs->catchall_catchproj    = nullptr;
  projs->fallthrough_memproj   = nullptr;
  projs->catchall_memproj      = nullptr;
  projs->resproj               = nullptr;
  projs->exobj                 = nullptr;

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        projs->fallthrough_proj = pn;
        const Node* cn = pn->unique_ctrl_out_or_null();
        if (cn != nullptr && cn->is_Catch()) {
          ProjNode *cpn = nullptr;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              projs->fallthrough_catchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              projs->catchall_catchproj = cpn;
            }
          }
        }
        break;
      }
    case TypeFunc::I_O:
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == nullptr, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj = pn;
      break;
    default:
      assert(false, "unexpected projection from allocation node.");
    }
  }

  // The resproj may not exist because the result could be ignored
  // and the exception object may not exist if an exception handler
  // swallows the exception but all the others must exist and be found.
  assert(projs->fallthrough_proj != nullptr, "must be found");
  do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
  assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_memproj   != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_ioproj    != nullptr, "must be found");
  assert(!do_asserts || projs->catchall_catchproj    != nullptr, "must be found");
  if (separate_io_proj) {
    assert(!do_asserts || projs->catchall_memproj    != nullptr, "must be found");
    assert(!do_asserts || projs->catchall_ioproj     != nullptr, "must be found");
  }
}
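// For reference, a sketch of the projection shape walked above (the catch-all
// I/O and memory projections are the clones marked _is_io_use):
//
//   Call
//    +-- Proj #Control --> Catch --> CatchProj (fall-through)
//    |                          \--> CatchProj (catch-all)
//    +-- Proj #I_O     (+ optional _is_io_use clone for the handler path)
//    +-- Proj #Memory  (+ optional _is_io_use clone for the handler path)
//    +-- Proj #Parms   (result; may be absent if the result is unused)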
Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
  // Validate attached generator
  CallGenerator* cg = generator();
  if (cg != nullptr) {
    assert((is_CallStaticJava()  && cg->is_mh_late_inline()) ||
           (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
  }
#endif // ASSERT
  return SafePointNode::Ideal(phase, can_reshape);
}

bool CallNode::is_call_to_arraycopystub() const {
  if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
    return true;
  }
  return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
bool CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}

void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain()->cnt() : (uint)TypeFunc::Parms+1;
  uint new_dbg_start = tf()->domain()->cnt();
  int jvms_adj  = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == req(), "argument count mismatch");
  Compile* C = phase->C;

  // SafePointScalarObject node could be referenced several times in debug info.
  // Use Dict to record cloned nodes.
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < sfpt->req(); i++) {
    Node* old_in = sfpt->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      bool new_node;
      Node* new_in = old_sosn->clone(sosn_map, new_node);
      if (new_node) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = phase->transform(new_in); // Register new node.
      }
      old_in = new_in;
    }
    add_req(old_in);
  }
  // JVMS may be shared so clone it before we modify it
  set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
  for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_map(this);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}

#ifdef ASSERT
bool CallJavaNode::validate_symbolic_info() const {
  if (method() == nullptr) {
    return true; // call into runtime or uncommon trap
  }
  ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
  ciMethod* callee = method();
  if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
    assert(override_symbolic_info(), "should be set");
  }
  assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
  return true;
}
#endif

#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream* st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}

void CallJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
bool CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    assert(IncrementalInlineMH, "required");
    assert(cg->call_node() == this, "mismatch");
    assert(cg->is_mh_late_inline(), "not virtual");

    // Check whether this MH handle call becomes a candidate for inlining.
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(nullptr);
      }
    } else if (iid == vmIntrinsics::_linkToNative) {
      // never retry
    } else {
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(nullptr);
      }
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}

//----------------------------is_uncommon_trap----------------------------
// Returns true if this is an uncommon trap.
bool CallStaticJavaNode::is_uncommon_trap() const {
  return (_name != nullptr && !strcmp(_name, "uncommon_trap"));
}
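// Note: the request code returned below is the constant integer argument of
// the uncommon-trap call; it encodes the deoptimization reason and action
// (Deoptimization::format_trap_request, used in dump_spec below, decodes it
// for printing).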
//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != nullptr &&
        call->in(TypeFunc::Parms)->is_Con() &&
        call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}

#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != nullptr) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                Deoptimization::format_trap_request(buf, sizeof(buf),
                                                    trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}

void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else if (_name) {
    st->print("%s", _name);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
bool CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    assert(IncrementalInlineVirtual, "required");
    assert(cg->call_node() == this, "mismatch");
    assert(cg->is_virtual_late_inline(), "not virtual");

    // Recover symbolic info for method resolution.
    ciMethod* caller = jvms()->method();
    ciBytecodeStream iter(caller);
    iter.force_bci(jvms()->bci());

    bool         not_used1;
    ciSignature* not_used2;
    ciMethod*    orig_callee = iter.get_method(not_used1, &not_used2);  // callee in the bytecode
    ciKlass*     holder      = iter.get_declared_method_holder();
    if (orig_callee->is_method_handle_intrinsic()) {
      assert(_override_symbolic_info, "required");
      orig_callee = method();
      holder = method()->holder();
    }

    ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

    Node* receiver_node = in(TypeFunc::Parms);
    const TypeOopPtr* receiver_type = phase->type(receiver_node)->isa_oopptr();

    int not_used3;
    bool call_does_dispatch;
    ciMethod* callee = phase->C->optimize_virtual_call(caller, klass, holder, orig_callee, receiver_type, true /*is_virtual*/,
                                                       call_does_dispatch, not_used3);  // out-parameters
    if (!call_does_dispatch) {
      // Register for late inlining.
      cg->set_callee_method(callee);
      phase->C->prepend_late_inline(cg); // MH late inlining prepends to the list, so do the same
      set_generator(nullptr);
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}
#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
bool CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif
uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
bool CallLeafVectorNode::cmp( const Node &n ) const {
  CallLeafVectorNode &call = (CallLeafVectorNode&)n;
  return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
}

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
}

void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
#ifdef ASSERT
  assert(tf()->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
         "return vector size must match");
  const TypeTuple* d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node* arg = in(i);
    assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
           "vector argument size must match");
  }
#endif

  SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
}

//=============================================================================
//------------------------------calling_convention-----------------------------


//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================
void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc - 1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}

uint SafePointNode::size_of() const { return sizeof(*this); }
bool SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == nullptr || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != nullptr)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}


//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return nullptr;
  } else {
    Node* n = in(req());
    assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}


//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node* SafePointNode::Identity(PhaseGVN* phase) {

  // If you have back to back safepoints, remove one
  if (in(TypeFunc::Control)->is_SafePoint()) {
    Node* out_c = unique_ctrl_out_or_null();
    // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
    // outer loop's safepoint could confuse removal of the outer loop.
    if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
      return in(TypeFunc::Control);
    }
  }

  // Transforming long counted loops requires a safepoint node. Do not
  // eliminate a safepoint until loop opts are over.
  if (in(0)->is_Proj() && !phase->C->major_progress()) {
    Node *n0 = in(0)->in(0);
    // Check if it is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Don't remove a safepoint belonging to an OuterStripMinedLoopEndNode.
      // If the loop dies, they will be removed together.
      if (has_out_with(Op_OuterStripMinedLoopEnd)) {
        return this;
      }
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type* SafePointNode::Value(PhaseGVN* phase) const {
  if (phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  if (in(0) == this) {
    return Type::TOP; // Dead infinite loop
  }
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
  _replaced_nodes.dump(st);
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms ) return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}


void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}
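// Note (informal): each monitor contributes exactly two debug edges, the
// BoxLockNode (the monitor's stack slot) followed by the locked object,
// appended at the end of the monitor range [monoff, scloff). The helpers
// below keep scloff and endoff consistent as monitors are pushed and popped.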
void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  if (GenerateSynchronizationCode) {
    ins_req(nextmon,   lock->box_node());
    ins_req(nextmon+1, lock->obj_node());
  } else {
    Node* top = Compile::current()->top();
    ins_req(nextmon, top);
    ins_req(nextmon, top);
  }
  jvms()->set_scloff(nextmon + MonitorEdges);
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff)  del_req_ordered(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

Node* SafePointNode::peek_operand(uint off) const {
  assert(jvms()->sp() > 0, "must have an operand");
  assert(off < jvms()->sp(), "off is out-of-range");
  return stack(jvms(), jvms()->sp() - off - 1);
}

// Do we Match on this edge index or not?  Match only the Parms edge.
uint SafePointNode::match_edge(uint idx) const {
  return (TypeFunc::Parms == idx);
}

void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
  assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
  int nb = igvn->C->root()->find_prec_edge(this);
  if (nb != -1) {
    igvn->delete_precedence_of(igvn->C->root(), nb);
  }
}

//==============  SafePointScalarObjectNode  ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
  _first_index(first_index),
  _depth(depth),
  _n_fields(n_fields),
  _alloc(alloc)
{
#ifdef ASSERT
  if (!alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) {
    alloc->dump();
    assert(false, "unexpected call node");
  }
#endif
  init_class_id(Class_SafePointScalarObject);
}
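// Note (informal): a SafePointScalarObjectNode is pure debug info for a
// scalar-replaced allocation: it carries no field edges of its own, but
// _first_index and _n_fields locate the field values among the owning
// SafePoint's debug-info inputs so the deoptimizer can rematerialize the
// object.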
// Do not allow value-numbering for SafePointScalarObject node.
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
bool SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(Dict* sosn_map, bool& new_node) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != nullptr) {
    new_node = false;
    return (SafePointScalarObjectNode*)cached;
  }
  new_node = true;
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}


#ifndef PRODUCT
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
  st->print(" # fields@[%d..%d]", first_index(), first_index() + n_fields() - 1);
}
#endif

//==============  SafePointScalarMergeNode  ==============

SafePointScalarMergeNode::SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
  _merge_pointer_idx(merge_pointer_idx)
{
  init_class_id(Class_SafePointScalarMerge);
}

// Do not allow value-numbering for SafePointScalarMerge node.
uint SafePointScalarMergeNode::hash() const { return NO_HASH; }
bool SafePointScalarMergeNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarMergeNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarMergeNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarMergeNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarMergeNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarMergeNode*
SafePointScalarMergeNode::clone(Dict* sosn_map, bool& new_node) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != nullptr) {
    new_node = false;
    return (SafePointScalarMergeNode*)cached;
  }
  new_node = true;
  SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}

#ifndef PRODUCT
void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
  st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
}
#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  _is_allocation_MemBar_redundant = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  init_req( ValidLengthTest    , topnode);
  C->add_macro_node(this);
}

void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
  assert(initializer != nullptr && initializer->is_object_initializer(),
         "unexpected initializer method");
  BCEscapeAnalyzer* analyzer = initializer->get_bcea();
  if (analyzer == nullptr) {
    return;
  }

  // Allocation node is first parameter in its initializer
  if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
    _is_allocation_MemBar_redundant = true;
  }
}
Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
  Node* mark_node = nullptr;
  if (UseCompactObjectHeaders) {
    Node* klass_node = in(AllocateNode::KlassNode);
    Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
    mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  } else {
    // For now only enable fast locking for non-array types
    mark_node = phase->MakeConX(markWord::prototype().value());
  }
  return mark_node;
}
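// Note (informal): with UseCompactObjectHeaders the prototype mark word
// carries per-klass information and therefore must be loaded from the Klass
// at runtime, whereas otherwise a compile-time constant prototype suffices.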
Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
  Node* mark_node = nullptr;
  if (UseCompactObjectHeaders) {
    // With compact headers the prototype mark word lives in the Klass;
    // load it from there.
    Node* klass_node = in(AllocateNode::KlassNode);
    Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
    mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  } else {
    // For now only enable fast locking for non-array types
    mark_node = phase->MakeConX(markWord::prototype().value());
  }
  return mark_node;
}

// Retrieve the length from the AllocateArrayNode. Narrow the type with a
// CastII, if appropriate. If we are not allowed to create new nodes, and
// a CastII is appropriate, return null.
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
  Node *length = in(AllocateNode::ALength);
  assert(length != nullptr, "length is not null");

  const TypeInt* length_type = phase->find_int_type(length);
  const TypeAryPtr* ary_type = oop_type->isa_aryptr();

  if (ary_type != nullptr && length_type != nullptr) {
    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
    if (narrow_length_type != length_type) {
      // Assert one of:
      //   - the narrow_length is 0
      //   - both are constants and narrow_length <= length
      //   - the narrow_length range lies within the length range
      assert(narrow_length_type == TypeInt::ZERO ||
             (length_type->is_con() && narrow_length_type->is_con() &&
              (narrow_length_type->_hi <= length_type->_lo)) ||
             (narrow_length_type->_hi <= length_type->_hi &&
              narrow_length_type->_lo >= length_type->_lo),
             "narrow type must be narrower than length type");

      // Return null if new nodes are not allowed
      if (!allow_new_nodes) {
        return nullptr;
      }
      // Create a cast which is control dependent on the initialization to
      // propagate the fact that the array length must be positive.
      InitializeNode* init = initialization();
      if (init != nullptr) {
        length = new CastIINode(init->proj_out_or_null(TypeFunc::Control), length, narrow_length_type);
      }
    }
  }

  return length;
}
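// Illustrative sketch (not from the source): narrow_size_type() clips the
// length type to a valid array size.  For
//
//   int n = ...;             // known type: int (any value)
//   byte[] a = new byte[n];  // a negative n would have thrown, so past the
//                            // allocation the length is in [0, max_array_len]
//
// the CastII created above pins the narrowed type below the initialization,
// so later uses of a.length (e.g. range-check elimination) can rely on it.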
//=============================================================================
const TypeFunc* LockNode::_lock_type_Type = nullptr;

uint LockNode::size_of() const { return sizeof(*this); }

// Redundant lock elimination
//
// There are various patterns of locking where we release and
// immediately reacquire a lock in a piece of code where no operations
// occur in between that would be observable.  In those cases we can
// skip releasing and reacquiring the lock without violating any
// fairness requirements.  Doing this around a loop could cause a lock
// to be held for a very long time so we concentrate on non-looping
// control flow.  We also require that the operations are fully
// redundant, meaning that we don't introduce new lock operations on
// some paths just to be able to eliminate them on others, a la PRE;
// that would probably require some more extensive graph manipulation
// to guarantee that the memory edges were all handled correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method, consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
// locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
// the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow.
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
// and unlock in the then case, and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//   f();
//   s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting an unlock on the else path and
// eliminating the first unlock and the lock in the then side.
// Alternatively the unlock could be moved out of the then side so it
// was after the merge and the first unlock and second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally we might allow work between an unlock and a lock before
// giving up on eliminating the locks.  The current code disallows any
// conditional control flow between these operations.  A formulation
// similar to partial redundancy elimination computing the
// availability of unlocking and the anticipatability of locking at a
// program point would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//              <obj>
//                | \\------+
//                |  \       \
//                |  BoxLock   \
//                |  |   |      \
//                |  |    \      \
//                |  |    FastLock
//                |  |    /
//                |  |   /
//                |  |  |
//
//               Lock
//                |
//               Proj #0
//                |
//               MembarAcquire
//                |
//               Proj #0
//
//               MembarRelease
//                |
//               Proj #0
//                |
//               Unlock
//                |
//               Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through their control inputs for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate, they are marked as eliminatable, which causes the
// expansion of the Lock and Unlock macro nodes to turn the operations
// into NOPs.
//
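// A source-level view of coarsening (illustrative):
//
//   synchronized (obj) { a(); }
//   synchronized (obj) { b(); }
//
// behaves, after the unlock/lock pair in the middle is eliminated, like
//
//   synchronized (obj) { a(); b(); }
//
// which is legal because no observable operation occurs between the release
// and the immediate reacquire.
//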
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//   - copy regions.  (These may not have been optimized away yet.)
//   - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == nullptr)
    return nullptr;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == nullptr)
        break;  // hit a real (non-copy) region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break; // found an interesting control
    }
  }
  return ctrl;
}
//
// Given a control, see if it's the control projection of an Unlock which
// is operating on the same object as lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : nullptr;
  if (ctrl_proj != nullptr && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != nullptr && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
      Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
      if (lock_obj->eqv_uncast(unlock_obj) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = nullptr;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != nullptr, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top()) break;  // dead control path
    if (ctrl->is_Proj()) ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated
      if (ctrl->req() == 3 && ctrl->in(1) != nullptr && ctrl->in(2) != nullptr) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
    Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
    if (lock_obj->eqv_uncast(unlock_obj) &&
        BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}

// This code corresponds to case 3 above.
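// Illustrative shape matched here (a sketch; 'node' is the If projection
// the examined lock is control dependent on):
//
//               Unlock          // end of the preceding locked section
//                 |
//                If (p)
//               /      \
//          IfTrue      IfFalse
//          (node)         |
//            |          Lock    // the other branch immediately relocks
//           lock                // the Lock being optimized
//
// find_matching_unlock() looks above the If for the unlock, and the opposite
// projection is checked for a lock on the same object and box slot; if both
// match, both operations are appended to lock_ops for elimination.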
bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = nullptr;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != nullptr && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
        Node* lock1_obj = bs->step_over_gc_barrier(lock1->obj_node());
        if (lock_obj->eqv_uncast(lock1_obj) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // Check each control merging at this point for a matching unlock.
  // in(0) is the self edge, so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != nullptr) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;
}

// Check that all locks/unlocks associated with the object come from balanced regions.
bool AbstractLockNode::is_balanced() {
  Node* obj = obj_node();
  for (uint j = 0; j < obj->outcnt(); j++) {
    Node* n = obj->raw_out(j);
    if (n->is_AbstractLock() &&
        n->as_AbstractLock()->obj_node()->eqv_uncast(obj)) {
      BoxLockNode* n_box = n->as_AbstractLock()->box_node()->as_BoxLock();
      if (n_box->is_unbalanced()) {
        return false;
      }
    }
  }
  return true;
}

const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"};

const char * AbstractLockNode::kind_as_string() const {
  return _kind_names[_kind];
}
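// Kind progression (illustrative): every lock starts out Regular.  Escape
// analysis can promote it to NonEscObj, coarsening to Coarsened, and nested
// lock detection to Nested; a non-Regular kind marks the lock/unlock as
// eliminated, so macro expansion does not turn it into a real monitor
// operation.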
#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}

void AbstractLockNode::set_eliminated_lock_counter() {
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
}

void AbstractLockNode::dump_spec(outputStream* st) const {
  st->print("%s ", _kind_names[_kind]);
  CallNode::dump_spec(st);
}

void AbstractLockNode::dump_compact_spec(outputStream* st) const {
  st->print("%s", _kind_names[_kind]);
}
#endif
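// Illustrative sketch (not from the source) of the non-escaping case handled
// in LockNode::Ideal() below:
//
//   void m() {
//     Object l = new Object();    // never escapes m()
//     synchronized (l) { ... }    // no other thread can ever contend on l
//   }
//
// Escape analysis (ConnectionGraph::can_eliminate_lock) proves l is
// thread-local, so the Lock/Unlock pair is marked non_esc_obj and macro
// expansion drops the monitor operations entirely.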
//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return nullptr;

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are locking a non-escaped object, the lock/unlock is unnecessary
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace the coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc1");
#endif
      this->set_non_esc_obj();
      return result;
    }

    if (!phase->C->do_locks_coarsening()) {
      return result; // Compiling without locks coarsening
    }
    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != nullptr && !is_eliminated()) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // trivial form of case 1 or 2: a single lock directly control
        // dependent on a single unlock.
      } else if (ctrl->is_Region()) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found lock preceded by multiple unlocks along all paths
          // joining at this point, which is case 3 in the description above.
        }
      } else {
        // see if this lock comes from either half of an if whose
        // predecessor merges unlocks while the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          if (Verbose) {
            tty->print_cr("=== Locks coarsening ===");
            tty->print("Obj: ");
            obj_node()->dump();
          }
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock)
              locks++;
            else
              unlocks++;
            if (Verbose) {
              tty->print("Box %d: ", i);
              box_node()->dump();
              tty->print(" %d: ", i);
              lock->dump();
            }
          }
          tty->print_cr("=== Coarsened %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated by coarsening and update any counters
#ifdef ASSERT
          lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
#endif
          lock->set_coarsened();
        }
        // Record this coarsened group.
        phase->C->add_coarsened_locks(lock_ops);
      } else if (ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities, but the region this
        // lock is control dependent on hasn't been processed yet, so put
        // this lock back on the worklist to check again once any region
        // simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
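// Nested locking example (illustrative): after inlining
//
//   synchronized void outer() { inner(); }
//   synchronized void inner() { ... }
//
// a call to outer() holds the monitor of 'this' twice.  The inner Lock/Unlock
// pair uses a deeper monitor (higher stack slot) of the same object while an
// enclosing monitor of that object is already held, so it can be eliminated.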
bool LockNode::is_nested_lock_region() {
  return is_nested_lock_region(nullptr);
}

// 'c' is used for access to the compilation log; no logging if null
bool LockNode::is_nested_lock_region(Compile * c) {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_1");
#endif
    return false; // External lock or it is not a Box (Phi node).
  }

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = nullptr;
  Node* bad_lock = nullptr;
  if (!box->is_simple_lock_region(&unique_lock, obj, &bad_lock)) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2a", bad_lock);
#endif
    return false;
  }
  if (unique_lock != this) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2b", (unique_lock != nullptr ? unique_lock : bad_lock));
    if (PrintEliminateLocks && Verbose) {
      tty->print_cr("=============== unique_lock != this ============");
      tty->print(" this: ");
      this->dump();
      tty->print(" box: ");
      box->dump();
      tty->print(" obj: ");
      obj->dump();
      if (unique_lock != nullptr) {
        tty->print(" unique_lock: ");
        unique_lock->dump();
      }
      if (bad_lock != nullptr) {
        tty->print(" bad_lock: ");
        bad_lock->dump();
      }
      tty->print_cr("===============");
    }
#endif
    return false;
  }

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  obj = bs->step_over_gc_barrier(obj);
  // Look for an external lock for the same object.
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      obj_node = bs->step_over_gc_barrier(obj_node);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        box->set_nested();
        return true;
      }
    }
  }
#ifdef ASSERT
  this->log_lock_optimization(c, "eliminate_lock_INLR_3");
#endif
  return false;
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return nullptr;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace the coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}

void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
  if (C == nullptr) {
    return;
  }
  CompileLog* log = C->log();
  if (log != nullptr) {
    Node* box = box_node();
    Node* obj = obj_node();
    int box_id = box != nullptr ? box->_idx : -1;
    int obj_id = obj != nullptr ? obj->_idx : -1;

    log->begin_head("%s compile_id='%d' lock_id='%d' class='%s' kind='%s' box_id='%d' obj_id='%d' bad_id='%d'",
          tag, C->compile_id(), this->_idx,
          is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
          kind_as_string(), box_id, obj_id, (bad_lock != nullptr ? bad_lock->_idx : -1));
    log->stamp();
    log->end_head();
    JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
    while (p != nullptr) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail(tag);
  }
}
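// Decide whether an arraycopy/clone-style call writing through dest_t may
// modify memory of type t_oop.  Illustrative cases (a sketch, not exhaustive):
//
//   dest_t = instance Point (clone), t_oop = int[]   -> false (cannot alias)
//   dest_t = byte[], t_oop = some instance type      -> false (cannot alias)
//   dest_t = array with unknown element type         -> true  (be conservative)
//   otherwise compare instance ids or alias indexes.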
bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) {
  if (dest_t->is_known_instance() && t_oop->is_known_instance()) {
    return dest_t->instance_id() == t_oop->instance_id();
  }

  if (dest_t->isa_instptr() && !dest_t->is_instptr()->instance_klass()->equals(phase->C->env()->Object_klass())) {
    // clone
    if (t_oop->isa_aryptr()) {
      return false;
    }
    if (!t_oop->isa_instptr()) {
      return true;
    }
    if (dest_t->maybe_java_subtype_of(t_oop) || t_oop->maybe_java_subtype_of(dest_t)) {
      return true;
    }
    // unrelated
    return false;
  }

  if (dest_t->isa_aryptr()) {
    // arraycopy or array clone
    if (t_oop->isa_instptr()) {
      return false;
    }
    if (!t_oop->isa_aryptr()) {
      return true;
    }

    const Type* elem = dest_t->is_aryptr()->elem();
    if (elem == Type::BOTTOM) {
      // An array, but we don't know what the elements are
      return true;
    }

    dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
    uint dest_alias = phase->C->get_alias_index(dest_t);
    uint t_oop_alias = phase->C->get_alias_index(t_oop);

    return dest_alias == t_oop_alias;
  }

  return true;
}