/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "compiler/compileLog.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/escape.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "code/vmreg.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
bool StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
//------------------------------calling_convention-----------------------------
void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return nullptr;
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}

void ParmNode::dump_compact_spec(outputStream *st) const {
  if (_con < TypeFunc::Parms) {
    st->print("%s", names[_con]);
  } else {
    st->print("%d:", _con-TypeFunc::Parms);
    // unconditionally dump bottom_type
    bottom_type()->dump_on(st);
  }
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return t->ideal_reg();
  }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
const Type* ReturnNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}


#ifndef PRODUCT
void ReturnNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "returns"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("returns ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr);
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

const Type* RethrowNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "exception"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("exception ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
JVMState::JVMState(ciMethod* method, JVMState* caller) :
  _method(method) {
  assert(method != nullptr, "must be valid call site");
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_bci = -99);  // random garbage value
  debug_only(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == nullptr ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
JVMState::JVMState(int stack_size) :
  _method(nullptr) {
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_map = (SafePointNode*)-1);
  _caller = nullptr;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}

//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)    return false;
    if (p->_method == nullptr)       return true;   // bci is irrelevant
    if (p->_bci    != q->_bci)       return false;
    if (p->_reexecute != q->_reexecute)  return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                      return true;
    assert(p != nullptr && q != nullptr, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start()  const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != nullptr; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}

#ifndef PRODUCT

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
  if (n == nullptr) { st->print(" null"); return; }
  if (n->is_SafePointScalarObject()) {
    // Scalar replacement.
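    // The object is not materialized here; print a #ScObjN tag that refers
    // to the scalar-replaced object table dumped below the debug info.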
    SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
    scobjs->append_if_missing(spobj);
    int sco_n = scobjs->find(spobj);
    assert(sco_n >= 0, "");
    st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
    return;
  }
  if (regalloc->node_regs_max_index() > 0 &&
      OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf,sizeof(buf));
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#" INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
      st->print(" %s%d]=#null",msg,i);
      break;
    case Type::AryPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop()));
      break;
    case Type::KlassPtr:
    case Type::AryKlassPtr:
    case Type::InstKlassPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->exact_klass()));
      break;
    case Type::MetadataPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata()));
      break;
    case Type::NarrowOop:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop()));
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr()));
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#" INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con()));
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}

//---------------------print_method_with_lineno--------------------------------
void JVMState::print_method_with_lineno(outputStream* st, bool show_name) const {
  if (show_name) _method->print_short_name(st);

  int lineno = _method->line_number_from_bci(_bci);
  if (lineno != -1) {
    st->print(" @ bci:%d (line %d)", _bci, lineno);
  } else {
    st->print(" @ bci:%d", _bci);
  }
}

//------------------------------format-----------------------------------------
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print(" #");
  if (_method) {
    print_method_with_lineno(st, true);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    GrowableArray<SafePointScalarObjectNode*> scobjs;
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for (i = 0; i < (uint)loc_size(); i++)
      format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
        format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if (regalloc->node_regs_max_index() > 0 &&
          OptoReg::is_valid(regalloc->get_reg_first(box))) {
        box = BoxLockNode::box_node(box);
        format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
box, "MON-BOX[", i, &scobjs); 448 } else { 449 OptoReg::Name box_reg = BoxLockNode::reg(box); 450 st->print(" MON-BOX%d=%s+%d", 451 i, 452 OptoReg::regname(OptoReg::c_frame_pointer), 453 regalloc->reg2offset(box_reg)); 454 } 455 const char* obj_msg = "MON-OBJ["; 456 if (EliminateLocks) { 457 if (BoxLockNode::box_node(box)->is_eliminated()) 458 obj_msg = "MON-OBJ(LOCK ELIMINATED)["; 459 } 460 format_helper(regalloc, st, obj, obj_msg, i, &scobjs); 461 } 462 463 for (i = 0; i < (uint)scobjs.length(); i++) { 464 // Scalar replaced objects. 465 st->cr(); 466 st->print(" # ScObj" INT32_FORMAT " ", i); 467 SafePointScalarObjectNode* spobj = scobjs.at(i); 468 ciKlass* cik = spobj->bottom_type()->is_oopptr()->exact_klass(); 469 assert(cik->is_instance_klass() || 470 cik->is_array_klass(), "Not supported allocation."); 471 ciInstanceKlass *iklass = nullptr; 472 if (cik->is_instance_klass()) { 473 cik->print_name_on(st); 474 iklass = cik->as_instance_klass(); 475 } else if (cik->is_type_array_klass()) { 476 cik->as_array_klass()->base_element_type()->print_name_on(st); 477 st->print("[%d]", spobj->n_fields()); 478 } else if (cik->is_obj_array_klass()) { 479 ciKlass* cie = cik->as_obj_array_klass()->base_element_klass(); 480 if (cie->is_instance_klass()) { 481 cie->print_name_on(st); 482 } else if (cie->is_type_array_klass()) { 483 cie->as_array_klass()->base_element_type()->print_name_on(st); 484 } else { 485 ShouldNotReachHere(); 486 } 487 st->print("[%d]", spobj->n_fields()); 488 int ndim = cik->as_array_klass()->dimension() - 1; 489 while (ndim-- > 0) { 490 st->print("[]"); 491 } 492 } else if (cik->is_flat_array_klass()) { 493 ciKlass* cie = cik->as_flat_array_klass()->base_element_klass(); 494 cie->print_name_on(st); 495 st->print("[%d]", spobj->n_fields()); 496 int ndim = cik->as_array_klass()->dimension() - 1; 497 while (ndim-- > 0) { 498 st->print("[]"); 499 } 500 } 501 st->print("={"); 502 uint nf = spobj->n_fields(); 503 if (nf > 0) { 504 uint first_ind = spobj->first_index(mcall->jvms()); 505 if (iklass != nullptr && iklass->is_inlinetype()) { 506 Node* init_node = mcall->in(first_ind++); 507 if (!init_node->is_top()) { 508 st->print(" [is_init"); 509 format_helper(regalloc, st, init_node, ":", -1, nullptr); 510 } 511 } 512 Node* fld_node = mcall->in(first_ind); 513 ciField* cifield; 514 if (iklass != nullptr) { 515 st->print(" ["); 516 if (0 < (uint)iklass->nof_nonstatic_fields()) { 517 cifield = iklass->nonstatic_field_at(0); 518 cifield->print_name_on(st); 519 } else { 520 // Must be a null marker 521 st->print("null marker"); 522 } 523 format_helper(regalloc, st, fld_node, ":", 0, &scobjs); 524 } else { 525 format_helper(regalloc, st, fld_node, "[", 0, &scobjs); 526 } 527 for (uint j = 1; j < nf; j++) { 528 fld_node = mcall->in(first_ind+j); 529 if (iklass != nullptr) { 530 st->print(", ["); 531 if (j < (uint)iklass->nof_nonstatic_fields()) { 532 cifield = iklass->nonstatic_field_at(j); 533 cifield->print_name_on(st); 534 } else { 535 // Must be a null marker 536 st->print("null marker"); 537 } 538 format_helper(regalloc, st, fld_node, ":", j, &scobjs); 539 } else { 540 format_helper(regalloc, st, fld_node, ", [", j, &scobjs); 541 } 542 } 543 } 544 st->print(" }"); 545 } 546 } 547 st->cr(); 548 if (caller() != nullptr) caller()->format(regalloc, n, st); 549 } 550 551 552 void JVMState::dump_spec(outputStream *st) const { 553 if (_method != nullptr) { 554 bool printed = false; 555 if (!Verbose) { 556 // The JVMS dumps make really, really long lines. 
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == nullptr)  endcn = strchr(name, '(');
        if (endcn == nullptr)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    print_method_with_lineno(st, !printed);
    if(_reexecute == Reexecute_True)
      st->print(" reexecute");
  } else {
    st->print(" runtime stub");
  }
  if (caller() != nullptr)  caller()->dump_spec(st);
}


void JVMState::dump_on(outputStream* st) const {
  bool print_map = _map && !((uintptr_t)_map & 1) &&
                  ((caller() == nullptr) || (caller()->map() != _map));
  if (print_map) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != nullptr && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(Verbose ? 2 : 1);
  }
  if (caller() != nullptr) {
    caller()->dump_on(st);
  }
  st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
             depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
  if (_method == nullptr) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print(" bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->_reexecute = _reexecute;
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_scloff(_scloff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != nullptr; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

/**
 * Reset map for all callers
 */
void JVMState::set_map_deep(SafePointNode* map) {
  for (JVMState* p = this; p != nullptr; p = p->_caller) {
    p->set_map(map);
  }
}

// unlike set_map(), this is two-way setting.
void JVMState::bind_map(SafePointNode* map) {
  set_map(map);
  _map->set_jvms(this);
}

// Adapt offsets in in-array after adding or removing an edge.
// Prerequisite is that the JVMState is used by only one node.
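// For example, after inserting one input ahead of the debug info, calling
// adapt_position(1) shifts locoff/stkoff/monoff/scloff/endoff of this
// JVMState and all of its caller states by one slot.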
void JVMState::adapt_position(int delta) {
  for (JVMState* jvms = this; jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_locoff(jvms->locoff() + delta);
    jvms->set_stkoff(jvms->stkoff() + delta);
    jvms->set_monoff(jvms->monoff() + delta);
    jvms->set_scloff(jvms->scloff() + delta);
    jvms->set_endoff(jvms->endoff() + delta);
  }
}

// Mirror the stack size calculation in the deopt code
// How much stack space would we need at this point in the program in
// case of deoptimization?
int JVMState::interpreter_frame_size() const {
  const JVMState* jvms = this;
  int size = 0;
  int callee_parameters = 0;
  int callee_locals = 0;
  int extra_args = method()->max_stack() - stk_size();

  while (jvms != nullptr) {
    int locks = jvms->nof_monitors();
    int temps = jvms->stk_size();
    bool is_top_frame = (jvms == this);
    ciMethod* method = jvms->method();

    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
                                                                 temps + callee_parameters,
                                                                 extra_args,
                                                                 locks,
                                                                 callee_parameters,
                                                                 callee_locals,
                                                                 is_top_frame);
    size += frame_size;

    callee_parameters = method->size_of_parameters();
    callee_locals = method->max_locals();
    extra_args = 0;
    jvms = jvms->caller();
  }
  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
}

//=============================================================================
bool CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("(");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
  st->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  if (tf() != nullptr)  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != nullptr)  jvms()->dump_spec(st);
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (in(0) == nullptr || phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  return tf()->range_cc();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  if (_entry_point == StubRoutines::store_inline_type_fields_to_buf()) {
    // The call to that stub is a special case: its inputs are
    // multiple values returned from a call and so it should follow
    // the return convention.
    SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
    return;
  }
  // Use the standard compiler calling convention
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  uint con = proj->_con;
  const TypeTuple* range_cc = tf()->range_cc();
  if (con >= TypeFunc::Parms) {
    if (tf()->returns_inline_type_as_fields()) {
      // The call returns multiple values (inline type fields): we
      // create one projection per returned value.
      assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return");
      uint ideal_reg = range_cc->field_at(con)->ideal_reg();
      return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
    } else {
      if (con == TypeFunc::Parms) {
        uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
        OptoRegPair regs = Opcode() == Op_CallLeafVector
          ? match->vector_return_value(ideal_reg)      // Calls into assembly vector routine
          : match->c_return_value(ideal_reg);
        RegMask rm = RegMask(regs.first());

        if (Opcode() == Op_CallLeafVector) {
          // If the return is in vector, compute appropriate regmask taking into account the whole range
          if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
            if(OptoReg::is_valid(regs.second())) {
              for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
                rm.Insert(r);
              }
            }
          }
        }

        if (OptoReg::is_valid(regs.second())) {
          rm.Insert(regs.second());
        }
        return new MachProjNode(this,con,rm,ideal_reg);
      } else {
        assert(con == TypeFunc::Parms+1, "only one return value");
        assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
        return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
      }
    }
  }

  switch (con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return nullptr;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//
// Determine whether the call could modify the field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  assert((t_oop != nullptr), "sanity");
  if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
    const TypeTuple* args = _tf->domain_sig();
    Node* dest = nullptr;
    // Stubs that can be called once an ArrayCopyNode is expanded have
    // different signatures. Look for the second pointer argument,
    // that is the destination of the copy.
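    // (For instance, a basic arraycopy stub has the shape (src, dst, count),
    // so the destination is the second of the two pointer inputs.)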
    for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
      if (args->field_at(i)->isa_ptr()) {
        j++;
        if (j == 2) {
          dest = in(i);
          break;
        }
      }
    }
    guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
    if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
      return true;
    }
    return false;
  }
  if (t_oop->is_known_instance()) {
    // The instance_id is set only for scalar-replaceable allocations which
    // are not passed as arguments according to Escape Analysis.
    return false;
  }
  if (t_oop->is_ptr_to_boxed_value()) {
    ciKlass* boxing_klass = t_oop->is_instptr()->instance_klass();
    if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
      // Skip unrelated boxing methods.
      Node* proj = proj_out_or_null(TypeFunc::Parms);
      if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
        return false;
      }
    }
    if (is_CallJava() && as_CallJava()->method() != nullptr) {
      ciMethod* meth = as_CallJava()->method();
      if (meth->is_getter()) {
        return false;
      }
      // May modify (by reflection) if a boxing object is passed
      // as argument or returned.
      Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
      if (proj != nullptr) {
        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      const TypeTuple* d = tf()->domain_cc();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      return false;
    }
  }
  return true;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(Node* n) {
  const TypeTuple* d = tf()->domain_cc();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    if (in(i) == n) {
      return true;
    }
  }
  return false;
}

bool CallNode::has_debug_use(Node* n) {
  if (jvms() != nullptr) {
    for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
      if (in(i) == n) {
        return true;
      }
    }
  }
  return false;
}

// Returns the unique CheckCastPP of a call,
// or 'this' if there are several CheckCastPPs or unexpected uses,
// or null if there are none.
Node *CallNode::result_cast() {
  Node *cast = nullptr;

  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == nullptr)
    return nullptr;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != nullptr) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}


CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) {
  uint max_res = TypeFunc::Parms-1;
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    max_res = MAX2(max_res, pn->_con);
  }

  assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");

  uint projs_size = sizeof(CallProjections);
  if (max_res > TypeFunc::Parms) {
    projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
  }
  char* projs_storage = resource_allocate_bytes(projs_size);
  CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        projs->fallthrough_proj = pn;
        const Node* cn = pn->unique_ctrl_out_or_null();
        if (cn != nullptr && cn->is_Catch()) {
          ProjNode *cpn = nullptr;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              projs->fallthrough_catchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              projs->catchall_catchproj = cpn;
            }
          }
        }
        break;
      }
    case TypeFunc::I_O:
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == nullptr, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj[0] = pn;
      break;
    default:
      assert(pn->_con <= max_res, "unexpected projection from allocation node.");
      projs->resproj[pn->_con-TypeFunc::Parms] = pn;
      break;
    }
  }

  // The resproj may not exist because the result could be ignored
  // and the exception object may not exist if an exception handler
  // swallows the exception, but all the others must exist and be found.
  do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
  assert(!do_asserts || projs->fallthrough_proj      != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_memproj   != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_ioproj    != nullptr, "must be found");
  assert(!do_asserts || projs->catchall_catchproj    != nullptr, "must be found");
  if (separate_io_proj) {
    assert(!do_asserts || projs->catchall_memproj    != nullptr, "must be found");
    assert(!do_asserts || projs->catchall_ioproj     != nullptr, "must be found");
  }
  return projs;
}

Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
  // Validate attached generator
  CallGenerator* cg = generator();
  if (cg != nullptr) {
    assert((is_CallStaticJava()  && cg->is_mh_late_inline()) ||
           (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
  }
#endif // ASSERT
  return SafePointNode::Ideal(phase, can_reshape);
}

bool CallNode::is_call_to_arraycopystub() const {
  if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
    return true;
  }
  return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
bool CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}

void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain_sig()->cnt() : (uint)TypeFunc::Parms+1;
  uint new_dbg_start = tf()->domain_sig()->cnt();
  int jvms_adj  = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == req(), "argument count mismatch");
  Compile* C = phase->C;

  // SafePointScalarObject node could be referenced several times in debug info.
  // Use Dict to record cloned nodes.
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < sfpt->req(); i++) {
    Node* old_in = sfpt->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      bool new_node;
      Node* new_in = old_sosn->clone(sosn_map, new_node);
      if (new_node) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = phase->transform(new_in); // Register new node.
      }
      old_in = new_in;
    }
    add_req(old_in);
  }

  // JVMS may be shared so clone it before we modify it
  set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
  for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_map(this);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}

#ifdef ASSERT
bool CallJavaNode::validate_symbolic_info() const {
  if (method() == nullptr) {
    return true; // call into runtime or uncommon trap
  }
  Bytecodes::Code bc = jvms()->method()->java_code_at_bci(jvms()->bci());
  if (EnableValhalla && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
    return true;
  }
  ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
  ciMethod* callee = method();
  if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
    assert(override_symbolic_info(), "should be set");
  }
  assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
  return true;
}
#endif

#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream* st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}

void CallJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
bool CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (can_reshape && uncommon_trap_request() != 0) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (remove_unknown_flat_array_load(igvn, in(0), in(TypeFunc::Memory), in(TypeFunc::Parms))) {
      if (!in(0)->is_Region()) {
        igvn->replace_input_of(this, 0, phase->C->top());
      }
      return this;
    }
  }

  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    assert(IncrementalInlineMH, "required");
    assert(cg->call_node() == this, "mismatch");
    assert(cg->is_mh_late_inline(), "not virtual");

    // Check whether this MH call becomes a candidate for inlining.
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(nullptr);
      }
    } else if (iid == vmIntrinsics::_linkToNative) {
      // never retry
    } else {
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(nullptr);
      }
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}

//----------------------------is_uncommon_trap----------------------------
// Returns true if this is an uncommon trap.
bool CallStaticJavaNode::is_uncommon_trap() const {
  return (_name != nullptr && !strcmp(_name, "uncommon_trap"));
}

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != nullptr &&
        call->in(TypeFunc::Parms)->is_Con() &&
        call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}

// Split-if can cause the flat array branch of an array load with unknown type (see
// Parse::array_load) to end in an uncommon trap. In that case, the call to
// 'load_unknown_inline' is useless. Replace it with an uncommon trap with the same JVMState.
bool CallStaticJavaNode::remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg) {
  if (ctl == nullptr || ctl->is_top() || mem == nullptr || mem->is_top() || !mem->is_MergeMem()) {
    return false;
  }
  if (ctl->is_Region()) {
    bool res = false;
    for (uint i = 1; i < ctl->req(); i++) {
      MergeMemNode* mm = mem->clone()->as_MergeMem();
      for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
        Node* m = mms.memory();
        if (m->is_Phi() && m->in(0) == ctl) {
          mms.set_memory(m->in(i));
        }
      }
      if (remove_unknown_flat_array_load(igvn, ctl->in(i), mm, unc_arg)) {
        res = true;
        if (!ctl->in(i)->is_Region()) {
          igvn->replace_input_of(ctl, i, igvn->C->top());
        }
      }
      igvn->remove_dead_node(mm);
    }
    return res;
  }
  // Verify the control flow is ok
  Node* call = ctl;
  MemBarNode* membar = nullptr;
  for (;;) {
    if (call == nullptr || call->is_top()) {
      return false;
    }
    if (call->is_Proj() || call->is_Catch() || call->is_MemBar()) {
      call = call->in(0);
    } else if (call->Opcode() == Op_CallStaticJava && !call->in(0)->is_top() &&
               call->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
      assert(call->in(0)->is_Proj() && call->in(0)->in(0)->is_MemBar(), "missing membar");
      membar = call->in(0)->in(0)->as_MemBar();
      break;
    } else {
      return false;
    }
  }

  JVMState* jvms = call->jvms();
  if (igvn->C->too_many_traps(jvms->method(), jvms->bci(), Deoptimization::trap_request_reason(uncommon_trap_request()))) {
    return false;
  }

  Node* call_mem = call->in(TypeFunc::Memory);
  if (call_mem == nullptr || call_mem->is_top()) {
    return false;
  }
  if (!call_mem->is_MergeMem()) {
    call_mem = MergeMemNode::make(call_mem);
    igvn->register_new_node_with_optimizer(call_mem);
  }

  // Verify that there's no unexpected side effect
  for (MergeMemStream mms2(mem->as_MergeMem(), call_mem->as_MergeMem()); mms2.next_non_empty2(); ) {
    Node* m1 = mms2.is_empty() ? mms2.base_memory() : mms2.memory();
    Node* m2 = mms2.memory2();

    for (uint i = 0; i < 100; i++) {
      if (m1 == m2) {
        break;
      } else if (m1->is_Proj()) {
        m1 = m1->in(0);
      } else if (m1->is_MemBar()) {
        m1 = m1->in(TypeFunc::Memory);
      } else if (m1->Opcode() == Op_CallStaticJava &&
                 m1->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
        if (m1 != call) {
          return false;
        }
        break;
      } else if (m1->is_MergeMem()) {
        MergeMemNode* mm = m1->as_MergeMem();
        int idx = mms2.alias_idx();
        if (idx == Compile::AliasIdxBot) {
          m1 = mm->base_memory();
        } else {
          m1 = mm->memory_at(idx);
        }
      } else {
        return false;
      }
    }
  }
  if (call_mem->outcnt() == 0) {
    igvn->remove_dead_node(call_mem);
  }

  // Remove membar preceding the call
  membar->remove(igvn);

  address call_addr = OptoRuntime::uncommon_trap_blob()->entry_point();
  CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap", nullptr);
  unc->init_req(TypeFunc::Control, call->in(0));
  unc->init_req(TypeFunc::I_O, call->in(TypeFunc::I_O));
  unc->init_req(TypeFunc::Memory, call->in(TypeFunc::Memory));
  unc->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
  unc->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
  unc->init_req(TypeFunc::Parms+0, unc_arg);
  unc->set_cnt(PROB_UNLIKELY_MAG(4));
  unc->copy_call_debug_info(igvn, call->as_CallStaticJava());

  // Replace the call with an uncommon trap
  igvn->replace_input_of(call, 0, igvn->C->top());

  igvn->register_new_node_with_optimizer(unc);

  Node* ctrl = igvn->transform(new ProjNode(unc, TypeFunc::Control));
  Node* halt = igvn->transform(new HaltNode(ctrl, call->in(TypeFunc::FramePtr), "uncommon trap returned which should never happen"));
  igvn->add_input_to(igvn->C->root(), halt);

  return true;
}


#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != nullptr) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                 Deoptimization::format_trap_request(buf, sizeof(buf),
                                                     trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}

void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else if (_name) {
    st->print("%s", _name);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
bool CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    assert(IncrementalInlineVirtual, "required");
    assert(cg->call_node() == this, "mismatch");
    assert(cg->is_virtual_late_inline(), "not virtual");

    // Recover symbolic info for method resolution.
    ciMethod* caller = jvms()->method();
    ciBytecodeStream iter(caller);
    iter.force_bci(jvms()->bci());

    bool not_used1;
    ciSignature* not_used2;
    ciMethod* orig_callee = iter.get_method(not_used1, &not_used2);  // callee in the bytecode
    ciKlass* holder = iter.get_declared_method_holder();
    if (orig_callee->is_method_handle_intrinsic()) {
      assert(_override_symbolic_info, "required");
      orig_callee = method();
      holder = method()->holder();
    }

    ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

    Node* receiver_node = in(TypeFunc::Parms);
    const TypeOopPtr* receiver_type = phase->type(receiver_node)->isa_oopptr();

    int not_used3;
    bool call_does_dispatch;
    ciMethod* callee = phase->C->optimize_virtual_call(caller, klass, holder, orig_callee, receiver_type, true /*is_virtual*/,
                                                       call_does_dispatch, not_used3);  // out-parameters
    if (!call_does_dispatch) {
      // Register for late inlining.
      cg->set_callee_method(callee);
      phase->C->prepend_late_inline(cg); // MH late inlining prepends to the list, so do the same
      set_generator(nullptr);
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}

#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
bool CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif
uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
bool CallLeafVectorNode::cmp( const Node &n ) const {
  CallLeafVectorNode &call = (CallLeafVectorNode&)n;
  return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
}

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  if (_entry_point == nullptr) {
    // The call to that stub is a special case: its inputs are
    // multiple values returned from a call and so it should follow
    // the return convention.
    SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
    return;
  }
  SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
}

void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
#ifdef ASSERT
  assert(tf()->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
         "return vector size must match");
  const TypeTuple* d = tf()->domain_sig();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node* arg = in(i);
    assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
           "vector argument size must match");
  }
#endif

  SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
}

//=============================================================================
//------------------------------calling_convention-----------------------------


//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

uint CallLeafNoFPNode::match_edge(uint idx) const {
  // Null entry point is a special case for which the target is in a
  // register. Need to match that edge.
  return entry_point() == nullptr && idx == TypeFunc::Parms;
}

//=============================================================================

void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
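    // For example, if L[idx-1]/L[idx] held a long, overwriting L[idx] must
    // also kill L[idx-1] (the low-index half) to keep the debug info sane.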
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}

uint SafePointNode::size_of() const { return sizeof(*this); }
bool SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == nullptr || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != nullptr)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}


//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return nullptr;
  } else {
    Node* n = in(req());
    assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}


//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
  if (remove_dead_region(phase, can_reshape)) {
    return this;
  }
  // Scalarize inline types in safepoint debug info.
  // Delay this until all inlining is over to avoid getting inconsistent debug info.
  if (phase->C->scalarize_in_safepoints() && can_reshape && jvms() != nullptr) {
    for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
      Node* n = in(i)->uncast();
      if (n->is_InlineType()) {
        n->as_InlineType()->make_scalar_in_safepoints(phase->is_IterGVN());
      }
    }
  }
  return nullptr;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node* SafePointNode::Identity(PhaseGVN* phase) {

  // If you have back to back safepoints, remove one
  if (in(TypeFunc::Control)->is_SafePoint()) {
    Node* out_c = unique_ctrl_out_or_null();
    // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
    // outer loop's safepoint could confuse removal of the outer loop.
    if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
      return in(TypeFunc::Control);
    }
  }

  // Transforming long counted loops requires a safepoint node. Do not
  // eliminate a safepoint until loop opts are over.
  if (in(0)->is_Proj() && !phase->C->major_progress()) {
    Node *n0 = in(0)->in(0);
    // Check if it is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Don't remove a safepoint belonging to an OuterStripMinedLoopEndNode.
      // If the loop dies, they will be removed together.
      if (has_out_with(Op_OuterStripMinedLoopEnd)) {
        return this;
      }
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type* SafePointNode::Value(PhaseGVN* phase) const {
  if (phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  if (in(0) == this) {
    return Type::TOP; // Dead infinite loop
  }
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
  _replaced_nodes.dump(st);
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms )  return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}


void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}

void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
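  // Monitor edges live between monoff and scloff as [box, obj] pairs, so
  // each pushed monitor appends exactly MonitorEdges (2) inputs.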
void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}

void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  if (GenerateSynchronizationCode) {
    ins_req(nextmon,   lock->box_node());
    ins_req(nextmon+1, lock->obj_node());
  } else {
    Node* top = Compile::current()->top();
    ins_req(nextmon, top);
    ins_req(nextmon, top);
  }
  jvms()->set_scloff(nextmon + MonitorEdges);
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff) del_req_ordered(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

Node* SafePointNode::peek_operand(uint off) const {
  assert(jvms()->sp() > 0, "must have an operand");
  assert(off < jvms()->sp(), "off is out-of-range");
  return stack(jvms(), jvms()->sp() - off - 1);
}

// Do we Match on this edge index or not?  Match no edges
uint SafePointNode::match_edge(uint idx) const {
  return (TypeFunc::Parms == idx);
}

void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
  assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
  int nb = igvn->C->root()->find_prec_edge(this);
  if (nb != -1) {
    igvn->delete_precedence_of(igvn->C->root(), nb);
  }
}

//============== SafePointScalarObjectNode ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
  _first_index(first_index),
  _depth(depth),
  _n_fields(n_fields),
  _alloc(alloc)
{
#ifdef ASSERT
  if (alloc != nullptr && !alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) {
    alloc->dump();
    assert(false, "unexpected call node");
  }
#endif
  init_class_id(Class_SafePointScalarObject);
}

// Do not allow value-numbering for SafePointScalarObject node.
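// (Added note:) each such node describes the fields of one scalar-replaced
// object at one particular safepoint, so two structurally equal nodes are
// still not interchangeable; hash()/cmp() below opt out of value-numbering
// accordingly.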
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
bool SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(Dict* sosn_map, bool& new_node) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != nullptr) {
    new_node = false;
    return (SafePointScalarObjectNode*)cached;
  }
  new_node = true;
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}

#ifndef PRODUCT
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
  st->print(" # fields@[%d..%d]", first_index(), first_index() + n_fields() - 1);
}
#endif

//============== SafePointScalarMergeNode ==============

SafePointScalarMergeNode::SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
  _merge_pointer_idx(merge_pointer_idx)
{
  init_class_id(Class_SafePointScalarMerge);
}

// Do not allow value-numbering for SafePointScalarMerge node.
uint SafePointScalarMergeNode::hash() const { return NO_HASH; }
bool SafePointScalarMergeNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarMergeNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarMergeNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarMergeNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarMergeNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarMergeNode*
SafePointScalarMergeNode::clone(Dict* sosn_map, bool& new_node) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != nullptr) {
    new_node = false;
    return (SafePointScalarMergeNode*)cached;
  }
  new_node = true;
  SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}

#ifndef PRODUCT
void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
  st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
}
#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node,
                           Node* initial_test,
                           InlineTypeNode* inline_type_node)
  : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  _is_allocation_MemBar_redundant = false;
  _larval = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  init_req( ValidLengthTest    , topnode);
  init_req( InlineType         , inline_type_node);
  // DefaultValue defaults to nullptr
  // RawDefaultValue defaults to nullptr
  C->add_macro_node(this);
}

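// A sketch of the case compute_MemBar_redundancy() detects below: if escape
// analysis of the constructor proves that the receiver does not escape,
// e.g. (illustrative)
//
//   class Point {
//     int x, y;
//     Point(int x, int y) { this.x = x; this.y = y; }
//   }
//
// then no other thread can observe the partially-initialized object, and
// the MemBar that would normally order the initializing stores against
// publication is redundant.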
void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
  assert(initializer != nullptr &&
         (initializer->is_object_constructor() || initializer->is_class_initializer()),
         "unexpected initializer method");
  BCEscapeAnalyzer* analyzer = initializer->get_bcea();
  if (analyzer == nullptr) {
    return;
  }

  // Allocation node is first parameter in its initializer
  if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
    _is_allocation_MemBar_redundant = true;
  }
}

Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
  Node* mark_node = nullptr;
  if (UseCompactObjectHeaders || EnableValhalla) {
    Node* klass_node = in(AllocateNode::KlassNode);
    Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
    mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
    if (EnableValhalla) {
      mark_node = phase->transform(mark_node);
      // Avoid returning a constant (old node) here because this method is used by LoadNode::Ideal
      mark_node = new OrXNode(mark_node, phase->MakeConX(_larval ? markWord::larval_bit_in_place : 0));
    }
    return mark_node;
  } else {
    return phase->MakeConX(markWord::prototype().value());
  }
}

// Retrieve the length from the AllocateArrayNode. Narrow the type with a
// CastII, if appropriate.  If we are not allowed to create new nodes, and
// a CastII is appropriate, return null.
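// For example (an illustrative sketch), for
//
//   int[] a = new int[n];
//
// with n of unknown int type, uses of a.length reached from the
// allocation's control projection can be narrowed to the valid array-size
// range, since the allocation only succeeds for a non-negative length.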
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
  Node *length = in(AllocateNode::ALength);
  assert(length != nullptr, "length is not null");

  const TypeInt* length_type = phase->find_int_type(length);
  const TypeAryPtr* ary_type = oop_type->isa_aryptr();

  if (ary_type != nullptr && length_type != nullptr) {
    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
    if (narrow_length_type != length_type) {
      // Assert one of:
      //   - the narrow_length is 0
      //   - the narrow_length is not wider than length
      assert(narrow_length_type == TypeInt::ZERO ||
             (length_type->is_con() && narrow_length_type->is_con() &&
              (narrow_length_type->_hi <= length_type->_lo)) ||
             (narrow_length_type->_hi <= length_type->_hi &&
              narrow_length_type->_lo >= length_type->_lo),
             "narrow type must be narrower than length type");

      // Return null if new nodes are not allowed
      if (!allow_new_nodes) {
        return nullptr;
      }
      // Create a cast which is control dependent on the initialization to
      // propagate the fact that the array length must be positive.
      InitializeNode* init = initialization();
      if (init != nullptr) {
        length = new CastIINode(init->proj_out_or_null(TypeFunc::Control), length, narrow_length_type);
      }
    }
  }

  return length;
}

//=============================================================================
const TypeFunc* LockNode::_lock_type_Type = nullptr;

uint LockNode::size_of() const { return sizeof(*this); }

// Redundant lock elimination
//
// There are various patterns of locking where we release and
// immediately reacquire a lock in a piece of code where no operations
// occur in between that would be observable.  In those cases we can
// skip releasing and reacquiring the lock without violating any
// fairness requirements.  Doing this around a loop could cause a lock
// to be held for a very long time, so we concentrate on non-looping
// control flow.  We also require that the operations are fully
// redundant, meaning that we don't introduce new lock operations on
// some paths so as to be able to eliminate it on others, a la PRE.  This
// would probably require some more extensive graph manipulation to
// guarantee that the memory edges were all handled correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method, consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
//    locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
//    the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow.
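//
// For instance (an added illustrative sketch):
//
//   s();
//   s();
//
// Here the unlock at the end of the first call and the lock at the start
// of the second are back to back with nothing observable in between, so
// both can be eliminated.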
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
//    and unlock in the then case and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//   f();
//   s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting unlocking on the else case and
// eliminating the first unlock and the lock in the then side.
// Alternatively the unlock could be moved out of the then side so it
// was after the merge and the first unlock and second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally we might allow work between an unlock and lock before
// giving up eliminating the locks.  The current code disallows any
// conditional control flow between these operations.  A formulation
// similar to partial redundancy elimination computing the
// availability of unlocking and the anticipatability of locking at a
// program point would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//          <obj>
//            |  \\------+
//            |   \       \
//            |    BoxLock \
//            |    |   |    \
//            |    |    \    \
//            |    |    FastLock
//            |    |     /
//            |    |    /
//            |    |   |
//
//           Lock
//            |
//          Proj #0
//            |
//       MembarAcquire
//            |
//          Proj #0
//
//       MembarRelease
//            |
//          Proj #0
//            |
//          Unlock
//            |
//          Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through its control for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate, they are marked as eliminatable, which causes the
// expansion of the Lock and Unlock macro nodes to make the operation a NOP.
//
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//  - copy regions.  (These may not have been optimized away yet.)
//  - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == nullptr)
    return nullptr;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == nullptr)
        break;  // hit a region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break; // found an interesting control
    }
  }
  return ctrl;
}
//
// Given a control, see if it's the control projection of an Unlock which
// is operating on the same object as lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : nullptr;
  if (ctrl_proj != nullptr && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != nullptr && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
      Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
      if (lock_obj->eqv_uncast(unlock_obj) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = nullptr;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != nullptr, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top()) break;  // dead control path
    if (ctrl->is_Proj()) ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated
      if (ctrl->req() == 3 && ctrl->in(1) != nullptr && ctrl->in(2) != nullptr) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
    Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
    if (lock_obj->eqv_uncast(unlock_obj) &&
        BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}

// This code corresponds to case 3 above.

bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = nullptr;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != nullptr && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
        Node* lock1_obj = bs->step_over_gc_barrier(lock1->obj_node());
        if (lock_obj->eqv_uncast(lock1_obj) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // check each control merging at this point for a matching unlock.
  // in(0) should be self edge so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != nullptr) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;
}

// Check that all locks/unlocks associated with object come from balanced regions.
bool AbstractLockNode::is_balanced() {
  Node* obj = obj_node();
  for (uint j = 0; j < obj->outcnt(); j++) {
    Node* n = obj->raw_out(j);
    if (n->is_AbstractLock() &&
        n->as_AbstractLock()->obj_node()->eqv_uncast(obj)) {
      BoxLockNode* n_box = n->as_AbstractLock()->box_node()->as_BoxLock();
      if (n_box->is_unbalanced()) {
        return false;
      }
    }
  }
  return true;
}

const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"};

const char * AbstractLockNode::kind_as_string() const {
  return _kind_names[_kind];
}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}

void AbstractLockNode::set_eliminated_lock_counter() {
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
}

void AbstractLockNode::dump_spec(outputStream* st) const {
  st->print("%s ", _kind_names[_kind]);
  CallNode::dump_spec(st);
}

void AbstractLockNode::dump_compact_spec(outputStream* st) const {
  st->print("%s", _kind_names[_kind]);
}
#endif

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return nullptr;

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  const Type* obj_type = phase->type(obj_node());
  if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
    //
    // If we are locking a non-escaped object, the lock/unlock is unnecessary
    //
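    // For example (an illustrative sketch):
    //
    //   Object o = new Object();   // o never escapes this thread
    //   synchronized (o) { ... }   // no other thread can contend on o
    //
    // Escape analysis proves that no other thread can synchronize on o,
    // so the lock and unlock can be marked eliminated.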
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace the coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc1");
#endif
      this->set_non_esc_obj();
      return result;
    }

    if (!phase->C->do_locks_coarsening()) {
      return result; // Compiling without locks coarsening
    }
    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != nullptr && !is_eliminated()) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of single unlock directly control dependent on a
        // single lock which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region()) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found lock preceded by multiple unlocks along all paths
          // joining at this point which is case 3 in description above.
        }
      } else {
        // see if this lock comes from either half of an if and the
        // predecessor merges unlocks and the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          if (Verbose) {
            tty->print_cr("=== Locks coarsening ===");
            tty->print("Obj: ");
            obj_node()->dump();
          }
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock)
              locks++;
            else
              unlocks++;
            if (Verbose) {
              tty->print("Box %d: ", i);
              box_node()->dump();
              tty->print(" %d: ", i);
              lock->dump();
            }
          }
          tty->print_cr("=== Coarsened %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated by coarsening and update any counters
#ifdef ASSERT
          lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
#endif
          lock->set_coarsened();
        }
        // Record this coarsened group.
        phase->C->add_coarsened_locks(lock_ops);
      } else if (ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities but the region this
        // lock is control dependent on hasn't been processed yet so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
bool LockNode::is_nested_lock_region() {
  return is_nested_lock_region(nullptr);
}

// 'c' is used for access to the compilation log; no logging if null
bool LockNode::is_nested_lock_region(Compile * c) {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_1");
#endif
    return false; // External lock or it is not Box (Phi node).
  }

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = nullptr;
  Node* bad_lock = nullptr;
  if (!box->is_simple_lock_region(&unique_lock, obj, &bad_lock)) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2a", bad_lock);
#endif
    return false;
  }
  if (unique_lock != this) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2b", (unique_lock != nullptr ? unique_lock : bad_lock));
    if (PrintEliminateLocks && Verbose) {
      tty->print_cr("=============== unique_lock != this ============");
      tty->print(" this: ");
      this->dump();
      tty->print(" box: ");
      box->dump();
      tty->print(" obj: ");
      obj->dump();
      if (unique_lock != nullptr) {
        tty->print(" unique_lock: ");
        unique_lock->dump();
      }
      if (bad_lock != nullptr) {
        tty->print(" bad_lock: ");
        bad_lock->dump();
      }
      tty->print_cr("===============");
    }
#endif
    return false;
  }

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  obj = bs->step_over_gc_barrier(obj);
  // Look for external lock for the same object.
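  // A nested lock acquires an object already held by an enclosing scope,
  // e.g. (an illustrative sketch):
  //
  //   synchronized (o) {       // outer lock, lower stack slot
  //     synchronized (o) {     // nested lock: provably redundant
  //       ...
  //     }
  //   }
  //
  // All enclosing monitors are visible in this safepoint's JVM state, so
  // finding the same object at a lower stack slot identifies this lock as
  // nested.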
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      obj_node = bs->step_over_gc_barrier(obj_node);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        box->set_nested();
        return true;
      }
    }
  }
#ifdef ASSERT
  this->log_lock_optimization(c, "eliminate_lock_INLR_3");
#endif
  return false;
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return nullptr;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  const Type* obj_type = phase->type(obj_node());
  if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
    //
    // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace the coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}

void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
  if (C == nullptr) {
    return;
  }
  CompileLog* log = C->log();
  if (log != nullptr) {
    Node* box = box_node();
    Node* obj = obj_node();
    int box_id = box != nullptr ? box->_idx : -1;
    int obj_id = obj != nullptr ? obj->_idx : -1;

    log->begin_head("%s compile_id='%d' lock_id='%d' class='%s' kind='%s' box_id='%d' obj_id='%d' bad_id='%d'",
                    tag, C->compile_id(), this->_idx,
                    is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
                    kind_as_string(), box_id, obj_id, (bad_lock != nullptr ? bad_lock->_idx : -1));
    log->stamp();
    log->end_head();
    JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
    while (p != nullptr) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail(tag);
  }
}

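// A sketch of the cases decided below (illustrative), where dest_t is the
// destination type of a clone/arraycopy and t_oop is the memory type being
// asked about: a clone() whose destination is a known non-Object instance
// cannot modify memory typed as an array; an arraycopy or array clone
// cannot modify memory typed as an instance field; and two array types
// only interfere when their element alias classes match.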
bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) {
  if (dest_t->is_known_instance() && t_oop->is_known_instance()) {
    return dest_t->instance_id() == t_oop->instance_id();
  }

  if (dest_t->isa_instptr() && !dest_t->is_instptr()->instance_klass()->equals(phase->C->env()->Object_klass())) {
    // clone
    if (t_oop->isa_aryptr()) {
      return false;
    }
    if (!t_oop->isa_instptr()) {
      return true;
    }
    if (dest_t->maybe_java_subtype_of(t_oop) || t_oop->maybe_java_subtype_of(dest_t)) {
      return true;
    }
    // unrelated
    return false;
  }

  if (dest_t->isa_aryptr()) {
    // arraycopy or array clone
    if (t_oop->isa_instptr()) {
      return false;
    }
    if (!t_oop->isa_aryptr()) {
      return true;
    }

    const Type* elem = dest_t->is_aryptr()->elem();
    if (elem == Type::BOTTOM) {
      // An array but we don't know what elements are
      return true;
    }

    dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
    t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
    uint dest_alias = phase->C->get_alias_index(dest_t);
    uint t_oop_alias = phase->C->get_alias_index(t_oop);

    return dest_alias == t_oop_alias;
  }

  return true;
}