/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/escape.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "code/vmreg.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
bool StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
//------------------------------calling_convention-----------------------------
void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return nullptr;
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}

void ParmNode::dump_compact_spec(outputStream *st) const {
  if (_con < TypeFunc::Parms) {
    st->print("%s", names[_con]);
  } else {
    st->print("%d:", _con-TypeFunc::Parms);
    // unconditionally dump bottom_type
    bottom_type()->dump_on(st);
  }
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return t->ideal_reg();
  }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}
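// Illustrative note (not part of the original sources): the constructor above
// only wires the five standard TypeFunc edges. When the method being compiled
// actually returns a value, the caller of this constructor appends the result
// at slot TypeFunc::Parms. A hypothetical sketch of that wiring (the real
// call site lives elsewhere, see e.g. Compile::return_values):
//
//   ReturnNode* ret = new ReturnNode(TypeFunc::Parms + 1, ctrl, i_o, mem,
//                                    frameptr, retadr);
//   ret->init_req(TypeFunc::Parms, result_value);  // the returned value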
Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

const Type* ReturnNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}


#ifndef PRODUCT
void ReturnNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "returns"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("returns ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr);
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : nullptr;
}

const Type* RethrowNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, after printing "exception"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("exception ");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
}
#endif

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
JVMState::JVMState(ciMethod* method, JVMState* caller) :
  _method(method) {
  assert(method != nullptr, "must be valid call site");
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_bci = -99);  // random garbage value
  debug_only(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == nullptr ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
JVMState::JVMState(int stack_size) :
  _method(nullptr) {
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_map = (SafePointNode*)-1);
  _caller = nullptr;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
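// Layout sketch (restating what the two constructors above set up; the
// authoritative description lives in callnode.hpp): one JVMState describes a
// contiguous slice of the inputs of its owning SafePointNode,
//
//   [locoff, stkoff)  locals
//   [stkoff, monoff)  expression stack
//   [monoff, scloff)  monitors (box/obj pairs)
//   [scloff, endoff)  scalar-replaced object descriptions
//
// Caller frames, if any, occupy earlier slices; the youngest frame comes
// last (see the asserts in debug_start()/debug_end() below).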
//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)    return false;
    if (p->_method == nullptr)       return true;   // bci is irrelevant
    if (p->_bci    != q->_bci)       return false;
    if (p->_reexecute != q->_reexecute)  return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                      return true;
    assert(p != nullptr && q != nullptr, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != nullptr; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}
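// Worked example (hypothetical inlining chain, for orientation only): with
// c() inlined into b() inlined into a() (the compilation root), a safepoint
// in c() carries a JVMState of depth() == 3; of_depth(1) follows the caller
// links back to a()'s root state, and debug_depth() sums debug_size() over
// all three frames.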
#ifndef PRODUCT

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
  if (n == nullptr) { st->print(" null"); return; }
  if (n->is_SafePointScalarObject()) {
    // Scalar replacement.
    SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
    scobjs->append_if_missing(spobj);
    int sco_n = scobjs->find(spobj);
    assert(sco_n >= 0, "");
    st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
    return;
  }
  if (regalloc->node_regs_max_index() > 0 &&
      OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf,sizeof(buf));
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#" INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
      st->print(" %s%d]=#null",msg,i);
      break;
    case Type::AryPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop()));
      break;
    case Type::KlassPtr:
    case Type::AryKlassPtr:
    case Type::InstKlassPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->exact_klass()));
      break;
    case Type::MetadataPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata()));
      break;
    case Type::NarrowOop:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop()));
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr()));
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#" INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con()));
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}

//---------------------print_method_with_lineno--------------------------------
void JVMState::print_method_with_lineno(outputStream* st, bool show_name) const {
  if (show_name) _method->print_short_name(st);

  int lineno = _method->line_number_from_bci(_bci);
  if (lineno != -1) {
    st->print(" @ bci:%d (line %d)", _bci, lineno);
  } else {
    st->print(" @ bci:%d", _bci);
  }
}

//------------------------------format-----------------------------------------
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print(" #");
  if (_method) {
    print_method_with_lineno(st, true);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    GrowableArray<SafePointScalarObjectNode*> scobjs;
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for (i = 0; i < (uint)loc_size(); i++)
      format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
        format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if (regalloc->node_regs_max_index() > 0 &&
          OptoReg::is_valid(regalloc->get_reg_first(box))) {
        box = BoxLockNode::box_node(box);
        format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
box, "MON-BOX[", i, &scobjs); 449 } else { 450 OptoReg::Name box_reg = BoxLockNode::reg(box); 451 st->print(" MON-BOX%d=%s+%d", 452 i, 453 OptoReg::regname(OptoReg::c_frame_pointer), 454 regalloc->reg2offset(box_reg)); 455 } 456 const char* obj_msg = "MON-OBJ["; 457 if (EliminateLocks) { 458 if (BoxLockNode::box_node(box)->is_eliminated()) 459 obj_msg = "MON-OBJ(LOCK ELIMINATED)["; 460 } 461 format_helper(regalloc, st, obj, obj_msg, i, &scobjs); 462 } 463 464 for (i = 0; i < (uint)scobjs.length(); i++) { 465 // Scalar replaced objects. 466 st->cr(); 467 st->print(" # ScObj" INT32_FORMAT " ", i); 468 SafePointScalarObjectNode* spobj = scobjs.at(i); 469 ciKlass* cik = spobj->bottom_type()->is_oopptr()->exact_klass(); 470 assert(cik->is_instance_klass() || 471 cik->is_array_klass(), "Not supported allocation."); 472 ciInstanceKlass *iklass = nullptr; 473 if (cik->is_instance_klass()) { 474 cik->print_name_on(st); 475 iklass = cik->as_instance_klass(); 476 } else if (cik->is_type_array_klass()) { 477 cik->as_array_klass()->base_element_type()->print_name_on(st); 478 st->print("[%d]", spobj->n_fields()); 479 } else if (cik->is_obj_array_klass()) { 480 ciKlass* cie = cik->as_obj_array_klass()->base_element_klass(); 481 if (cie->is_instance_klass()) { 482 cie->print_name_on(st); 483 } else if (cie->is_type_array_klass()) { 484 cie->as_array_klass()->base_element_type()->print_name_on(st); 485 } else { 486 ShouldNotReachHere(); 487 } 488 st->print("[%d]", spobj->n_fields()); 489 int ndim = cik->as_array_klass()->dimension() - 1; 490 while (ndim-- > 0) { 491 st->print("[]"); 492 } 493 } else if (cik->is_flat_array_klass()) { 494 ciKlass* cie = cik->as_flat_array_klass()->base_element_klass(); 495 cie->print_name_on(st); 496 st->print("[%d]", spobj->n_fields()); 497 int ndim = cik->as_array_klass()->dimension() - 1; 498 while (ndim-- > 0) { 499 st->print("[]"); 500 } 501 } 502 st->print("={"); 503 uint nf = spobj->n_fields(); 504 if (nf > 0) { 505 uint first_ind = spobj->first_index(mcall->jvms()); 506 if (iklass != nullptr && iklass->is_inlinetype()) { 507 Node* init_node = mcall->in(first_ind++); 508 if (!init_node->is_top()) { 509 st->print(" [is_init"); 510 format_helper(regalloc, st, init_node, ":", -1, nullptr); 511 } 512 } 513 Node* fld_node = mcall->in(first_ind); 514 ciField* cifield; 515 if (iklass != nullptr) { 516 st->print(" ["); 517 cifield = iklass->nonstatic_field_at(0); 518 cifield->print_name_on(st); 519 format_helper(regalloc, st, fld_node, ":", 0, &scobjs); 520 } else { 521 format_helper(regalloc, st, fld_node, "[", 0, &scobjs); 522 } 523 for (uint j = 1; j < nf; j++) { 524 fld_node = mcall->in(first_ind+j); 525 if (iklass != nullptr) { 526 st->print(", ["); 527 cifield = iklass->nonstatic_field_at(j); 528 cifield->print_name_on(st); 529 format_helper(regalloc, st, fld_node, ":", j, &scobjs); 530 } else { 531 format_helper(regalloc, st, fld_node, ", [", j, &scobjs); 532 } 533 } 534 } 535 st->print(" }"); 536 } 537 } 538 st->cr(); 539 if (caller() != nullptr) caller()->format(regalloc, n, st); 540 } 541 542 543 void JVMState::dump_spec(outputStream *st) const { 544 if (_method != nullptr) { 545 bool printed = false; 546 if (!Verbose) { 547 // The JVMS dumps make really, really long lines. 548 // Take out the most boring parts, which are the package prefixes. 
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == nullptr)  endcn = strchr(name, '(');
        if (endcn == nullptr)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    print_method_with_lineno(st, !printed);
    if(_reexecute == Reexecute_True)
      st->print(" reexecute");
  } else {
    st->print(" runtime stub");
  }
  if (caller() != nullptr)  caller()->dump_spec(st);
}


void JVMState::dump_on(outputStream* st) const {
  bool print_map = _map && !((uintptr_t)_map & 1) &&
                   ((caller() == nullptr) || (caller()->map() != _map));
  if (print_map) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != nullptr && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(Verbose ? 2 : 1);
  }
  if (caller() != nullptr) {
    caller()->dump_on(st);
  }
  st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
            depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
  if (_method == nullptr) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print(" bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->_reexecute = _reexecute;
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_scloff(_scloff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != nullptr; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

/**
 * Reset map for all callers
 */
void JVMState::set_map_deep(SafePointNode* map) {
  for (JVMState* p = this; p != nullptr; p = p->_caller) {
    p->set_map(map);
  }
}

// unlike set_map(), this is two-way setting.
void JVMState::bind_map(SafePointNode* map) {
  set_map(map);
  _map->set_jvms(this);
}
// Adapt offsets in in-array after adding or removing an edge.
// Prerequisite is that the JVMState is used by only one node.
void JVMState::adapt_position(int delta) {
  for (JVMState* jvms = this; jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_locoff(jvms->locoff() + delta);
    jvms->set_stkoff(jvms->stkoff() + delta);
    jvms->set_monoff(jvms->monoff() + delta);
    jvms->set_scloff(jvms->scloff() + delta);
    jvms->set_endoff(jvms->endoff() + delta);
  }
}

// Mirror the stack size calculation in the deopt code
// How much stack space would we need at this point in the program in
// case of deoptimization?
int JVMState::interpreter_frame_size() const {
  const JVMState* jvms = this;
  int size = 0;
  int callee_parameters = 0;
  int callee_locals = 0;
  int extra_args = method()->max_stack() - stk_size();

  while (jvms != nullptr) {
    int locks = jvms->nof_monitors();
    int temps = jvms->stk_size();
    bool is_top_frame = (jvms == this);
    ciMethod* method = jvms->method();

    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
                                                                 temps + callee_parameters,
                                                                 extra_args,
                                                                 locks,
                                                                 callee_parameters,
                                                                 callee_locals,
                                                                 is_top_frame);
    size += frame_size;

    callee_parameters = method->size_of_parameters();
    callee_locals = method->max_locals();
    extra_args = 0;
    jvms = jvms->caller();
  }
  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
}
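// Illustrative walk-through of the loop above (not normative): for b()
// inlined into a(), with the safepoint in b(), the first iteration sizes
// b()'s interpreter frame (the top frame, with extra_args covering stack
// slots not yet pushed), the second sizes a()'s frame with b()'s
// size_of_parameters()/max_locals() feeding in as callee_parameters and
// callee_locals, and the trailing last_frame_adjust term mirrors the
// runtime's bookkeeping for the outermost method's locals.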
//=============================================================================
bool CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req(outputStream *st, DumpConfig* dc) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("(");
    Node* p = in(i);
    if (p != nullptr) {
      p->dump_idx(false, st, dc);
      st->print(" ");
    } else {
      st->print("_ ");
    }
  }
  st->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  if (tf() != nullptr)  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != nullptr)  jvms()->dump_spec(st);
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (!in(0) || phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  return tf()->range_cc();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  if (_entry_point == StubRoutines::store_inline_type_fields_to_buf()) {
    // The call to that stub is a special case: its inputs are
    // multiple values returned from a call and so it should follow
    // the return convention.
    SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
    return;
  }
  // Use the standard compiler calling convention
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  uint con = proj->_con;
  const TypeTuple* range_cc = tf()->range_cc();
  if (con >= TypeFunc::Parms) {
    if (tf()->returns_inline_type_as_fields()) {
      // The call returns multiple values (inline type fields): we
      // create one projection per returned value.
      assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return");
      uint ideal_reg = range_cc->field_at(con)->ideal_reg();
      return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
    } else {
      if (con == TypeFunc::Parms) {
        uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
        OptoRegPair regs = Opcode() == Op_CallLeafVector
          ? match->vector_return_value(ideal_reg)  // Calls into assembly vector routine
          : match->c_return_value(ideal_reg);
        RegMask rm = RegMask(regs.first());

        if (Opcode() == Op_CallLeafVector) {
          // If the return is in vector, compute appropriate regmask taking into account the whole range
          if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
            if(OptoReg::is_valid(regs.second())) {
              for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
                rm.Insert(r);
              }
            }
          }
        }

        if (OptoReg::is_valid(regs.second())) {
          rm.Insert(regs.second());
        }
        return new MachProjNode(this,con,rm,ideal_reg);
      } else {
        assert(con == TypeFunc::Parms+1, "only one return value");
        assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
        return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
      }
    }
  }

  switch (con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return nullptr;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}
//
// Determine whether the call could modify the field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  assert((t_oop != nullptr), "sanity");
  if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
    const TypeTuple* args = _tf->domain_sig();
    Node* dest = nullptr;
    // Stubs that can be called once an ArrayCopyNode is expanded have
    // different signatures. Look for the second pointer argument,
    // that is the destination of the copy.
    for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
      if (args->field_at(i)->isa_ptr()) {
        j++;
        if (j == 2) {
          dest = in(i);
          break;
        }
      }
    }
    guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
    if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
      return true;
    }
    return false;
  }
  if (t_oop->is_known_instance()) {
    // The instance_id is set only for scalar-replaceable allocations which
    // are not passed as arguments according to Escape Analysis.
    return false;
  }
  if (t_oop->is_ptr_to_boxed_value()) {
    ciKlass* boxing_klass = t_oop->is_instptr()->instance_klass();
    if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
      // Skip unrelated boxing methods.
      Node* proj = proj_out_or_null(TypeFunc::Parms);
      if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
        return false;
      }
    }
    if (is_CallJava() && as_CallJava()->method() != nullptr) {
      ciMethod* meth = as_CallJava()->method();
      if (meth->is_getter()) {
        return false;
      }
      // May modify (by reflection) if a boxing object is passed
      // as argument or returned.
      Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
      if (proj != nullptr) {
        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      const TypeTuple* d = tf()->domain_cc();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
                                    (inst_t->instance_klass() == boxing_klass))) {
          return true;
        }
      }
      return false;
    }
  }
  return true;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(Node* n) {
  const TypeTuple* d = tf()->domain_cc();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    if (in(i) == n) {
      return true;
    }
  }
  return false;
}

bool CallNode::has_debug_use(Node* n) {
  if (jvms() != nullptr) {
    for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
      if (in(i) == n) {
        return true;
      }
    }
  }
  return false;
}
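// Note (sketch): the two helpers above split a call's references to a node n
// into argument uses and debug uses. Inputs in [TypeFunc::Parms,
// domain_cc()->cnt()) are actual arguments, so has_non_debug_use(n) means n
// escapes into the callee; inputs in [debug_start(), debug_end()) merely
// record interpreter state for deoptimization.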
// Returns the unique CheckCastPP of a call,
// or 'this' if there are several CheckCastPPs or unexpected uses,
// or null if there is none.
Node *CallNode::result_cast() {
  Node *cast = nullptr;

  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == nullptr)
    return nullptr;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != nullptr) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}
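// Overview of the projections collected by extract_projections() below
// (informal, field names from CallProjections):
//
//   fallthrough_proj       control leaving the call
//   fallthrough_catchproj  normal control past the Catch, if present
//   fallthrough_ioproj     I_O on the normal path
//   catchall_ioproj        I_O on the exceptional path
//   fallthrough_memproj    memory on the normal path
//   catchall_memproj       memory on the exceptional path
//   catchall_catchproj     control into the exception handler
//   resproj[i]             the i-th result projection
//   exobj                  the CreateEx for the exception oop, if used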
CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) {
  uint max_res = TypeFunc::Parms-1;
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    max_res = MAX2(max_res, pn->_con);
  }

  assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");

  uint projs_size = sizeof(CallProjections);
  if (max_res > TypeFunc::Parms) {
    projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
  }
  char* projs_storage = resource_allocate_bytes(projs_size);
  CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        projs->fallthrough_proj = pn;
        const Node* cn = pn->unique_ctrl_out_or_null();
        if (cn != nullptr && cn->is_Catch()) {
          ProjNode *cpn = nullptr;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              projs->fallthrough_catchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              projs->catchall_catchproj = cpn;
            }
          }
        }
        break;
      }
    case TypeFunc::I_O:
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == nullptr, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj[0] = pn;
      break;
    default:
      assert(pn->_con <= max_res, "unexpected projection from allocation node.");
      projs->resproj[pn->_con-TypeFunc::Parms] = pn;
      break;
    }
  }

  // The resproj may not exist because the result could be ignored
  // and the exception object may not exist if an exception handler
  // swallows the exception but all the others must exist and be found.
  do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
  assert(!do_asserts || projs->fallthrough_proj      != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_memproj   != nullptr, "must be found");
  assert(!do_asserts || projs->fallthrough_ioproj    != nullptr, "must be found");
  assert(!do_asserts || projs->catchall_catchproj    != nullptr, "must be found");
  if (separate_io_proj) {
    assert(!do_asserts || projs->catchall_memproj    != nullptr, "must be found");
    assert(!do_asserts || projs->catchall_ioproj     != nullptr, "must be found");
  }
  return projs;
}

Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
  // Validate attached generator
  CallGenerator* cg = generator();
  if (cg != nullptr) {
    assert((is_CallStaticJava()  && cg->is_mh_late_inline()) ||
           (is_CallDynamicJava() && cg->is_virtual_late_inline()), "mismatch");
  }
#endif // ASSERT
  return SafePointNode::Ideal(phase, can_reshape);
}

bool CallNode::is_call_to_arraycopystub() const {
  if (_name != nullptr && strstr(_name, "arraycopy") != nullptr) {
    return true;
  }
  return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
bool CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}

void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain_sig()->cnt() : (uint)TypeFunc::Parms+1;
  uint new_dbg_start = tf()->domain_sig()->cnt();
  int jvms_adj = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == req(), "argument count mismatch");
  Compile* C = phase->C;

  // SafePointScalarObject node could be referenced several times in debug info.
  // Use Dict to record cloned nodes.
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < sfpt->req(); i++) {
    Node* old_in = sfpt->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      bool new_node;
      Node* new_in = old_sosn->clone(sosn_map, new_node);
      if (new_node) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = phase->transform(new_in); // Register new node.
      }
      old_in = new_in;
    }
    add_req(old_in);
  }

  // JVMS may be shared so clone it before we modify it
  set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
  for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    jvms->set_map(this);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}
#ifdef ASSERT
bool CallJavaNode::validate_symbolic_info() const {
  if (method() == nullptr) {
    return true; // call into runtime or uncommon trap
  }
  Bytecodes::Code bc = jvms()->method()->java_code_at_bci(jvms()->bci());
  if (EnableValhalla && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
    return true;
  }
  ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
  ciMethod* callee = method();
  if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
    assert(override_symbolic_info(), "should be set");
  }
  assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
  return true;
}
#endif

#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream* st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}

void CallJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
bool CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (can_reshape && uncommon_trap_request() != 0) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (remove_unknown_flat_array_load(igvn, in(0), in(TypeFunc::Memory), in(TypeFunc::Parms))) {
      if (!in(0)->is_Region()) {
        igvn->replace_input_of(this, 0, phase->C->top());
      }
      return this;
    }
  }

  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    assert(IncrementalInlineMH, "required");
    assert(cg->call_node() == this, "mismatch");
    assert(cg->is_mh_late_inline(), "not virtual");

    // Check whether this MH handle call becomes a candidate for inlining.
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(nullptr);
      }
    } else if (iid == vmIntrinsics::_linkToNative) {
      // never retry
    } else {
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(nullptr);
      }
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}
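// Example of the late-inline trigger in Ideal() above (hypothetical shape):
// a MethodHandle.invokeBasic call site only becomes profitable to inline
// once IGVN has collapsed the receiver, in(TypeFunc::Parms), to a ConP
// constant; for the linkTo* intrinsics the same test is applied to the
// trailing MemberName argument instead.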
//----------------------------is_uncommon_trap----------------------------
// Returns true if this is an uncommon trap.
bool CallStaticJavaNode::is_uncommon_trap() const {
  return (_name != nullptr && !strcmp(_name, "uncommon_trap"));
}

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  return is_uncommon_trap() ? extract_uncommon_trap_request(this) : 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != nullptr &&
        call->in(TypeFunc::Parms)->is_Con() &&
        call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}
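// Note (hedged): the integer decoded above is a Deoptimization trap request,
// which packs a reason and an action (see Deoptimization::make_trap_request
// and format_trap_request in the runtime); this file only reads the constant
// back from the first argument of an "uncommon_trap" call.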
// A split-if can cause the flat array branch of an array load with unknown type (see
// Parse::array_load) to end in an uncommon trap. In that case, the call to
// 'load_unknown_inline' is useless. Replace it with an uncommon trap with the same JVMState.
bool CallStaticJavaNode::remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg) {
  if (ctl == nullptr || ctl->is_top() || mem == nullptr || mem->is_top() || !mem->is_MergeMem()) {
    return false;
  }
  if (ctl->is_Region()) {
    bool res = false;
    for (uint i = 1; i < ctl->req(); i++) {
      MergeMemNode* mm = mem->clone()->as_MergeMem();
      for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
        Node* m = mms.memory();
        if (m->is_Phi() && m->in(0) == ctl) {
          mms.set_memory(m->in(i));
        }
      }
      if (remove_unknown_flat_array_load(igvn, ctl->in(i), mm, unc_arg)) {
        res = true;
        if (!ctl->in(i)->is_Region()) {
          igvn->replace_input_of(ctl, i, igvn->C->top());
        }
      }
      igvn->remove_dead_node(mm);
    }
    return res;
  }
  // Verify the control flow is ok
  Node* call = ctl;
  MemBarNode* membar = nullptr;
  for (;;) {
    if (call == nullptr || call->is_top()) {
      return false;
    }
    if (call->is_Proj() || call->is_Catch() || call->is_MemBar()) {
      call = call->in(0);
    } else if (call->Opcode() == Op_CallStaticJava && !call->in(0)->is_top() &&
               call->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
      assert(call->in(0)->is_Proj() && call->in(0)->in(0)->is_MemBar(), "missing membar");
      membar = call->in(0)->in(0)->as_MemBar();
      break;
    } else {
      return false;
    }
  }

  JVMState* jvms = call->jvms();
  if (igvn->C->too_many_traps(jvms->method(), jvms->bci(), Deoptimization::trap_request_reason(uncommon_trap_request()))) {
    return false;
  }

  Node* call_mem = call->in(TypeFunc::Memory);
  if (call_mem == nullptr || call_mem->is_top()) {
    return false;
  }
  if (!call_mem->is_MergeMem()) {
    call_mem = MergeMemNode::make(call_mem);
    igvn->register_new_node_with_optimizer(call_mem);
  }

  // Verify that there's no unexpected side effect
  for (MergeMemStream mms2(mem->as_MergeMem(), call_mem->as_MergeMem()); mms2.next_non_empty2(); ) {
    Node* m1 = mms2.is_empty() ? mms2.base_memory() : mms2.memory();
    Node* m2 = mms2.memory2();

    for (uint i = 0; i < 100; i++) {
      if (m1 == m2) {
        break;
      } else if (m1->is_Proj()) {
        m1 = m1->in(0);
      } else if (m1->is_MemBar()) {
        m1 = m1->in(TypeFunc::Memory);
      } else if (m1->Opcode() == Op_CallStaticJava &&
                 m1->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
        if (m1 != call) {
          return false;
        }
        break;
      } else if (m1->is_MergeMem()) {
        MergeMemNode* mm = m1->as_MergeMem();
        int idx = mms2.alias_idx();
        if (idx == Compile::AliasIdxBot) {
          m1 = mm->base_memory();
        } else {
          m1 = mm->memory_at(idx);
        }
      } else {
        return false;
      }
    }
  }
  if (call_mem->outcnt() == 0) {
    igvn->remove_dead_node(call_mem);
  }

  // Remove membar preceding the call
  membar->remove(igvn);

  address call_addr = OptoRuntime::uncommon_trap_blob()->entry_point();
  CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap", nullptr);
  unc->init_req(TypeFunc::Control, call->in(0));
  unc->init_req(TypeFunc::I_O, call->in(TypeFunc::I_O));
  unc->init_req(TypeFunc::Memory, call->in(TypeFunc::Memory));
  unc->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
  unc->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
  unc->init_req(TypeFunc::Parms+0, unc_arg);
  unc->set_cnt(PROB_UNLIKELY_MAG(4));
  unc->copy_call_debug_info(igvn, call->as_CallStaticJava());

  // Replace the call with an uncommon trap
  igvn->replace_input_of(call, 0, igvn->C->top());

  igvn->register_new_node_with_optimizer(unc);

  Node* ctrl = igvn->transform(new ProjNode(unc, TypeFunc::Control));
  Node* halt = igvn->transform(new HaltNode(ctrl, call->in(TypeFunc::FramePtr), "uncommon trap returned which should never happen"));
  igvn->add_input_to(igvn->C->root(), halt);

  return true;
}


#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != nullptr) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                Deoptimization::format_trap_request(buf, sizeof(buf),
                                                    trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}

void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else if (_name) {
    st->print("%s", _name);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
bool CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}
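// Sketch of the devirtualization attempt in Ideal() below: if IGVN has
// narrowed the receiver type far enough that optimize_virtual_call()
// resolves a unique target (call_does_dispatch comes back false), the
// attached CallGenerator is re-queued for late inlining rather than
// leaving a virtual dispatch in the graph.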
Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != nullptr) {
    assert(IncrementalInlineVirtual, "required");
    assert(cg->call_node() == this, "mismatch");
    assert(cg->is_virtual_late_inline(), "not virtual");

    // Recover symbolic info for method resolution.
    ciMethod* caller = jvms()->method();
    ciBytecodeStream iter(caller);
    iter.force_bci(jvms()->bci());

    bool not_used1;
    ciSignature* not_used2;
    ciMethod* orig_callee = iter.get_method(not_used1, &not_used2);  // callee in the bytecode
    ciKlass* holder = iter.get_declared_method_holder();
    if (orig_callee->is_method_handle_intrinsic()) {
      assert(_override_symbolic_info, "required");
      orig_callee = method();
      holder = method()->holder();
    }

    ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

    Node* receiver_node = in(TypeFunc::Parms);
    const TypeOopPtr* receiver_type = phase->type(receiver_node)->isa_oopptr();

    int not_used3;
    bool call_does_dispatch;
    ciMethod* callee = phase->C->optimize_virtual_call(caller, klass, holder, orig_callee, receiver_type, true /*is_virtual*/,
                                                       call_does_dispatch, not_used3);  // out-parameters
    if (!call_does_dispatch) {
      // Register for late inlining.
      cg->set_callee_method(callee);
      phase->C->prepend_late_inline(cg);  // MH late inlining prepends to the list, so do the same
      set_generator(nullptr);
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}

#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
bool CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif
uint CallLeafVectorNode::size_of() const { return sizeof(*this); }
bool CallLeafVectorNode::cmp( const Node &n ) const {
  CallLeafVectorNode &call = (CallLeafVectorNode&)n;
  return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
}
//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  if (_entry_point == nullptr) {
    // The call to that stub is a special case: its inputs are
    // multiple values returned from a call and so it should follow
    // the return convention.
    SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
    return;
  }
  SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
}

void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
#ifdef ASSERT
  assert(tf()->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
         "return vector size must match");
  const TypeTuple* d = tf()->domain_sig();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node* arg = in(i);
    assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
           "vector argument size must match");
  }
#endif

  SharedRuntime::vector_calling_convention(parm_regs, _num_bits, argcnt);
}

//=============================================================================
//------------------------------calling_convention-----------------------------


//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

uint CallLeafNoFPNode::match_edge(uint idx) const {
  // Null entry point is a special case for which the target is in a
  // register. Need to match that edge.
  return entry_point() == nullptr && idx == TypeFunc::Parms;
}
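// Note ahead of SafePointNode::set_local() below (illustrative example): a
// long or double occupies two adjacent slots, the second holding top. If
// locals 2/3 currently hold a long and an int is stored into local 3, the
// half in slot 3 is overwritten, so slot 2 must be topped out as well --
// which is exactly the RegD/RegL check performed there.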
//=============================================================================

void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}

uint SafePointNode::size_of() const { return sizeof(*this); }
bool SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == nullptr || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != nullptr)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}


//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return nullptr;
  } else {
    Node* n = in(req());
    assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}


//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
  if (remove_dead_region(phase, can_reshape)) {
    return this;
  }
  // Scalarize inline types in safepoint debug info.
  // Delay this until all inlining is over to avoid getting inconsistent debug info.
  if (phase->C->scalarize_in_safepoints() && can_reshape && jvms() != nullptr) {
    for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
      Node* n = in(i)->uncast();
      if (n->is_InlineType()) {
        n->as_InlineType()->make_scalar_in_safepoints(phase->is_IterGVN());
      }
    }
  }
  return nullptr;
}
//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node* SafePointNode::Identity(PhaseGVN* phase) {

  // If you have back to back safepoints, remove one
  if (in(TypeFunc::Control)->is_SafePoint()) {
    Node* out_c = unique_ctrl_out_or_null();
    // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
    // outer loop's safepoint could confuse removal of the outer loop.
    if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
      return in(TypeFunc::Control);
    }
  }

  // Transforming long counted loops requires a safepoint node. Do not
  // eliminate a safepoint until loop opts are over.
  if (in(0)->is_Proj() && !phase->C->major_progress()) {
    Node *n0 = in(0)->in(0);
    // Check if it is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Don't remove a safepoint belonging to an OuterStripMinedLoopEndNode.
      // If the loop dies, they will be removed together.
      if (has_out_with(Op_OuterStripMinedLoopEnd)) {
        return this;
      }
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type* SafePointNode::Value(PhaseGVN* phase) const {
  if (phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  if (in(0) == this) {
    return Type::TOP; // Dead infinite loop
  }
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
  _replaced_nodes.dump(st);
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms ) return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}


void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}
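// Monitor layout sketch (follows from push_monitor()/pop_monitor() below):
// each monitor contributes exactly two adjacent edges in [monoff, scloff),
//
//   [ box0, obj0, box1, obj1, ... ]
//
// hence MonitorEdges == 2 and JVMState::logMonitorEdges == 1.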
void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Record a monitor in the debug info: add edges for both the original
  // BoxLockNode (the stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  if (GenerateSynchronizationCode) {
    ins_req(nextmon,   lock->box_node());
    ins_req(nextmon+1, lock->obj_node());
  } else {
    Node* top = Compile::current()->top();
    ins_req(nextmon, top);
    ins_req(nextmon, top);
  }
  jvms()->set_scloff(nextmon + MonitorEdges);
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff) del_req_ordered(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

Node* SafePointNode::peek_operand(uint off) const {
  assert(jvms()->sp() > 0, "must have an operand");
  assert(off < jvms()->sp(), "off is out-of-range");
  return stack(jvms(), jvms()->sp() - off - 1);
}

// Do we Match on this edge index or not?  Match only the Parms edge.
uint SafePointNode::match_edge(uint idx) const {
  return (TypeFunc::Parms == idx);
}

void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
  assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
  int nb = igvn->C->root()->find_prec_edge(this);
  if (nb != -1) {
    igvn->delete_precedence_of(igvn->C->root(), nb);
  }
}

//==============  SafePointScalarObjectNode  ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
  _first_index(first_index),
  _depth(depth),
  _n_fields(n_fields),
  _alloc(alloc)
{
#ifdef ASSERT
  if (alloc != nullptr && !alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) {
    alloc->dump();
    assert(false, "unexpected call node");
  }
#endif
  init_class_id(Class_SafePointScalarObject);
}
// Do not allow value-numbering for SafePointScalarObject node.
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
bool SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(Dict* sosn_map, bool& new_node) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != nullptr) {
    new_node = false;
    return (SafePointScalarObjectNode*)cached;
  }
  new_node = true;
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}

#ifndef PRODUCT
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
  st->print(" # fields@[%d..%d]", first_index(), first_index() + n_fields() - 1);
}
#endif
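// Illustrative example (hedged): after scalar replacement of a
// non-escaping allocation such as
//
//   Point p = new Point(x, y);
//
// safepoints no longer keep the oop alive. Instead their debug info holds
// a SafePointScalarObjectNode whose _first_index/_n_fields locate x and y
// among the safepoint's inputs, giving deoptimization enough information
// to rematerialize the object in the interpreter frame.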
//==============  SafePointScalarMergeNode  ==============

SafePointScalarMergeNode::SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
  _merge_pointer_idx(merge_pointer_idx)
{
  init_class_id(Class_SafePointScalarMerge);
}

// Do not allow value-numbering for SafePointScalarMerge node.
uint SafePointScalarMergeNode::hash() const { return NO_HASH; }
bool SafePointScalarMergeNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarMergeNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarMergeNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarMergeNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarMergeNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarMergeNode*
SafePointScalarMergeNode::clone(Dict* sosn_map, bool& new_node) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != nullptr) {
    new_node = false;
    return (SafePointScalarMergeNode*)cached;
  }
  new_node = true;
  SafePointScalarMergeNode* res = (SafePointScalarMergeNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}

#ifndef PRODUCT
void SafePointScalarMergeNode::dump_spec(outputStream *st) const {
  st->print(" # merge_pointer_idx=%d, scalarized_objects=%d", _merge_pointer_idx, req()-1);
}
#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node,
                           Node* initial_test,
                           InlineTypeNode* inline_type_node)
  : CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  _is_allocation_MemBar_redundant = false;
  _larval = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  init_req( ValidLengthTest    , topnode);
  init_req( InlineType         , inline_type_node);
  // DefaultValue defaults to nullptr
  // RawDefaultValue defaults to nullptr
  C->add_macro_node(this);
}

void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
  assert(initializer != nullptr &&
         (initializer->is_object_constructor() || initializer->is_class_initializer()),
         "unexpected initializer method");
  BCEscapeAnalyzer* analyzer = initializer->get_bcea();
  if (analyzer == nullptr) {
    return;
  }

  // The allocation node is the first parameter of its initializer.
  if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
    _is_allocation_MemBar_redundant = true;
  }
}
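// A hedged sketch of the two shapes make_ideal_mark() below produces:
//
//   EnableValhalla:  mark = LoadX(klass + prototype_header_offset()) | larval_bits
//   otherwise:       mark = ConX(markWord::prototype())              | larval_bits
//
// where larval_bits is markWord::larval_bit_in_place for a larval
// allocation and 0 otherwise. The trailing OrX also guarantees that a
// fresh node is returned, which LoadNode::Ideal relies on.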
Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
  Node* mark_node = nullptr;
  if (EnableValhalla) {
    Node* klass_node = in(AllocateNode::KlassNode);
    Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
    mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  } else {
    mark_node = phase->MakeConX(markWord::prototype().value());
  }
  mark_node = phase->transform(mark_node);
  // Avoid returning a constant (old node) here because this method is used by LoadNode::Ideal
  return new OrXNode(mark_node, phase->MakeConX(_larval ? markWord::larval_bit_in_place : 0));
}

// Retrieve the length from the AllocateArrayNode.  Narrow the type with a
// CastII, if appropriate.  If we are not allowed to create new nodes, and
// a CastII is appropriate, return null.
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseValues* phase, bool allow_new_nodes) {
  Node *length = in(AllocateNode::ALength);
  assert(length != nullptr, "length is not null");

  const TypeInt* length_type = phase->find_int_type(length);
  const TypeAryPtr* ary_type = oop_type->isa_aryptr();

  if (ary_type != nullptr && length_type != nullptr) {
    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
    if (narrow_length_type != length_type) {
      // Assert one of:
      //   - the narrow_length is 0
      //   - the narrow_length is not wider than length
      assert(narrow_length_type == TypeInt::ZERO ||
             (length_type->is_con() && narrow_length_type->is_con() &&
              (narrow_length_type->_hi <= length_type->_lo)) ||
             (narrow_length_type->_hi <= length_type->_hi &&
              narrow_length_type->_lo >= length_type->_lo),
             "narrow type must be narrower than length type");

      // Return null if new nodes are not allowed
      if (!allow_new_nodes) {
        return nullptr;
      }
      // Create a cast which is control dependent on the initialization to
      // propagate the fact that the array length must be positive.
      InitializeNode* init = initialization();
      if (init != nullptr) {
        length = new CastIINode(init->proj_out_or_null(TypeFunc::Control), length, narrow_length_type);
      }
    }
  }

  return length;
}
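// Illustrative example (hedged): for 'new byte[n]' where n is only known
// to be an int, narrow_size_type() can tighten the length type to a
// non-negative range bounded by the maximum byte-array length, so the
// CastII created above, pinned below the initialization, lets later range
// checks and loop opts rely on length >= 0.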
//=============================================================================
uint LockNode::size_of() const { return sizeof(*this); }

// Redundant lock elimination
//
// There are various patterns of locking where we release and
// immediately reacquire a lock in a piece of code where no operations
// occur in between that would be observable.  In those cases we can
// skip releasing and reacquiring the lock without violating any
// fairness requirements.  Doing this around a loop could cause a lock
// to be held for a very long time so we concentrate on non-looping
// control flow.  We also require that the operations are fully
// redundant, meaning that we don't introduce new lock operations on
// some paths so as to be able to eliminate them on others, a la PRE.
// That would probably require some more extensive graph manipulation to
// guarantee that the memory edges were all handled correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method, consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
//    locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
//    the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow.
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
//    and unlock in the then case and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//   f();
//   s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting unlocking on the else case and
// eliminating the first unlock and the lock in the then side.
// Alternatively the unlock could be moved out of the then side so it
// was after the merge and the first unlock and second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally we might allow work between an unlock and lock before
// giving up on eliminating the locks.  The current code disallows any
// conditional control flow between these operations.  A formulation
// similar to partial redundancy elimination computing the
// availability of unlocking and the anticipatability of locking at a
// program point would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//       <obj>
//         |  \\------+
//         |   \       \
//         |  BoxLock   \
//         |   |   |     \
//         |   |    \     \
//         |   |    FastLock
//         |   |    /
//         |   |   /
//         |   |  |
//
//        Lock
//         |
//       Proj #0
//         |
//     MembarAcquire
//         |
//       Proj #0
//
//     MembarRelease
//         |
//       Proj #0
//         |
//       Unlock
//         |
//       Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through its control for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate they are marked as eliminable, which causes the
// expansion of the Lock and Unlock macro nodes to make the operation a NOP.
//
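// For example (illustrative only), once the intervening unlock/lock pair
// is marked eliminated,
//
//   synchronized (o) { a(); }
//   synchronized (o) { b(); }
//
// behaves as if it had been written
//
//   synchronized (o) { a(); b(); }
//
// which is exactly the effect of lock coarsening.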
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//   - copy regions.  (These may not have been optimized away yet.)
//   - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == nullptr)
    return nullptr;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == nullptr)
        break;  // hit a region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break; // found an interesting control
    }
  }
  return ctrl;
}

//
// Given a control, see if it's the control projection of an Unlock which
// is operating on the same object as the lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : nullptr;
  if (ctrl_proj != nullptr && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != nullptr && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
      Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
      if (lock_obj->eqv_uncast(unlock_obj) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = nullptr;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != nullptr, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top()) break;  // dead control path
    if (ctrl->is_Proj()) ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated
      if (ctrl->req() == 3 && ctrl->in(1) != nullptr && ctrl->in(2) != nullptr) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
    Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
    if (lock_obj->eqv_uncast(unlock_obj) &&
        BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}
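// The diamond shape accepted by the Region case above looks roughly like
// this (hedged sketch; anything more complicated makes the search punt):
//
//            If
//           /  \
//     IfTrue    IfFalse
//           \  /
//          Region
//
// next_control() additionally lets the walk skip copy regions and the
// control projections of already-eliminated lock/unlock nodes on either leg.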
// This code corresponds to case 3 above.

bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = nullptr;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != nullptr && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
        Node* lock1_obj = bs->step_over_gc_barrier(lock1->obj_node());
        if (lock_obj->eqv_uncast(lock1_obj) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // Check each control merging at this point for a matching unlock.
  // in(0) should be a self edge so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != nullptr) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;
}

// Check that all locks/unlocks associated with object come from balanced regions.
bool AbstractLockNode::is_balanced() {
  Node* obj = obj_node();
  for (uint j = 0; j < obj->outcnt(); j++) {
    Node* n = obj->raw_out(j);
    if (n->is_AbstractLock() &&
        n->as_AbstractLock()->obj_node()->eqv_uncast(obj)) {
      BoxLockNode* n_box = n->as_AbstractLock()->box_node()->as_BoxLock();
      if (n_box->is_unbalanced()) {
        return false;
      }
    }
  }
  return true;
}

const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"};

const char * AbstractLockNode::kind_as_string() const {
  return _kind_names[_kind];
}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}

void AbstractLockNode::set_eliminated_lock_counter() {
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
}

void AbstractLockNode::dump_spec(outputStream* st) const {
  st->print("%s ", _kind_names[_kind]);
  CallNode::dump_spec(st);
}

void AbstractLockNode::dump_compact_spec(outputStream* st) const {
  st->print("%s", _kind_names[_kind]);
}
#endif

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return nullptr;

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  const Type* obj_type = phase->type(obj_node());
  if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
    //
    // If we are locking a non-escaped object, the lock/unlock is unnecessary
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace the coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc1");
#endif
      this->set_non_esc_obj();
      return result;
    }

    if (!phase->C->do_locks_coarsening()) {
      return result; // Compiling without locks coarsening
    }
    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != nullptr && !is_eliminated()) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of a single unlock directly control dependent on a
        // single lock which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region()) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found lock preceded by multiple unlocks along all paths
          // joining at this point, which is case 3 in the description above.
        }
      } else {
        // see if this lock comes from either half of an if; the
        // predecessor merges unlocks and the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          if (Verbose) {
            tty->print_cr("=== Locks coarsening ===");
            tty->print("Obj: ");
            obj_node()->dump();
          }
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock)
              locks++;
            else
              unlocks++;
            if (Verbose) {
              tty->print("Box %d: ", i);
              box_node()->dump();
              tty->print(" %d: ", i);
              lock->dump();
            }
          }
          tty->print_cr("=== Coarsened %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated by coarsening and update any counters
#ifdef ASSERT
          lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
#endif
          lock->set_coarsened();
        }
        // Record this coarsened group.
        phase->C->add_coarsened_locks(lock_ops);
      } else if (ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities but the region this
        // lock is control dependent on hasn't been processed yet so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
bool LockNode::is_nested_lock_region() {
  return is_nested_lock_region(nullptr);
}

// c is used for access to the compilation log; no logging if null
bool LockNode::is_nested_lock_region(Compile * c) {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_1");
#endif
    return false; // External lock, or it is not a Box (it is a Phi node).
  }

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = nullptr;
  Node* bad_lock = nullptr;
  if (!box->is_simple_lock_region(&unique_lock, obj, &bad_lock)) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2a", bad_lock);
#endif
    return false;
  }
  if (unique_lock != this) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2b", (unique_lock != nullptr ? unique_lock : bad_lock));
    if (PrintEliminateLocks && Verbose) {
      tty->print_cr("=============== unique_lock != this ============");
      tty->print(" this: ");
      this->dump();
      tty->print(" box: ");
      box->dump();
      tty->print(" obj: ");
      obj->dump();
      if (unique_lock != nullptr) {
        tty->print(" unique_lock: ");
        unique_lock->dump();
      }
      if (bad_lock != nullptr) {
        tty->print(" bad_lock: ");
        bad_lock->dump();
      }
      tty->print_cr("===============");
    }
#endif
    return false;
  }

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  obj = bs->step_over_gc_barrier(obj);
  // Look for an external lock for the same object.
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      obj_node = bs->step_over_gc_barrier(obj_node);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        box->set_nested();
        return true;
      }
    }
  }
#ifdef ASSERT
  this->log_lock_optimization(c, "eliminate_lock_INLR_3");
#endif
  return false;
}
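// Illustrative example (hedged): nested locking on the same object, e.g.
//
//   synchronized (o) {          // outer monitor, lower BoxLock stack slot
//     synchronized (o) { ... }  // inner monitor, higher stack slot
//   }
//
// is detected above by walking the JVMState's monitor stack: an enclosing
// monitor on an equivalent object with a smaller stack slot marks the
// inner box as nested, allowing the inner lock/unlock to be eliminated.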
//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or null)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return nullptr;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  const Type* obj_type = phase->type(obj_node());
  if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
    //
    // If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace the coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}
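// Example of the compile-log entry emitted below (all values illustrative,
// derived from the format strings in log_lock_optimization()):
//
//   <eliminate_lock_set_non_esc2 compile_id='42' lock_id='317' class='unlock'
//       kind='Regular' box_id='311' obj_id='25' bad_id='-1' stamp='1.234'>
//     <jvms bci='17' method='822'/>
//   </eliminate_lock_set_non_esc2>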
void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
  if (C == nullptr) {
    return;
  }
  CompileLog* log = C->log();
  if (log != nullptr) {
    Node* box = box_node();
    Node* obj = obj_node();
    int box_id = box != nullptr ? box->_idx : -1;
    int obj_id = obj != nullptr ? obj->_idx : -1;

    log->begin_head("%s compile_id='%d' lock_id='%d' class='%s' kind='%s' box_id='%d' obj_id='%d' bad_id='%d'",
          tag, C->compile_id(), this->_idx,
          is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
          kind_as_string(), box_id, obj_id, (bad_lock != nullptr ? bad_lock->_idx : -1));
    log->stamp();
    log->end_head();
    JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
    while (p != nullptr) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail(tag);
  }
}

bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) {
  if (dest_t->is_known_instance() && t_oop->is_known_instance()) {
    return dest_t->instance_id() == t_oop->instance_id();
  }

  if (dest_t->isa_instptr() && !dest_t->is_instptr()->instance_klass()->equals(phase->C->env()->Object_klass())) {
    // clone
    if (t_oop->isa_aryptr()) {
      return false;
    }
    if (!t_oop->isa_instptr()) {
      return true;
    }
    if (dest_t->maybe_java_subtype_of(t_oop) || t_oop->maybe_java_subtype_of(dest_t)) {
      return true;
    }
    // unrelated
    return false;
  }

  if (dest_t->isa_aryptr()) {
    // arraycopy or array clone
    if (t_oop->isa_instptr()) {
      return false;
    }
    if (!t_oop->isa_aryptr()) {
      return true;
    }

    const Type* elem = dest_t->is_aryptr()->elem();
    if (elem == Type::BOTTOM) {
      // An array, but we don't know what the elements are
      return true;
    }

    dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
    t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
    uint dest_alias = phase->C->get_alias_index(dest_t);
    uint t_oop_alias = phase->C->get_alias_index(t_oop);

    return dest_alias == t_oop_alias;
  }

  return true;
}
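// Some worked examples of the logic above (hedged; the type names are
// illustrative):
//   - clone of a Point, t_oop is an array type      -> false (a clone of an
//     instance never overlaps array memory)
//   - clone of a Point, t_oop is a String           -> false (unrelated classes,
//     neither can be a subtype of the other)
//   - dest is byte[],   t_oop is an instance type   -> false
//   - dest is byte[],   t_oop is long[]             -> decided by comparing the
//     alias indices of the flattened array slices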