/*
 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciTypeFlow.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/runtime.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style


#ifndef PRODUCT
extern int explicit_null_checks_elided;
#endif

//=============================================================================
//------------------------------Value------------------------------------------
// Return a tuple for whichever arm of the IF is reachable
const Type* IfNode::Value(PhaseGVN* phase) const {
  if( !in(0) ) return Type::TOP;
  if( phase->type(in(0)) == Type::TOP )
    return Type::TOP;
  const Type *t = phase->type(in(1));
  if( t == Type::TOP )           // data is undefined
    return TypeTuple::IFNEITHER; // unreachable altogether
  if( t == TypeInt::ZERO )       // zero, or false
    return TypeTuple::IFFALSE;   // only false branch is reachable
  if( t == TypeInt::ONE )        // 1, or true
    return TypeTuple::IFTRUE;    // only true branch is reachable
  assert( t == TypeInt::BOOL, "expected boolean type" );

  return TypeTuple::IFBOTH;      // No progress
}

const RegMask &IfNode::out_RegMask() const {
  return RegMask::Empty;
}

//------------------------------split_if---------------------------------------
// Look for places where we merge constants, then test on the merged value.
// If the IF test will be constant folded on the path with the constant, we
// win by splitting the IF to before the merge point.
static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
  // I could be a lot more general here, but I'm trying to squeeze this
  // in before the Christmas '98 break so I'm gonna be kinda restrictive
  // on the patterns I accept.  CNC
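
  // For intuition, a roughly equivalent source-level shape of the pattern
  // targeted here (illustrative sketch only; the names and the constant are
  // made up):
  //
  //   int t = cond ? 7 : x;      // Phi(7, x) at a merge Region
  //   if (t == 7) { ... }        // If( Bool( CmpI( Phi, 7 ) ) )
  //
  // On the path that supplies the constant the test folds to a constant, so
  // splitting the If above the Region lets that copy of the test (and often
  // the control flow below it) constant fold away.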

  // Look for a compare of a constant and a merged value
  Node *i1 = iff->in(1);
  if( !i1->is_Bool() ) return nullptr;
  BoolNode *b = i1->as_Bool();
  Node *cmp = b->in(1);
  if( !cmp->is_Cmp() ) return nullptr;
  i1 = cmp->in(1);
  if( i1 == nullptr || !i1->is_Phi() ) return nullptr;
  PhiNode *phi = i1->as_Phi();
  Node *con2 = cmp->in(2);
  if( !con2->is_Con() ) return nullptr;
  // See that the merge point contains some constants
  Node *con1=nullptr;
  uint i4;
  for( i4 = 1; i4 < phi->req(); i4++ ) {
    con1 = phi->in(i4);
    if( !con1 ) return nullptr;  // Do not optimize partially collapsed merges
    if( con1->is_Con() ) break;  // Found a constant
    // Also allow null-vs-not-null checks
    const TypePtr *tp = igvn->type(con1)->isa_ptr();
    if( tp && tp->_ptr == TypePtr::NotNull )
      break;
  }
  if( i4 >= phi->req() ) return nullptr; // Found no constants

  igvn->C->set_has_split_ifs(true); // Has chance for split-if

  // Make sure that the compare can be constant folded away
  Node *cmp2 = cmp->clone();
  cmp2->set_req(1,con1);
  cmp2->set_req(2,con2);
  const Type *t = cmp2->Value(igvn);
  // This compare is dead, so whack it!
  igvn->remove_dead_node(cmp2);
  if( !t->singleton() ) return nullptr;

  // No intervening control, like a simple Call
  Node* r = iff->in(0);
  if (!r->is_Region() || r->is_Loop() || phi->region() != r || r->as_Region()->is_copy()) {
    return nullptr;
  }

  // No other users of the cmp/bool
  if (b->outcnt() != 1 || cmp->outcnt() != 1) {
    //tty->print_cr("many users of cmp/bool");
    return nullptr;
  }

  // Make sure we can determine where all the uses of merged values go
  for (DUIterator_Fast jmax, j = r->fast_outs(jmax); j < jmax; j++) {
    Node* u = r->fast_out(j);
    if( u == r ) continue;
    if( u == iff ) continue;
    if( u->outcnt() == 0 ) continue; // use is dead & ignorable
    if( !u->is_Phi() ) {
      /*
      if( u->is_Start() ) {
        tty->print_cr("Region has inlined start use");
      } else {
        tty->print_cr("Region has odd use");
        u->dump(2);
      }*/
      return nullptr;
    }
    if( u != phi ) {
      // CNC - do not allow any other merged value
      //tty->print_cr("Merging another value");
      //u->dump(2);
      return nullptr;
    }
    // Make sure we can account for all Phi uses
    for (DUIterator_Fast kmax, k = u->fast_outs(kmax); k < kmax; k++) {
      Node* v = u->fast_out(k); // User of the phi
      // CNC - Allow only really simple patterns.
      // In particular I disallow AddP of the Phi, a fairly common pattern
      if (v == cmp) continue;  // The compare is OK
      if (v->is_ConstraintCast()) {
        // If the cast is derived from data flow edges, it may not have a control edge.
        // If so, it should be safe to split. But follow-up code cannot deal with
        // this (l. 359). So skip.
        if (v->in(0) == nullptr) {
          return nullptr;
        }
        if (v->in(0)->in(0) == iff) {
          continue; // CastPP/II of the IfNode is OK
        }
      }
      // Disabled following code because I cannot tell if exactly one
      // path dominates without a real dominator check. CNC 9/9/1999
      //uint vop = v->Opcode();
      //if( vop == Op_Phi ) {      // Phi from another merge point might be OK
      //  Node *r = v->in(0);      // Get controlling point
      //  if( !r ) return nullptr; // Degraded to a copy
      //  // Find exactly one path in (either True or False doms, but not IFF)
      //  int cnt = 0;
      //  for( uint i = 1; i < r->req(); i++ )
      //    if( r->in(i) && r->in(i)->in(0) == iff )
      //      cnt++;
      //  if( cnt == 1 ) continue; // Exactly one of True or False guards Phi
      //}
      if( !v->is_Call() ) {
        /*
        if( v->Opcode() == Op_AddP ) {
          tty->print_cr("Phi has AddP use");
        } else if( v->Opcode() == Op_CastPP ) {
          tty->print_cr("Phi has CastPP use");
        } else if( v->Opcode() == Op_CastII ) {
          tty->print_cr("Phi has CastII use");
        } else {
          tty->print_cr("Phi has use I can't be bothered with");
        }
        */
      }
      return nullptr;

      /* CNC - Cut out all the fancy acceptance tests
      // Can we clone this use when doing the transformation?
      // If all uses are from Phis at this merge or constants, then YES.
      if( !v->in(0) && v != cmp ) {
        tty->print_cr("Phi has free-floating use");
        v->dump(2);
        return nullptr;
      }
      for( uint l = 1; l < v->req(); l++ ) {
        if( (!v->in(l)->is_Phi() || v->in(l)->in(0) != r) &&
            !v->in(l)->is_Con() ) {
          tty->print_cr("Phi has use");
          v->dump(2);
          return nullptr;
        } // End of if Phi-use input is neither Phi nor Constant
      } // End of for all inputs to Phi-use
      */
    } // End of for all uses of Phi
  } // End of for all uses of Region

  // Only do this if the IF node is in a sane state
  if (iff->outcnt() != 2)
    return nullptr;

  // Got a hit!  Do the Mondo Hack!
  //
  //ABC  a1c   def   ghi            B     1 e   h          A C   a c   d f   g i
  // R - Phi - Phi - Phi            Rc - Phi - Phi - Phi   Rx - Phi - Phi - Phi
  //     cmp - 2                         cmp - 2                cmp - 2
  //       bool                            bool_c                 bool_x
  //       if                               if_c                   if_x
  //      T  F                              T  F                   T  F
  // ..s..    ..t ..                   ..s..    ..t..          ..s..    ..t..
  //
  // Split the paths coming into the merge point into 2 separate groups of
  // merges.  On the left will be all the paths feeding constants into the
  // Cmp's Phi.  On the right will be the remaining paths.  The Cmp's Phi
  // will fold up into a constant; this will let the Cmp fold up as well as
  // all the control flow.  Below the original IF we have 2 control
  // dependent regions, 's' and 't'.  Now we will merge the two paths
  // just prior to 's' and 't' from the two IFs.  At least 1 path (and quite
  // likely 2 or more) will promptly constant fold away.
  PhaseGVN *phase = igvn;

  // Make a region merging constants and a region merging the rest
  uint req_c = 0;
  for (uint ii = 1; ii < r->req(); ii++) {
    if (phi->in(ii) == con1) {
      req_c++;
    }
    Node* proj = PhaseIdealLoop::find_parse_predicate(r->in(ii));
    if (proj != nullptr) {
      // Bail out if splitting through a region with a predicate input (could
      // also be a loop header before loop opts creates a LoopNode for it).
      return nullptr;
    }
  }

  // If all the defs of the phi are the same constant, we already have the desired end state.
  // Skip the split that would create empty phi and region nodes.
  if ((r->req() - req_c) == 1) {
    return nullptr;
  }

  // At this point we know that we can apply the split if optimization. If the region is still on the worklist,
  // we should wait until it is processed. The region might be removed which makes this optimization redundant.
  // This also avoids the creation of dead data loops when rewiring data nodes below when a region is dying.
  if (igvn->_worklist.member(r)) {
    igvn->_worklist.push(iff); // retry split if later again
    return nullptr;
  }

  Node *region_c = new RegionNode(req_c + 1);
  Node *phi_c    = con1;
  uint  len      = r->req();
  Node *region_x = new RegionNode(len - req_c);
  Node *phi_x    = PhiNode::make_blank(region_x, phi);
  for (uint i = 1, i_c = 1, i_x = 1; i < len; i++) {
    if (phi->in(i) == con1) {
      region_c->init_req( i_c++, r  ->in(i) );
    } else {
      region_x->init_req( i_x,   r  ->in(i) );
      phi_x   ->init_req( i_x++, phi->in(i) );
    }
  }

  // Register the new RegionNodes but do not transform them.  Cannot
  // transform until the entire Region/Phi conglomerate has been hacked
  // as a single huge transform.
  igvn->register_new_node_with_optimizer( region_c );
  igvn->register_new_node_with_optimizer( region_x );
  // Prevent the untimely death of phi_x.  Currently he has no uses.  He is
  // about to get one.  If this only use goes away, then phi_x will look dead.
  // However, he will be picking up some more uses down below.
  Node *hook = new Node(4);
  hook->init_req(0, phi_x);
  hook->init_req(1, phi_c);
  phi_x = phase->transform( phi_x );

  // Make the compare
  Node *cmp_c = phase->makecon(t);
  Node *cmp_x = cmp->clone();
  cmp_x->set_req(1,phi_x);
  cmp_x->set_req(2,con2);
  cmp_x = phase->transform(cmp_x);
  // Make the bool
  Node *b_c = phase->transform(new BoolNode(cmp_c,b->_test._test));
  Node *b_x = phase->transform(new BoolNode(cmp_x,b->_test._test));
  // Make the IfNode
  IfNode* iff_c = iff->clone()->as_If();
  iff_c->set_req(0, region_c);
  iff_c->set_req(1, b_c);
  igvn->set_type_bottom(iff_c);
  igvn->_worklist.push(iff_c);
  hook->init_req(2, iff_c);

  IfNode* iff_x = iff->clone()->as_If();
  iff_x->set_req(0, region_x);
  iff_x->set_req(1, b_x);
  igvn->set_type_bottom(iff_x);
  igvn->_worklist.push(iff_x);
  hook->init_req(3, iff_x);

  // Make the true/false arms
  Node *iff_c_t = phase->transform(new IfTrueNode (iff_c));
  Node *iff_c_f = phase->transform(new IfFalseNode(iff_c));
  Node *iff_x_t = phase->transform(new IfTrueNode (iff_x));
  Node *iff_x_f = phase->transform(new IfFalseNode(iff_x));

  // Merge the TRUE paths
  Node *region_s = new RegionNode(3);
  igvn->_worklist.push(region_s);
  region_s->init_req(1, iff_c_t);
  region_s->init_req(2, iff_x_t);
  igvn->register_new_node_with_optimizer( region_s );

  // Merge the FALSE paths
  Node *region_f = new RegionNode(3);
  igvn->_worklist.push(region_f);
  region_f->init_req(1, iff_c_f);
  region_f->init_req(2, iff_x_f);
  igvn->register_new_node_with_optimizer( region_f );

  igvn->hash_delete(cmp);// Remove soon-to-be-dead node from hash table.
  cmp->set_req(1,nullptr);  // Whack the inputs to cmp because it will be dead
  cmp->set_req(2,nullptr);
  // Check for all uses of the Phi and give them a new home.
  // The 'cmp' got cloned, but CastPP/IIs need to be moved.
  Node *phi_s = nullptr;     // do not construct unless needed
  Node *phi_f = nullptr;     // do not construct unless needed
  for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) {
    Node* v = phi->last_out(i2);// User of the phi
    igvn->rehash_node_delayed(v); // Have to fixup other Phi users
    uint vop = v->Opcode();
    Node *proj = nullptr;
    if( vop == Op_Phi ) {       // Remote merge point
      Node *r = v->in(0);
      for (uint i3 = 1; i3 < r->req(); i3++)
        if (r->in(i3) && r->in(i3)->in(0) == iff) {
          proj = r->in(i3);
          break;
        }
    } else if( v->is_ConstraintCast() ) {
      proj = v->in(0);          // Controlling projection
    } else {
      assert( 0, "do not know how to handle this guy" );
    }
    guarantee(proj != nullptr, "sanity");

    Node *proj_path_data, *proj_path_ctrl;
    if( proj->Opcode() == Op_IfTrue ) {
      if( phi_s == nullptr ) {
        // Only construct phi_s if needed, otherwise provides
        // interfering use.
        phi_s = PhiNode::make_blank(region_s,phi);
        phi_s->init_req( 1, phi_c );
        phi_s->init_req( 2, phi_x );
        hook->add_req(phi_s);
        phi_s = phase->transform(phi_s);
      }
      proj_path_data = phi_s;
      proj_path_ctrl = region_s;
    } else {
      if( phi_f == nullptr ) {
        // Only construct phi_f if needed, otherwise provides
        // interfering use.
        phi_f = PhiNode::make_blank(region_f,phi);
        phi_f->init_req( 1, phi_c );
        phi_f->init_req( 2, phi_x );
        hook->add_req(phi_f);
        phi_f = phase->transform(phi_f);
      }
      proj_path_data = phi_f;
      proj_path_ctrl = region_f;
    }

    // Fixup 'v' for the split
    if( vop == Op_Phi ) {       // Remote merge point
      uint i;
      for( i = 1; i < v->req(); i++ )
        if( v->in(i) == phi )
          break;
      v->set_req(i, proj_path_data );
    } else if( v->is_ConstraintCast() ) {
      v->set_req(0, proj_path_ctrl );
      v->set_req(1, proj_path_data );
    } else
      ShouldNotReachHere();
  }

  // Now replace the original iff's True/False with region_s/region_f.
  // This makes the original iff go dead.
  for (DUIterator_Last i3min, i3 = iff->last_outs(i3min); i3 >= i3min; --i3) {
    Node* p = iff->last_out(i3);
    assert( p->Opcode() == Op_IfTrue || p->Opcode() == Op_IfFalse, "" );
    Node *u = (p->Opcode() == Op_IfTrue) ? region_s : region_f;
    // Replace p with u
    igvn->add_users_to_worklist(p);
    for (DUIterator_Last lmin, l = p->last_outs(lmin); l >= lmin;) {
      Node* x = p->last_out(l);
      igvn->hash_delete(x);
      uint uses_found = 0;
      for( uint j = 0; j < x->req(); j++ ) {
        if( x->in(j) == p ) {
          x->set_req(j, u);
          uses_found++;
        }
      }
      l -= uses_found;    // we deleted 1 or more copies of this edge
    }
    igvn->remove_dead_node(p);
  }

  // Force the original merge dead
  igvn->hash_delete(r);
  // First, remove region's dead users.
  for (DUIterator_Last lmin, l = r->last_outs(lmin); l >= lmin;) {
    Node* u = r->last_out(l);
    if( u == r ) {
      r->set_req(0, nullptr);
    } else {
      assert(u->outcnt() == 0, "only dead users");
      igvn->remove_dead_node(u);
    }
    l -= 1;
  }
  igvn->remove_dead_node(r);

  // Now remove the bogus extra edges used to keep things alive
  igvn->remove_dead_node( hook );

  // Must return either the original node (now dead) or a new node
  // (Do not return a top here, since that would break the uniqueness of top.)
  return new ConINode(TypeInt::ZERO);
}
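
// For orientation: the canonical range-check shape handled below is,
// roughly (illustrative sketch; 'i' and 'a' are made-up names):
//
//   if (i <u a.length) { ... use a[i] ... }
//   else               { uncommon_trap(Reason_range_check); }
//
// i.e. a single unsigned compare of the index against a LoadRange of the
// array length, whose failing projection leads to an uncommon trap.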

// if this IfNode follows a range check pattern return the projection
// for the failed path
ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) {
  if (outcnt() != 2) {
    return nullptr;
  }
  Node* b = in(1);
  if (b == nullptr || !b->is_Bool())  return nullptr;
  BoolNode* bn = b->as_Bool();
  Node* cmp = bn->in(1);
  if (cmp == nullptr)  return nullptr;
  if (cmp->Opcode() != Op_CmpU)  return nullptr;

  l = cmp->in(1);
  r = cmp->in(2);
  flip_test = 1;
  if (bn->_test._test == BoolTest::le) {
    l = cmp->in(2);
    r = cmp->in(1);
    flip_test = 2;
  } else if (bn->_test._test != BoolTest::lt) {
    return nullptr;
  }
  if (l->is_top())  return nullptr;   // Top input means dead test
  if (r->Opcode() != Op_LoadRange && !is_RangeCheck())  return nullptr;

  // We have recognized one of these forms:
  //  Flip 1: If (Bool[<] CmpU(l, LoadRange)) ...
  //  Flip 2: If (Bool[<=] CmpU(LoadRange, l)) ...

  ProjNode* iftrap = proj_out_or_null(flip_test == 2 ? true : false);
  return iftrap;
}


//------------------------------is_range_check---------------------------------
// Return 0 if not a range check.  Return 1 if a range check and set index and
// offset.  Return 2 if we had to negate the test.  Index is null if the check
// is versus a constant.
int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) {
  int flip_test = 0;
  Node* l = nullptr;
  Node* r = nullptr;
  ProjNode* iftrap = range_check_trap_proj(flip_test, l, r);

  if (iftrap == nullptr) {
    return 0;
  }

  // Make sure it's a real range check by requiring an uncommon trap
  // along the OOB path.  Otherwise, it's possible that the user wrote
  // something which optimized to look like a range check but behaves
  // in some other way.
  if (iftrap->is_uncommon_trap_proj(Deoptimization::Reason_range_check) == nullptr) {
    return 0;
  }

  // Look for index+offset form
  Node* ind = l;
  jint  off = 0;
  if (l->is_top()) {
    return 0;
  } else if (l->Opcode() == Op_AddI) {
    if ((off = l->in(1)->find_int_con(0)) != 0) {
      ind = l->in(2)->uncast();
    } else if ((off = l->in(2)->find_int_con(0)) != 0) {
      ind = l->in(1)->uncast();
    }
  } else if ((off = l->find_int_con(-1)) >= 0) {
    // constant offset with no variable index
    ind = nullptr;
  } else {
    // variable index with no constant offset (or dead negative index)
    off = 0;
  }

  // Return all the values:
  index  = ind;
  offset = off;
  range  = r;
  return flip_test;
}

//------------------------------adjust_check-----------------------------------
// Adjust (widen) a prior range check
static void adjust_check(Node* proj, Node* range, Node* index,
                         int flip, jint off_lo, PhaseIterGVN* igvn) {
  PhaseGVN *gvn = igvn;
  // Break apart the old check
  Node *iff = proj->in(0);
  Node *bol = iff->in(1);
  if( bol->is_top() ) return;   // In case a partially dead range check appears
  // bail (or bomb[ASSERT/DEBUG]) if NOT projection-->IfNode-->BoolNode
  DEBUG_ONLY( if( !bol->is_Bool() ) { proj->dump(3); fatal("Expect projection-->IfNode-->BoolNode"); } )
  if( !bol->is_Bool() ) return;

  Node *cmp = bol->in(1);
  // Compute a new check
  Node *new_add = gvn->intcon(off_lo);
  if( index ) {
    new_add = off_lo ? gvn->transform(new AddINode( index, new_add )) : index;
  }
  Node *new_cmp = (flip == 1)
    ? new CmpUNode( new_add, range )
    : new CmpUNode( range, new_add );
  new_cmp = gvn->transform(new_cmp);
  // See if no need to adjust the existing check
  if( new_cmp == cmp ) return;
  // Else, adjust existing check
  Node *new_bol = gvn->transform( new BoolNode( new_cmp, bol->as_Bool()->_test._test ) );
  igvn->rehash_node_delayed( iff );
  iff->set_req_X( 1, new_bol, igvn );
}

//------------------------------up_one_dom-------------------------------------
// Walk up the dominator tree one step.  Return null at root or true
// complex merges.  Skips through small diamonds.
Node* IfNode::up_one_dom(Node *curr, bool linear_only) {
  Node *dom = curr->in(0);
  if( !dom )                    // Found a Region degraded to a copy?
    return curr->nonnull_req(); // Skip thru it

  if( curr != dom )             // Normal walk up one step?
    return dom;

  // Use linear_only if we are still parsing, since we cannot
  // trust the regions to be fully filled in.
  if (linear_only)
    return nullptr;

  if( dom->is_Root() )
    return nullptr;

  // Else hit a Region.  Check for a loop header
  if( dom->is_Loop() )
    return dom->in(1);          // Skip up thru loops

  // Check for small diamonds
  Node *din1, *din2, *din3, *din4;
  if( dom->req() == 3 &&        // 2-path merge point
      (din1 = dom ->in(1)) &&   // Left  path exists
      (din2 = dom ->in(2)) &&   // Right path exists
      (din3 = din1->in(0)) &&   // Left  path up one
      (din4 = din2->in(0)) ) {  // Right path up one
    if( din3->is_Call() &&      // Handle a slow-path call on either arm
        (din3 = din3->in(0)) )
      din3 = din3->in(0);
    if( din4->is_Call() &&      // Handle a slow-path call on either arm
        (din4 = din4->in(0)) )
      din4 = din4->in(0);
    if (din3 != nullptr && din3 == din4 && din3->is_If()) // Regions not degraded to a copy
      return din3;              // Skip around diamonds
  }

  // Give up the search at true merges
  return nullptr;               // Dead loop?  Or hit root?
}


//------------------------------filtered_int_type--------------------------------
// Return a possibly more restrictive type for val based on condition control flow for an if
const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj) {
  assert(if_proj &&
         (if_proj->Opcode() == Op_IfTrue || if_proj->Opcode() == Op_IfFalse), "expecting an if projection");
  if (if_proj->in(0) && if_proj->in(0)->is_If()) {
    IfNode* iff = if_proj->in(0)->as_If();
    if (iff->in(1) && iff->in(1)->is_Bool()) {
      BoolNode* bol = iff->in(1)->as_Bool();
      if (bol->in(1) && bol->in(1)->is_Cmp()) {
        const CmpNode* cmp = bol->in(1)->as_Cmp();
        if (cmp->in(1) == val) {
          const TypeInt* cmp2_t = gvn->type(cmp->in(2))->isa_int();
          if (cmp2_t != nullptr) {
            jint lo = cmp2_t->_lo;
            jint hi = cmp2_t->_hi;
            BoolTest::mask msk = if_proj->Opcode() == Op_IfTrue ? bol->_test._test : bol->_test.negate();
            switch (msk) {
            case BoolTest::ne: {
              // If val is compared to its lower or upper bound, we can narrow the type
              const TypeInt* val_t = gvn->type(val)->isa_int();
              if (val_t != nullptr && !val_t->singleton() && cmp2_t->is_con()) {
                if (val_t->_lo == lo) {
                  return TypeInt::make(val_t->_lo + 1, val_t->_hi, val_t->_widen);
                } else if (val_t->_hi == hi) {
                  return TypeInt::make(val_t->_lo, val_t->_hi - 1, val_t->_widen);
                }
              }
              // Can't refine type
              return nullptr;
            }
            case BoolTest::eq:
              return cmp2_t;
            case BoolTest::lt:
              lo = TypeInt::INT->_lo;
              if (hi != min_jint) {
                hi = hi - 1;
              }
              break;
            case BoolTest::le:
              lo = TypeInt::INT->_lo;
              break;
            case BoolTest::gt:
              if (lo != max_jint) {
                lo = lo + 1;
              }
              hi = TypeInt::INT->_hi;
              break;
            case BoolTest::ge:
              // lo unchanged
              hi = TypeInt::INT->_hi;
              break;
            default:
              break;
            }
            const TypeInt* rtn_t = TypeInt::make(lo, hi, cmp2_t->_widen);
            return rtn_t;
          }
        }
      }
    }
  }
  return nullptr;
}

//------------------------------fold_compares----------------------------
// See if a pair of CmpIs can be converted into a CmpU.  In some cases
// the direction of this if is determined by the preceding if so it
// can be eliminated entirely.
//
// Given an if testing (CmpI n v) check for an immediately control
// dependent if that is testing (CmpI n v2) and has one projection
// leading to this if and the other projection leading to a region
// that merges one of this ifs control projections.
//
//                   If
//                  / |
//                 /  |
//                /   |
//              If    |
//              /\    |
//             /  \   |
//            /    \  |
//           /    Region
//
// Or given an if testing (CmpI n v) check for a dominating if that is
// testing (CmpI n v2), both having one projection leading to an
// uncommon trap. Allow another independent guard in between to cover
// an explicit range check:
// if (index < 0 || index >= array.length) {
// which may need a null check to guard the LoadRange
//
//                   If
//                  /  \
//                 /    \
//                /      \
//              If        unc
//              /\
//             /  \
//            /    \
//           /      unc
//

// Is the comparison for this If suitable for folding?
bool IfNode::cmpi_folds(PhaseIterGVN* igvn, bool fold_ne) {
  return in(1) != nullptr &&
         in(1)->is_Bool() &&
         in(1)->in(1) != nullptr &&
         in(1)->in(1)->Opcode() == Op_CmpI &&
         in(1)->in(1)->in(2) != nullptr &&
         in(1)->in(1)->in(2) != igvn->C->top() &&
         (in(1)->as_Bool()->_test.is_less() ||
          in(1)->as_Bool()->_test.is_greater() ||
          (fold_ne && in(1)->as_Bool()->_test._test == BoolTest::ne));
}

// Is a dominating control suitable for folding with this if?
bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) {
  return ctrl != nullptr &&
         ctrl->is_Proj() &&
         ctrl->in(0) != nullptr &&
         ctrl->in(0)->Opcode() == Op_If &&
         ctrl->in(0)->outcnt() == 2 &&
         ctrl->in(0)->as_If()->cmpi_folds(igvn, true) &&
         // Must compare same value
         ctrl->in(0)->in(1)->in(1)->in(1) != nullptr &&
         ctrl->in(0)->in(1)->in(1)->in(1) != igvn->C->top() &&
         ctrl->in(0)->in(1)->in(1)->in(1) == in(1)->in(1)->in(1);
}

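// The payoff of folding two signed compares, in source-level terms
// (illustrative sketch): a pair of bound checks such as
//
//   if (n >= lo && n < hi) { ... }
//
// becomes a single unsigned compare, using the identity (valid for lo <= hi)
//
//   lo <= n && n < hi   <==>   (juint)(n - lo) < (juint)(hi - lo)
//
// fold_compares_helper() below builds exactly this CmpU(n - lo, hi - lo)
// shape, with extra care for the empty-interval corner cases spelled out in
// its comments.
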
// Do this If and the dominating If share a region?
bool IfNode::has_shared_region(ProjNode* proj, ProjNode*& success, ProjNode*& fail) {
  ProjNode* otherproj = proj->other_if_proj();
  Node* otherproj_ctrl_use = otherproj->unique_ctrl_out_or_null();
  RegionNode* region = (otherproj_ctrl_use != nullptr && otherproj_ctrl_use->is_Region()) ? otherproj_ctrl_use->as_Region() : nullptr;
  success = nullptr;
  fail = nullptr;

  if (otherproj->outcnt() == 1 && region != nullptr && !region->has_phi()) {
    for (int i = 0; i < 2; i++) {
      ProjNode* proj = proj_out(i);
      if (success == nullptr && proj->outcnt() == 1 && proj->unique_out() == region) {
        success = proj;
      } else if (fail == nullptr) {
        fail = proj;
      } else {
        success = fail = nullptr;
      }
    }
  }
  return success != nullptr && fail != nullptr;
}

bool IfNode::is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* unc) {
  // Different methods and methods containing jsrs are not supported.
  ciMethod* method = unc->jvms()->method();
  ciMethod* dom_method = dom_unc->jvms()->method();
  if (method != dom_method || method->has_jsrs()) {
    return false;
  }
  // Check that both traps are in the same activation of the method (instead
  // of two activations being inlined through different call sites) by verifying
  // that the call stacks are equal for both JVMStates.
  JVMState* dom_caller = dom_unc->jvms()->caller();
  JVMState* caller = unc->jvms()->caller();
  if ((dom_caller == nullptr) != (caller == nullptr)) {
    // The current method must either be inlined into both dom_caller and
    // caller or must not be inlined at all (top method). Bail out otherwise.
    return false;
  } else if (dom_caller != nullptr && !dom_caller->same_calls_as(caller)) {
    return false;
  }
  // Check that the bci of the dominating uncommon trap dominates the bci
  // of the dominated uncommon trap. Otherwise we may not re-execute
  // the dominated check after deoptimization from the merged uncommon trap.
  ciTypeFlow* flow = dom_method->get_flow_analysis();
  int bci = unc->jvms()->bci();
  int dom_bci = dom_unc->jvms()->bci();
  if (!flow->is_dominated_by(bci, dom_bci)) {
    return false;
  }

  return true;
}

// Return projection that leads to an uncommon trap if any
ProjNode* IfNode::uncommon_trap_proj(CallStaticJavaNode*& call) const {
  for (int i = 0; i < 2; i++) {
    call = proj_out(i)->is_uncommon_trap_proj(Deoptimization::Reason_none);
    if (call != nullptr) {
      return proj_out(i);
    }
  }
  return nullptr;
}

// Do this If and the dominating If both branch out to an uncommon trap
bool IfNode::has_only_uncommon_traps(ProjNode* proj, ProjNode*& success, ProjNode*& fail, PhaseIterGVN* igvn) {
  ProjNode* otherproj = proj->other_if_proj();
  CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj(Deoptimization::Reason_none);

  if (otherproj->outcnt() == 1 && dom_unc != nullptr) {
    // We need to re-execute the folded Ifs after deoptimization from the merged traps
    if (!dom_unc->jvms()->should_reexecute()) {
      return false;
    }

    CallStaticJavaNode* unc = nullptr;
    ProjNode* unc_proj = uncommon_trap_proj(unc);
    if (unc_proj != nullptr && unc_proj->outcnt() == 1) {
      if (dom_unc == unc) {
        // Allow the uncommon trap to be shared through a region
        RegionNode* r = unc->in(0)->as_Region();
        if (r->outcnt() != 2 || r->req() != 3 || r->find_edge(otherproj) == -1 || r->find_edge(unc_proj) == -1) {
          return false;
        }
        assert(r->has_phi() == nullptr, "simple region shouldn't have a phi");
      } else if (dom_unc->in(0) != otherproj || unc->in(0) != unc_proj) {
        return false;
      }

      if (!is_dominator_unc(dom_unc, unc)) {
        return false;
      }

      // See merge_uncommon_traps: the reason of the uncommon trap
      // will be changed and the state of the dominating If will be
      // used. Check that we didn't already apply this transformation in a
      // previous compilation and that it didn't cause too many traps
      ciMethod* dom_method = dom_unc->jvms()->method();
      int dom_bci = dom_unc->jvms()->bci();
      if (!igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_unstable_fused_if) &&
          !igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_range_check) &&
          // Return true if c2 manages to reconcile with UnstableIf optimization. See the comments for it.
          igvn->C->remove_unstable_if_trap(dom_unc, true/*yield*/)) {
        success = unc_proj;
        fail = unc_proj->other_if_proj();
        return true;
      }
    }
  }
  return false;
}

// Check that the 2 CmpI can be folded into a single CmpU and proceed with the folding
bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn) {
  Node* this_cmp = in(1)->in(1);
  BoolNode* this_bool = in(1)->as_Bool();
  IfNode* dom_iff = proj->in(0)->as_If();
  BoolNode* dom_bool = dom_iff->in(1)->as_Bool();
  Node* lo = dom_iff->in(1)->in(1)->in(2);
  Node* hi = this_cmp->in(2);
  Node* n = this_cmp->in(1);
  ProjNode* otherproj = proj->other_if_proj();

  const TypeInt* lo_type = IfNode::filtered_int_type(igvn, n, otherproj);
  const TypeInt* hi_type = IfNode::filtered_int_type(igvn, n, success);

  BoolTest::mask lo_test = dom_bool->_test._test;
  BoolTest::mask hi_test = this_bool->_test._test;
  BoolTest::mask cond = hi_test;

  // convert:
  //
  //          dom_bool = x {<,<=,>,>=} a
  //                           / \
  //     proj = {True,False}  /   \ otherproj = {False,True}
  //                         /
  //        this_bool = x {<,<=} b
  //                       / \
  //  fail = {True,False} /   \ success = {False,True}
  //                     /
  //
  // (Second test guaranteed canonicalized, first one may not have
  // been canonicalized yet)
  //
  // into:
  //
  //          cond = (x - lo) {<u,<=u,>u,>=u} adjusted_lim
  //                       / \
  //                 fail /   \ success
  //                     /
  //

  // Figure out which of the two tests sets the upper bound and which
  // sets the lower bound if any.
  Node* adjusted_lim = nullptr;
  if (lo_type != nullptr && hi_type != nullptr && hi_type->_lo > lo_type->_hi &&
      hi_type->_hi == max_jint && lo_type->_lo == min_jint && lo_test != BoolTest::ne) {
    assert((dom_bool->_test.is_less() && !proj->_con) ||
           (dom_bool->_test.is_greater() && proj->_con), "incorrect test");

    // this_bool = <
    //   dom_bool = >= (proj = True) or dom_bool = < (proj = False)
    //     x in [a, b[ on the fail (= True) projection, b > a-1 (because of hi_type->_lo > lo_type->_hi test above):
    //     lo = a, hi = b, adjusted_lim = b-a, cond = <u
    //   dom_bool = > (proj = True) or dom_bool = <= (proj = False)
    //     x in ]a, b[ on the fail (= True) projection, b > a:
    //     lo = a+1, hi = b, adjusted_lim = b-a-1, cond = <u
    // this_bool = <=
    //   dom_bool = >= (proj = True) or dom_bool = < (proj = False)
    //     x in [a, b] on the fail (= True) projection, b+1 > a-1:
    //     lo = a, hi = b, adjusted_lim = b-a+1, cond = <u
    //     lo = a, hi = b, adjusted_lim = b-a, cond = <=u doesn't work because b = a - 1 is possible, then b-a = -1
    //   dom_bool = > (proj = True) or dom_bool = <= (proj = False)
    //     x in ]a, b] on the fail (= True) projection b+1 > a:
    //     lo = a+1, hi = b, adjusted_lim = b-a, cond = <u
    //     lo = a+1, hi = b, adjusted_lim = b-a-1, cond = <=u doesn't work because a = b is possible, then b-a-1 = -1

    if (hi_test == BoolTest::lt) {
      if (lo_test == BoolTest::gt || lo_test == BoolTest::le) {
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
      }
    } else if (hi_test == BoolTest::le) {
      if (lo_test == BoolTest::ge || lo_test == BoolTest::lt) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        adjusted_lim = igvn->transform(new AddINode(adjusted_lim, igvn->intcon(1)));
        cond = BoolTest::lt;
      } else if (lo_test == BoolTest::gt || lo_test == BoolTest::le) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
        cond = BoolTest::lt;
      } else {
        assert(false, "unhandled lo_test: %d", lo_test);
        return false;
      }
    } else {
      assert(igvn->_worklist.member(in(1)) && in(1)->Value(igvn) != igvn->type(in(1)), "unhandled hi_test: %d", hi_test);
      return false;
    }
    // this test was canonicalized
    assert(this_bool->_test.is_less() && fail->_con, "incorrect test");
  } else if (lo_type != nullptr && hi_type != nullptr && lo_type->_lo > hi_type->_hi &&
             lo_type->_hi == max_jint && hi_type->_lo == min_jint && lo_test != BoolTest::ne) {

    // this_bool = <
    //   dom_bool = < (proj = True) or dom_bool = >= (proj = False)
    //     x in [b, a[ on the fail (= False) projection, a > b-1 (because of lo_type->_lo > hi_type->_hi above):
    //     lo = b, hi = a, adjusted_lim = a-b, cond = >=u
    //   dom_bool = <= (proj = True) or dom_bool = > (proj = False)
    //     x in [b, a] on the fail (= False) projection, a+1 > b-1:
    //     lo = b, hi = a, adjusted_lim = a-b+1, cond = >=u
    //     lo = b, hi = a, adjusted_lim = a-b, cond = >u doesn't work because a = b - 1 is possible, then b-a = -1
    // this_bool = <=
    //   dom_bool = < (proj = True) or dom_bool = >= (proj = False)
    //     x in ]b, a[ on the fail (= False) projection, a > b:
    //     lo = b+1, hi = a, adjusted_lim = a-b-1, cond = >=u
    //   dom_bool = <= (proj = True) or dom_bool = > (proj = False)
    //     x in ]b, a] on the fail (= False) projection, a+1 > b:
    //     lo = b+1, hi = a, adjusted_lim = a-b, cond = >=u
    //     lo = b+1, hi = a, adjusted_lim = a-b-1, cond = >u doesn't work because a = b is possible, then b-a-1 = -1

    swap(lo, hi);
    swap(lo_type, hi_type);
    swap(lo_test, hi_test);

    assert((dom_bool->_test.is_less() && proj->_con) ||
           (dom_bool->_test.is_greater() && !proj->_con), "incorrect test");

    cond = (hi_test == BoolTest::le || hi_test == BoolTest::gt) ? BoolTest::gt : BoolTest::ge;

    if (lo_test == BoolTest::lt) {
      if (hi_test == BoolTest::lt || hi_test == BoolTest::ge) {
        cond = BoolTest::ge;
      } else if (hi_test == BoolTest::le || hi_test == BoolTest::gt) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        adjusted_lim = igvn->transform(new AddINode(adjusted_lim, igvn->intcon(1)));
        cond = BoolTest::ge;
      } else {
        assert(false, "unhandled hi_test: %d", hi_test);
        return false;
      }
    } else if (lo_test == BoolTest::le) {
      if (hi_test == BoolTest::lt || hi_test == BoolTest::ge) {
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
        cond = BoolTest::ge;
      } else if (hi_test == BoolTest::le || hi_test == BoolTest::gt) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
        cond = BoolTest::ge;
      } else {
        assert(false, "unhandled hi_test: %d", hi_test);
        return false;
      }
    } else {
      assert(igvn->_worklist.member(in(1)) && in(1)->Value(igvn) != igvn->type(in(1)), "unhandled lo_test: %d", lo_test);
      return false;
    }
    // this test was canonicalized
    assert(this_bool->_test.is_less() && !fail->_con, "incorrect test");
  } else {
    const TypeInt* failtype = filtered_int_type(igvn, n, proj);
    if (failtype != nullptr) {
      const TypeInt* type2 = filtered_int_type(igvn, n, fail);
      if (type2 != nullptr) {
        failtype = failtype->join(type2)->is_int();
        if (failtype->_lo > failtype->_hi) {
          // previous if determines the result of this if so
          // replace Bool with constant
          igvn->replace_input_of(this, 1, igvn->intcon(success->_con));
          return true;
        }
      }
    }
    lo = nullptr;
    hi = nullptr;
  }

  if (lo && hi) {
    Node* hook = new Node(1);
    hook->init_req(0, lo); // Add a use to lo to prevent him from dying
    // Merge the two compares into a single unsigned compare by building (CmpU (n - lo) (hi - lo))
    Node* adjusted_val = igvn->transform(new SubINode(n, lo));
    if (adjusted_lim == nullptr) {
      adjusted_lim = igvn->transform(new SubINode(hi, lo));
    }
    hook->destruct(igvn);

    int lo = igvn->type(adjusted_lim)->is_int()->_lo;
    if (lo < 0) {
      // If range check elimination applies to this comparison, it includes code to protect from overflows that may
      // cause the main loop to be skipped entirely. Delay this transformation.
      // Example:
      // for (int i = 0; i < limit; i++) {
      //   if (i < max_jint && i > min_jint) {...
      // }
      // Comparisons folded as:
      // i - min_jint - 1 <u -2
      // when RC applies, main loop limit becomes:
      // min(limit, max(-2 + min_jint + 1, min_jint))
      // = min(limit, min_jint)
      // = min_jint
      if (!igvn->C->post_loop_opts_phase()) {
        if (adjusted_val->outcnt() == 0) {
          igvn->remove_dead_node(adjusted_val);
        }
        if (adjusted_lim->outcnt() == 0) {
          igvn->remove_dead_node(adjusted_lim);
        }
        igvn->C->record_for_post_loop_opts_igvn(this);
        return false;
      }
    }

    Node* newcmp = igvn->transform(new CmpUNode(adjusted_val, adjusted_lim));
    Node* newbool = igvn->transform(new BoolNode(newcmp, cond));

    igvn->replace_input_of(dom_iff, 1, igvn->intcon(proj->_con));
    igvn->replace_input_of(this, 1, newbool);

    return true;
  }
  return false;
}

// Merge the branches that trap for this If and the dominating If into
// a single region that branches to the uncommon trap for the
// dominating If
Node* IfNode::merge_uncommon_traps(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn) {
  Node* res = this;
  assert(success->in(0) == this, "bad projection");

  ProjNode* otherproj = proj->other_if_proj();

  CallStaticJavaNode* unc = success->is_uncommon_trap_proj(Deoptimization::Reason_none);
  CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj(Deoptimization::Reason_none);

  if (unc != dom_unc) {
    Node* r = new RegionNode(3);

    r->set_req(1, otherproj);
    r->set_req(2, success);
    r = igvn->transform(r);
    assert(r->is_Region(), "can't go away");

    // Make both If trap at the state of the first If: once the CmpI
    // nodes are merged, if we trap we don't know which of the CmpI
    // nodes would have caused the trap so we have to restart
    // execution at the first one
    igvn->replace_input_of(dom_unc, 0, r);
    igvn->replace_input_of(unc, 0, igvn->C->top());
  }
  int trap_request = dom_unc->uncommon_trap_request();
  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);

  int flip_test = 0;
  Node* l = nullptr;
  Node* r = nullptr;

  if (success->in(0)->as_If()->range_check_trap_proj(flip_test, l, r) != nullptr) {
    // If this looks like a range check, change the trap to
    // Reason_range_check so the compiler recognizes it as a range
    // check and applies the corresponding optimizations
    trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_range_check, action);

    improve_address_types(l, r, fail, igvn);

    res = igvn->transform(new RangeCheckNode(in(0), in(1), _prob, _fcnt));
  } else if (unc != dom_unc) {
    // If we trap we won't know what CmpI would have caused the trap
    // so use a special trap reason to mark this pair of CmpI nodes as
    // bad candidate for folding. On recompilation we won't fold them
    // and we may trap again but this time we'll know what branch
    // traps
    trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_unstable_fused_if, action);
  }
  igvn->replace_input_of(dom_unc, TypeFunc::Parms, igvn->intcon(trap_request));
  return res;
}

// If we are turning 2 CmpI nodes into a CmpU that follows the pattern
// of a rangecheck on index i, on 64 bit the compares may be followed
// by memory accesses using i as index. In that case, the CmpU tells
// us something about the values taken by i that can help the compiler
// (see Compile::conv_I2X_index())
void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGVN* igvn) {
#ifdef _LP64
  ResourceMark rm;
  Node_Stack stack(2);

  assert(r->Opcode() == Op_LoadRange, "unexpected range check");
  const TypeInt* array_size = igvn->type(r)->is_int();

  stack.push(l, 0);

  while(stack.size() > 0) {
    Node* n = stack.node();
    uint start = stack.index();

    uint i = start;
    for (; i < n->outcnt(); i++) {
      Node* use = n->raw_out(i);
      if (stack.size() == 1) {
        if (use->Opcode() == Op_ConvI2L) {
          const TypeLong* bounds = use->as_Type()->type()->is_long();
          if (bounds->_lo <= array_size->_lo && bounds->_hi >= array_size->_hi &&
              (bounds->_lo != array_size->_lo || bounds->_hi != array_size->_hi)) {
            stack.set_index(i+1);
            stack.push(use, 0);
            break;
          }
        }
      } else if (use->is_Mem()) {
        Node* ctrl = use->in(0);
        for (int i = 0; i < 10 && ctrl != nullptr && ctrl != fail; i++) {
          ctrl = up_one_dom(ctrl);
        }
        if (ctrl == fail) {
          Node* init_n = stack.node_at(1);
          assert(init_n->Opcode() == Op_ConvI2L, "unexpected first node");
          // Create a new narrow ConvI2L node that is dependent on the range check
          Node* new_n = igvn->C->conv_I2X_index(igvn, l, array_size, fail);

          // The type of the ConvI2L may be widened and so the new
          // ConvI2L may not be better than an existing ConvI2L
          if (new_n != init_n) {
            for (uint j = 2; j < stack.size(); j++) {
              Node* n = stack.node_at(j);
              Node* clone = n->clone();
              int rep = clone->replace_edge(init_n, new_n, igvn);
              assert(rep > 0, "can't find expected node?");
              clone = igvn->transform(clone);
              init_n = n;
              new_n = clone;
            }
            igvn->hash_delete(use);
            int rep = use->replace_edge(init_n, new_n, igvn);
            assert(rep > 0, "can't find expected node?");
            igvn->transform(use);
            if (init_n->outcnt() == 0) {
              igvn->_worklist.push(init_n);
            }
          }
        }
      } else if (use->in(0) == nullptr && (igvn->type(use)->isa_long() ||
                                           igvn->type(use)->isa_ptr())) {
        stack.set_index(i+1);
        stack.push(use, 0);
        break;
      }
    }
    if (i == n->outcnt()) {
      stack.pop();
    }
  }
#endif
}

bool IfNode::is_cmp_with_loadrange(ProjNode* proj) {
  if (in(1) != nullptr &&
      in(1)->in(1) != nullptr &&
      in(1)->in(1)->in(2) != nullptr) {
    Node* other = in(1)->in(1)->in(2);
    if (other->Opcode() == Op_LoadRange &&
        ((other->in(0) != nullptr && other->in(0) == proj) ||
         (other->in(0) == nullptr &&
          other->in(2) != nullptr &&
          other->in(2)->is_AddP() &&
          other->in(2)->in(1) != nullptr &&
          other->in(2)->in(1)->Opcode() == Op_CastPP &&
          other->in(2)->in(1)->in(0) == proj))) {
      return true;
    }
  }
  return false;
}
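
// is_cmp_with_loadrange() above and is_null_check() below recognize the guard
// that may sit between the two folded comparisons, e.g. (illustrative sketch):
//
//   if (index < 0 || index >= array.length) { ... }
//
// where loading array.length needs a null check on 'array'; that null-check
// If is the side-effect-free test tolerated in between (see
// is_side_effect_free_test() below).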

bool IfNode::is_null_check(ProjNode* proj, PhaseIterGVN* igvn) {
  Node* other = in(1)->in(1)->in(2);
  if (other->in(MemNode::Address) != nullptr &&
      proj->in(0)->in(1) != nullptr &&
      proj->in(0)->in(1)->is_Bool() &&
      proj->in(0)->in(1)->in(1) != nullptr &&
      proj->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
      proj->in(0)->in(1)->in(1)->in(2) != nullptr &&
      proj->in(0)->in(1)->in(1)->in(1) == other->in(MemNode::Address)->in(AddPNode::Address)->uncast() &&
      igvn->type(proj->in(0)->in(1)->in(1)->in(2)) == TypePtr::NULL_PTR) {
    return true;
  }
  return false;
}

// Check that the If that is in between the 2 integer comparisons has
// no side effect
bool IfNode::is_side_effect_free_test(ProjNode* proj, PhaseIterGVN* igvn) {
  if (proj == nullptr) {
    return false;
  }
  CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
  if (unc != nullptr && proj->outcnt() <= 2) {
    if (proj->outcnt() == 1 ||
        // Allow simple null check from LoadRange
        (is_cmp_with_loadrange(proj) && is_null_check(proj, igvn))) {
      CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
      CallStaticJavaNode* dom_unc = proj->in(0)->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
      assert(dom_unc != nullptr, "is_uncommon_trap_if_pattern returned null");

      // reroute_side_effect_free_unc changes the state of this
      // uncommon trap to restart execution at the previous
      // CmpI. Check that this change in a previous compilation didn't
      // cause too many traps.
      int trap_request = unc->uncommon_trap_request();
      Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);

      if (igvn->C->too_many_traps(dom_unc->jvms()->method(), dom_unc->jvms()->bci(), reason)) {
        return false;
      }

      if (!is_dominator_unc(dom_unc, unc)) {
        return false;
      }

      return true;
    }
  }
  return false;
}

// Make the If between the 2 integer comparisons trap at the state of
// the first If: the last CmpI is the one replaced by a CmpU and the
// first CmpI is eliminated, so the test between the 2 CmpI nodes
// won't be guarded by the first CmpI anymore. It can trap in cases
// where the first CmpI would have prevented it from executing: on a
// trap, we need to restart execution at the state of the first CmpI
void IfNode::reroute_side_effect_free_unc(ProjNode* proj, ProjNode* dom_proj, PhaseIterGVN* igvn) {
  CallStaticJavaNode* dom_unc = dom_proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
  ProjNode* otherproj = proj->other_if_proj();
  CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
  Node* call_proj = dom_unc->unique_ctrl_out();
  Node* halt = call_proj->unique_ctrl_out();

  Node* new_unc = dom_unc->clone();
  call_proj = call_proj->clone();
  halt = halt->clone();
  Node* c = otherproj->clone();

  c = igvn->transform(c);
  new_unc->set_req(TypeFunc::Parms, unc->in(TypeFunc::Parms));
  new_unc->set_req(0, c);
  new_unc = igvn->transform(new_unc);
  call_proj->set_req(0, new_unc);
  call_proj = igvn->transform(call_proj);
  halt->set_req(0, call_proj);
  halt = igvn->transform(halt);

  igvn->replace_node(otherproj, igvn->C->top());
  igvn->C->root()->add_req(halt);
}

Node* IfNode::fold_compares(PhaseIterGVN* igvn) {
  if (Opcode() != Op_If) return nullptr;

  if (cmpi_folds(igvn)) {
    Node* ctrl = in(0);
    if (is_ctrl_folds(ctrl, igvn) && ctrl->outcnt() == 1) {
      // An integer comparison immediately dominated by another integer
      // comparison
      ProjNode* success = nullptr;
      ProjNode* fail = nullptr;
      ProjNode* dom_cmp = ctrl->as_Proj();
      if (has_shared_region(dom_cmp, success, fail) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        return this;
      }
      if (has_only_uncommon_traps(dom_cmp, success, fail, igvn) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        return merge_uncommon_traps(dom_cmp, success, fail, igvn);
      }
      return nullptr;
    } else if (ctrl->in(0) != nullptr &&
               ctrl->in(0)->in(0) != nullptr) {
      ProjNode* success = nullptr;
      ProjNode* fail = nullptr;
      Node* dom = ctrl->in(0)->in(0);
      ProjNode* dom_cmp = dom->isa_Proj();
      ProjNode* other_cmp = ctrl->isa_Proj();

      // Check if it's an integer comparison dominated by another
      // integer comparison with another test in between
      if (is_ctrl_folds(dom, igvn) &&
          has_only_uncommon_traps(dom_cmp, success, fail, igvn) &&
          is_side_effect_free_test(other_cmp, igvn) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        reroute_side_effect_free_unc(other_cmp, dom_cmp, igvn);
        return merge_uncommon_traps(dom_cmp, success, fail, igvn);
      }
    }
  }
  return nullptr;
}

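// In IR terms, remove_useless_bool() below matches
//
//   If( Bool( CmpI( Phi(0,1), 0/1 ), eq/ne ) )
//
// where the Phi(0,1) (or Phi(1,0)) is produced by a preceding diamond; the If
// is rewired to the diamond's original Bool (negated if the senses disagree),
// so the intermediate diamond and Phi can then go dead.
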
//------------------------------remove_useless_bool----------------------------
// Check for people making a useless boolean: things like
// if( (x < y ? true : false) ) { ... }
// Replace with if( x < y ) { ... }
static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) {
  Node *i1 = iff->in(1);
  if( !i1->is_Bool() ) return nullptr;
  BoolNode *bol = i1->as_Bool();

  Node *cmp = bol->in(1);
  if( cmp->Opcode() != Op_CmpI ) return nullptr;

  // Must be comparing against a bool
  const Type *cmp2_t = phase->type( cmp->in(2) );
  if( cmp2_t != TypeInt::ZERO &&
      cmp2_t != TypeInt::ONE )
    return nullptr;

  // Find a prior merge point merging the boolean
  i1 = cmp->in(1);
  if( !i1->is_Phi() ) return nullptr;
  PhiNode *phi = i1->as_Phi();
  if( phase->type( phi ) != TypeInt::BOOL )
    return nullptr;

  // Check for diamond pattern
  int true_path = phi->is_diamond_phi();
  if( true_path == 0 ) return nullptr;

  // Make sure that iff and the control of the phi are different. This
  // should really only happen for dead control flow since it requires
  // an illegal cycle.
  if (phi->in(0)->in(1)->in(0) == iff) return nullptr;

  // phi->region->if_proj->ifnode->bool->cmp
  BoolNode *bol2 = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();

  // Now get the 'sense' of the test correct so we can plug in
  // either iff2->in(1) or its complement.
  int flip = 0;
  if( bol->_test._test == BoolTest::ne ) flip = 1-flip;
  else if( bol->_test._test != BoolTest::eq ) return nullptr;
  if( cmp2_t == TypeInt::ZERO ) flip = 1-flip;

  const Type *phi1_t = phase->type( phi->in(1) );
  const Type *phi2_t = phase->type( phi->in(2) );
  // Check for Phi(0,1) and flip
  if( phi1_t == TypeInt::ZERO ) {
    if( phi2_t != TypeInt::ONE ) return nullptr;
    flip = 1-flip;
  } else {
    // Check for Phi(1,0)
    if( phi1_t != TypeInt::ONE  ) return nullptr;
    if( phi2_t != TypeInt::ZERO ) return nullptr;
  }
  if( true_path == 2 ) {
    flip = 1-flip;
  }

  Node* new_bol = (flip ? phase->transform( bol2->negate(phase) ) : bol2);
  assert(new_bol != iff->in(1), "must make progress");
  iff->set_req_X(1, new_bol, phase);
  // Intervening diamond probably goes dead
  phase->C->set_major_progress();
  return iff;
}

static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff);

struct RangeCheck {
  Node* ctl;
  jint off;
};

Node* IfNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape))  return this;
  // No Def-Use info?
  if (!can_reshape)  return nullptr;

  // Don't bother trying to transform a dead if
  if (in(0)->is_top())  return nullptr;
  // Don't bother trying to transform an if with a dead test
  if (in(1)->is_top())  return nullptr;
  // Another variation of a dead test
  if (in(1)->is_Con())  return nullptr;
  // Another variation of a dead if
  if (outcnt() < 2)  return nullptr;

  // Canonicalize the test.
  Node* idt_if = idealize_test(phase, this);
  if (idt_if != nullptr)  return idt_if;

  // Try to split the IF
  PhaseIterGVN *igvn = phase->is_IterGVN();
  Node *s = split_if(this, igvn);
  if (s != nullptr)  return s;

  return NodeSentinel;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
Strip out 1447 // control copies 1448 Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) { 1449 Node* res = Ideal_common(phase, can_reshape); 1450 if (res != NodeSentinel) { 1451 return res; 1452 } 1453 1454 // Check for people making a useless boolean: things like 1455 // if( (x < y ? true : false) ) { ... } 1456 // Replace with if( x < y ) { ... } 1457 Node* bol2 = remove_useless_bool(this, phase); 1458 if (bol2) return bol2; 1459 1460 if (in(0) == nullptr) return nullptr; // Dead loop? 1461 1462 PhaseIterGVN* igvn = phase->is_IterGVN(); 1463 Node* result = fold_compares(igvn); 1464 if (result != nullptr) { 1465 return result; 1466 } 1467 1468 // Scan for an equivalent test 1469 int dist = 4; // Cutoff limit for search 1470 if (is_If() && in(1)->is_Bool()) { 1471 Node* cmp = in(1)->in(1); 1472 if (cmp->Opcode() == Op_CmpP && 1473 cmp->in(2) != nullptr && // make sure cmp is not already dead 1474 cmp->in(2)->bottom_type() == TypePtr::NULL_PTR) { 1475 dist = 64; // Limit for null-pointer scans 1476 } 1477 } 1478 1479 Node* prev_dom = search_identical(dist); 1480 1481 if (prev_dom != nullptr) { 1482 // Replace dominated IfNode 1483 return dominated_by(prev_dom, igvn); 1484 } 1485 1486 return simple_subsuming(igvn); 1487 } 1488 1489 //------------------------------dominated_by----------------------------------- 1490 Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN *igvn) { 1491 #ifndef PRODUCT 1492 if (TraceIterativeGVN) { 1493 tty->print(" Removing IfNode: "); this->dump(); 1494 } 1495 #endif 1496 1497 igvn->hash_delete(this); // Remove self to prevent spurious V-N 1498 Node *idom = in(0); 1499 // Need opcode to decide which way 'this' test goes 1500 int prev_op = prev_dom->Opcode(); 1501 Node *top = igvn->C->top(); // Shortcut to top 1502 1503 // Loop predicates may have depending checks which should not 1504 // be skipped. For example, range check predicate has two checks 1505 // for lower and upper bounds. 1506 ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj(); 1507 if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != nullptr || 1508 unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != nullptr) { 1509 prev_dom = idom; 1510 } 1511 1512 // Now walk the current IfNode's projections. 1513 // Loop ends when 'this' has no more uses. 1514 for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) { 1515 Node *ifp = last_out(i); // Get IfTrue/IfFalse 1516 igvn->add_users_to_worklist(ifp); 1517 // Check which projection it is and set target. 1518 // Data-target is either the dominating projection of the same type 1519 // or TOP if the dominating projection is of opposite type. 1520 // Data-target will be used as the new control edge for the non-CFG 1521 // nodes like Casts and Loads. 1522 Node *data_target = (ifp->Opcode() == prev_op) ? prev_dom : top; 1523 // Control-target is just the If's immediate dominator or TOP. 1524 Node *ctrl_target = (ifp->Opcode() == prev_op) ? idom : top; 1525 1526 // For each child of an IfTrue/IfFalse projection, reroute. 1527 // Loop ends when projection has no more uses. 1528 for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) { 1529 Node* s = ifp->last_out(j); // Get child of IfTrue/IfFalse 1530 if (s->depends_only_on_test() && igvn->no_dependent_zero_check(s)) { 1531 // For control producers. 1532 // Do not rewire Div and Mod nodes which could have a zero divisor to avoid skipping their zero check. 
//------------------------------dominated_by-----------------------------------
Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN *igvn) {
#ifndef PRODUCT
  if (TraceIterativeGVN) {
    tty->print(" Removing IfNode: "); this->dump();
  }
#endif

  igvn->hash_delete(this);      // Remove self to prevent spurious V-N
  Node *idom = in(0);
  // Need opcode to decide which way 'this' test goes
  int prev_op = prev_dom->Opcode();
  Node *top = igvn->C->top(); // Shortcut to top

  // Loop predicates may have dependent checks which should not
  // be skipped. For example, a range check predicate has two checks
  // for lower and upper bounds.
  ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
  if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != nullptr ||
      unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != nullptr) {
    prev_dom = idom;
  }

  // Now walk the current IfNode's projections.
  // Loop ends when 'this' has no more uses.
  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
    Node *ifp = last_out(i);     // Get IfTrue/IfFalse
    igvn->add_users_to_worklist(ifp);
    // Check which projection it is and set target.
    // Data-target is either the dominating projection of the same type
    // or TOP if the dominating projection is of opposite type.
    // Data-target will be used as the new control edge for the non-CFG
    // nodes like Casts and Loads.
    Node *data_target = (ifp->Opcode() == prev_op) ? prev_dom : top;
    // Control-target is just the If's immediate dominator or TOP.
    Node *ctrl_target = (ifp->Opcode() == prev_op) ? idom : top;

    // For each child of an IfTrue/IfFalse projection, reroute.
    // Loop ends when projection has no more uses.
    for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) {
      Node* s = ifp->last_out(j);   // Get child of IfTrue/IfFalse
      if (s->depends_only_on_test() && igvn->no_dependent_zero_check(s)) {
        // For control producers.
        // Do not rewire Div and Mod nodes which could have a zero divisor to avoid skipping their zero check.
        igvn->replace_input_of(s, 0, data_target); // Move child to data-target
      } else {
        // Find the control input matching this def-use edge.
        // For Regions it may not be in slot 0.
        uint l;
        for (l = 0; s->in(l) != ifp; l++) { }
        igvn->replace_input_of(s, l, ctrl_target);
      }
    } // End for each child of a projection

    igvn->remove_dead_node(ifp);
  } // End for each IfTrue/IfFalse child of If

  // Kill the IfNode
  igvn->remove_dead_node(this);

  // Must return either the original node (now dead) or a new node
  // (Do not return a top here, since that would break the uniqueness of top.)
  return new ConINode(TypeInt::ZERO);
}

Node* IfNode::search_identical(int dist) {
  // Setup to scan up the CFG looking for a dominating test
  Node* dom = in(0);
  Node* prev_dom = this;
  int op = Opcode();
  // Search up the dominator tree for an If with an identical test
  while (dom->Opcode() != op    ||  // Not same opcode?
         dom->in(1)    != in(1) ||  // Not same input 1?
         prev_dom->in(0) != dom) {  // One path of test does not dominate?
    if (dist < 0) return nullptr;

    dist--;
    prev_dom = dom;
    dom = up_one_dom(dom);
    if (!dom) return nullptr;
  }

  // Check that we did not follow a loop back to ourselves
  if (this == dom) {
    return nullptr;
  }

#ifndef PRODUCT
  if (dist > 2) { // Add to count of null checks elided
    explicit_null_checks_elided++;
  }
#endif

  return prev_dom;
}
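// Note: dominated_by() returns a fresh ConINode rather than top because returning top
// would break the uniqueness of top (see the comment at its return site). The returned
// constant appears to serve only as a "made progress" marker for the Ideal() callers,
// since the IfNode itself has already been removed at that point.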
static int subsuming_bool_test_encode(Node*);

// Check if dominating test is subsuming 'this' one.
//
//              cmp
//              / \
//     (r1)  bool  \
//            /    bool (r2)
//    (dom) if       \
//            \       )
//    (pre)  if[TF]  /
//               \  /
//                if (this)
//   \r1
//  r2\  eqT  eqF  neT  neF  ltT  ltF  leT  leF  gtT  gtF  geT  geF
//  eq    t    f    f    t    f    -    -    f    f    -    -    f
//  ne    f    t    t    f    t    -    -    t    t    -    -    t
//  lt    f    -    -    f    t    f    -    f    f    -    f    t
//  le    t    -    -    t    t    -    t    f    f    t    -    t
//  gt    f    -    -    f    f    -    f    t    t    f    -    f
//  ge    t    -    -    t    f    t    -    t    t    -    t    f
//
Node* IfNode::simple_subsuming(PhaseIterGVN* igvn) {
  // Table encoding: N/A (na), True-branch (tb), False-branch (fb).
  static enum { na, tb, fb } s_short_circuit_map[6][12] = {
  /*rel: eq+T eq+F ne+T ne+F lt+T lt+F le+T le+F gt+T gt+F ge+T ge+F*/
  /*eq*/{ tb,  fb,  fb,  tb,  fb,  na,  na,  fb,  fb,  na,  na,  fb },
  /*ne*/{ fb,  tb,  tb,  fb,  tb,  na,  na,  tb,  tb,  na,  na,  tb },
  /*lt*/{ fb,  na,  na,  fb,  tb,  fb,  na,  fb,  fb,  na,  fb,  tb },
  /*le*/{ tb,  na,  na,  tb,  tb,  na,  tb,  fb,  fb,  tb,  na,  tb },
  /*gt*/{ fb,  na,  na,  fb,  fb,  na,  fb,  tb,  tb,  fb,  na,  fb },
  /*ge*/{ tb,  na,  na,  tb,  fb,  tb,  na,  tb,  tb,  na,  tb,  fb }};

  Node* pre = in(0);
  if (!pre->is_IfTrue() && !pre->is_IfFalse()) {
    return nullptr;
  }
  Node* dom = pre->in(0);
  if (!dom->is_If()) {
    return nullptr;
  }
  Node* bol = in(1);
  if (!bol->is_Bool()) {
    return nullptr;
  }
  Node* cmp = in(1)->in(1);
  if (!cmp->is_Cmp()) {
    return nullptr;
  }

  if (!dom->in(1)->is_Bool()) {
    return nullptr;
  }
  if (dom->in(1)->in(1) != cmp) { // Not same cond?
    return nullptr;
  }

  int drel = subsuming_bool_test_encode(dom->in(1));
  int trel = subsuming_bool_test_encode(bol);
  int bout = pre->is_IfFalse() ? 1 : 0;

  if (drel < 0 || trel < 0) {
    return nullptr;
  }
  int br = s_short_circuit_map[trel][2*drel+bout];
  if (br == na) {
    return nullptr;
  }
#ifndef PRODUCT
  if (TraceIterativeGVN) {
    tty->print(" Subsumed IfNode: "); dump();
  }
#endif
  // Replace condition with constant True(1)/False(0).
  bool is_always_true = br == tb;
  set_req(1, igvn->intcon(is_always_true ? 1 : 0));

  // Update any data dependencies to the directly dominating test. This subsumed test is not immediately removed by igvn
  // and therefore subsequent optimizations might miss these data dependencies otherwise. There might be a dead loop
  // ('always_taken_proj' == 'pre') that is cleaned up later. Skip this case to make the iterator work properly.
  Node* always_taken_proj = proj_out(is_always_true);
  if (always_taken_proj != pre) {
    for (DUIterator_Fast imax, i = always_taken_proj->fast_outs(imax); i < imax; i++) {
      Node* u = always_taken_proj->fast_out(i);
      if (!u->is_CFG()) {
        igvn->replace_input_of(u, 0, pre);
        --i;
        --imax;
      }
    }
  }

  if (bol->outcnt() == 0) {
    igvn->remove_dead_node(bol); // Kill the BoolNode.
  }
  return this;
}

// Map BoolTest to local table encoding. The BoolTest (e)numerals
//   { eq = 0, ne = 4, le = 5, ge = 7, lt = 3, gt = 1 }
// are mapped to table indices, while the remaining (e)numerals in BoolTest
//   { overflow = 2, no_overflow = 6, never = 8, illegal = 9 }
// are ignored (these are not modeled in the table).
//
static int subsuming_bool_test_encode(Node* node) {
  precond(node->is_Bool());
  BoolTest::mask x = node->as_Bool()->_test._test;
  switch (x) {
    case BoolTest::eq: return 0;
    case BoolTest::ne: return 1;
    case BoolTest::lt: return 2;
    case BoolTest::le: return 3;
    case BoolTest::gt: return 4;
    case BoolTest::ge: return 5;
    case BoolTest::overflow:
    case BoolTest::no_overflow:
    case BoolTest::never:
    case BoolTest::illegal:
    default:
      return -1;
  }
}
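// Worked example for simple_subsuming() and the table above (illustrative sketch):
// suppose both tests share one CmpI of i against n, the dominating test is 'i < n'
// and we sit on its true projection (column lt+T, bout == 0), and 'this' test is
// 'i <= n' (row le). Then s_short_circuit_map[le][lt+T] == tb, so the condition of
// 'this' If is replaced by the constant 1 (always true), its non-CFG users are
// rewired to the dominating projection, and its false projection eventually dies.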
//------------------------------Identity---------------------------------------
// If the test is constant & we match, then we are the input Control
Node* IfProjNode::Identity(PhaseGVN* phase) {
  // Can only optimize if cannot go the other way
  const TypeTuple *t = phase->type(in(0))->is_tuple();
  if (t == TypeTuple::IFNEITHER || (always_taken(t) &&
       // During parsing (GVN) we don't remove dead code aggressively.
       // Cut off dead branch and let PhaseRemoveUseless take care of it.
      (!phase->is_IterGVN() ||
       // During IGVN, first wait for the dead branch to be killed.
       // Otherwise, the IfNode's control will have two control uses (the IfNode
       // that doesn't go away because it still has uses and this branch of the
       // If) which breaks other optimizations. Node::has_special_unique_user()
       // will cause this node to be reprocessed once the dead branch is killed.
       in(0)->outcnt() == 1))) {
    // IfNode control
    if (in(0)->is_BaseCountedLoopEnd()) {
      // A CountedLoopEndNode may be eliminated by if-subsumption; replace the
      // CountedLoopNode with a plain LoopNode to avoid a mismatch between the
      // CountedLoopNode and its CountedLoopEndNode in later optimizations.
      Node* head = unique_ctrl_out_or_null();
      if (head != nullptr && head->is_BaseCountedLoop() && head->in(LoopNode::LoopBackControl) == this) {
        Node* new_head = new LoopNode(head->in(LoopNode::EntryControl), this);
        phase->is_IterGVN()->register_new_node_with_optimizer(new_head);
        phase->is_IterGVN()->replace_node(head, new_head);
      }
    }
    return in(0)->in(0);
  }
  // no progress
  return this;
}

#ifndef PRODUCT
//------------------------------dump_spec--------------------------------------
void IfNode::dump_spec(outputStream *st) const {
  st->print("P=%f, C=%f", _prob, _fcnt);
}
#endif
//------------------------------idealize_test----------------------------------
// Try to canonicalize tests better.  Peek at the Cmp/Bool/If sequence and
// come up with a canonical sequence.  Bools in 'eq', 'gt' and 'ge' form are
// converted to 'ne', 'le' and 'lt' forms.  IfTrue/IfFalse get swapped as
// needed.
static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff) {
  assert(iff->in(0) != nullptr, "If must be live");

  if (iff->outcnt() != 2)  return nullptr; // Malformed projections.
  Node* old_if_f = iff->proj_out(false);
  Node* old_if_t = iff->proj_out(true);

  // CountedLoopEnds want the back-control test to be TRUE, regardless of
  // whether they are testing a 'gt' or 'lt' condition.  The 'gt' condition
  // happens in count-down loops
  if (iff->is_BaseCountedLoopEnd())  return nullptr;
  if (!iff->in(1)->is_Bool())  return nullptr; // Happens for partially optimized IF tests
  BoolNode *b = iff->in(1)->as_Bool();
  BoolTest bt = b->_test;
  // Test already in good order?
  if( bt.is_canonical() )
    return nullptr;

  // Flip test to be canonical.  Requires flipping the IfFalse/IfTrue and
  // cloning the IfNode.
  Node* new_b = phase->transform( new BoolNode(b->in(1), bt.negate()) );
  if( !new_b->is_Bool() ) return nullptr;
  b = new_b->as_Bool();

  PhaseIterGVN *igvn = phase->is_IterGVN();
  assert( igvn, "Test is not canonical in parser?" );

  // The IF node never really changes, but it needs to be cloned
  iff = iff->clone()->as_If();
  iff->set_req(1, b);
  iff->_prob = 1.0-iff->_prob;

  Node *prior = igvn->hash_find_insert(iff);
  if( prior ) {
    igvn->remove_dead_node(iff);
    iff = (IfNode*)prior;
  } else {
    // Cannot call transform on it just yet
    igvn->set_type_bottom(iff);
  }
  igvn->_worklist.push(iff);

  // Now handle projections.  Cloning not required.
  Node* new_if_f = (Node*)(new IfFalseNode( iff ));
  Node* new_if_t = (Node*)(new IfTrueNode ( iff ));

  igvn->register_new_node_with_optimizer(new_if_f);
  igvn->register_new_node_with_optimizer(new_if_t);
  // Flip test, so flip trailing control
  igvn->replace_node(old_if_f, new_if_t);
  igvn->replace_node(old_if_t, new_if_f);

  // Progress
  return iff;
}
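// Example: a test with a non-canonical 'gt' BoolTest and taken-probability p is
// rebuilt above as the negated ('le') test on a cloned If with probability 1-p, and
// the IfTrue/IfFalse projections are swapped, so downstream users see the same
// behavior under the canonical form.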
Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* res = Ideal_common(phase, can_reshape);
  if (res != NodeSentinel) {
    return res;
  }

  PhaseIterGVN *igvn = phase->is_IterGVN();
  // Setup to scan up the CFG looking for a dominating test
  Node* prev_dom = this;

  // Check for range-check vs other kinds of tests
  Node* index1;
  Node* range1;
  jint offset1;
  int flip1 = is_range_check(range1, index1, offset1);
  if (flip1) {
    Node* dom = in(0);
    // Try to remove extra range checks.  Since 'up_one_dom' gives up at merges,
    // all checks we inspect post-dominate the top-most check we find.
    // If we are going to fail the current check and we reach the top check
    // then we are guaranteed to fail, so just start interpreting there.
    // We 'expand' the top 3 range checks to include all post-dominating
    // checks.

    // The top 3 range checks seen
    const int NRC = 3;
    RangeCheck prev_checks[NRC];
    int nb_checks = 0;

    // Low and high offsets seen so far
    jint off_lo = offset1;
    jint off_hi = offset1;

    bool found_immediate_dominator = false;

    // Scan for the top checks and collect range of offsets
    for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
      if (dom->Opcode() == Op_RangeCheck &&  // Another RangeCheck?
          prev_dom->in(0) == dom) {          // One path of test does dominate?
        if (dom == this) return nullptr;     // dead loop
        // See if this is a range check
        Node* index2;
        Node* range2;
        jint offset2;
        int flip2 = dom->as_RangeCheck()->is_range_check(range2, index2, offset2);
        // See if this is a _matching_ range check, checking against
        // the same array bounds.
        if (flip2 == flip1 && range2 == range1 && index2 == index1 &&
            dom->outcnt() == 2) {
          if (nb_checks == 0 && dom->in(1) == in(1)) {
            // Found an immediately dominating test at the same offset.
            // This kind of back-to-back test can be eliminated locally,
            // and there is no need to search further for dominating tests.
            assert(offset2 == offset1, "Same test but different offsets");
            found_immediate_dominator = true;
            break;
          }
          // Gather expanded bounds
          off_lo = MIN2(off_lo, offset2);
          off_hi = MAX2(off_hi, offset2);
          // Record top NRC range checks
          prev_checks[nb_checks%NRC].ctl = prev_dom;
          prev_checks[nb_checks%NRC].off = offset2;
          nb_checks++;
        }
      }
      prev_dom = dom;
      dom = up_one_dom(dom);
      if (!dom) break;
    }

    if (!found_immediate_dominator) {
      // Attempt to widen the dominating range check to cover some later
      // ones.  Since range checks "fail" by uncommon-trapping to the
      // interpreter, widening a check can make us speculatively enter
      // the interpreter.  If we see range-check deopt's, do not widen!
      if (!phase->C->allow_range_check_smearing())  return nullptr;

      // Didn't find prior covering check, so cannot remove anything.
      if (nb_checks == 0) {
        return nullptr;
      }
      // Constant indices only need to check the upper bound.
      // Non-constant indices must check both low and high.
      int chk0 = (nb_checks - 1) % NRC;
      if (index1) {
        if (nb_checks == 1) {
          return nullptr;
        } else {
          // If the top range check's constant is the min or max of
          // all constants we widen the next one to cover the whole
          // range of constants.
          RangeCheck rc0 = prev_checks[chk0];
          int chk1 = (nb_checks - 2) % NRC;
          RangeCheck rc1 = prev_checks[chk1];
          if (rc0.off == off_lo) {
            adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
            prev_dom = rc1.ctl;
          } else if (rc0.off == off_hi) {
            adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
            prev_dom = rc1.ctl;
          } else {
            // If the top test's constant is not the min or max of all
            // constants, we need 3 range checks. We must leave the
            // top test unchanged because widening it would allow the
            // accesses it protects to successfully read/write out of
            // bounds.
            if (nb_checks == 2) {
              return nullptr;
            }
            int chk2 = (nb_checks - 3) % NRC;
            RangeCheck rc2 = prev_checks[chk2];
            // The top range check a+i covers interval: -a <= i < length-a
            // The second range check b+i covers interval: -b <= i < length-b
            if (rc1.off <= rc0.off) {
              // if b <= a, we change the second range check to:
              // -min_of_all_constants <= i < length-min_of_all_constants
              // Together top and second range checks now cover:
              // -min_of_all_constants <= i < length-a
              // which is more restrictive than -b <= i < length-b:
              // -b <= -min_of_all_constants <= i < length-a <= length-b
              // The third check is then changed to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // so 2nd and 3rd checks restrict allowed values of i to:
              // -min_of_all_constants <= i < length-max_of_all_constants
              adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
              adjust_check(rc2.ctl, range1, index1, flip1, off_hi, igvn);
            } else {
              // if b > a, we change the second range check to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // Together top and second range checks now cover:
              // -a <= i < length-max_of_all_constants
              // which is more restrictive than -b <= i < length-b:
              // -b < -a <= i < length-max_of_all_constants <= length-b
              // The third check is then changed to:
              // -min_of_all_constants <= i < length-min_of_all_constants
              // so 2nd and 3rd checks restrict allowed values of i to:
              // -min_of_all_constants <= i < length-max_of_all_constants
              adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
              adjust_check(rc2.ctl, range1, index1, flip1, off_lo, igvn);
            }
            prev_dom = rc2.ctl;
          }
        }
      } else {
        RangeCheck rc0 = prev_checks[chk0];
        // 'Widen' the offset of the 1st and only covering check
        adjust_check(rc0.ctl, range1, index1, flip1, off_hi, igvn);
        // Test is now covered by prior checks, dominate it out
        prev_dom = rc0.ctl;
      }
    }
  } else {
    prev_dom = search_identical(4);

    if (prev_dom == nullptr) {
      return nullptr;
    }
  }

  // Replace dominated IfNode
  return dominated_by(prev_dom, igvn);
}
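// Worked example of the range check smearing above (illustrative sketch): with
// accesses to a[i+2], a[i+1] and a[i] in that order, the check being idealized is the
// one for a[i] (offset 0), off_lo == 0 and off_hi == 2, and the top dominating check
// is the one for a[i+2]. Its offset equals off_hi, so the middle check (offset 1) is
// widened to off_lo == 0; the two surviving checks then imply 0 <= i and i+2 < a.length,
// which covers all three offsets, and the current check is folded away via
// dominated_by().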