1 /* 2 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/vectornode.hpp"
#include "utilities/vmError.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
//------------------------------Value------------------------------------------
// Compute the type of the RegionNode.
// A Region is live (Type::CONTROL) if at least one input path carries live
// control; it is Type::TOP (dead) only when every input is missing or dead.
const Type* RegionNode::Value(PhaseGVN* phase) const {
  for( uint i=1; i<req(); ++i ) {       // For all paths in
    Node *n = in(i);                    // Get Control source
    if( !n ) continue;                  // Missing inputs are TOP
    if( phase->type(n) == Type::CONTROL )
      return Type::CONTROL;             // One live input path is enough
  }
  return Type::TOP; // All paths dead?  Then so are we
}

//------------------------------Identity---------------------------------------
// Check for Region being Identity.
Node* RegionNode::Identity(PhaseGVN* phase) {
  // Cannot have Region be an identity, even if it has only 1 input.
  // Phi users cannot have their Region input folded away for them,
  // since they need to select the proper data input
  return this;
}

//------------------------------merge_region-----------------------------------
// If a Region flows into a Region, merge into one big happy merge.
// This is
// hard to do if there is stuff that has to happen
//
// Flattens a nested Region (a Region that is an input of 'region') into
// 'region' itself, provided the inner Region has no Phi users and its only
// uses are itself and 'region'.  Returns the modified 'region' on progress,
// or NULL if nothing was done.  Only valid under IterGVN (needs DU info).
static Node *merge_region(RegionNode *region, PhaseGVN *phase) {
  if( region->Opcode() != Op_Region ) // Do not do to LoopNodes
    return NULL;
  Node *progress = NULL;        // Progress flag
  PhaseIterGVN *igvn = phase->is_IterGVN();

  uint rreq = region->req();
  for( uint i = 1; i < rreq; i++ ) {
    Node *r = region->in(i);
    if( r && r->Opcode() == Op_Region && // Found a region?
        r->in(0) == r &&                 // Not already collapsed?
        r != region &&                   // Avoid stupid situations
        r->outcnt() == 2 ) {             // Self user and 'region' user only?
      assert(!r->as_Region()->has_phi(), "no phi users");
      if( !progress ) {         // No progress
        if (region->has_phi()) {
          return NULL;          // Only flatten if no Phi users
          // igvn->hash_delete( phi );
        }
        // Remove from the value-numbering table before mutating edges.
        igvn->hash_delete( region );
        progress = region;      // Making progress
      }
      igvn->hash_delete( r );

      // Append inputs to 'r' onto 'region'
      for( uint j = 1; j < r->req(); j++ ) {
        // Move an input from 'r' to 'region'
        region->add_req(r->in(j));
        r->set_req(j, phase->C->top());
        // Update phis of 'region'
        //for( uint k = 0; k < max; k++ ) {
        //  Node *phi = region->out(k);
        //  if( phi->is_Phi() ) {
        //    phi->add_req(phi->in(i));
        //  }
        //}

        // NOTE: rreq grows so the outer loop also scans the inputs just
        // copied in, allowing transitive flattening in a single pass.
        rreq++;                 // One more input to Region
      } // Found a region to merge into Region
      igvn->_worklist.push(r);
      // Clobber pointer to the now dead 'r'
      region->set_req(i, phase->C->top());
    }
  }

  return progress;
}



//--------------------------------has_phi--------------------------------------
// Helper function: Return any PhiNode that uses this region or NULL
PhiNode* RegionNode::has_phi() const {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* phi = fast_out(i);
    if (phi->is_Phi()) {   // Check for Phi users
      assert(phi->in(0) == (Node*)this, "phi uses region only via in(0)");
      return phi->as_Phi();  // this one is good enough
    }
  }

  return NULL;
}


//-----------------------------has_unique_phi----------------------------------
// Helper function: Return the only PhiNode that uses this region or NULL
// (returns NULL both when there are no Phi users and when there are several).
PhiNode* RegionNode::has_unique_phi() const {
  // Check that only one use is a Phi
  PhiNode* only_phi = NULL;
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* phi = fast_out(i);
    if (phi->is_Phi()) {   // Check for Phi users
      assert(phi->in(0) == (Node*)this, "phi uses region only via in(0)");
      if (only_phi == NULL) {
        only_phi = phi->as_Phi();
      } else {
        return NULL;  // multiple phis
      }
    }
  }

  return only_phi;
}


//------------------------------check_phi_clipping-----------------------------
// Helper function for RegionNode's identification of FP clipping
// Check inputs to the Phi: expects a 3-input Phi merging two ConI constants
// (the clip bounds, returned sorted as min <= max) and one non-constant value.
// Out-params receive the matched nodes and their phi input indices.
// Succeeds only when min <= 0 <= max, the shape produced by FP clipping code.
static bool check_phi_clipping( PhiNode *phi, ConNode * &min, uint &min_idx, ConNode * &max, uint &max_idx, Node * &val, uint &val_idx ) {
  min     = NULL;
  max     = NULL;
  val     = NULL;
  min_idx = 0;
  max_idx = 0;
  val_idx = 0;
  uint  phi_max = phi->req();
  if( phi_max == 4 ) {
    for( uint j = 1; j < phi_max; ++j ) {
      Node *n = phi->in(j);
      int opcode = n->Opcode();
      switch( opcode ) {
      case Op_ConI:
        {
          if( min == NULL ) {
            // First constant seen becomes the provisional 'min'.
            min     = n->Opcode() == Op_ConI ? (ConNode*)n : NULL;
            min_idx = j;
          } else {
            max     = n->Opcode() == Op_ConI ? (ConNode*)n : NULL;
            max_idx = j;
            if( min->get_int() > max->get_int() ) {
              // Swap min and max
              ConNode *temp;
              uint     temp_idx;
              temp     = min;     min     = max;     max     = temp;
              temp_idx = min_idx; min_idx = max_idx; max_idx = temp_idx;
            }
          }
        }
        break;
      default:
        {
          // Any non-constant input is the value being clipped; if there is
          // more than one, the later one wins and the ConI count check fails.
          val = n;
          val_idx = j;
        }
        break;
      }
    }
  }
  return ( min && max && val && (min->get_int() <= 0) && (max->get_int() >=0) );
}


//------------------------------check_if_clipping------------------------------
// Helper function for RegionNode's identification of FP clipping
// Check that inputs to Region come from two IfNodes,
//
//            If
//      False    True
//       If        |
//  False  True    |
//    |      |     |
//  RegionNode_inputs
//
// On success, fills 'top_if' (outer If) and 'bot_if' (inner If) and returns
// true; both are left NULL on failure.
static bool check_if_clipping( const RegionNode *region, IfNode * &bot_if, IfNode * &top_if ) {
  top_if = NULL;
  bot_if = NULL;

  // Check control structure above RegionNode for (if ( if ) )
  Node *in1 = region->in(1);
  Node *in2 = region->in(2);
  Node *in3 = region->in(3);
  // Check that all inputs are projections
  if( in1->is_Proj() && in2->is_Proj() && in3->is_Proj() ) {
    Node *in10 = in1->in(0);
    Node *in20 = in2->in(0);
    Node *in30 = in3->in(0);
    // Check that #1 and #2 are ifTrue and ifFalse from same If
    if( in10 != NULL && in10->is_If() &&
        in20 != NULL && in20->is_If() &&
        in30 != NULL && in30->is_If() && in10 == in20 &&
        (in1->Opcode() != in2->Opcode()) ) {
      Node  *in100  = in10->in(0);
      Node *in1000 = (in100 != NULL && in100->is_Proj()) ? in100->in(0) : NULL;
      // Check that control for in10 comes from other branch of IF from in3
      if( in1000 != NULL && in1000->is_If() &&
          in30 == in1000 && (in3->Opcode() != in100->Opcode()) ) {
        // Control pattern checks
        top_if = (IfNode*)in1000;
        bot_if = (IfNode*)in10;
      }
    }
  }

  return (top_if != NULL);
}


//------------------------------check_convf2i_clipping-------------------------
// Verify that the value input to the phi comes from "ConvF2I; LShift; RShift"
// Helper function for RegionNode's identification of FP clipping.
// Succeeds only when the shift pair merely sign-extends the converted value
// and the clip bounds fit in the post-shift range, so the shifts can be
// dropped; 'convf2i' receives the ConvF2I node on success.
static bool check_convf2i_clipping( PhiNode *phi, uint idx, ConvF2INode * &convf2i, Node *min, Node *max) {
  convf2i = NULL;

  // Check for the RShiftNode
  Node *rshift = phi->in(idx);
  assert( rshift, "Previous checks ensure phi input is present");
  if( rshift->Opcode() != Op_RShiftI )  { return false; }

  // Check for the LShiftNode
  Node *lshift = rshift->in(1);
  assert( lshift, "Previous checks ensure phi input is present");
  if( lshift->Opcode() != Op_LShiftI )  { return false; }

  // Check for the ConvF2INode
  Node *conv = lshift->in(1);
  if( conv->Opcode() != Op_ConvF2I ) { return false; }

  // Check that shift amounts are only to get sign bits set after F2I
  // NOTE(review): get_int() on the shift-count inputs assumes they are
  // constants; presumably guaranteed by the matched idiom - confirm.
  jint  max_cutoff     = max->get_int();
  jint  min_cutoff     = min->get_int();
  jint  left_shift     = lshift->in(2)->get_int();
  jint  right_shift    = rshift->in(2)->get_int();
  jint  max_post_shift = nth_bit(BitsPerJavaInteger - left_shift - 1);
  if( left_shift != right_shift ||
      0 > left_shift || left_shift >= BitsPerJavaInteger ||
      max_post_shift < max_cutoff ||
      max_post_shift < -min_cutoff ) {
    // Shifts are necessary but current transformation eliminates them
    return false;
  }

  // OK to return the result of ConvF2I without shifting
  convf2i = (ConvF2INode*)conv;
  return true;
}


//------------------------------check_compare_clipping-------------------------
// Helper function for RegionNode's identification of FP clipping
// Checks that 'iff' tests a CmpF of some value against a float constant that
// is numerically equal to the int 'limit' (le for the lower bound, lt for the
// upper).  On success 'input' receives the value being compared.
static bool check_compare_clipping( bool less_than, IfNode *iff, ConNode *limit, Node * & input ) {
  Node *i1 = iff->in(1);
  if ( !i1->is_Bool() ) { return false; }
  BoolNode *bool1 = i1->as_Bool();
  if(       less_than && bool1->_test._test != BoolTest::le ) { return false; }
  else if( !less_than && bool1->_test._test != BoolTest::lt ) { return false; }
  const Node *cmpF = bool1->in(1);
  if( cmpF->Opcode() != Op_CmpF ) { return false; }
  // Test that the float value being compared against
  // is equivalent to the int value used as a limit
  Node *nodef = cmpF->in(2);
  if( nodef->Opcode() != Op_ConF ) { return false; }
  jfloat conf = nodef->getf();
  jint   coni = limit->get_int();
  if( ((int)conf) != coni )        { return false; }
  input = cmpF->in(1);
  return true;
}

//------------------------------is_unreachable_region--------------------------
// Find if the Region node is reachable from the root.
// Caches a positive answer in _is_unreachable_region; caller guarantees the
// region has at most one live input (see the assert below).
bool RegionNode::is_unreachable_region(const PhaseGVN* phase) {
  Node* top = phase->C->top();
  assert(req() == 2 || (req() == 3 && in(1) != NULL && in(2) == top), "sanity check arguments");
  if (_is_unreachable_region) {
    // Return cached result from previous evaluation which should still be valid
    assert(is_unreachable_from_root(phase), "walk the graph again and check if its indeed unreachable");
    return true;
  }

  // First, cut the simple case of fallthrough region when NONE of
  // region's phis references itself directly or through a data node.
  if (is_possible_unsafe_loop(phase)) {
    // If we have a possible unsafe loop, check if the region node is actually unreachable from root.
    if (is_unreachable_from_root(phase)) {
      _is_unreachable_region = true;
      return true;
    }
  }
  return false;
}

// Returns true when some Phi hanging off this region might participate in a
// dead data loop, i.e. a full reachability walk is needed to decide liveness.
// Returns false for the cheap fallthrough cases (no phis, or phis whose uses
// are provably safe).
bool RegionNode::is_possible_unsafe_loop(const PhaseGVN* phase) const {
  uint max = outcnt();
  uint i;
  for (i = 0; i < max; i++) {
    Node* n = raw_out(i);
    if (n != NULL && n->is_Phi()) {
      PhiNode* phi = n->as_Phi();
      assert(phi->in(0) == this, "sanity check phi");
      if (phi->outcnt() == 0) {
        continue; // Safe case - no loops
      }
      if (phi->outcnt() == 1) {
        Node* u = phi->raw_out(0);
        // Skip if only one use is another Phi or Call or Uncommon trap.
        // It is safe to consider this case as fallthrough.
        if (u != NULL && (u->is_Phi() || u->is_CFG())) {
          continue;
        }
      }
      // Check when phi references itself directly or through another node.
      if (phi->as_Phi()->simple_data_loop_check(phi->in(1)) >= PhiNode::Unsafe) {
        break; // Found possible unsafe data loop.
      }
    }
  }
  if (i >= max) {
    return false; // An unsafe case was NOT found - don't need graph walk.
  }
  return true;
}

// Forward walk over the CFG from root; this region is dead iff the walk
// never reaches it.
bool RegionNode::is_unreachable_from_root(const PhaseGVN* phase) const {
  ResourceMark rm;
  Node_List nstack;
  VectorSet visited;

  // Mark all control nodes reachable from root outputs
  Node *n = (Node*)phase->C->root();
  nstack.push(n);
  visited.set(n->_idx);
  while (nstack.size() != 0) {
    n = nstack.pop();
    uint max = n->outcnt();
    for (uint i = 0; i < max; i++) {
      Node* m = n->raw_out(i);
      if (m != NULL && m->is_CFG()) {
        if (m == this) {
          return false; // We reached the Region node - it is not dead.
        }
        if (!visited.test_set(m->_idx))
          nstack.push(m);
      }
    }
  }
  return true; // The Region node is unreachable - it is dead.
}

// Detect the diamond-with-MergeMem shape left behind by incremental inlining
// + PhaseStringOpts and return the MergeMem that can replace this memory Phi
// (so the diamond can collapse); returns NULL when the pattern does not match.
Node* PhiNode::try_clean_mem_phi(PhaseGVN *phase) {
  // Incremental inlining + PhaseStringOpts sometimes produce:
  //
  // cmpP with 1 top input
  //           |
  //          If
  //         /  \
  //   IfFalse  IfTrue  /- Some Node
  //         \  /      /    /
  //        Region    / /-MergeMem
  //             \---Phi
  //
  //
  // It's expected by PhaseStringOpts that the Region goes away and is
  // replaced by If's control input but because there's still a Phi,
  // the Region stays in the graph. The top input from the cmpP is
  // propagated forward and a subgraph that is useful goes away. The
  // code below replaces the Phi with the MergeMem so that the Region
  // is simplified.

  if (type() == Type::MEMORY && is_diamond_phi(true)) {
    MergeMemNode* m = NULL;
    assert(req() == 3, "same as region");
    Node* r = in(0);
    for (uint i = 1; i < 3; ++i) {
      Node *mem = in(i);
      if (mem && mem->is_MergeMem() && r->in(i)->outcnt() == 1) {
        // Nothing is control-dependent on path #i except the region itself.
        m = mem->as_MergeMem();
        uint j = 3 - i; // the other diamond path
        Node* other = in(j);
        if (other && other == m->base_memory()) {
          // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
          // This will allow the diamond to collapse completely.
          return m;
        }
      }
    }
  }
  return NULL;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Must preserve
// the CFG, but we can still strip out dead paths.
// Performs, in order: useless-diamond removal, dead/TOP input stripping,
// dead-loop detection, collapse to a copy when <=1 input remains, nested
// Region merging, FP-clipping idiom recognition, and trichotomy optimization.
Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if( !can_reshape && !in(0) ) return NULL;     // Already degraded to a Copy
  assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");

  // Check for RegionNode with no Phi users and both inputs come from either
  // arm of the same IF.  If found, then the control-flow split is useless.
  bool has_phis = false;
  if (can_reshape) {            // Need DU info to check for Phi users
    has_phis = (has_phi() != NULL);       // Cache result
    if (has_phis) {
      PhiNode* phi = has_unique_phi();
      if (phi != NULL) {
        Node* m = phi->try_clean_mem_phi(phase);
        if (m != NULL) {
          phase->is_IterGVN()->replace_node(phi, m);
          has_phis = false;
        }
      }
    }

    if (!has_phis) {            // No Phi users?  Nothing merging?
      for (uint i = 1; i < req()-1; i++) {
        Node *if1 = in(i);
        if( !if1 ) continue;
        Node *iff = if1->in(0);
        if( !iff || !iff->is_If() ) continue;
        for( uint j=i+1; j<req(); j++ ) {
          if( in(j) && in(j)->in(0) == iff &&
              if1->Opcode() != in(j)->Opcode() ) {
            // Add the IF Projections to the worklist. They (and the IF itself)
            // will be eliminated if dead.
            phase->is_IterGVN()->add_users_to_worklist(iff);
            set_req(i, iff->in(0));// Skip around the useless IF diamond
            set_req(j, NULL);
            return this;      // Record progress
          }
        }
      }
    }
  }

  // Remove TOP or NULL input paths. If only 1 input path remains, this Region
  // degrades to a copy.
  bool add_to_worklist = false;
  bool modified = false;
  int cnt = 0;                  // Count of values merging
  DEBUG_ONLY( int cnt_orig = req(); ) // Save original inputs count
  int del_it = 0;               // The last input path we delete
  // For all inputs...
  for( uint i=1; i<req(); ++i ){// For all paths in
    Node *n = in(i);            // Get the input
    if( n != NULL ) {
      // Remove useless control copy inputs
      if( n->is_Region() && n->as_Region()->is_copy() ) {
        set_req(i, n->nonnull_req());
        modified = true;
        i--;                    // rescan this slot with its new input
        continue;
      }
      if( n->is_Proj() ) {      // Remove useless rethrows
        Node *call = n->in(0);
        if (call->is_Call() && call->as_Call()->entry_point() == OptoRuntime::rethrow_stub()) {
          set_req(i, call->in(0));
          modified = true;
          i--;
          continue;
        }
      }
      if( phase->type(n) == Type::TOP ) {
        set_req(i, NULL);       // Ignore TOP inputs
        modified = true;
        i--;
        continue;
      }
      cnt++;                    // One more value merging

    } else if (can_reshape) {   // Else found dead path with DU info
      PhaseIterGVN *igvn = phase->is_IterGVN();
      del_req(i);               // Yank path from self
      del_it = i;
      uint max = outcnt();
      DUIterator j;
      bool progress = true;
      // Keep Phi arity in sync with the Region; re-scan whenever the user
      // set changes under us (replace_node inside the loop can mutate it).
      while(progress) {         // Need to establish property over all users
        progress = false;
        for (j = outs(); has_out(j); j++) {
          Node *n = out(j);
          if( n->req() != req() && n->is_Phi() ) {
            assert( n->in(0) == this, "" );
            igvn->hash_delete(n); // Yank from hash before hacking edges
            n->set_req_X(i,NULL,igvn);// Correct DU info
            n->del_req(i);        // Yank path from Phis
            if( max != outcnt() ) {
              progress = true;
              j = refresh_out_pos(j);
              max = outcnt();
            }
          }
        }
      }
      add_to_worklist = true;
      i--;
    }
  }

  if (can_reshape && cnt == 1) {
    // Is it dead loop?
    // If it is LoopNode it had 2 (+1 itself) inputs and
    // one of them was cut. The loop is dead if it was EntryControl.
    // Loop node may have only one input because entry path
    // is removed in PhaseIdealLoop::Dominators().
    assert(!this->is_Loop() || cnt_orig <= 3, "Loop node should have 3 or less inputs");
    if ((this->is_Loop() && (del_it == LoopNode::EntryControl ||
                             (del_it == 0 && is_unreachable_region(phase)))) ||
        (!this->is_Loop() && has_phis && is_unreachable_region(phase))) {
      // Yes,  the region will be removed during the next step below.
      // Cut the backedge input and remove phis since no data paths left.
      // We don't cut outputs to other nodes here since we need to put them
      // on the worklist.
      PhaseIterGVN *igvn = phase->is_IterGVN();
      if (in(1)->outcnt() == 1) {
        igvn->_worklist.push(in(1));
      }
      del_req(1);
      cnt = 0;
      assert( req() == 1, "no more inputs expected" );
      uint max = outcnt();
      bool progress = true;
      Node *top = phase->C->top();
      DUIterator j;
      while(progress) {
        progress = false;
        for (j = outs(); has_out(j); j++) {
          Node *n = out(j);
          if( n->is_Phi() ) {
            assert(n->in(0) == this, "");
            assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
            // Break dead loop data path.
            // Eagerly replace phis with top to avoid regionless phis.
            igvn->replace_node(n, top);
            if( max != outcnt() ) {
              progress = true;
              j = refresh_out_pos(j);
              max = outcnt();
            }
          }
        }
      }
      add_to_worklist = true;
    }
  }
  if (add_to_worklist) {
    phase->is_IterGVN()->add_users_to_worklist(this); // Revisit collapsed Phis
  }

  if( cnt <= 1 ) {              // Only 1 path in?
    set_req(0, NULL);           // Null control input for region copy
    if( cnt == 0 && !can_reshape) { // Parse phase - leave the node as it is.
      // No inputs or all inputs are NULL.
      return NULL;
    } else if (can_reshape) {   // Optimization phase - remove the node
      PhaseIterGVN *igvn = phase->is_IterGVN();
      // Strip mined (inner) loop is going away, remove outer loop.
      if (is_CountedLoop() &&
          as_Loop()->is_strip_mined()) {
        Node* outer_sfpt = as_CountedLoop()->outer_safepoint();
        Node* outer_out = as_CountedLoop()->outer_loop_exit();
        if (outer_sfpt != NULL && outer_out != NULL) {
          Node* in = outer_sfpt->in(0);
          igvn->replace_node(outer_out, in);
          LoopNode* outer = as_CountedLoop()->outer_loop();
          igvn->replace_input_of(outer, LoopNode::LoopBackControl, igvn->C->top());
        }
      }
      if (is_CountedLoop()) {
        Node* opaq = as_CountedLoop()->is_canonical_loop_entry();
        if (opaq != NULL) {
          // This is not a loop anymore. No need to keep the Opaque1 node on the test that guards the loop as it won't be
          // subject to further loop opts.
          assert(opaq->Opcode() == Op_Opaque1, "");
          igvn->replace_node(opaq, opaq->in(1));
        }
      }
      Node *parent_ctrl;
      if( cnt == 0 ) {
        assert( req() == 1, "no inputs expected" );
        // During IGVN phase such region will be subsumed by TOP node
        // so region's phis will have TOP as control node.
        // Kill phis here to avoid it.
        // Also set other user's input to top.
        parent_ctrl = phase->C->top();
      } else {
        // The fallthrough case since we already checked dead loops above.
        parent_ctrl = in(1);
        assert(parent_ctrl != NULL, "Region is a copy of some non-null control");
        assert(parent_ctrl != this, "Close dead loop");
      }
      if (!add_to_worklist)
        igvn->add_users_to_worklist(this); // Check for further allowed opts
      // Re-wire every remaining user of this Region onto parent_ctrl.
      for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
        Node* n = last_out(i);
        igvn->hash_delete(n); // Remove from worklist before modifying edges
        if (n->outcnt() == 0) {
          int uses_found = n->replace_edge(this, phase->C->top(), igvn);
          if (uses_found > 1) { // (--i) done at the end of the loop.
            i -= (uses_found - 1);
          }
          continue;
        }
        if( n->is_Phi() ) {   // Collapse all Phis
          // Eagerly replace phis to avoid regionless phis.
          Node* in;
          if( cnt == 0 ) {
            assert( n->req() == 1, "No data inputs expected" );
            in = parent_ctrl; // replaced by top
          } else {
            assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
            in = n->in(1);             // replaced by unique input
            if( n->as_Phi()->is_unsafe_data_reference(in) )
              in = phase->C->top();    // replaced by top
          }
          igvn->replace_node(n, in);
        }
        else if( n->is_Region() ) { // Update all incoming edges
          assert(n != this, "Must be removed from DefUse edges");
          int uses_found = n->replace_edge(this, parent_ctrl, igvn);
          if (uses_found > 1) { // (--i) done at the end of the loop.
            i -= (uses_found - 1);
          }
        }
        else {
          assert(n->in(0) == this, "Expect RegionNode to be control parent");
          n->set_req(0, parent_ctrl);
        }
#ifdef ASSERT
        for( uint k=0; k < n->req(); k++ ) {
          assert(n->in(k) != this, "All uses of RegionNode should be gone");
        }
#endif
      }
      // Remove the RegionNode itself from DefUse info
      igvn->remove_dead_node(this);
      return NULL;
    }
    return this;                // Record progress
  }


  // If a Region flows into a Region, merge into one big happy merge.
  if (can_reshape) {
    Node *m = merge_region(this, phase);
    if (m != NULL)  return m;
  }

  // Check if this region is the root of a clipping idiom on floats
  if( ConvertFloat2IntClipping && can_reshape && req() == 4 ) {
    // Check that only one use is a Phi and that it simplifies to two constants +
    PhiNode* phi = has_unique_phi();
    if (phi != NULL) {          // One Phi user
      // Check inputs to the Phi
      ConNode *min;
      ConNode *max;
      Node    *val;
      uint     min_idx;
      uint     max_idx;
      uint     val_idx;
      if( check_phi_clipping( phi, min, min_idx, max, max_idx, val, val_idx )  ) {
        IfNode *top_if;
        IfNode *bot_if;
        if( check_if_clipping( this, bot_if, top_if ) ) {
          // Control pattern checks, now verify compares
          Node   *top_in = NULL;   // value being compared against
          Node   *bot_in = NULL;
          if( check_compare_clipping( true,  bot_if, min, bot_in ) &&
              check_compare_clipping( false, top_if, max, top_in ) ) {
            if( bot_in == top_in ) {
              PhaseIterGVN *gvn = phase->is_IterGVN();
              assert( gvn != NULL, "Only had DefUse info in IterGVN");
              // Only remaining check is that bot_in == top_in == (Phi's val + mods)

              // Check for the ConvF2INode
              ConvF2INode *convf2i;
              if( check_convf2i_clipping( phi, val_idx, convf2i, min, max ) &&
                convf2i->in(1) == bot_in ) {
                // Matched pattern, including LShiftI; RShiftI, replace with integer compares
                // max test
                Node *cmp   = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, min ));
                Node *boo   = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::lt ));
                IfNode *iff = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( top_if->in(0), boo, PROB_UNLIKELY_MAG(5), top_if->_fcnt ));
                Node *if_min= gvn->register_new_node_with_optimizer(new IfTrueNode (iff));
                Node *ifF   = gvn->register_new_node_with_optimizer(new IfFalseNode(iff));
                // min test
                cmp         = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, max ));
                boo         = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::gt ));
                iff         = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( ifF, boo, PROB_UNLIKELY_MAG(5), bot_if->_fcnt ));
                Node *if_max= gvn->register_new_node_with_optimizer(new IfTrueNode (iff));
                ifF         = gvn->register_new_node_with_optimizer(new IfFalseNode(iff));
                // update input edges to region node
                set_req_X( min_idx, if_min, gvn );
                set_req_X( max_idx, if_max, gvn );
                set_req_X( val_idx, ifF,    gvn );
                // remove unnecessary 'LShiftI; RShiftI' idiom
                gvn->hash_delete(phi);
                phi->set_req_X( val_idx, convf2i, gvn );
                gvn->hash_find_insert(phi);
                // Return transformed region node
                return this;
              }
            }
          }
        }
      }
    }
  }

  if (can_reshape) {
    modified |= optimize_trichotomy(phase->is_IterGVN());
  }

  return modified ? this : NULL;
}

//------------------------------optimize_trichotomy--------------------------
// Optimize nested comparisons of the following kind:
//
// int compare(int a, int b) {
//   return (a < b) ? -1 : (a == b) ? 0 : 1;
// }
//
// Shape 1:
// if (compare(a, b) == 1) { ... } -> if (a > b) { ... }
//
// Shape 2:
// if (compare(a, b) == 0) { ... } -> if (a == b) { ... }
//
// Above code leads to the following IR shapes where both Ifs compare the
// same value and two out of three region inputs idx1 and idx2 map to
// the same value and control flow.
//
//      (1)   If                 (2)   If
//           /  \                     /  \
//        Proj  Proj               Proj  Proj
//          |      \                |      \
//          |       If              |      If                      If
//          |      /  \             |     /  \                    /  \
//          |   Proj  Proj          |  Proj  Proj      ==>     Proj  Proj
//          |   /      /            \    |    /                  |    /
//        Region     /               \   |   /                   |   /
//             \    /                 \  |  /                    |  /
//             Region                 Region                   Region
//
// The method returns true if 'this' is modified and false otherwise.
bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) {
  int idx1 = 1, idx2 = 2;
  Node* region = NULL;
  if (req() == 3 && in(1) != NULL && in(2) != NULL) {
    // Shape 1: Check if one of the inputs is a region that merges two control
    // inputs and has no other users (especially no Phi users).
    region = in(1)->isa_Region() ? in(1) : in(2)->isa_Region();
    if (region == NULL || region->outcnt() != 2 || region->req() != 3) {
      return false; // No suitable region input found
    }
  } else if (req() == 4) {
    // Shape 2: Check if two control inputs map to the same value of the unique phi
    // user and treat these as if they would come from another region (shape (1)).
    PhiNode* phi = has_unique_phi();
    if (phi == NULL) {
      return false; // No unique phi user
    }
    // Try all three index pairs (1,2), (1,3), (2,3) for equal phi inputs.
    if (phi->in(idx1) != phi->in(idx2)) {
      idx2 = 3;
      if (phi->in(idx1) != phi->in(idx2)) {
        idx1 = 2;
        if (phi->in(idx1) != phi->in(idx2)) {
          return false; // No equal phi inputs found
        }
      }
    }
    assert(phi->in(idx1) == phi->in(idx2), "must be"); // Region is merging same value
    region = this;
  }
  if (region == NULL || region->in(idx1) == NULL || region->in(idx2) == NULL) {
    return false; // Region does not merge two control inputs
  }
  // At this point we know that region->in(idx1) and region->in(idx2) map to the same
  // value and control flow. Now search for ifs that feed into these region inputs.
  ProjNode* proj1 = region->in(idx1)->isa_Proj();
  ProjNode* proj2 = region->in(idx2)->isa_Proj();
  if (proj1 == NULL || proj1->outcnt() != 1 ||
      proj2 == NULL || proj2->outcnt() != 1) {
    return false; // No projection inputs with region as unique user found
  }
  assert(proj1 != proj2, "should be different projections");
  IfNode* iff1 = proj1->in(0)->isa_If();
  IfNode* iff2 = proj2->in(0)->isa_If();
  if (iff1 == NULL || iff1->outcnt() != 2 ||
      iff2 == NULL || iff2->outcnt() != 2) {
    return false; // No ifs found
  }
  if (iff1 == iff2) {
    igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
    igvn->replace_input_of(region, idx1, iff1->in(0));
    igvn->replace_input_of(region, idx2, igvn->C->top());
    return (region == this); // Remove useless if (both projections map to the same control/value)
  }
  BoolNode* bol1 = iff1->in(1)->isa_Bool();
  BoolNode* bol2 = iff2->in(1)->isa_Bool();
  if (bol1 == NULL || bol2 == NULL) {
    return false; // No bool inputs found
  }
  Node* cmp1 = bol1->in(1);
  Node* cmp2 = bol2->in(1);
  bool commute = false;
  if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
    return false; // No comparison
  } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
             cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
             cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
             cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
             cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
             cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
    // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
    // SubTypeCheck is not commutative
    return false;
  } else if (cmp1 != cmp2) {
    if (cmp1->in(1) == cmp2->in(2) &&
        cmp1->in(2) == cmp2->in(1)) {
      commute = true; // Same but swapped inputs, commute the test
    } else {
      return false; // Ifs are not comparing the same values
    }
  }
  proj1 = proj1->other_if_proj();
  proj2 = proj2->other_if_proj();
  if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
         proj2->unique_ctrl_out_or_null() == this) ||
        (proj2->unique_ctrl_out_or_null() == iff1 &&
         proj1->unique_ctrl_out_or_null() == this))) {
    return false; // Ifs are not connected through other projs
  }
  // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
  // through 'region' and map to the same value. Merge the boolean tests and replace
  // the ifs by a single comparison.
  BoolTest test1 = (proj1->_con == 1) ? bol1->_test : bol1->_test.negate();
  BoolTest test2 = (proj2->_con == 1) ? bol2->_test : bol2->_test.negate();
  test1 = commute ? test1.commute() : test1;
  // After possibly commuting test1, if we can merge test1 & test2, then proj2/iff2/bol2 are the nodes to refine.
  BoolTest::mask res = test1.merge(test2);
  if (res == BoolTest::illegal) {
    return false; // Unable to merge tests
  }
  // Adjust iff1 to always pass (only iff2 will remain)
  igvn->replace_input_of(iff1, 1, igvn->intcon(proj1->_con));
  if (res == BoolTest::never) {
    // Merged test is always false, adjust iff2 to always fail
    igvn->replace_input_of(iff2, 1, igvn->intcon(1 - proj2->_con));
  } else {
    // Replace bool input of iff2 with merged test
    BoolNode* new_bol = new BoolNode(bol2->in(1), res);
    igvn->replace_input_of(iff2, 1, igvn->transform((proj2->_con == 1) ? new_bol : new_bol->negate(igvn)));
    if (new_bol->outcnt() == 0) {
      igvn->remove_dead_node(new_bol);
    }
  }
  // NOTE: changes above go through igvn->replace_input_of on the ifs, not on
  // 'this' directly, hence no progress is reported here.
  return false;
}

const RegMask &RegionNode::out_RegMask() const {
  return RegMask::Empty;
}

// Find the one non-null required input.  RegionNode only
Node *Node::nonnull_req() const {
  assert( is_Region(), "" );
  for( uint i = 1; i < _cnt; i++ )
    if( in(i) )
      return in(i);
  ShouldNotReachHere();
  return NULL;
}


//=============================================================================
// note that these functions assume that the _adr_type field is flattened
uint PhiNode::hash() const {
  const Type* at = _adr_type;
  return TypeNode::hash() + (at ? at->hash() : 0);
}
bool PhiNode::cmp( const Node &n ) const {
  return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
}
static inline
const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
  if (at == NULL || at == TypePtr::BOTTOM)  return at;
  return Compile::current()->alias_type(at)->adr_type();
}

//----------------------------make---------------------------------------------
// create a new phi with edges matching r and set (initially) to x
PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
  uint preds = r->req();   // Number of predecessor paths
  assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) || (flatten_phi_adr_type(at) == TypeAryPtr::INLINES && Compile::current()->flattened_accesses_share_alias()), "flatten at");
  PhiNode* p = new PhiNode(r, t, at);
  for (uint j = 1; j < preds; j++) {
    // Fill in all inputs, except those which the region does not yet have
    if (r->in(j) != NULL)
      p->init_req(j, x);
  }
  return p;
}
// Convenience overload: derive type (and flattened adr_type for memory) from x.
PhiNode* PhiNode::make(Node* r, Node* x) {
  const Type*    t  = x->bottom_type();
  const TypePtr* at = NULL;
  if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
  return make(r, x, t, at);
}
// Like make() but leaves all data inputs uninitialized.
PhiNode* PhiNode::make_blank(Node* r, Node* x) {
  const Type*    t  = x->bottom_type();
  const TypePtr* at = NULL;
  if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
  return new PhiNode(r, t, at);
}


//------------------------slice_memory-----------------------------------------
// create a new phi with narrowed memory type
PhiNode* PhiNode::slice_memory(const TypePtr* adr_type) const {
  PhiNode* mem = (PhiNode*) clone();
  // _adr_type is const; cast it away to retype the freshly cloned node.
  *(const TypePtr**)&mem->_adr_type = adr_type;
  // convert self-loops, or else we get a bad graph
  for (uint i = 1; i < req(); i++) {
    if ((const Node*)in(i) == this)  mem->set_req(i, mem);
  }
  mem->verify_adr_type();
  return mem;
}

//------------------------split_out_instance-----------------------------------
// Split out an instance type from a bottom phi.
// NOTE(review): definition continues beyond the visible portion of this file.
PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const {
  const TypeOopPtr *t_oop = at->isa_oopptr();
  assert(t_oop != NULL && t_oop->is_known_instance(), "expecting instance oopptr");
  const TypePtr *t = adr_type();
  assert(type() == Type::MEMORY &&
         (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
          t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
          t->is_oopptr()->cast_to_exactness(true)
           ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
           ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop),
         "bottom or raw memory required");

  // Check if an appropriate node already exists.
994 Node *region = in(0); 995 for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) { 996 Node* use = region->fast_out(k); 997 if( use->is_Phi()) { 998 PhiNode *phi2 = use->as_Phi(); 999 if (phi2->type() == Type::MEMORY && phi2->adr_type() == at) { 1000 return phi2; 1001 } 1002 } 1003 } 1004 Compile *C = igvn->C; 1005 Arena *a = Thread::current()->resource_area(); 1006 Node_Array node_map = new Node_Array(a); 1007 Node_Stack stack(a, C->live_nodes() >> 4); 1008 PhiNode *nphi = slice_memory(at); 1009 igvn->register_new_node_with_optimizer( nphi ); 1010 node_map.map(_idx, nphi); 1011 stack.push((Node *)this, 1); 1012 while(!stack.is_empty()) { 1013 PhiNode *ophi = stack.node()->as_Phi(); 1014 uint i = stack.index(); 1015 assert(i >= 1, "not control edge"); 1016 stack.pop(); 1017 nphi = node_map[ophi->_idx]->as_Phi(); 1018 for (; i < ophi->req(); i++) { 1019 Node *in = ophi->in(i); 1020 if (in == NULL || igvn->type(in) == Type::TOP) 1021 continue; 1022 Node *opt = MemNode::optimize_simple_memory_chain(in, t_oop, NULL, igvn); 1023 PhiNode *optphi = opt->is_Phi() ? 
opt->as_Phi() : NULL; 1024 if (optphi != NULL && optphi->adr_type() == TypePtr::BOTTOM) { 1025 opt = node_map[optphi->_idx]; 1026 if (opt == NULL) { 1027 stack.push(ophi, i); 1028 nphi = optphi->slice_memory(at); 1029 igvn->register_new_node_with_optimizer( nphi ); 1030 node_map.map(optphi->_idx, nphi); 1031 ophi = optphi; 1032 i = 0; // will get incremented at top of loop 1033 continue; 1034 } 1035 } 1036 nphi->set_req(i, opt); 1037 } 1038 } 1039 return nphi; 1040 } 1041 1042 //------------------------verify_adr_type-------------------------------------- 1043 #ifdef ASSERT 1044 void PhiNode::verify_adr_type(VectorSet& visited, const TypePtr* at) const { 1045 if (visited.test_set(_idx)) return; //already visited 1046 1047 // recheck constructor invariants: 1048 verify_adr_type(false); 1049 1050 // recheck local phi/phi consistency: 1051 assert(_adr_type == at || _adr_type == TypePtr::BOTTOM, 1052 "adr_type must be consistent across phi nest"); 1053 1054 // walk around 1055 for (uint i = 1; i < req(); i++) { 1056 Node* n = in(i); 1057 if (n == NULL) continue; 1058 const Node* np = in(i); 1059 if (np->is_Phi()) { 1060 np->as_Phi()->verify_adr_type(visited, at); 1061 } else if (n->bottom_type() == Type::TOP 1062 || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) { 1063 // ignore top inputs 1064 } else { 1065 const TypePtr* nat = flatten_phi_adr_type(n->adr_type()); 1066 // recheck phi/non-phi consistency at leaves: 1067 assert((nat != NULL) == (at != NULL), ""); 1068 assert(nat == at || nat == TypePtr::BOTTOM, 1069 "adr_type must be consistent at leaves of phi nest"); 1070 } 1071 } 1072 } 1073 1074 // Verify a whole nest of phis rooted at this one. 
// Check constructor invariants of this phi (and, if 'recursive', of the whole
// nest reachable from it). Compiled only under ASSERT.
void PhiNode::verify_adr_type(bool recursive) const {
  if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
  if (Node::in_dump())               return;  // muzzle asserts when printing

  assert((_type == Type::MEMORY) == (_adr_type != NULL), "adr_type for memory phis only");
  // Flat array element shouldn't get their own memory slice until flattened_accesses_share_alias is cleared.
  // It could be the graph has no loads/stores and flattened_accesses_share_alias is never cleared. EA could still
  // create per-element Phis but that wouldn't be a problem as there are no memory accesses for that array.
  assert(_adr_type == NULL || _adr_type->isa_aryptr() == NULL ||
         _adr_type->is_aryptr()->is_known_instance() ||
         !_adr_type->is_aryptr()->is_flat() ||
         !Compile::current()->flattened_accesses_share_alias() ||
         _adr_type == TypeAryPtr::INLINES, "flat array element shouldn't get its own slice yet");

  if (!VerifyAliases)  return;  // verify thoroughly only if requested

  assert(_adr_type == flatten_phi_adr_type(_adr_type),
         "Phi::adr_type must be pre-normalized");

  if (recursive) {
    VectorSet visited;
    verify_adr_type(visited, _adr_type);
  }
}
#endif


//------------------------------Value------------------------------------------
// Compute the type of the PhiNode: the meet of the types on all reachable
// control paths, specialized for counted-loop induction variables and
// hardened against interface/class lattice non-distributivity.
const Type* PhiNode::Value(PhaseGVN* phase) const {
  Node *r = in(0);              // RegionNode
  if( !r )                      // Copy or dead
    return in(1) ? phase->type(in(1)) : Type::TOP;

  // Note: During parsing, phis are often transformed before their regions.
  // This means we have to use type_or_null to defend against untyped regions.
  if( phase->type_or_null(r) == Type::TOP )  // Dead code?
    return Type::TOP;

  // Check for trip-counted loop. If so, be smarter.
  BaseCountedLoopNode* l = r->is_BaseCountedLoop() ? r->as_BaseCountedLoop() : NULL;
  if (l && ((const Node*)l->phi() == this)) { // Trip counted loop!
    // protect against init_trip() or limit() returning NULL
    if (l->can_be_counted_loop(phase)) {
      const Node* init = l->init_trip();
      const Node* limit = l->limit();
      const Node* stride = l->stride();
      if (init != NULL && limit != NULL && stride != NULL) {
        const TypeInteger* lo = phase->type(init)->isa_integer(l->bt());
        const TypeInteger* hi = phase->type(limit)->isa_integer(l->bt());
        const TypeInteger* stride_t = phase->type(stride)->isa_integer(l->bt());
        if (lo != NULL && hi != NULL && stride_t != NULL) { // Dying loops might have TOP here
          assert(stride_t->hi_as_long() >= stride_t->lo_as_long(), "bad stride type");
          BoolTest::mask bt = l->loopexit()->test_trip();
          // If the loop exit condition is "not equal", the condition
          // would not trigger if init > limit (if stride > 0) or if
          // init < limit if (stride > 0) so we can't deduce bounds
          // for the iv from the exit condition.
          if (bt != BoolTest::ne) {
            if (stride_t->hi_as_long() < 0) {          // Down-counter loop
              swap(lo, hi);
              return TypeInteger::make(MIN2(lo->lo_as_long(), hi->lo_as_long()), hi->hi_as_long(), 3, l->bt())->filter_speculative(_type);
            } else if (stride_t->lo_as_long() >= 0) {
              return TypeInteger::make(lo->lo_as_long(), MAX2(lo->hi_as_long(), hi->hi_as_long()), 3, l->bt())->filter_speculative(_type);
            }
          }
        }
      }
    } else if (l->in(LoopNode::LoopBackControl) != NULL &&
               in(LoopNode::EntryControl) != NULL &&
               phase->type(l->in(LoopNode::LoopBackControl)) == Type::TOP) {
      // During CCP, if we saturate the type of a counted loop's Phi
      // before the special code for counted loop above has a chance
      // to run (that is as long as the type of the backedge's control
      // is top), we might end up with non monotonic types
      return phase->type(in(LoopNode::EntryControl))->filter_speculative(_type);
    }
  }

  // Until we have harmony between classes and interfaces in the type
  // lattice, we must tread carefully around phis which implicitly
  // convert the one to the other.
  const TypePtr* ttp = _type->make_ptr();
  const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL;
  const TypeKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_instklassptr() : NULL;
  bool is_intf = false;
  if (ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
    is_intf = true;
  } else if (ttkp != NULL && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
    is_intf = true;
  }

  // Default case: merge all inputs
  const Type *t = Type::TOP;        // Merged type starting value
  for (uint i = 1; i < req(); ++i) {// For all paths in
    // Reachable control path?
    if (r->in(i) && phase->type(r->in(i)) == Type::CONTROL) {
      const Type* ti = phase->type(in(i));
      // We assume that each input of an interface-valued Phi is a true
      // subtype of that interface. This might not be true of the meet
      // of all the input types. The lattice is not distributive in
      // such cases. Ward off asserts in type.cpp by refusing to do
      // meets between interfaces and proper classes.
      const TypePtr* tip = ti->make_ptr();
      const TypeInstPtr* tiip = (tip != NULL) ? tip->isa_instptr() : NULL;
      if (tiip) {
        bool ti_is_intf = false;
        ciKlass* k = tiip->klass();
        if (k->is_loaded() && k->is_interface())
          ti_is_intf = true;
        // Mixed interface/class merge: bail out to the declared type.
        if (is_intf != ti_is_intf)
          { t = _type; break; }
      }
      t = t->meet_speculative(ti);
    }
  }

  // The worst-case type (from ciTypeFlow) should be consistent with "t".
  // That is, we expect that "t->higher_equal(_type)" holds true.
  // There are various exceptions:
  // - Inputs which are phis might in fact be widened unnecessarily.
  //   For example, an input might be a widened int while the phi is a short.
  // - Inputs might be BotPtrs but this phi is dependent on a null check,
  //   and postCCP has removed the cast which encodes the result of the check.
  // - The type of this phi is an interface, and the inputs are classes.
  // - Value calls on inputs might produce fuzzy results.
  //   (Occurrences of this case suggest improvements to Value methods.)
  //
  // It is not possible to see Type::BOTTOM values as phi inputs,
  // because the ciTypeFlow pre-pass produces verifier-quality types.
  const Type* ft = t->filter_speculative(_type);  // Worst case type

#ifdef ASSERT
  // The following logic has been moved into TypeOopPtr::filter.
  const Type* jt = t->join_speculative(_type);
  if (jt->empty()) {           // Emptied out???

    // Check for evil case of 't' being a class and '_type' expecting an
    // interface. This can happen because the bytecodes do not contain
    // enough type info to distinguish a Java-level interface variable
    // from a Java-level object variable. If we meet 2 classes which
    // both implement interface I, but their meet is at 'j/l/O' which
    // doesn't implement I, we have no way to tell if the result should
    // be 'I' or 'j/l/O'. Thus we'll pick 'j/l/O'. If this then flows
    // into a Phi which "knows" it's an Interface type we'll have to
    // uplift the type.
    if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
      assert(ft == _type, ""); // Uplift to interface
    } else if (!t->empty() && ttkp != NULL && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
      assert(ft == _type, ""); // Uplift to interface
    } else {
      // We also have to handle 'evil cases' of interface- vs. class-arrays
      Type::get_arrays_base_elements(jt, _type, NULL, &ttip);
      if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
        assert(ft == _type, ""); // Uplift to array of interface
      } else {
        // Otherwise it's something stupid like non-overlapping int ranges
        // found on dying counted loops.
        assert(ft == Type::TOP, ""); // Canonical empty value
      }
    }
  }

  else {

    // If we have an interface-typed Phi and we narrow to a class type, the join
    // should report back the class. However, if we have a J/L/Object
    // class-typed Phi and an interface flows in, it's possible that the meet &
    // join report an interface back out. This isn't possible but happens
    // because the type system doesn't interact well with interfaces.
    const TypePtr *jtp = jt->make_ptr();
    const TypeInstPtr *jtip = (jtp != NULL) ? jtp->isa_instptr() : NULL;
    const TypeKlassPtr *jtkp = (jtp != NULL) ? jtp->isa_instklassptr() : NULL;
    if( jtip && ttip ) {
      if( jtip->is_loaded() && jtip->klass()->is_interface() &&
          ttip->is_loaded() && !ttip->klass()->is_interface() ) {
        assert(ft == ttip->cast_to_ptr_type(jtip->ptr()) ||
               ft->isa_narrowoop() && ft->make_ptr() == ttip->cast_to_ptr_type(jtip->ptr()), "");
        jt = ft;
      }
    }
    if( jtkp && ttkp ) {
      if( jtkp->is_loaded() && jtkp->klass()->is_interface() &&
          !jtkp->klass_is_exact() && // Keep exact interface klass (6894807)
          ttkp->is_loaded() && !ttkp->klass()->is_interface() ) {
        assert(ft == ttkp->cast_to_ptr_type(jtkp->ptr()) ||
               ft->isa_narrowklass() && ft->make_ptr() == ttkp->cast_to_ptr_type(jtkp->ptr()), "");
        jt = ft;
      }
    }
    // Tolerate int/long ranges that compare unequal but have identical bounds.
    if (jt != ft && jt->base() == ft->base()) {
      if (jt->isa_int() &&
          jt->is_int()->_lo == ft->is_int()->_lo &&
          jt->is_int()->_hi == ft->is_int()->_hi)
        jt = ft;
      if (jt->isa_long() &&
          jt->is_long()->_lo == ft->is_long()->_lo &&
          jt->is_long()->_hi == ft->is_long()->_hi)
        jt = ft;
    }
    if (jt != ft) {
      tty->print("merge type: "); t->dump(); tty->cr();
      tty->print("kill type: "); _type->dump(); tty->cr();
      tty->print("join type: "); jt->dump(); tty->cr();
      tty->print("filter type: "); ft->dump(); tty->cr();
    }
    assert(jt == ft, "");
  }
#endif //ASSERT

  // Deal with conversion problems found in data loops.
  ft = phase->saturate(ft, phase->type_or_null(this), _type);

  return ft;
}


//------------------------------is_diamond_phi---------------------------------
// Does this Phi represent a simple well-shaped diamond merge? Return the
// index of the true path or 0 otherwise.
// If check_control_only is true, do not inspect the If node at the
// top, and return -1 (not an edge number) on success.
1297 int PhiNode::is_diamond_phi(bool check_control_only) const { 1298 // Check for a 2-path merge 1299 Node *region = in(0); 1300 if( !region ) return 0; 1301 if( region->req() != 3 ) return 0; 1302 if( req() != 3 ) return 0; 1303 // Check that both paths come from the same If 1304 Node *ifp1 = region->in(1); 1305 Node *ifp2 = region->in(2); 1306 if( !ifp1 || !ifp2 ) return 0; 1307 Node *iff = ifp1->in(0); 1308 if( !iff || !iff->is_If() ) return 0; 1309 if( iff != ifp2->in(0) ) return 0; 1310 if (check_control_only) return -1; 1311 // Check for a proper bool/cmp 1312 const Node *b = iff->in(1); 1313 if( !b->is_Bool() ) return 0; 1314 const Node *cmp = b->in(1); 1315 if( !cmp->is_Cmp() ) return 0; 1316 1317 // Check for branching opposite expected 1318 if( ifp2->Opcode() == Op_IfTrue ) { 1319 assert( ifp1->Opcode() == Op_IfFalse, "" ); 1320 return 2; 1321 } else { 1322 assert( ifp1->Opcode() == Op_IfTrue, "" ); 1323 return 1; 1324 } 1325 } 1326 1327 //----------------------------check_cmove_id----------------------------------- 1328 // Check for CMove'ing a constant after comparing against the constant. 1329 // Happens all the time now, since if we compare equality vs a constant in 1330 // the parser, we "know" the variable is constant on one path and we force 1331 // it. Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a 1332 // conditional move: "x = (x==0)?0:x;". Yucko. This fix is slightly more 1333 // general in that we don't need constants. Since CMove's are only inserted 1334 // in very special circumstances, we do it here on generic Phi's. 
// If this diamond phi is really a conditional move of an identity value,
// return that value (decasted if it was pinned to a branch of the diamond),
// otherwise NULL.
Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
  assert(true_path !=0, "only diamond shape graph expected");

  // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
  // phi->region->if_proj->ifnode->bool->cmp
  Node*     region = in(0);
  Node*     iff    = region->in(1)->in(0);
  BoolNode* b      = iff->in(1)->as_Bool();
  Node*     cmp    = b->in(1);
  Node*     tval   = in(true_path);
  Node*     fval   = in(3-true_path);
  Node*     id     = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);
  if (id == NULL)
    return NULL;

  // Either value might be a cast that depends on a branch of 'iff'.
  // Since the 'id' value will float free of the diamond, either
  // decast or return failure.
  Node* ctl = id->in(0);
  if (ctl != NULL && ctl->in(0) == iff) {
    if (id->is_ConstraintCast()) {
      return id->in(1);
    } else {
      // Don't know how to disentangle this value.
      return NULL;
    }
  }

  return id;
}

//------------------------------Identity---------------------------------------
// Check for this Phi being an Identity: a single merged value, a CMove of an
// identity value, a cleanable memory phi, or a duplicate of a bottom-memory phi.
Node* PhiNode::Identity(PhaseGVN* phase) {
  // Check for no merging going on
  // (There used to be special-case code here when this->region->is_Loop.
  // It would check for a tributary phi on the backedge that the main phi
  // trivially, perhaps with a single cast. The unique_input method
  // does all this and more, by reducing such tributaries to 'this'.)
  Node* uin = unique_input(phase, false);
  if (uin != NULL) {
    return uin;
  }

  int true_path = is_diamond_phi();
  // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
  if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
    Node* id = is_cmove_id(phase, true_path);
    if (id != NULL) {
      return id;
    }
  }

  // NOTE(review): try_clean_mem_phi is defined elsewhere in this file;
  // presumably it collapses degenerate memory phis — confirm at its definition.
  if (phase->is_IterGVN()) {
    Node* m = try_clean_mem_phi(phase);
    if (m != NULL) {
      return m;
    }
  }


  // Looking for phis with identical inputs. If we find one that has
  // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
  if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
      TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
    uint phi_len = req();
    Node* phi_reg = region();
    for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
      Node* u = phi_reg->fast_out(i);
      if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
          u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
          u->req() == phi_len) {
        // Candidate bottom phi found; all data inputs must match exactly.
        for (uint j = 1; j < phi_len; j++) {
          if (in(j) != u->in(j)) {
            u = NULL;
            break;
          }
        }
        if (u != NULL) {
          return u;
        }
      }
    }
  }

  return this; // No identity
}

//-----------------------------unique_input------------------------------------
// Find the unique value, discounting top, self-loops, and casts.
// Return top if there are no inputs, and self if there are multiple.
// Return the unique live data input of this phi (NULL if there is more than
// one distinct input, top if there are none). With 'uncast' set, inputs are
// compared after stripping intervening ConstraintCasts, and self-loops are
// ignored.
Node* PhiNode::unique_input(PhaseTransform* phase, bool uncast) {
  //  1) One unique direct input,
  // or if uncast is true:
  //  2) some of the inputs have an intervening ConstraintCast
  //  3) an input is a self loop
  //
  //  1) input   or   2) input     or   3) input __
  //     /   \           /   \               \  /  \
  //     \   /          |    cast             phi  cast
  //      phi            \   /                 \  /
  //                      phi                   --
  Node* r = in(0);    // RegionNode
  Node* input = NULL; // The unique direct input (maybe uncasted = ConstraintCasts removed)

  for (uint i = 1, cnt = req(); i < cnt; ++i) {
    Node* rc = r->in(i);
    if (rc == NULL || phase->type(rc) == Type::TOP)
      continue; // ignore unreachable control path
    Node* n = in(i);
    if (n == NULL)
      continue;
    Node* un = n;
    if (uncast) {
#ifdef ASSERT
      Node* m = un->uncast();
#endif
      // Walk down single-input ConstraintCast chains by hand so we can stop
      // before exposing a raw pointer under an oop type at a safepoint.
      while (un != NULL && un->req() == 2 && un->is_ConstraintCast()) {
        Node* next = un->in(1);
        if (phase->type(next)->isa_rawptr() && phase->type(un)->isa_oopptr()) {
          // risk exposing raw ptr at safepoint
          break;
        }
        un = next;
      }
      assert(m == un || un->in(1) == m, "Only expected at CheckCastPP from allocation");
    }
    if (un == NULL || un == this || phase->type(un) == Type::TOP) {
      continue; // ignore if top, or in(i) and "this" are in a data cycle
    }
    // Check for a unique input (maybe uncasted)
    if (input == NULL) {
      input = un;
    } else if (input != un) {
      input = NodeSentinel; // no unique input
    }
  }
  if (input == NULL) {
    return phase->C->top(); // no inputs
  }

  if (input != NodeSentinel) {
    return input; // one unique direct input
  }

  // Nothing.
  return NULL;
}

//------------------------------is_x2logic-------------------------------------
// Check for simple convert-to-boolean pattern
//   If:(C Bool) Region:(IfF IfT) Phi:(Region 0 1)
// Convert Phi to a Conv2B node (possibly xor'ed with 1 to flip the sense).
1489 static Node *is_x2logic( PhaseGVN *phase, PhiNode *phi, int true_path ) { 1490 assert(true_path !=0, "only diamond shape graph expected"); 1491 // Convert the true/false index into an expected 0/1 return. 1492 // Map 2->0 and 1->1. 1493 int flipped = 2-true_path; 1494 1495 // is_diamond_phi() has guaranteed the correctness of the nodes sequence: 1496 // phi->region->if_proj->ifnode->bool->cmp 1497 Node *region = phi->in(0); 1498 Node *iff = region->in(1)->in(0); 1499 BoolNode *b = (BoolNode*)iff->in(1); 1500 const CmpNode *cmp = (CmpNode*)b->in(1); 1501 1502 Node *zero = phi->in(1); 1503 Node *one = phi->in(2); 1504 const Type *tzero = phase->type( zero ); 1505 const Type *tone = phase->type( one ); 1506 1507 // Check for compare vs 0 1508 const Type *tcmp = phase->type(cmp->in(2)); 1509 if( tcmp != TypeInt::ZERO && tcmp != TypePtr::NULL_PTR ) { 1510 // Allow cmp-vs-1 if the other input is bounded by 0-1 1511 if( !(tcmp == TypeInt::ONE && phase->type(cmp->in(1)) == TypeInt::BOOL) ) 1512 return NULL; 1513 flipped = 1-flipped; // Test is vs 1 instead of 0! 1514 } 1515 1516 // Check for setting zero/one opposite expected 1517 if( tzero == TypeInt::ZERO ) { 1518 if( tone == TypeInt::ONE ) { 1519 } else return NULL; 1520 } else if( tzero == TypeInt::ONE ) { 1521 if( tone == TypeInt::ZERO ) { 1522 flipped = 1-flipped; 1523 } else return NULL; 1524 } else return NULL; 1525 1526 // Check for boolean test backwards 1527 if( b->_test._test == BoolTest::ne ) { 1528 } else if( b->_test._test == BoolTest::eq ) { 1529 flipped = 1-flipped; 1530 } else return NULL; 1531 1532 // Build int->bool conversion 1533 Node *n = new Conv2BNode(cmp->in(1)); 1534 if( flipped ) 1535 n = new XorINode( phase->transform(n), phase->intcon(1) ); 1536 1537 return n; 1538 } 1539 1540 //------------------------------is_cond_add------------------------------------ 1541 // Check for simple conditional add pattern: "(P < Q) ? 
X+Y : X;" 1542 // To be profitable the control flow has to disappear; there can be no other 1543 // values merging here. We replace the test-and-branch with: 1544 // "(sgn(P-Q))&Y) + X". Basically, convert "(P < Q)" into 0 or -1 by 1545 // moving the carry bit from (P-Q) into a register with 'sbb EAX,EAX'. 1546 // Then convert Y to 0-or-Y and finally add. 1547 // This is a key transform for SpecJava _201_compress. 1548 static Node* is_cond_add(PhaseGVN *phase, PhiNode *phi, int true_path) { 1549 assert(true_path !=0, "only diamond shape graph expected"); 1550 1551 // is_diamond_phi() has guaranteed the correctness of the nodes sequence: 1552 // phi->region->if_proj->ifnode->bool->cmp 1553 RegionNode *region = (RegionNode*)phi->in(0); 1554 Node *iff = region->in(1)->in(0); 1555 BoolNode* b = iff->in(1)->as_Bool(); 1556 const CmpNode *cmp = (CmpNode*)b->in(1); 1557 1558 // Make sure only merging this one phi here 1559 if (region->has_unique_phi() != phi) return NULL; 1560 1561 // Make sure each arm of the diamond has exactly one output, which we assume 1562 // is the region. Otherwise, the control flow won't disappear. 
1563 if (region->in(1)->outcnt() != 1) return NULL; 1564 if (region->in(2)->outcnt() != 1) return NULL; 1565 1566 // Check for "(P < Q)" of type signed int 1567 if (b->_test._test != BoolTest::lt) return NULL; 1568 if (cmp->Opcode() != Op_CmpI) return NULL; 1569 1570 Node *p = cmp->in(1); 1571 Node *q = cmp->in(2); 1572 Node *n1 = phi->in( true_path); 1573 Node *n2 = phi->in(3-true_path); 1574 1575 int op = n1->Opcode(); 1576 if( op != Op_AddI // Need zero as additive identity 1577 /*&&op != Op_SubI && 1578 op != Op_AddP && 1579 op != Op_XorI && 1580 op != Op_OrI*/ ) 1581 return NULL; 1582 1583 Node *x = n2; 1584 Node *y = NULL; 1585 if( x == n1->in(1) ) { 1586 y = n1->in(2); 1587 } else if( x == n1->in(2) ) { 1588 y = n1->in(1); 1589 } else return NULL; 1590 1591 // Not so profitable if compare and add are constants 1592 if( q->is_Con() && phase->type(q) != TypeInt::ZERO && y->is_Con() ) 1593 return NULL; 1594 1595 Node *cmplt = phase->transform( new CmpLTMaskNode(p,q) ); 1596 Node *j_and = phase->transform( new AndINode(cmplt,y) ); 1597 return new AddINode(j_and,x); 1598 } 1599 1600 //------------------------------is_absolute------------------------------------ 1601 // Check for absolute value. 1602 static Node* is_absolute( PhaseGVN *phase, PhiNode *phi_root, int true_path) { 1603 assert(true_path !=0, "only diamond shape graph expected"); 1604 1605 int cmp_zero_idx = 0; // Index of compare input where to look for zero 1606 int phi_x_idx = 0; // Index of phi input where to find naked x 1607 1608 // ABS ends with the merge of 2 control flow paths. 1609 // Find the false path from the true path. With only 2 inputs, 3 - x works nicely. 
1610 int false_path = 3 - true_path; 1611 1612 // is_diamond_phi() has guaranteed the correctness of the nodes sequence: 1613 // phi->region->if_proj->ifnode->bool->cmp 1614 BoolNode *bol = phi_root->in(0)->in(1)->in(0)->in(1)->as_Bool(); 1615 Node *cmp = bol->in(1); 1616 1617 // Check bool sense 1618 if (cmp->Opcode() == Op_CmpF || cmp->Opcode() == Op_CmpD) { 1619 switch (bol->_test._test) { 1620 case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = true_path; break; 1621 case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = false_path; break; 1622 case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = true_path; break; 1623 case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = false_path; break; 1624 default: return NULL; break; 1625 } 1626 } else if (cmp->Opcode() == Op_CmpI || cmp->Opcode() == Op_CmpL) { 1627 switch (bol->_test._test) { 1628 case BoolTest::lt: 1629 case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = false_path; break; 1630 case BoolTest::gt: 1631 case BoolTest::ge: cmp_zero_idx = 2; phi_x_idx = true_path; break; 1632 default: return NULL; break; 1633 } 1634 } 1635 1636 // Test is next 1637 const Type *tzero = NULL; 1638 switch (cmp->Opcode()) { 1639 case Op_CmpI: tzero = TypeInt::ZERO; break; // Integer ABS 1640 case Op_CmpL: tzero = TypeLong::ZERO; break; // Long ABS 1641 case Op_CmpF: tzero = TypeF::ZERO; break; // Float ABS 1642 case Op_CmpD: tzero = TypeD::ZERO; break; // Double ABS 1643 default: return NULL; 1644 } 1645 1646 // Find zero input of compare; the other input is being abs'd 1647 Node *x = NULL; 1648 bool flip = false; 1649 if( phase->type(cmp->in(cmp_zero_idx)) == tzero ) { 1650 x = cmp->in(3 - cmp_zero_idx); 1651 } else if( phase->type(cmp->in(3 - cmp_zero_idx)) == tzero ) { 1652 // The test is inverted, we should invert the result... 1653 x = cmp->in(cmp_zero_idx); 1654 flip = true; 1655 } else { 1656 return NULL; 1657 } 1658 1659 // Next get the 2 pieces being selected, one is the original value 1660 // and the other is the negated value. 
  // Tail of the abs-pattern match: the branch-selected phi input must be the
  // plain value x; the other input must be Sub(zero, x).
  if( phi_root->in(phi_x_idx) != x ) return NULL;

  // Check other phi input for subtract node
  Node *sub = phi_root->in(3 - phi_x_idx);

  bool is_sub = sub->Opcode() == Op_SubF || sub->Opcode() == Op_SubD ||
                sub->Opcode() == Op_SubI || sub->Opcode() == Op_SubL;

  // Allow only Sub(0,X) and fail out for all others; Neg is not OK
  if (!is_sub || phase->type(sub->in(1)) != tzero || sub->in(2) != x) return NULL;

  // Build the Abs node of the matching width. If the test sense was inverted
  // (flip), wrap it as Sub(0, Abs(x)) to yield the negated absolute value.
  // Integer/long forms are only emitted when the platform has a match rule.
  if (tzero == TypeF::ZERO) {
    x = new AbsFNode(x);
    if (flip) {
      x = new SubFNode(sub->in(1), phase->transform(x));
    }
  } else if (tzero == TypeD::ZERO) {
    x = new AbsDNode(x);
    if (flip) {
      x = new SubDNode(sub->in(1), phase->transform(x));
    }
  } else if (tzero == TypeInt::ZERO && Matcher::match_rule_supported(Op_AbsI)) {
    x = new AbsINode(x);
    if (flip) {
      x = new SubINode(sub->in(1), phase->transform(x));
    }
  } else if (tzero == TypeLong::ZERO && Matcher::match_rule_supported(Op_AbsL)) {
    x = new AbsLNode(x);
    if (flip) {
      x = new SubLNode(sub->in(1), phase->transform(x));
    }
  } else return NULL;

  return x;
}

//------------------------------split_once-------------------------------------
// Helper for split_flow_path.
// For every position where 'phi' has input 'val', move the corresponding edge
// of the old Region/Phi 'n' over to the new Region/Phi 'newn', then append
// 'newn' itself as a fresh last input of 'n'.
static void split_once(PhaseIterGVN *igvn, Node *phi, Node *val, Node *n, Node *newn) {
  igvn->hash_delete(n); // Remove from hash before hacking edges

  // Walk positions downward so del_req(i) (which shifts higher-numbered
  // edges down) cannot disturb positions still to be visited.
  uint j = 1;
  for (uint i = phi->req()-1; i > 0; i--) {
    if (phi->in(i) == val) { // Found a path with val?
      // Add to NEW Region/Phi, no DU info
      newn->set_req( j++, n->in(i) );
      // Remove from OLD Region/Phi
      n->del_req(i);
    }
  }

  // Register the new node but do not transform it. Cannot transform until the
  // entire Region/Phi conglomerate has been hacked as a single huge transform.
  igvn->register_new_node_with_optimizer( newn );

  // Now I can point to the new node.
  n->add_req(newn);
  igvn->_worklist.push(n);
}

//------------------------------split_flow_path--------------------------------
// Check for merging identical values and split flow paths.
// If two or more (but not all) inputs of 'phi' are the same pointer constant,
// peel those control paths off into a separate Region so the constant is
// merged only once. Returns the modified phi, or NULL if nothing was done.
static Node* split_flow_path(PhaseGVN *phase, PhiNode *phi) {
  BasicType bt = phi->type()->basic_type();
  if( bt == T_ILLEGAL || type2size[bt] <= 0 )
    return NULL;       // Bail out on funny non-value stuff
  if( phi->req() <= 3 ) // Need at least 2 matched inputs and a
    return NULL;        // third unequal input to be worth doing

  // Scan for a constant
  uint i;
  for( i = 1; i < phi->req()-1; i++ ) {
    Node *n = phi->in(i);
    if( !n ) return NULL;
    if( phase->type(n) == Type::TOP ) return NULL;
    if( n->Opcode() == Op_ConP || n->Opcode() == Op_ConN || n->Opcode() == Op_ConNKlass )
      break;
  }
  // NOTE: if the scan fell through without a break, i == req()-1 and this
  // guard does not fire; the counting loop below then sees only one
  // candidate, so the 'hit <= 1' check still rejects that case.
  if( i >= phi->req() ) // Only split for constants
    return NULL;

  Node *val = phi->in(i); // Constant to split for
  uint hit = 0;           // Number of times it occurs
  Node *r = phi->region();

  for( ; i < phi->req(); i++ ){ // Count occurrences of constant
    Node *n = phi->in(i);
    if( !n ) return NULL;
    if( phase->type(n) == Type::TOP ) return NULL;
    if( phi->in(i) == val ) {
      hit++;
      if (PhaseIdealLoop::find_predicate(r->in(i)) != NULL) {
        return NULL; // don't split loop entry path
      }
    }
  }

  if( hit <= 1 ||           // Make sure we find 2 or more
      hit == phi->req()-1 ) // and not ALL the same value
    return NULL;

  // Now start splitting out the flow paths that merge the same value.
  // Split first the RegionNode.
  PhaseIterGVN *igvn = phase->is_IterGVN();
  // New Region merging exactly the 'hit' duplicated paths.
  RegionNode *newr = new RegionNode(hit+1);
  split_once(igvn, phi, val, r, newr);

  // Now split all other Phis than this one
  for (DUIterator_Fast kmax, k = r->fast_outs(kmax); k < kmax; k++) {
    Node* phi2 = r->fast_out(k);
    if( phi2->is_Phi() && phi2->as_Phi() != phi ) {
      PhiNode *newphi = PhiNode::make_blank(newr, phi2);
      split_once(igvn, phi, val, phi2, newphi);
    }
  }

  // Clean up this guy: drop every occurrence of val and re-append a single
  // one, lining up with the single new Region input that split_once added.
  igvn->hash_delete(phi);
  for( i = phi->req()-1; i > 0; i-- ) {
    if( phi->in(i) == val ) {
      phi->del_req(i);
    }
  }
  phi->add_req(val);

  return phi;
}

//=============================================================================
//------------------------------simple_data_loop_check-------------------------
// Try to determine if the phi node is in a simple safe/unsafe data loop.
// Returns:
// enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop };
// Safe       - safe case when the phi and its inputs reference only safe data
//              nodes;
// Unsafe     - the phi and its inputs reference unsafe data nodes but there
//              is no reference back to the phi - need a graph walk
//              to determine if it is in a loop;
// UnsafeLoop - unsafe case when the phi references itself directly or through
//              an unsafe data node.
// Note: a safe data node is a node which could/never reference itself during
// GVN transformations. For now it is Con, Proj, Phi, CastPP, CheckCastPP.
// I mark Phi nodes as safe nodes not only because they can reference
// themselves but also to prevent mistaking the fallthrough case inside an
// outer loop as a dead loop when the phi references itself through another phi.
PhiNode::LoopSafety PhiNode::simple_data_loop_check(Node *in) const {
  // It is an unsafe loop if the phi node references itself directly.
  if (in == (Node*)this)
    return UnsafeLoop; // Unsafe loop
  // Unsafe loop if the phi node references itself through an unsafe data node.
  // Exclude cases with null inputs or data nodes which could reference
  // itself (safe for dead loops).
  if (in != NULL && !in->is_dead_loop_safe()) {
    // Check inputs of phi's inputs also.
    // It is much less expensive than a full graph walk.
    uint cnt = in->req();
    // Projections (non-CFG) keep their lone def in slot 0, so start there.
    uint i = (in->is_Proj() && !in->is_CFG()) ? 0 : 1;
    for (; i < cnt; ++i) {
      Node* m = in->in(i);
      if (m == (Node*)this)
        return UnsafeLoop; // Unsafe loop
      if (m != NULL && !m->is_dead_loop_safe()) {
        // Check the most common case (about 30% of all cases):
        // phi->Load/Store->AddP->(ConP ConP Con)/(Parm Parm Con).
        Node *m1 = (m->is_AddP() && m->req() > 3) ? m->in(1) : NULL;
        if (m1 == (Node*)this)
          return UnsafeLoop; // Unsafe loop
        if (m1 != NULL && m1 == m->in(2) &&
            m1->is_dead_loop_safe() && m->in(3)->is_Con()) {
          continue; // Safe case
        }
        // The phi references an unsafe node - need full analysis.
        return Unsafe;
      }
    }
  }
  return Safe; // Safe case - we can optimize the phi node.
}

//------------------------------is_unsafe_data_reference-----------------------
// If phi can be reached through the data input - it is a data loop.
bool PhiNode::is_unsafe_data_reference(Node *in) const {
  assert(req() > 1, "");
  // First, check simple cases when phi references itself directly or
  // through another node.
  LoopSafety safety = simple_data_loop_check(in);
  if (safety == UnsafeLoop)
    return true;  // phi references itself - unsafe loop
  else if (safety == Safe)
    return false; // Safe case - phi could be replaced with the unique input.

  // Unsafe case when we should go through data graph to determine
  // if the phi references itself.

  ResourceMark rm;

  // Depth-first walk over the unsafe part of the data graph, looking for a
  // path from 'in' back to this phi.
  Node_List nstack;
  VectorSet visited;

  nstack.push(in); // Start with unique input.
  visited.set(in->_idx);
  while (nstack.size() != 0) {
    Node* n = nstack.pop();
    uint cnt = n->req();
    // Non-CFG projections keep their lone def in slot 0, so start there.
    uint i = (n->is_Proj() && !n->is_CFG()) ? 0 : 1;
    for (; i < cnt; i++) {
      Node* m = n->in(i);
      if (m == (Node*)this) {
        return true; // Data loop
      }
      if (m != NULL && !m->is_dead_loop_safe()) { // Only look for unsafe cases.
        if (!visited.test_set(m->_idx))
          nstack.push(m);
      }
    }
  }
  return false; // The phi is not reachable from its inputs
}

// Is this Phi's region or some inputs to the region enqueued for IGVN
// and so could cause the region to be optimized out?
// Walks, for each region input, the control chain Proj -> If -> Bool -> Cmp
// and delays (re-enqueues this phi) if any node of that chain is still on the
// IGVN worklist.
bool PhiNode::wait_for_region_igvn(PhaseGVN* phase) {
  PhaseIterGVN* igvn = phase->is_IterGVN();
  Unique_Node_List& worklist = igvn->_worklist;
  bool delay = false;
  Node* r = in(0);
  for (uint j = 1; j < req(); j++) {
    Node* rc = r->in(j);
    // NOTE(review): 'n' is unused in this loop body as visible here — looks
    // like dead code; confirm against mainline before removing.
    Node* n = in(j);
    if (rc != NULL &&
        rc->is_Proj()) {
      if (worklist.member(rc)) {
        delay = true;
      } else if (rc->in(0) != NULL &&
                 rc->in(0)->is_If()) {
        if (worklist.member(rc->in(0))) {
          delay = true;
        } else if (rc->in(0)->in(1) != NULL &&
                   rc->in(0)->in(1)->is_Bool()) {
          if (worklist.member(rc->in(0)->in(1))) {
            delay = true;
          } else if (rc->in(0)->in(1)->in(1) != NULL &&
                     rc->in(0)->in(1)->in(1)->is_Cmp()) {
            if (worklist.member(rc->in(0)->in(1)->in(1))) {
              delay = true;
            }
          }
        }
      }
    }
  }
  if (delay) {
    // Re-visit this phi after the region's control chain has settled.
    worklist.push(this);
  }
  return delay;
}

// Push inline type input nodes (and null) down through the phi recursively (can handle data loops).
InlineTypeBaseNode* PhiNode::push_inline_types_through(PhaseGVN* phase, bool can_reshape, ciInlineKlass* vk, bool is_init) {
  // Start from an all-null inline type of klass vk with per-field phis
  // cloned on this phi's region; each phi input is then merged in below.
  InlineTypeBaseNode* vt = NULL;
  if (_type->isa_ptr()) {
    vt = InlineTypePtrNode::make_null(*phase, vk)->clone_with_phis(phase, in(0), is_init);
  } else {
    vt = InlineTypeNode::make_null(*phase, vk)->clone_with_phis(phase, in(0), is_init);
  }
  if (can_reshape) {
    // Replace phi right away to be able to use the inline
    // type node when reaching the phi again through data loops.
    PhaseIterGVN* igvn = phase->is_IterGVN();
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      Node* u = fast_out(i);
      igvn->rehash_node_delayed(u);
      imax -= u->replace_edge(this, vt);
      --i;
    }
    assert(outcnt() == 0, "should be dead now");
  }
  ResourceMark rm;
  Node_List casts;
  for (uint i = 1; i < req(); ++i) {
    Node* n = in(i);
    // Strip (and remember) any constraint casts wrapping this input.
    while (n->is_ConstraintCast()) {
      casts.push(n);
      n = n->in(1);
    }
    if (phase->type(n)->is_zero_type()) {
      // Constant null merges as a null inline type of the same klass.
      n = InlineTypePtrNode::make_null(*phase, vk);
    } else if (n->is_Phi()) {
      assert(can_reshape, "can only handle phis during IGVN");
      n = phase->transform(n->as_Phi()->push_inline_types_through(phase, can_reshape, vk, is_init));
    }
    while (casts.size() != 0) {
      // Push the cast(s) through the InlineTypePtrNode
      Node* cast = casts.pop()->clone();
      cast->set_req_X(1, n->as_InlineTypePtr()->get_oop(), phase);
      n = n->clone();
      n->as_InlineTypePtr()->set_oop(phase->transform(cast));
      n = phase->transform(n);
    }
    bool transform = !can_reshape && (i == (req()-1)); // Transform phis on last merge
    vt->merge_with(phase, n->as_InlineTypeBase(), i, transform);
  }
  return vt;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Must preserve
// the CFG, but we can still strip out dead paths.
Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node *r = in(0); // RegionNode
  assert(r != NULL && r->is_Region(), "this phi must have a region");
  assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge");

  // Note: During parsing, phis are often transformed before their regions.
  // This means we have to use type_or_null to defend against untyped regions.
  if( phase->type_or_null(r) == Type::TOP ) // Dead code?
    return NULL; // No change

  Node *top = phase->C->top();
  bool new_phi = (outcnt() == 0); // transforming new Phi
  // No change for igvn if new phi is not hooked
  if (new_phi && can_reshape)
    return NULL;

  // There are 2 situations when only one valid phi's input is left
  // (in addition to Region input).
  // One: region is not loop - replace phi with this input.
  // Two: region is loop - replace phi with top since this data path is dead
  //      and we need to break the dead data loop.
  Node* progress = NULL; // Record if any progress made
  for( uint j = 1; j < req(); ++j ){ // For all paths in
    // Check unreachable control paths
    Node* rc = r->in(j);
    Node* n = in(j); // Get the input
    if (rc == NULL || phase->type(rc) == Type::TOP) {
      if (n != top) { // Not already top?
        PhaseIterGVN *igvn = phase->is_IterGVN();
        if (can_reshape && igvn != NULL) {
          igvn->_worklist.push(r);
        }
        // Nuke it down
        set_req_X(j, top, phase);
        progress = this; // Record progress
      }
    }
  }

  if (can_reshape && outcnt() == 0) {
    // set_req() above may kill outputs if Phi is referenced
    // only by itself on the dead (top) control path.
2011 return top; 2012 } 2013 2014 bool uncasted = false; 2015 Node* uin = unique_input(phase, false); 2016 if (uin == NULL && can_reshape && 2017 // If there is a chance that the region can be optimized out do 2018 // not add a cast node that we can't remove yet. 2019 !wait_for_region_igvn(phase)) { 2020 uncasted = true; 2021 uin = unique_input(phase, true); 2022 } 2023 if (uin == top) { // Simplest case: no alive inputs. 2024 if (can_reshape) // IGVN transformation 2025 return top; 2026 else 2027 return NULL; // Identity will return TOP 2028 } else if (uin != NULL) { 2029 // Only one not-NULL unique input path is left. 2030 // Determine if this input is backedge of a loop. 2031 // (Skip new phis which have no uses and dead regions). 2032 if (outcnt() > 0 && r->in(0) != NULL) { 2033 if (is_data_loop(r->as_Region(), uin, phase)) { 2034 // Break this data loop to avoid creation of a dead loop. 2035 if (can_reshape) { 2036 return top; 2037 } else { 2038 // We can't return top if we are in Parse phase - cut inputs only 2039 // let Identity to handle the case. 2040 replace_edge(uin, top, phase); 2041 return NULL; 2042 } 2043 } 2044 } 2045 2046 if (uncasted) { 2047 // Add cast nodes between the phi to be removed and its unique input. 2048 // Wait until after parsing for the type information to propagate from the casts. 2049 assert(can_reshape, "Invalid during parsing"); 2050 const Type* phi_type = bottom_type(); 2051 // Add casts to carry the control dependency of the Phi that is 2052 // going away 2053 Node* cast = NULL; 2054 if (phi_type->isa_ptr()) { 2055 const Type* uin_type = phase->type(uin); 2056 if (!phi_type->isa_oopptr() && !uin_type->isa_oopptr()) { 2057 cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, ConstraintCastNode::StrongDependency); 2058 } else { 2059 // Use a CastPP for a cast to not null and a CheckCastPP for 2060 // a cast to a new klass (and both if both null-ness and 2061 // klass change). 
2062 2063 // If the type of phi is not null but the type of uin may be 2064 // null, uin's type must be casted to not null 2065 if (phi_type->join(TypePtr::NOTNULL) == phi_type->remove_speculative() && 2066 uin_type->join(TypePtr::NOTNULL) != uin_type->remove_speculative()) { 2067 cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, TypePtr::NOTNULL, ConstraintCastNode::StrongDependency); 2068 } 2069 2070 // If the type of phi and uin, both casted to not null, 2071 // differ the klass of uin must be (check)cast'ed to match 2072 // that of phi 2073 if (phi_type->join_speculative(TypePtr::NOTNULL) != uin_type->join_speculative(TypePtr::NOTNULL)) { 2074 Node* n = uin; 2075 if (cast != NULL) { 2076 cast = phase->transform(cast); 2077 n = cast; 2078 } 2079 cast = ConstraintCastNode::make_cast(Op_CheckCastPP, r, n, phi_type, ConstraintCastNode::StrongDependency); 2080 } 2081 if (cast == NULL) { 2082 cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, ConstraintCastNode::StrongDependency); 2083 } 2084 } 2085 } else { 2086 cast = ConstraintCastNode::make_cast_for_type(r, uin, phi_type, ConstraintCastNode::StrongDependency); 2087 } 2088 assert(cast != NULL, "cast should be set"); 2089 cast = phase->transform(cast); 2090 // set all inputs to the new cast(s) so the Phi is removed by Identity 2091 PhaseIterGVN* igvn = phase->is_IterGVN(); 2092 for (uint i = 1; i < req(); i++) { 2093 set_req_X(i, cast, igvn); 2094 } 2095 uin = cast; 2096 } 2097 2098 // One unique input. 2099 debug_only(Node* ident = Identity(phase)); 2100 // The unique input must eventually be detected by the Identity call. 
2101 #ifdef ASSERT 2102 if (ident != uin && !ident->is_top()) { 2103 // print this output before failing assert 2104 r->dump(3); 2105 this->dump(3); 2106 ident->dump(); 2107 uin->dump(); 2108 } 2109 #endif 2110 assert(ident == uin || ident->is_top(), "Identity must clean this up"); 2111 return NULL; 2112 } 2113 2114 Node* opt = NULL; 2115 int true_path = is_diamond_phi(); 2116 if (true_path != 0 && 2117 // If one of the diamond's branch is in the process of dying then, the Phi's input for that branch might transform 2118 // to top. If that happens replacing the Phi with an operation that consumes the Phi's inputs will cause the Phi 2119 // to be replaced by top. To prevent that, delay the transformation until the branch has a chance to be removed. 2120 !(can_reshape && wait_for_region_igvn(phase))) { 2121 // Check for CMove'ing identity. If it would be unsafe, 2122 // handle it here. In the safe case, let Identity handle it. 2123 Node* unsafe_id = is_cmove_id(phase, true_path); 2124 if( unsafe_id != NULL && is_unsafe_data_reference(unsafe_id) ) 2125 opt = unsafe_id; 2126 2127 // Check for simple convert-to-boolean pattern 2128 if( opt == NULL ) 2129 opt = is_x2logic(phase, this, true_path); 2130 2131 // Check for absolute value 2132 if( opt == NULL ) 2133 opt = is_absolute(phase, this, true_path); 2134 2135 // Check for conditional add 2136 if( opt == NULL && can_reshape ) 2137 opt = is_cond_add(phase, this, true_path); 2138 2139 // These 4 optimizations could subsume the phi: 2140 // have to check for a dead data loop creation. 2141 if( opt != NULL ) { 2142 if( opt == unsafe_id || is_unsafe_data_reference(opt) ) { 2143 // Found dead loop. 2144 if( can_reshape ) 2145 return top; 2146 // We can't return top if we are in Parse phase - cut inputs only 2147 // to stop further optimizations for this phi. Identity will return TOP. 
2148 assert(req() == 3, "only diamond merge phi here"); 2149 set_req(1, top); 2150 set_req(2, top); 2151 return NULL; 2152 } else { 2153 return opt; 2154 } 2155 } 2156 } 2157 2158 // Check for merging identical values and split flow paths 2159 if (can_reshape) { 2160 opt = split_flow_path(phase, this); 2161 // This optimization only modifies phi - don't need to check for dead loop. 2162 assert(opt == NULL || opt == this, "do not elide phi"); 2163 if (opt != NULL) return opt; 2164 } 2165 2166 if (in(1) != NULL && in(1)->Opcode() == Op_AddP && can_reshape) { 2167 // Try to undo Phi of AddP: 2168 // (Phi (AddP base address offset) (AddP base2 address2 offset2)) 2169 // becomes: 2170 // newbase := (Phi base base2) 2171 // newaddress := (Phi address address2) 2172 // newoffset := (Phi offset offset2) 2173 // (AddP newbase newaddress newoffset) 2174 // 2175 // This occurs as a result of unsuccessful split_thru_phi and 2176 // interferes with taking advantage of addressing modes. See the 2177 // clone_shift_expressions code in matcher.cpp 2178 Node* addp = in(1); 2179 Node* base = addp->in(AddPNode::Base); 2180 Node* address = addp->in(AddPNode::Address); 2181 Node* offset = addp->in(AddPNode::Offset); 2182 if (base != NULL && address != NULL && offset != NULL && 2183 !base->is_top() && !address->is_top() && !offset->is_top()) { 2184 const Type* base_type = base->bottom_type(); 2185 const Type* address_type = address->bottom_type(); 2186 // make sure that all the inputs are similar to the first one, 2187 // i.e. 
AddP with base == address and same offset as first AddP 2188 bool doit = true; 2189 for (uint i = 2; i < req(); i++) { 2190 if (in(i) == NULL || 2191 in(i)->Opcode() != Op_AddP || 2192 in(i)->in(AddPNode::Base) == NULL || 2193 in(i)->in(AddPNode::Address) == NULL || 2194 in(i)->in(AddPNode::Offset) == NULL || 2195 in(i)->in(AddPNode::Base)->is_top() || 2196 in(i)->in(AddPNode::Address)->is_top() || 2197 in(i)->in(AddPNode::Offset)->is_top()) { 2198 doit = false; 2199 break; 2200 } 2201 if (in(i)->in(AddPNode::Offset) != base) { 2202 base = NULL; 2203 } 2204 if (in(i)->in(AddPNode::Offset) != offset) { 2205 offset = NULL; 2206 } 2207 if (in(i)->in(AddPNode::Address) != address) { 2208 address = NULL; 2209 } 2210 // Accumulate type for resulting Phi 2211 base_type = base_type->meet_speculative(in(i)->in(AddPNode::Base)->bottom_type()); 2212 address_type = address_type->meet_speculative(in(i)->in(AddPNode::Address)->bottom_type()); 2213 } 2214 if (doit && base == NULL) { 2215 // Check for neighboring AddP nodes in a tree. 2216 // If they have a base, use that it. 
2217 for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) { 2218 Node* u = this->fast_out(k); 2219 if (u->is_AddP()) { 2220 Node* base2 = u->in(AddPNode::Base); 2221 if (base2 != NULL && !base2->is_top()) { 2222 if (base == NULL) 2223 base = base2; 2224 else if (base != base2) 2225 { doit = false; break; } 2226 } 2227 } 2228 } 2229 } 2230 if (doit) { 2231 if (base == NULL) { 2232 base = new PhiNode(in(0), base_type, NULL); 2233 for (uint i = 1; i < req(); i++) { 2234 base->init_req(i, in(i)->in(AddPNode::Base)); 2235 } 2236 phase->is_IterGVN()->register_new_node_with_optimizer(base); 2237 } 2238 if (address == NULL) { 2239 address = new PhiNode(in(0), address_type, NULL); 2240 for (uint i = 1; i < req(); i++) { 2241 address->init_req(i, in(i)->in(AddPNode::Address)); 2242 } 2243 phase->is_IterGVN()->register_new_node_with_optimizer(address); 2244 } 2245 if (offset == NULL) { 2246 offset = new PhiNode(in(0), TypeX_X, NULL); 2247 for (uint i = 1; i < req(); i++) { 2248 offset->init_req(i, in(i)->in(AddPNode::Offset)); 2249 } 2250 phase->is_IterGVN()->register_new_node_with_optimizer(offset); 2251 } 2252 return new AddPNode(base, address, offset); 2253 } 2254 } 2255 } 2256 2257 // Split phis through memory merges, so that the memory merges will go away. 2258 // Piggy-back this transformation on the search for a unique input.... 2259 // It will be as if the merged memory is the unique value of the phi. 2260 // (Do not attempt this optimization unless parsing is complete. 2261 // It would make the parser's memory-merge logic sick.) 2262 // (MergeMemNode is not dead_loop_safe - need to check for dead loop.) 
2263 if (progress == NULL && can_reshape && type() == Type::MEMORY) { 2264 // see if this phi should be sliced 2265 uint merge_width = 0; 2266 bool saw_self = false; 2267 // TODO revisit this with JDK-8247216 2268 bool mergemem_only = true; 2269 for( uint i=1; i<req(); ++i ) {// For all paths in 2270 Node *ii = in(i); 2271 // TOP inputs should not be counted as safe inputs because if the 2272 // Phi references itself through all other inputs then splitting the 2273 // Phi through memory merges would create dead loop at later stage. 2274 if (ii == top) { 2275 return NULL; // Delay optimization until graph is cleaned. 2276 } 2277 if (ii->is_MergeMem()) { 2278 MergeMemNode* n = ii->as_MergeMem(); 2279 merge_width = MAX2(merge_width, n->req()); 2280 saw_self = saw_self || (n->base_memory() == this); 2281 } else { 2282 mergemem_only = false; 2283 } 2284 } 2285 2286 // This restriction is temporarily necessary to ensure termination: 2287 if (!mergemem_only && !saw_self && adr_type() == TypePtr::BOTTOM) merge_width = 0; 2288 2289 if (merge_width > Compile::AliasIdxRaw) { 2290 // found at least one non-empty MergeMem 2291 const TypePtr* at = adr_type(); 2292 if (at != TypePtr::BOTTOM) { 2293 // Patch the existing phi to select an input from the merge: 2294 // Phi:AT1(...MergeMem(m0, m1, m2)...) into 2295 // Phi:AT1(...m1...) 2296 int alias_idx = phase->C->get_alias_index(at); 2297 for (uint i=1; i<req(); ++i) { 2298 Node *ii = in(i); 2299 if (ii->is_MergeMem()) { 2300 MergeMemNode* n = ii->as_MergeMem(); 2301 // compress paths and change unreachable cycles to TOP 2302 // If not, we can update the input infinitely along a MergeMem cycle 2303 // Equivalent code is in MemNode::Ideal_common 2304 Node *m = phase->transform(n); 2305 if (outcnt() == 0) { // Above transform() may kill us! 2306 return top; 2307 } 2308 // If transformed to a MergeMem, get the desired slice 2309 // Otherwise the returned node represents memory for every slice 2310 Node *new_mem = (m->is_MergeMem()) ? 
2311 m->as_MergeMem()->memory_at(alias_idx) : m; 2312 // Update input if it is progress over what we have now 2313 if (new_mem != ii) { 2314 set_req_X(i, new_mem, phase->is_IterGVN()); 2315 progress = this; 2316 } 2317 } 2318 } 2319 } else { 2320 // We know that at least one MergeMem->base_memory() == this 2321 // (saw_self == true). If all other inputs also references this phi 2322 // (directly or through data nodes) - it is a dead loop. 2323 bool saw_safe_input = false; 2324 for (uint j = 1; j < req(); ++j) { 2325 Node* n = in(j); 2326 if (n->is_MergeMem()) { 2327 MergeMemNode* mm = n->as_MergeMem(); 2328 if (mm->base_memory() == this || mm->base_memory() == mm->empty_memory()) { 2329 // Skip this input if it references back to this phi or if the memory path is dead 2330 continue; 2331 } 2332 } 2333 if (!is_unsafe_data_reference(n)) { 2334 saw_safe_input = true; // found safe input 2335 break; 2336 } 2337 } 2338 if (!saw_safe_input) { 2339 // There is a dead loop: All inputs are either dead or reference back to this phi 2340 return top; 2341 } 2342 2343 // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into 2344 // MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...)) 2345 PhaseIterGVN* igvn = phase->is_IterGVN(); 2346 assert(igvn != NULL, "sanity check"); 2347 Node* hook = new Node(1); 2348 PhiNode* new_base = (PhiNode*) clone(); 2349 // Must eagerly register phis, since they participate in loops. 
2350 igvn->register_new_node_with_optimizer(new_base); 2351 hook->add_req(new_base); 2352 2353 MergeMemNode* result = MergeMemNode::make(new_base); 2354 for (uint i = 1; i < req(); ++i) { 2355 Node *ii = in(i); 2356 if (ii->is_MergeMem()) { 2357 MergeMemNode* n = ii->as_MergeMem(); 2358 if (igvn) { 2359 // TODO revisit this with JDK-8247216 2360 // Put 'n' on the worklist because it might be modified by MergeMemStream::iteration_setup 2361 igvn->_worklist.push(n); 2362 } 2363 for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) { 2364 // If we have not seen this slice yet, make a phi for it. 2365 bool made_new_phi = false; 2366 if (mms.is_empty()) { 2367 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C)); 2368 made_new_phi = true; 2369 igvn->register_new_node_with_optimizer(new_phi); 2370 hook->add_req(new_phi); 2371 mms.set_memory(new_phi); 2372 } 2373 Node* phi = mms.memory(); 2374 assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice"); 2375 phi->set_req(i, mms.memory2()); 2376 } 2377 } 2378 } 2379 // Distribute all self-loops. 2380 { // (Extra braces to hide mms.) 2381 for (MergeMemStream mms(result); mms.next_non_empty(); ) { 2382 Node* phi = mms.memory(); 2383 for (uint i = 1; i < req(); ++i) { 2384 if (phi->in(i) == this) phi->set_req(i, phi); 2385 } 2386 } 2387 } 2388 // Already replace this phi node to cut it off from the graph to not interfere in dead loop checks during the 2389 // transformations of the new phi nodes below. Otherwise, we could wrongly conclude that there is no dead loop 2390 // because we are finding this phi node again. Also set the type of the new MergeMem node in case we are also 2391 // visiting it in the transformations below. 
2392 igvn->replace_node(this, result); 2393 igvn->set_type(result, result->bottom_type()); 2394 2395 // now transform the new nodes, and return the mergemem 2396 for (MergeMemStream mms(result); mms.next_non_empty(); ) { 2397 Node* phi = mms.memory(); 2398 mms.set_memory(phase->transform(phi)); 2399 } 2400 hook->destruct(igvn); 2401 // Replace self with the result. 2402 return result; 2403 } 2404 } 2405 // 2406 // Other optimizations on the memory chain 2407 // 2408 const TypePtr* at = adr_type(); 2409 for( uint i=1; i<req(); ++i ) {// For all paths in 2410 Node *ii = in(i); 2411 Node *new_in = MemNode::optimize_memory_chain(ii, at, NULL, phase); 2412 if (ii != new_in ) { 2413 set_req(i, new_in); 2414 progress = this; 2415 } 2416 } 2417 } 2418 2419 #ifdef _LP64 2420 // Push DecodeN/DecodeNKlass down through phi. 2421 // The rest of phi graph will transform by split EncodeP node though phis up. 2422 if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == NULL) { 2423 bool may_push = true; 2424 bool has_decodeN = false; 2425 bool is_decodeN = false; 2426 for (uint i=1; i<req(); ++i) {// For all paths in 2427 Node *ii = in(i); 2428 if (ii->is_DecodeNarrowPtr() && ii->bottom_type() == bottom_type()) { 2429 // Do optimization if a non dead path exist. 2430 if (ii->in(1)->bottom_type() != Type::TOP) { 2431 has_decodeN = true; 2432 is_decodeN = ii->is_DecodeN(); 2433 } 2434 } else if (!ii->is_Phi()) { 2435 may_push = false; 2436 } 2437 } 2438 2439 if (has_decodeN && may_push) { 2440 PhaseIterGVN *igvn = phase->is_IterGVN(); 2441 // Make narrow type for new phi. 
2442 const Type* narrow_t; 2443 if (is_decodeN) { 2444 narrow_t = TypeNarrowOop::make(this->bottom_type()->is_ptr()); 2445 } else { 2446 narrow_t = TypeNarrowKlass::make(this->bottom_type()->is_ptr()); 2447 } 2448 PhiNode* new_phi = new PhiNode(r, narrow_t); 2449 uint orig_cnt = req(); 2450 for (uint i=1; i<req(); ++i) {// For all paths in 2451 Node *ii = in(i); 2452 Node* new_ii = NULL; 2453 if (ii->is_DecodeNarrowPtr()) { 2454 assert(ii->bottom_type() == bottom_type(), "sanity"); 2455 new_ii = ii->in(1); 2456 } else { 2457 assert(ii->is_Phi(), "sanity"); 2458 if (ii->as_Phi() == this) { 2459 new_ii = new_phi; 2460 } else { 2461 if (is_decodeN) { 2462 new_ii = new EncodePNode(ii, narrow_t); 2463 } else { 2464 new_ii = new EncodePKlassNode(ii, narrow_t); 2465 } 2466 igvn->register_new_node_with_optimizer(new_ii); 2467 } 2468 } 2469 new_phi->set_req(i, new_ii); 2470 } 2471 igvn->register_new_node_with_optimizer(new_phi, this); 2472 if (is_decodeN) { 2473 progress = new DecodeNNode(new_phi, bottom_type()); 2474 } else { 2475 progress = new DecodeNKlassNode(new_phi, bottom_type()); 2476 } 2477 } 2478 } 2479 #endif 2480 2481 // Check recursively if inputs are either an inline type, constant null 2482 // or another Phi (including self references through data loops). If so, 2483 // push the inline types down through the phis to enable folding of loads. 
  if (EnableValhalla && (_type->isa_ptr() || _type->isa_inlinetype()) && req() > 2) {
    ResourceMark rm;
    Unique_Node_List worklist;
    worklist.push(this);
    bool can_optimize = true;
    ciInlineKlass* vk = NULL;
    // true if all IsInit inputs of all InlineType* nodes are true
    bool is_init = true;
    Node_List casts;

    // TODO 8284443 We need to prevent endless pushing through
    // TestLWorld -XX:+UseZGC -DScenarios=0 -DTest=test69
    // TestLWorld -XX:-TieredCompilation -XX:-DoEscapeAnalysis -XX:+AlwaysIncrementalInline
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      Node* n = fast_out(i);
      if (n->is_InlineTypePtr() && n->in(1) == this) {
        can_optimize = false;
        break;
      }
    }
    // TODO 8284443 We could revisit the same node over and over again, right?
    // Worklist-based scan over this phi and any transitively reachable phi
    // inputs: all leaves must be inline types of one common klass or null.
    for (uint next = 0; next < worklist.size() && can_optimize; next++) {
      Node* phi = worklist.at(next);
      for (uint i = 1; i < phi->req() && can_optimize; i++) {
        Node* n = phi->in(i);
        if (n == NULL) {
          can_optimize = false;
          break;
        }
        while (n->is_ConstraintCast()) {
          if (n->in(0) != NULL && n->in(0)->is_top()) {
            // Will die, don't optimize
            can_optimize = false;
            break;
          }
          casts.push(n);
          n = n->in(1);
        }
        const Type* t = phase->type(n);
        if (n->is_InlineTypeBase() && n->as_InlineTypeBase()->can_merge() &&
            (vk == NULL || vk == t->inline_klass())) {
          vk = (vk == NULL) ? t->inline_klass() : vk;
          if (phase->find_int_con(n->as_InlineTypeBase()->get_is_init(), 0) != 1) {
            is_init = false;
          }
        } else if (n->is_Phi() && can_reshape && (n->bottom_type()->isa_ptr() || n->bottom_type()->isa_inlinetype())) {
          worklist.push(n);
        } else if (t->is_zero_type()) {
          is_init = false;
        } else {
          can_optimize = false;
        }
      }
    }
    // Check if cast nodes can be pushed through
    const Type* t = Type::get_const_type(vk);
    while (casts.size() != 0 && can_optimize && t != NULL) {
      Node* cast = casts.pop();
      if (t->filter(cast->bottom_type()) == Type::TOP) {
        can_optimize = false;
      }
    }
    if (can_optimize && vk != NULL) {
      // TODO 8275400
      // assert(!_type->isa_ptr() || _type->maybe_null() || is_init, "Phi not null but a possible null was seen");
      return push_inline_types_through(phase, can_reshape, vk, is_init);
    }
  }

  // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
  if (EnableVectorReboxing && can_reshape && progress == NULL && type()->isa_oopptr()) {
    progress = merge_through_phi(this, phase->is_IterGVN());
  }

  return progress; // Return any progress
}

// Clone the phi tree rooted at root_phi, replacing every VectorBox leaf by
// that leaf's input edge 'c' (Box or Value), and give all cloned phis type t.
// Used by merge_through_phi to build the unboxed/boxed operand phi trees.
Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
  Node_Stack stack(1);
  VectorSet visited;
  Node_List node_map; // maps original phi idx -> cloned phi

  stack.push(root_phi, 1); // ignore control
  visited.set(root_phi->_idx);

  Node* new_phi = new PhiNode(root_phi->in(0), t);
  node_map.map(root_phi->_idx, new_phi);

  while (stack.is_nonempty()) {
    Node* n = stack.node();
    uint idx = stack.index();
    assert(n->is_Phi(), "not a phi");
    if (idx < n->req()) {
      stack.set_index(idx + 1);
      Node* def = n->in(idx);
      if (def == NULL) {
        continue; // ignore dead path
      } else if (def->is_Phi()) { // inner node
        Node* new_phi = node_map[n->_idx];
        if (!visited.test_set(def->_idx)) { // not visited yet
          node_map.map(def->_idx, new PhiNode(def->in(0), t));
          stack.push(def, 1); // ignore control
        }
        Node* new_in = node_map[def->_idx];
        new_phi->set_req(idx, new_in);
      } else if (def->Opcode() == Op_VectorBox) { // leaf
        assert(n->is_Phi(), "not a phi");
        Node* new_phi = node_map[n->_idx];
        new_phi->set_req(idx, def->in(c));
      } else {
        // merge_through_phi already verified the shape, so this is unreachable.
        assert(false, "not optimizeable");
        return NULL;
      }
    } else {
      // All inputs of this clone are wired up; register it and retreat.
      Node* new_phi = node_map[n->_idx];
      igvn->register_new_node_with_optimizer(new_phi, n);
      stack.pop();
    }
  }
  return new_phi;
}

// Verify that the phi tree rooted at root_phi merges only VectorBox nodes of
// one consistent box/vector type, then rebuild it as a single VectorBox over
// two cloned phi trees (one for the box oops, one for the vector values).
// Returns NULL if the shape or the types do not allow the transformation.
Node* PhiNode::merge_through_phi(Node* root_phi, PhaseIterGVN* igvn) {
  Node_Stack stack(1);
  VectorSet visited;

  stack.push(root_phi, 1); // ignore control
  visited.set(root_phi->_idx);

  VectorBoxNode* cached_vbox = NULL;
  while (stack.is_nonempty()) {
    Node* n = stack.node();
    uint idx = stack.index();
    if (idx < n->req()) {
      stack.set_index(idx + 1);
      Node* in = n->in(idx);
      if (in == NULL) {
        continue; // ignore dead path
      } else if (in->isa_Phi()) {
        if (!visited.test_set(in->_idx)) {
          stack.push(in, 1); // ignore control
        }
      } else if (in->Opcode() == Op_VectorBox) {
        VectorBoxNode* vbox = static_cast<VectorBoxNode*>(in);
        if (cached_vbox == NULL) {
          cached_vbox = vbox;
        } else if (vbox->vec_type() != cached_vbox->vec_type()) {
          // TODO: vector type mismatch can be handled with additional reinterpret casts
          assert(Type::cmp(vbox->vec_type(), cached_vbox->vec_type()) != 0, "inconsistent");
          return NULL; // not optimizable: vector type mismatch
        } else if (vbox->box_type() != cached_vbox->box_type()) {
          assert(Type::cmp(vbox->box_type(), cached_vbox->box_type()) != 0, "inconsistent");
          return NULL; // not optimizable: box type mismatch
        }
      } else {
        return NULL; // not optimizable: neither Phi nor VectorBox
      }
    } else {
      stack.pop();
    }
  }

  assert(cached_vbox != NULL, "sanity");
  const TypeInstPtr* btype = cached_vbox->box_type();
  const TypeVect* vtype = cached_vbox->vec_type();
  Node* new_vbox_phi = clone_through_phi(root_phi, btype, VectorBoxNode::Box, igvn);
  Node* new_vect_phi = clone_through_phi(root_phi, vtype, VectorBoxNode::Value, igvn);
  return new VectorBoxNode(igvn->C, new_vbox_phi, new_vect_phi, btype, vtype);
}

// Returns true if replacing this phi by 'uin' would create a dead data loop.
bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) {
  // First, take the short cut when we know it is a loop and the EntryControl data path is dead.
  // The loop node may only have one input because the entry path was removed in PhaseIdealLoop::Dominators().
  // Then, check if there is a data loop when the phi references itself directly or through other data nodes.
  assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or less inputs");
  const bool is_loop = (r->is_Loop() && r->req() == 3);
  const Node* top = phase->C->top();
  if (is_loop) {
    // On a Loop region, only the entry-path value is safe; anything else
    // must come around the backedge and would loop on itself.
    return !uin->eqv_uncast(in(LoopNode::EntryControl));
  } else {
    // We have a data loop either with an unsafe data reference or if a region is unreachable.
    return is_unsafe_data_reference(uin)
           || (r->req() == 3 && (r->in(1) != top && r->in(2) == top && r->is_unreachable_region(phase)));
  }
}

//------------------------------is_tripcount-----------------------------------
// True when this phi is the induction-variable phi of its enclosing
// BaseCountedLoop of basic type 'bt'.
bool PhiNode::is_tripcount(BasicType bt) const {
  return (in(0) != NULL && in(0)->is_BaseCountedLoop() &&
          in(0)->as_BaseCountedLoop()->bt() == bt &&
          in(0)->as_BaseCountedLoop()->phi() == this);
}

//------------------------------out_RegMask------------------------------------
const RegMask &PhiNode::in_RegMask(uint i) const {
  return i ?
out_RegMask() : RegMask::Empty; 2680 } 2681 2682 const RegMask &PhiNode::out_RegMask() const { 2683 uint ideal_reg = _type->ideal_reg(); 2684 assert( ideal_reg != Node::NotAMachineReg, "invalid type at Phi" ); 2685 if( ideal_reg == 0 ) return RegMask::Empty; 2686 assert(ideal_reg != Op_RegFlags, "flags register is not spillable"); 2687 return *(Compile::current()->matcher()->idealreg2spillmask[ideal_reg]); 2688 } 2689 2690 #ifndef PRODUCT 2691 void PhiNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const { 2692 // For a PhiNode, the set of related nodes includes all inputs till level 2, 2693 // and all outputs till level 1. In compact mode, inputs till level 1 are 2694 // collected. 2695 this->collect_nodes(in_rel, compact ? 1 : 2, false, false); 2696 this->collect_nodes(out_rel, -1, false, false); 2697 } 2698 2699 void PhiNode::dump_spec(outputStream *st) const { 2700 TypeNode::dump_spec(st); 2701 if (is_tripcount(T_INT) || is_tripcount(T_LONG)) { 2702 st->print(" #tripcount"); 2703 } 2704 } 2705 #endif 2706 2707 2708 //============================================================================= 2709 const Type* GotoNode::Value(PhaseGVN* phase) const { 2710 // If the input is reachable, then we are executed. 2711 // If the input is not reachable, then we are not executed. 2712 return phase->type(in(0)); 2713 } 2714 2715 Node* GotoNode::Identity(PhaseGVN* phase) { 2716 return in(0); // Simple copy of incoming control 2717 } 2718 2719 const RegMask &GotoNode::out_RegMask() const { 2720 return RegMask::Empty; 2721 } 2722 2723 #ifndef PRODUCT 2724 //-----------------------------related----------------------------------------- 2725 // The related nodes of a GotoNode are all inputs at level 1, as well as the 2726 // outputs at level 1. This is regardless of compact mode. 
2727 void GotoNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const { 2728 this->collect_nodes(in_rel, 1, false, false); 2729 this->collect_nodes(out_rel, -1, false, false); 2730 } 2731 #endif 2732 2733 2734 //============================================================================= 2735 const RegMask &JumpNode::out_RegMask() const { 2736 return RegMask::Empty; 2737 } 2738 2739 #ifndef PRODUCT 2740 //-----------------------------related----------------------------------------- 2741 // The related nodes of a JumpNode are all inputs at level 1, as well as the 2742 // outputs at level 2 (to include actual jump targets beyond projection nodes). 2743 // This is regardless of compact mode. 2744 void JumpNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const { 2745 this->collect_nodes(in_rel, 1, false, false); 2746 this->collect_nodes(out_rel, -2, false, false); 2747 } 2748 #endif 2749 2750 //============================================================================= 2751 const RegMask &JProjNode::out_RegMask() const { 2752 return RegMask::Empty; 2753 } 2754 2755 //============================================================================= 2756 const RegMask &CProjNode::out_RegMask() const { 2757 return RegMask::Empty; 2758 } 2759 2760 2761 2762 //============================================================================= 2763 2764 uint PCTableNode::hash() const { return Node::hash() + _size; } 2765 bool PCTableNode::cmp( const Node &n ) const 2766 { return _size == ((PCTableNode&)n)._size; } 2767 2768 const Type *PCTableNode::bottom_type() const { 2769 const Type** f = TypeTuple::fields(_size); 2770 for( uint i = 0; i < _size; i++ ) f[i] = Type::CONTROL; 2771 return TypeTuple::make(_size, f); 2772 } 2773 2774 //------------------------------Value------------------------------------------ 2775 // Compute the type of the PCTableNode. 
// If reachable it is a tuple of
// Control, otherwise the table targets are not reachable
const Type* PCTableNode::Value(PhaseGVN* phase) const {
  if( phase->type(in(0)) == Type::CONTROL )
    return bottom_type();
  return Type::TOP; // All paths dead? Then so are we
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node. Strip out
// control copies
Node *PCTableNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//=============================================================================
// JumpProj nodes additionally hash/compare on the destination bytecode index.
uint JumpProjNode::hash() const {
  return Node::hash() + _dest_bci;
}

bool JumpProjNode::cmp( const Node &n ) const {
  return ProjNode::cmp(n) &&
         _dest_bci == ((JumpProjNode&)n)._dest_bci;
}

#ifndef PRODUCT
void JumpProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  st->print("@bci %d ",_dest_bci);
}

void JumpProjNode::dump_compact_spec(outputStream *st) const {
  ProjNode::dump_compact_spec(st);
  st->print("(%d)%d@%d", _switch_val, _proj_no, _dest_bci);
}

void JumpProjNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  // The related nodes of a JumpProjNode are its inputs and outputs at level 1.
  this->collect_nodes(in_rel, 1, false, false);
  this->collect_nodes(out_rel, -1, false, false);
}
#endif

//=============================================================================
//------------------------------Value------------------------------------------
// Check for being unreachable, or for coming from a Rethrow. Rethrow's cannot
// have the default "fall_through_index" path.
const Type* CatchNode::Value(PhaseGVN* phase) const {
  // Unreachable? Then so are all paths from here.
  if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
  // First assume all paths are reachable
  const Type** f = TypeTuple::fields(_size);
  for( uint i = 0; i < _size; i++ ) f[i] = Type::CONTROL;
  // Identify cases that will always throw an exception
  // () rethrow call
  // () virtual or interface call with NULL receiver
  // () call is a check cast with incompatible arguments
  if( in(1)->is_Proj() ) {
    Node *i10 = in(1)->in(0);
    if( i10->is_Call() ) {
      CallNode *call = i10->as_Call();
      // Rethrows always throw exceptions, never return
      if (call->entry_point() == OptoRuntime::rethrow_stub()) {
        f[CatchProjNode::fall_through_index] = Type::TOP;
      } else if (call->is_AllocateArray()) {
        // Array allocation: kill the fall-through path when the klass, the
        // length, or the length-validity test is dead (TOP), or when the
        // validity test is constant false.
        Node* klass_node = call->in(AllocateNode::KlassNode);
        Node* length = call->in(AllocateNode::ALength);
        const Type* length_type = phase->type(length);
        const Type* klass_type = phase->type(klass_node);
        Node* valid_length_test = call->in(AllocateNode::ValidLengthTest);
        const Type* valid_length_test_t = phase->type(valid_length_test);
        if (length_type == Type::TOP || klass_type == Type::TOP || valid_length_test_t == Type::TOP ||
            valid_length_test_t->is_int()->is_con(0)) {
          f[CatchProjNode::fall_through_index] = Type::TOP;
        }
      } else if( call->req() > TypeFunc::Parms ) {
        const Type *arg0 = phase->type( call->in(TypeFunc::Parms) );
        // Check for null receiver to virtual or interface calls
        if( call->is_CallDynamicJava() &&
            arg0->higher_equal(TypePtr::NULL_PTR) ) {
          f[CatchProjNode::fall_through_index] = Type::TOP;
        }
      } // End of if not a runtime stub
    } // End of if have call above me
  } // End of slot 1 is not a projection
  return TypeTuple::make(_size, f);
}

//=============================================================================
// CatchProj nodes additionally hash/compare on the handler bytecode index.
uint CatchProjNode::hash() const {
  return Node::hash() + _handler_bci;
}


bool CatchProjNode::cmp( const Node &n ) const {
  return ProjNode::cmp(n) &&
         _handler_bci == ((CatchProjNode&)n)._handler_bci;
}


//------------------------------Identity---------------------------------------
// If only 1 target is possible, choose it if it is the main control
Node* CatchProjNode::Identity(PhaseGVN* phase) {
  // If my value is control and no other value is, then treat as ID
  const TypeTuple *t = phase->type(in(0))->is_tuple();
  if (t->field_at(_con) != Type::CONTROL) return this;
  // If we remove the last CatchProj and elide the Catch/CatchProj, then we
  // also remove any exception table entry. Thus we must know the call
  // feeding the Catch will not really throw an exception. This is ok for
  // the main fall-thru control (happens when we know a call can never throw
  // an exception) or for "rethrow", because a further optimization will
  // yank the rethrow (happens when we inline a function that can throw an
  // exception and the caller has no handler). Not legal, e.g., for passing
  // a NULL receiver to a v-call, or passing bad types to a slow-check-cast.
  // These cases MUST throw an exception via the runtime system, so the VM
  // will be looking for a table entry.
  Node *proj = in(0)->in(1); // Expect a proj feeding CatchNode
  CallNode *call;
  if (_con != TypeFunc::Control && // Bail out if not the main control.
      !(proj->is_Proj() &&         // AND NOT a rethrow
        proj->in(0)->is_Call() &&
        (call = proj->in(0)->as_Call()) &&
        call->entry_point() == OptoRuntime::rethrow_stub()))
    return this;

  // Search for any other path being control
  for (uint i = 0; i < t->cnt(); i++) {
    if (i != _con && t->field_at(i) == Type::CONTROL)
      return this;
  }
  // Only my path is possible; I am identity on control to the jump
  return in(0)->in(0);
}


#ifndef PRODUCT
void CatchProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  st->print("@bci %d ",_handler_bci);
}
#endif

//=============================================================================
//------------------------------Identity---------------------------------------
// Check for CreateEx being Identity.
Node* CreateExNode::Identity(PhaseGVN* phase) {
  if( phase->type(in(1)) == Type::TOP ) return in(1);
  if( phase->type(in(0)) == Type::TOP ) return in(0);
  // We only come from CatchProj, unless the CatchProj goes away.
  // If the CatchProj is optimized away, then we just carry the
  // exception oop through.

  // CheckCastPPNode::Ideal() for inline types reuses the exception
  // paths of a call to perform an allocation: we can see a Phi here.
  if (in(1)->is_Phi()) {
    return this;
  }
  CallNode *call = in(1)->in(0)->as_Call();

  return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) )
    ? this
    : call->in(TypeFunc::Parms);
}

//=============================================================================
//------------------------------Value------------------------------------------
// Check for being unreachable.
2942 const Type* NeverBranchNode::Value(PhaseGVN* phase) const { 2943 if (!in(0) || in(0)->is_top()) return Type::TOP; 2944 return bottom_type(); 2945 } 2946 2947 //------------------------------Ideal------------------------------------------ 2948 // Check for no longer being part of a loop 2949 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) { 2950 if (can_reshape && !in(0)->is_Region()) { 2951 // Dead code elimination can sometimes delete this projection so 2952 // if it's not there, there's nothing to do. 2953 Node* fallthru = proj_out_or_null(0); 2954 if (fallthru != NULL) { 2955 phase->is_IterGVN()->replace_node(fallthru, in(0)); 2956 } 2957 return phase->C->top(); 2958 } 2959 return NULL; 2960 } 2961 2962 #ifndef PRODUCT 2963 void NeverBranchNode::format( PhaseRegAlloc *ra_, outputStream *st) const { 2964 st->print("%s", Name()); 2965 } 2966 #endif