1 /* 2 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/shared/barrierSet.hpp" 27 #include "gc/shared/c2/barrierSetC2.hpp" 28 #include "memory/allocation.inline.hpp" 29 #include "memory/resourceArea.hpp" 30 #include "opto/block.hpp" 31 #include "opto/callnode.hpp" 32 #include "opto/castnode.hpp" 33 #include "opto/cfgnode.hpp" 34 #include "opto/idealGraphPrinter.hpp" 35 #include "opto/loopnode.hpp" 36 #include "opto/machnode.hpp" 37 #include "opto/opcodes.hpp" 38 #include "opto/phaseX.hpp" 39 #include "opto/regalloc.hpp" 40 #include "opto/rootnode.hpp" 41 #include "utilities/macros.hpp" 42 #include "utilities/powerOfTwo.hpp" 43 44 //============================================================================= 45 #define NODE_HASH_MINIMUM_SIZE 255 46 47 //------------------------------NodeHash--------------------------------------- 48 NodeHash::NodeHash(Arena *arena, uint est_max_size) : 49 _a(arena), 50 _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ), 51 _inserts(0), _insert_limit( insert_limit() ), 52 _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ) 53 #ifndef PRODUCT 54 , _grows(0),_look_probes(0), _lookup_hits(0), _lookup_misses(0), 55 _insert_probes(0), _delete_probes(0), _delete_hits(0), _delete_misses(0), 56 _total_inserts(0), _total_insert_probes(0) 57 #endif 58 { 59 // _sentinel must be in the current node space 60 _sentinel = new ProjNode(nullptr, TypeFunc::Control); 61 memset(_table,0,sizeof(Node*)*_max); 62 } 63 64 //------------------------------hash_find-------------------------------------- 65 // Find in hash table 66 Node *NodeHash::hash_find( const Node *n ) { 67 // ((Node*)n)->set_hash( n->hash() ); 68 uint hash = n->hash(); 69 if (hash == Node::NO_HASH) { 70 NOT_PRODUCT( _lookup_misses++ ); 71 return nullptr; 72 } 73 uint key = hash & (_max-1); 74 uint stride = key | 0x01; 75 NOT_PRODUCT( _look_probes++ ); 76 Node *k = _table[key]; // Get hashed value 77 if( !k ) { // ?Miss? 78 NOT_PRODUCT( _lookup_misses++ ); 79 return nullptr; // Miss! 80 } 81 82 int op = n->Opcode(); 83 uint req = n->req(); 84 while( 1 ) { // While probing hash table 85 if( k->req() == req && // Same count of inputs 86 k->Opcode() == op ) { // Same Opcode 87 for( uint i=0; i<req; i++ ) 88 if( n->in(i)!=k->in(i)) // Different inputs? 89 goto collision; // "goto" is a speed hack... 90 if( n->cmp(*k) ) { // Check for any special bits 91 NOT_PRODUCT( _lookup_hits++ ); 92 return k; // Hit! 
93 } 94 } 95 collision: 96 NOT_PRODUCT( _look_probes++ ); 97 key = (key + stride/*7*/) & (_max-1); // Stride through table with relative prime 98 k = _table[key]; // Get hashed value 99 if( !k ) { // ?Miss? 100 NOT_PRODUCT( _lookup_misses++ ); 101 return nullptr; // Miss! 102 } 103 } 104 ShouldNotReachHere(); 105 return nullptr; 106 } 107 108 //------------------------------hash_find_insert------------------------------- 109 // Find in hash table, insert if not already present 110 // Used to preserve unique entries in hash table 111 Node *NodeHash::hash_find_insert( Node *n ) { 112 // n->set_hash( ); 113 uint hash = n->hash(); 114 if (hash == Node::NO_HASH) { 115 NOT_PRODUCT( _lookup_misses++ ); 116 return nullptr; 117 } 118 uint key = hash & (_max-1); 119 uint stride = key | 0x01; // stride must be relatively prime to table siz 120 uint first_sentinel = 0; // replace a sentinel if seen. 121 NOT_PRODUCT( _look_probes++ ); 122 Node *k = _table[key]; // Get hashed value 123 if( !k ) { // ?Miss? 124 NOT_PRODUCT( _lookup_misses++ ); 125 _table[key] = n; // Insert into table! 126 debug_only(n->enter_hash_lock()); // Lock down the node while in the table. 127 check_grow(); // Grow table if insert hit limit 128 return nullptr; // Miss! 129 } 130 else if( k == _sentinel ) { 131 first_sentinel = key; // Can insert here 132 } 133 134 int op = n->Opcode(); 135 uint req = n->req(); 136 while( 1 ) { // While probing hash table 137 if( k->req() == req && // Same count of inputs 138 k->Opcode() == op ) { // Same Opcode 139 for( uint i=0; i<req; i++ ) 140 if( n->in(i)!=k->in(i)) // Different inputs? 141 goto collision; // "goto" is a speed hack... 142 if( n->cmp(*k) ) { // Check for any special bits 143 NOT_PRODUCT( _lookup_hits++ ); 144 return k; // Hit! 145 } 146 } 147 collision: 148 NOT_PRODUCT( _look_probes++ ); 149 key = (key + stride) & (_max-1); // Stride through table w/ relative prime 150 k = _table[key]; // Get hashed value 151 if( !k ) { // ?Miss? 152 NOT_PRODUCT( _lookup_misses++ ); 153 key = (first_sentinel == 0) ? key : first_sentinel; // ?saw sentinel? 154 _table[key] = n; // Insert into table! 155 debug_only(n->enter_hash_lock()); // Lock down the node while in the table. 156 check_grow(); // Grow table if insert hit limit 157 return nullptr; // Miss! 158 } 159 else if( first_sentinel == 0 && k == _sentinel ) { 160 first_sentinel = key; // Can insert here 161 } 162 163 } 164 ShouldNotReachHere(); 165 return nullptr; 166 } 167 168 //------------------------------hash_insert------------------------------------ 169 // Insert into hash table 170 void NodeHash::hash_insert( Node *n ) { 171 // // "conflict" comments -- print nodes that conflict 172 // bool conflict = false; 173 // n->set_hash(); 174 uint hash = n->hash(); 175 if (hash == Node::NO_HASH) { 176 return; 177 } 178 check_grow(); 179 uint key = hash & (_max-1); 180 uint stride = key | 0x01; 181 182 while( 1 ) { // While probing hash table 183 NOT_PRODUCT( _insert_probes++ ); 184 Node *k = _table[key]; // Get hashed value 185 if( !k || (k == _sentinel) ) break; // Found a slot 186 assert( k != n, "already inserted" ); 187 // if( PrintCompilation && PrintOptoStatistics && Verbose ) { tty->print(" conflict: "); k->dump(); conflict = true; } 188 key = (key + stride) & (_max-1); // Stride through table w/ relative prime 189 } 190 _table[key] = n; // Insert into table! 191 debug_only(n->enter_hash_lock()); // Lock down the node while in the table. 
192 // if( conflict ) { n->dump(); } 193 } 194 195 //------------------------------hash_delete------------------------------------ 196 // Replace in hash table with sentinel 197 bool NodeHash::hash_delete( const Node *n ) { 198 Node *k; 199 uint hash = n->hash(); 200 if (hash == Node::NO_HASH) { 201 NOT_PRODUCT( _delete_misses++ ); 202 return false; 203 } 204 uint key = hash & (_max-1); 205 uint stride = key | 0x01; 206 debug_only( uint counter = 0; ); 207 for( ; /* (k != nullptr) && (k != _sentinel) */; ) { 208 debug_only( counter++ ); 209 NOT_PRODUCT( _delete_probes++ ); 210 k = _table[key]; // Get hashed value 211 if( !k ) { // Miss? 212 NOT_PRODUCT( _delete_misses++ ); 213 return false; // Miss! Not in chain 214 } 215 else if( n == k ) { 216 NOT_PRODUCT( _delete_hits++ ); 217 _table[key] = _sentinel; // Hit! Label as deleted entry 218 debug_only(((Node*)n)->exit_hash_lock()); // Unlock the node upon removal from table. 219 return true; 220 } 221 else { 222 // collision: move through table with prime offset 223 key = (key + stride/*7*/) & (_max-1); 224 assert( counter <= _insert_limit, "Cycle in hash-table"); 225 } 226 } 227 ShouldNotReachHere(); 228 return false; 229 } 230 231 //------------------------------round_up--------------------------------------- 232 // Round up to nearest power of 2 233 uint NodeHash::round_up(uint x) { 234 x += (x >> 2); // Add 25% slop 235 return MAX2(16U, round_up_power_of_2(x)); 236 } 237 238 //------------------------------grow------------------------------------------- 239 // Grow _table to next power of 2 and insert old entries 240 void NodeHash::grow() { 241 // Record old state 242 uint old_max = _max; 243 Node **old_table = _table; 244 // Construct new table with twice the space 245 #ifndef PRODUCT 246 _grows++; 247 _total_inserts += _inserts; 248 _total_insert_probes += _insert_probes; 249 _insert_probes = 0; 250 #endif 251 _inserts = 0; 252 _max = _max << 1; 253 _table = NEW_ARENA_ARRAY( _a , Node* , _max ); // (Node**)_a->Amalloc( _max * sizeof(Node*) ); 254 memset(_table,0,sizeof(Node*)*_max); 255 _insert_limit = insert_limit(); 256 // Insert old entries into the new table 257 for( uint i = 0; i < old_max; i++ ) { 258 Node *m = *old_table++; 259 if( !m || m == _sentinel ) continue; 260 debug_only(m->exit_hash_lock()); // Unlock the node upon removal from old table. 261 hash_insert(m); 262 } 263 } 264 265 //------------------------------clear------------------------------------------ 266 // Clear all entries in _table to null but keep storage 267 void NodeHash::clear() { 268 #ifdef ASSERT 269 // Unlock all nodes upon removal from table. 270 for (uint i = 0; i < _max; i++) { 271 Node* n = _table[i]; 272 if (!n || n == _sentinel) continue; 273 n->exit_hash_lock(); 274 } 275 #endif 276 277 memset( _table, 0, _max * sizeof(Node*) ); 278 } 279 280 //-----------------------remove_useless_nodes---------------------------------- 281 // Remove useless nodes from value table, 282 // implementation does not depend on hash function 283 void NodeHash::remove_useless_nodes(VectorSet &useful) { 284 285 // Dead nodes in the hash table inherited from GVN should not replace 286 // existing nodes, remove dead nodes. 
287 uint max = size(); 288 Node *sentinel_node = sentinel(); 289 for( uint i = 0; i < max; ++i ) { 290 Node *n = at(i); 291 if(n != nullptr && n != sentinel_node && !useful.test(n->_idx)) { 292 debug_only(n->exit_hash_lock()); // Unlock the node when removed 293 _table[i] = sentinel_node; // Replace with placeholder 294 } 295 } 296 } 297 298 299 void NodeHash::check_no_speculative_types() { 300 #ifdef ASSERT 301 uint max = size(); 302 Unique_Node_List live_nodes; 303 Compile::current()->identify_useful_nodes(live_nodes); 304 Node *sentinel_node = sentinel(); 305 for (uint i = 0; i < max; ++i) { 306 Node *n = at(i); 307 if (n != nullptr && 308 n != sentinel_node && 309 n->is_Type() && 310 live_nodes.member(n)) { 311 TypeNode* tn = n->as_Type(); 312 const Type* t = tn->type(); 313 const Type* t_no_spec = t->remove_speculative(); 314 assert(t == t_no_spec, "dead node in hash table or missed node during speculative cleanup"); 315 } 316 } 317 #endif 318 } 319 320 #ifndef PRODUCT 321 //------------------------------dump------------------------------------------- 322 // Dump statistics for the hash table 323 void NodeHash::dump() { 324 _total_inserts += _inserts; 325 _total_insert_probes += _insert_probes; 326 if (PrintCompilation && PrintOptoStatistics && Verbose && (_inserts > 0)) { 327 if (WizardMode) { 328 for (uint i=0; i<_max; i++) { 329 if (_table[i]) 330 tty->print("%d/%d/%d ",i,_table[i]->hash()&(_max-1),_table[i]->_idx); 331 } 332 } 333 tty->print("\nGVN Hash stats: %d grows to %d max_size\n", _grows, _max); 334 tty->print(" %d/%d (%8.1f%% full)\n", _inserts, _max, (double)_inserts/_max*100.0); 335 tty->print(" %dp/(%dh+%dm) (%8.2f probes/lookup)\n", _look_probes, _lookup_hits, _lookup_misses, (double)_look_probes/(_lookup_hits+_lookup_misses)); 336 tty->print(" %dp/%di (%8.2f probes/insert)\n", _total_insert_probes, _total_inserts, (double)_total_insert_probes/_total_inserts); 337 // sentinels increase lookup cost, but not insert cost 338 assert((_lookup_misses+_lookup_hits)*4+100 >= _look_probes, "bad hash function"); 339 assert( _inserts+(_inserts>>3) < _max, "table too full" ); 340 assert( _inserts*3+100 >= _insert_probes, "bad hash function" ); 341 } 342 } 343 344 Node *NodeHash::find_index(uint idx) { // For debugging 345 // Find an entry by its index value 346 for( uint i = 0; i < _max; i++ ) { 347 Node *m = _table[i]; 348 if( !m || m == _sentinel ) continue; 349 if( m->_idx == (uint)idx ) return m; 350 } 351 return nullptr; 352 } 353 #endif 354 355 #ifdef ASSERT 356 NodeHash::~NodeHash() { 357 // Unlock all nodes upon destruction of table. 358 if (_table != (Node**)badAddress) clear(); 359 } 360 #endif 361 362 363 //============================================================================= 364 //------------------------------PhaseRemoveUseless----------------------------- 365 // 1) Use a breadthfirst walk to collect useful nodes reachable from root. 366 PhaseRemoveUseless::PhaseRemoveUseless(PhaseGVN* gvn, Unique_Node_List& worklist, PhaseNumber phase_num) : Phase(phase_num) { 367 // Implementation requires an edge from root to each SafePointNode 368 // at a backward branch. Inserted in add_safepoint(). 369 370 // Identify nodes that are reachable from below, useful. 
371 C->identify_useful_nodes(_useful); 372 // Update dead node list 373 C->update_dead_node_list(_useful); 374 375 // Remove all useless nodes from PhaseValues' recorded types 376 // Must be done before disconnecting nodes to preserve hash-table-invariant 377 gvn->remove_useless_nodes(_useful.member_set()); 378 379 // Remove all useless nodes from future worklist 380 worklist.remove_useless_nodes(_useful.member_set()); 381 382 // Disconnect 'useless' nodes that are adjacent to useful nodes 383 C->disconnect_useless_nodes(_useful, worklist); 384 } 385 386 //============================================================================= 387 //------------------------------PhaseRenumberLive------------------------------ 388 // First, remove useless nodes (equivalent to identifying live nodes). 389 // Then, renumber live nodes. 390 // 391 // The set of live nodes is returned by PhaseRemoveUseless in the _useful structure. 392 // If the number of live nodes is 'x' (where 'x' == _useful.size()), then the 393 // PhaseRenumberLive updates the node ID of each node (the _idx field) with a unique 394 // value in the range [0, x). 395 // 396 // At the end of the PhaseRenumberLive phase, the compiler's count of unique nodes is 397 // updated to 'x' and the list of dead nodes is reset (as there are no dead nodes). 398 // 399 // The PhaseRenumberLive phase updates two data structures with the new node IDs. 400 // (1) The "worklist" is "C->igvn_worklist()", which is to collect which nodes need to 401 // be processed by IGVN after removal of the useless nodes. 402 // (2) Type information "gvn->types()" (same as "C->types()") maps every node ID to 403 // the node's type. The mapping is updated to use the new node IDs as well. We 404 // create a new map, and swap it with the old one. 405 // 406 // Other data structures used by the compiler are not updated. The hash table for value 407 // numbering ("C->node_hash()", referenced by PhaseValue::_table) is not updated because 408 // computing the hash values is not based on node IDs. 409 PhaseRenumberLive::PhaseRenumberLive(PhaseGVN* gvn, 410 Unique_Node_List& worklist, 411 PhaseNumber phase_num) : 412 PhaseRemoveUseless(gvn, worklist, Remove_Useless_And_Renumber_Live), 413 _new_type_array(C->comp_arena()), 414 _old2new_map(C->unique(), C->unique(), -1), 415 _is_pass_finished(false), 416 _live_node_count(C->live_nodes()) 417 { 418 assert(RenumberLiveNodes, "RenumberLiveNodes must be set to true for node renumbering to take place"); 419 assert(C->live_nodes() == _useful.size(), "the number of live nodes must match the number of useful nodes"); 420 assert(_delayed.size() == 0, "should be empty"); 421 assert(&worklist == C->igvn_worklist(), "reference still same as the one from Compile"); 422 assert(&gvn->types() == C->types(), "reference still same as that from Compile"); 423 424 GrowableArray<Node_Notes*>* old_node_note_array = C->node_note_array(); 425 if (old_node_note_array != nullptr) { 426 int new_size = (_useful.size() >> 8) + 1; // The node note array uses blocks, see C->_log2_node_notes_block_size 427 new_size = MAX2(8, new_size); 428 C->set_node_note_array(new (C->comp_arena()) GrowableArray<Node_Notes*> (C->comp_arena(), new_size, 0, nullptr)); 429 C->grow_node_notes(C->node_note_array(), new_size); 430 } 431 432 assert(worklist.is_subset_of(_useful), "only useful nodes should still be in the worklist"); 433 434 // Iterate over the set of live nodes. 
435 for (uint current_idx = 0; current_idx < _useful.size(); current_idx++) { 436 Node* n = _useful.at(current_idx); 437 438 const Type* type = gvn->type_or_null(n); 439 _new_type_array.map(current_idx, type); 440 441 assert(_old2new_map.at(n->_idx) == -1, "already seen"); 442 _old2new_map.at_put(n->_idx, current_idx); 443 444 if (old_node_note_array != nullptr) { 445 Node_Notes* nn = C->locate_node_notes(old_node_note_array, n->_idx); 446 C->set_node_notes_at(current_idx, nn); 447 } 448 449 n->set_idx(current_idx); // Update node ID. 450 451 if (update_embedded_ids(n) < 0) { 452 _delayed.push(n); // has embedded IDs; handle later 453 } 454 } 455 456 // VectorSet in Unique_Node_Set must be recomputed, since IDs have changed. 457 worklist.recompute_idx_set(); 458 459 assert(_live_node_count == _useful.size(), "all live nodes must be processed"); 460 461 _is_pass_finished = true; // pass finished; safe to process delayed updates 462 463 while (_delayed.size() > 0) { 464 Node* n = _delayed.pop(); 465 int no_of_updates = update_embedded_ids(n); 466 assert(no_of_updates > 0, "should be updated"); 467 } 468 469 // Replace the compiler's type information with the updated type information. 470 gvn->types().swap(_new_type_array); 471 472 // Update the unique node count of the compilation to the number of currently live nodes. 473 C->set_unique(_live_node_count); 474 475 // Set the dead node count to 0 and reset dead node list. 476 C->reset_dead_node_list(); 477 } 478 479 int PhaseRenumberLive::new_index(int old_idx) { 480 assert(_is_pass_finished, "not finished"); 481 if (_old2new_map.at(old_idx) == -1) { // absent 482 // Allocate a placeholder to preserve uniqueness 483 _old2new_map.at_put(old_idx, _live_node_count); 484 _live_node_count++; 485 } 486 return _old2new_map.at(old_idx); 487 } 488 489 int PhaseRenumberLive::update_embedded_ids(Node* n) { 490 int no_of_updates = 0; 491 if (n->is_Phi()) { 492 PhiNode* phi = n->as_Phi(); 493 if (phi->_inst_id != -1) { 494 if (!_is_pass_finished) { 495 return -1; // delay 496 } 497 int new_idx = new_index(phi->_inst_id); 498 assert(new_idx != -1, ""); 499 phi->_inst_id = new_idx; 500 no_of_updates++; 501 } 502 if (phi->_inst_mem_id != -1) { 503 if (!_is_pass_finished) { 504 return -1; // delay 505 } 506 int new_idx = new_index(phi->_inst_mem_id); 507 assert(new_idx != -1, ""); 508 phi->_inst_mem_id = new_idx; 509 no_of_updates++; 510 } 511 } 512 513 const Type* type = _new_type_array.fast_lookup(n->_idx); 514 if (type != nullptr && type->isa_oopptr() && type->is_oopptr()->is_known_instance()) { 515 if (!_is_pass_finished) { 516 return -1; // delay 517 } 518 int old_idx = type->is_oopptr()->instance_id(); 519 int new_idx = new_index(old_idx); 520 const Type* new_type = type->is_oopptr()->with_instance_id(new_idx); 521 _new_type_array.map(n->_idx, new_type); 522 no_of_updates++; 523 } 524 525 return no_of_updates; 526 } 527 528 void PhaseValues::init_con_caches() { 529 memset(_icons,0,sizeof(_icons)); 530 memset(_lcons,0,sizeof(_lcons)); 531 memset(_zcons,0,sizeof(_zcons)); 532 } 533 534 //--------------------------------find_int_type-------------------------------- 535 const TypeInt* PhaseValues::find_int_type(Node* n) { 536 if (n == nullptr) return nullptr; 537 // Call type_or_null(n) to determine node's type since we might be in 538 // parse phase and call n->Value() may return wrong type. 539 // (For example, a phi node at the beginning of loop parsing is not ready.) 
540 const Type* t = type_or_null(n); 541 if (t == nullptr) return nullptr; 542 return t->isa_int(); 543 } 544 545 546 //-------------------------------find_long_type-------------------------------- 547 const TypeLong* PhaseValues::find_long_type(Node* n) { 548 if (n == nullptr) return nullptr; 549 // (See comment above on type_or_null.) 550 const Type* t = type_or_null(n); 551 if (t == nullptr) return nullptr; 552 return t->isa_long(); 553 } 554 555 //------------------------------~PhaseValues----------------------------------- 556 #ifndef PRODUCT 557 PhaseValues::~PhaseValues() { 558 // Statistics for NodeHash 559 _table.dump(); 560 // Statistics for value progress and efficiency 561 if( PrintCompilation && Verbose && WizardMode ) { 562 tty->print("\n%sValues: %d nodes ---> %d/%d (%d)", 563 is_IterGVN() ? "Iter" : " ", C->unique(), made_progress(), made_transforms(), made_new_values()); 564 if( made_transforms() != 0 ) { 565 tty->print_cr(" ratio %f", made_progress()/(float)made_transforms() ); 566 } else { 567 tty->cr(); 568 } 569 } 570 } 571 #endif 572 573 //------------------------------makecon---------------------------------------- 574 ConNode* PhaseValues::makecon(const Type* t) { 575 assert(t->singleton(), "must be a constant"); 576 assert(!t->empty() || t == Type::TOP, "must not be vacuous range"); 577 switch (t->base()) { // fast paths 578 case Type::Half: 579 case Type::Top: return (ConNode*) C->top(); 580 case Type::Int: return intcon( t->is_int()->get_con() ); 581 case Type::Long: return longcon( t->is_long()->get_con() ); 582 default: break; 583 } 584 if (t->is_zero_type()) 585 return zerocon(t->basic_type()); 586 return uncached_makecon(t); 587 } 588 589 //--------------------------uncached_makecon----------------------------------- 590 // Make an idealized constant - one of ConINode, ConPNode, etc. 591 ConNode* PhaseValues::uncached_makecon(const Type *t) { 592 assert(t->singleton(), "must be a constant"); 593 ConNode* x = ConNode::make(t); 594 ConNode* k = (ConNode*)hash_find_insert(x); // Value numbering 595 if (k == nullptr) { 596 set_type(x, t); // Missed, provide type mapping 597 GrowableArray<Node_Notes*>* nna = C->node_note_array(); 598 if (nna != nullptr) { 599 Node_Notes* loc = C->locate_node_notes(nna, x->_idx, true); 600 loc->clear(); // do not put debug info on constants 601 } 602 } else { 603 x->destruct(this); // Hit, destroy duplicate constant 604 x = k; // use existing constant 605 } 606 return x; 607 } 608 609 //------------------------------intcon----------------------------------------- 610 // Fast integer constant. Same as "transform(new ConINode(TypeInt::make(i)))" 611 ConINode* PhaseValues::intcon(jint i) { 612 // Small integer? Check cache! Check that cached node is not dead 613 if (i >= _icon_min && i <= _icon_max) { 614 ConINode* icon = _icons[i-_icon_min]; 615 if (icon != nullptr && icon->in(TypeFunc::Control) != nullptr) 616 return icon; 617 } 618 ConINode* icon = (ConINode*) uncached_makecon(TypeInt::make(i)); 619 assert(icon->is_Con(), ""); 620 if (i >= _icon_min && i <= _icon_max) 621 _icons[i-_icon_min] = icon; // Cache small integers 622 return icon; 623 } 624 625 //------------------------------longcon---------------------------------------- 626 // Fast long constant. 627 ConLNode* PhaseValues::longcon(jlong l) { 628 // Small integer? Check cache! 
Check that cached node is not dead 629 if (l >= _lcon_min && l <= _lcon_max) { 630 ConLNode* lcon = _lcons[l-_lcon_min]; 631 if (lcon != nullptr && lcon->in(TypeFunc::Control) != nullptr) 632 return lcon; 633 } 634 ConLNode* lcon = (ConLNode*) uncached_makecon(TypeLong::make(l)); 635 assert(lcon->is_Con(), ""); 636 if (l >= _lcon_min && l <= _lcon_max) 637 _lcons[l-_lcon_min] = lcon; // Cache small integers 638 return lcon; 639 } 640 ConNode* PhaseValues::integercon(jlong l, BasicType bt) { 641 if (bt == T_INT) { 642 return intcon(checked_cast<jint>(l)); 643 } 644 assert(bt == T_LONG, "not an integer"); 645 return longcon(l); 646 } 647 648 649 //------------------------------zerocon----------------------------------------- 650 // Fast zero or null constant. Same as "transform(ConNode::make(Type::get_zero_type(bt)))" 651 ConNode* PhaseValues::zerocon(BasicType bt) { 652 assert((uint)bt <= _zcon_max, "domain check"); 653 ConNode* zcon = _zcons[bt]; 654 if (zcon != nullptr && zcon->in(TypeFunc::Control) != nullptr) 655 return zcon; 656 zcon = (ConNode*) uncached_makecon(Type::get_zero_type(bt)); 657 _zcons[bt] = zcon; 658 return zcon; 659 } 660 661 662 663 //============================================================================= 664 Node* PhaseGVN::apply_ideal(Node* k, bool can_reshape) { 665 Node* i = BarrierSet::barrier_set()->barrier_set_c2()->ideal_node(this, k, can_reshape); 666 if (i == nullptr) { 667 i = k->Ideal(this, can_reshape); 668 } 669 return i; 670 } 671 672 //------------------------------transform-------------------------------------- 673 // Return a node which computes the same function as this node, but in a 674 // faster or cheaper fashion. 675 Node *PhaseGVN::transform( Node *n ) { 676 return transform_no_reclaim(n); 677 } 678 679 //------------------------------transform-------------------------------------- 680 // Return a node which computes the same function as this node, but 681 // in a faster or cheaper fashion. 682 Node *PhaseGVN::transform_no_reclaim(Node *n) { 683 NOT_PRODUCT( set_transforms(); ) 684 685 // Apply the Ideal call in a loop until it no longer applies 686 Node* k = n; 687 Node* i = apply_ideal(k, /*can_reshape=*/false); 688 NOT_PRODUCT(uint loop_count = 1;) 689 while (i != nullptr) { 690 assert(i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes" ); 691 k = i; 692 #ifdef ASSERT 693 if (loop_count >= K + C->live_nodes()) { 694 dump_infinite_loop_info(i, "PhaseGVN::transform_no_reclaim"); 695 } 696 #endif 697 i = apply_ideal(k, /*can_reshape=*/false); 698 NOT_PRODUCT(loop_count++;) 699 } 700 NOT_PRODUCT(if (loop_count != 0) { set_progress(); }) 701 702 // If brand new node, make space in type array. 703 ensure_type_or_null(k); 704 705 // Since I just called 'Value' to compute the set of run-time values 706 // for this Node, and 'Value' is non-local (and therefore expensive) I'll 707 // cache Value. Later requests for the local phase->type of this Node can 708 // use the cached Value instead of suffering with 'bottom_type'. 
709 const Type* t = k->Value(this); // Get runtime Value set 710 assert(t != nullptr, "value sanity"); 711 if (type_or_null(k) != t) { 712 #ifndef PRODUCT 713 // Do not count initial visit to node as a transformation 714 if (type_or_null(k) == nullptr) { 715 inc_new_values(); 716 set_progress(); 717 } 718 #endif 719 set_type(k, t); 720 // If k is a TypeNode, capture any more-precise type permanently into Node 721 k->raise_bottom_type(t); 722 } 723 724 if (t->singleton() && !k->is_Con()) { 725 NOT_PRODUCT(set_progress();) 726 return makecon(t); // Turn into a constant 727 } 728 729 // Now check for Identities 730 i = k->Identity(this); // Look for a nearby replacement 731 if (i != k) { // Found? Return replacement! 732 NOT_PRODUCT(set_progress();) 733 return i; 734 } 735 736 // Global Value Numbering 737 i = hash_find_insert(k); // Insert if new 738 if (i && (i != k)) { 739 // Return the pre-existing node 740 NOT_PRODUCT(set_progress();) 741 return i; 742 } 743 744 // Return Idealized original 745 return k; 746 } 747 748 bool PhaseGVN::is_dominator_helper(Node *d, Node *n, bool linear_only) { 749 if (d->is_top() || (d->is_Proj() && d->in(0)->is_top())) { 750 return false; 751 } 752 if (n->is_top() || (n->is_Proj() && n->in(0)->is_top())) { 753 return false; 754 } 755 assert(d->is_CFG() && n->is_CFG(), "must have CFG nodes"); 756 int i = 0; 757 while (d != n) { 758 n = IfNode::up_one_dom(n, linear_only); 759 i++; 760 if (n == nullptr || i >= 100) { 761 return false; 762 } 763 } 764 return true; 765 } 766 767 #ifdef ASSERT 768 //------------------------------dead_loop_check-------------------------------- 769 // Check for a simple dead loop when a data node references itself directly 770 // or through an other data node excluding cons and phis. 771 void PhaseGVN::dead_loop_check( Node *n ) { 772 // Phi may reference itself in a loop 773 if (n != nullptr && !n->is_dead_loop_safe() && !n->is_CFG()) { 774 // Do 2 levels check and only data inputs. 775 bool no_dead_loop = true; 776 uint cnt = n->req(); 777 for (uint i = 1; i < cnt && no_dead_loop; i++) { 778 Node *in = n->in(i); 779 if (in == n) { 780 no_dead_loop = false; 781 } else if (in != nullptr && !in->is_dead_loop_safe()) { 782 uint icnt = in->req(); 783 for (uint j = 1; j < icnt && no_dead_loop; j++) { 784 if (in->in(j) == n || in->in(j) == in) 785 no_dead_loop = false; 786 } 787 } 788 } 789 if (!no_dead_loop) n->dump_bfs(100,0,"#"); 790 assert(no_dead_loop, "dead loop detected"); 791 } 792 } 793 794 795 /** 796 * Dumps information that can help to debug the problem. A debug 797 * build fails with an assert. 
798 */ 799 void PhaseGVN::dump_infinite_loop_info(Node* n, const char* where) { 800 n->dump(4); 801 assert(false, "infinite loop in %s", where); 802 } 803 #endif 804 805 //============================================================================= 806 //------------------------------PhaseIterGVN----------------------------------- 807 // Initialize with previous PhaseIterGVN info; used by PhaseCCP 808 PhaseIterGVN::PhaseIterGVN(PhaseIterGVN* igvn) : _delay_transform(igvn->_delay_transform), 809 _worklist(*C->igvn_worklist()) 810 { 811 _iterGVN = true; 812 assert(&_worklist == &igvn->_worklist, "sanity"); 813 } 814 815 //------------------------------PhaseIterGVN----------------------------------- 816 // Initialize with previous PhaseGVN info from Parser 817 PhaseIterGVN::PhaseIterGVN(PhaseGVN* gvn) : _delay_transform(false), 818 _worklist(*C->igvn_worklist()) 819 { 820 _iterGVN = true; 821 uint max; 822 823 // Dead nodes in the hash table inherited from GVN were not treated as 824 // roots during def-use info creation; hence they represent an invisible 825 // use. Clear them out. 826 max = _table.size(); 827 for( uint i = 0; i < max; ++i ) { 828 Node *n = _table.at(i); 829 if(n != nullptr && n != _table.sentinel() && n->outcnt() == 0) { 830 if( n->is_top() ) continue; 831 // If remove_useless_nodes() has run, we expect no such nodes left. 832 assert(false, "remove_useless_nodes missed this node"); 833 hash_delete(n); 834 } 835 } 836 837 // Any Phis or Regions on the worklist probably had uses that could not 838 // make more progress because the uses were made while the Phis and Regions 839 // were in half-built states. Put all uses of Phis and Regions on worklist. 840 max = _worklist.size(); 841 for( uint j = 0; j < max; j++ ) { 842 Node *n = _worklist.at(j); 843 uint uop = n->Opcode(); 844 if( uop == Op_Phi || uop == Op_Region || 845 n->is_Type() || 846 n->is_Mem() ) 847 add_users_to_worklist(n); 848 } 849 } 850 851 void PhaseIterGVN::shuffle_worklist() { 852 if (_worklist.size() < 2) return; 853 for (uint i = _worklist.size() - 1; i >= 1; i--) { 854 uint j = C->random() % (i + 1); 855 swap(_worklist.adr()[i], _worklist.adr()[j]); 856 } 857 } 858 859 #ifndef PRODUCT 860 void PhaseIterGVN::verify_step(Node* n) { 861 if (is_verify_def_use()) { 862 ResourceMark rm; 863 VectorSet visited; 864 Node_List worklist; 865 866 _verify_window[_verify_counter % _verify_window_size] = n; 867 ++_verify_counter; 868 if (C->unique() < 1000 || 0 == _verify_counter % (C->unique() < 10000 ? 10 : 100)) { 869 ++_verify_full_passes; 870 worklist.push(C->root()); 871 Node::verify(-1, visited, worklist); 872 return; 873 } 874 for (int i = 0; i < _verify_window_size; i++) { 875 Node* n = _verify_window[i]; 876 if (n == nullptr) { 877 continue; 878 } 879 if (n->in(0) == NodeSentinel) { // xform_idom 880 _verify_window[i] = n->in(1); 881 --i; 882 continue; 883 } 884 // Typical fanout is 1-2, so this call visits about 6 nodes. 
885 if (!visited.test_set(n->_idx)) { 886 worklist.push(n); 887 } 888 } 889 Node::verify(4, visited, worklist); 890 } 891 } 892 893 void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype) { 894 if (TraceIterativeGVN) { 895 uint wlsize = _worklist.size(); 896 const Type* newtype = type_or_null(n); 897 if (nn != n) { 898 // print old node 899 tty->print("< "); 900 if (oldtype != newtype && oldtype != nullptr) { 901 oldtype->dump(); 902 } 903 do { tty->print("\t"); } while (tty->position() < 16); 904 tty->print("<"); 905 n->dump(); 906 } 907 if (oldtype != newtype || nn != n) { 908 // print new node and/or new type 909 if (oldtype == nullptr) { 910 tty->print("* "); 911 } else if (nn != n) { 912 tty->print("> "); 913 } else { 914 tty->print("= "); 915 } 916 if (newtype == nullptr) { 917 tty->print("null"); 918 } else { 919 newtype->dump(); 920 } 921 do { tty->print("\t"); } while (tty->position() < 16); 922 nn->dump(); 923 } 924 if (Verbose && wlsize < _worklist.size()) { 925 tty->print(" Push {"); 926 while (wlsize != _worklist.size()) { 927 Node* pushed = _worklist.at(wlsize++); 928 tty->print(" %d", pushed->_idx); 929 } 930 tty->print_cr(" }"); 931 } 932 if (nn != n) { 933 // ignore n, it might be subsumed 934 verify_step((Node*) nullptr); 935 } 936 } 937 } 938 939 void PhaseIterGVN::init_verifyPhaseIterGVN() { 940 _verify_counter = 0; 941 _verify_full_passes = 0; 942 for (int i = 0; i < _verify_window_size; i++) { 943 _verify_window[i] = nullptr; 944 } 945 #ifdef ASSERT 946 // Verify that all modified nodes are on _worklist 947 Unique_Node_List* modified_list = C->modified_nodes(); 948 while (modified_list != nullptr && modified_list->size()) { 949 Node* n = modified_list->pop(); 950 if (!n->is_Con() && !_worklist.member(n)) { 951 n->dump(); 952 fatal("modified node is not on IGVN._worklist"); 953 } 954 } 955 #endif 956 } 957 958 void PhaseIterGVN::verify_PhaseIterGVN() { 959 #ifdef ASSERT 960 // Verify nodes with changed inputs. 961 Unique_Node_List* modified_list = C->modified_nodes(); 962 while (modified_list != nullptr && modified_list->size()) { 963 Node* n = modified_list->pop(); 964 if (!n->is_Con()) { // skip Con nodes 965 n->dump(); 966 fatal("modified node was not processed by IGVN.transform_old()"); 967 } 968 } 969 #endif 970 971 C->verify_graph_edges(); 972 if (is_verify_def_use() && PrintOpto) { 973 if (_verify_counter == _verify_full_passes) { 974 tty->print_cr("VerifyIterativeGVN: %d transforms and verify passes", 975 (int) _verify_full_passes); 976 } else { 977 tty->print_cr("VerifyIterativeGVN: %d transforms, %d full verify passes", 978 (int) _verify_counter, (int) _verify_full_passes); 979 } 980 } 981 982 #ifdef ASSERT 983 if (modified_list != nullptr) { 984 while (modified_list->size() > 0) { 985 Node* n = modified_list->pop(); 986 n->dump(); 987 assert(false, "VerifyIterativeGVN: new modified node was added"); 988 } 989 } 990 991 verify_optimize(); 992 #endif 993 } 994 #endif /* PRODUCT */ 995 996 #ifdef ASSERT 997 /** 998 * Dumps information that can help to debug the problem. A debug 999 * build fails with an assert. 1000 */ 1001 void PhaseIterGVN::dump_infinite_loop_info(Node* n, const char* where) { 1002 n->dump(4); 1003 _worklist.dump(); 1004 assert(false, "infinite loop in %s", where); 1005 } 1006 1007 /** 1008 * Prints out information about IGVN if the 'verbose' option is used. 
1009 */ 1010 void PhaseIterGVN::trace_PhaseIterGVN_verbose(Node* n, int num_processed) { 1011 if (TraceIterativeGVN && Verbose) { 1012 tty->print(" Pop "); 1013 n->dump(); 1014 if ((num_processed % 100) == 0) { 1015 _worklist.print_set(); 1016 } 1017 } 1018 } 1019 #endif /* ASSERT */ 1020 1021 void PhaseIterGVN::optimize() { 1022 DEBUG_ONLY(uint num_processed = 0;) 1023 NOT_PRODUCT(init_verifyPhaseIterGVN();) 1024 if (StressIGVN) { 1025 shuffle_worklist(); 1026 } 1027 1028 uint loop_count = 0; 1029 // Pull from worklist and transform the node. If the node has changed, 1030 // update edge info and put uses on worklist. 1031 while(_worklist.size()) { 1032 if (C->check_node_count(NodeLimitFudgeFactor * 2, "Out of nodes")) { 1033 return; 1034 } 1035 Node* n = _worklist.pop(); 1036 if (loop_count >= K * C->live_nodes()) { 1037 DEBUG_ONLY(dump_infinite_loop_info(n, "PhaseIterGVN::optimize");) 1038 C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize"); 1039 return; 1040 } 1041 DEBUG_ONLY(trace_PhaseIterGVN_verbose(n, num_processed++);) 1042 if (n->outcnt() != 0) { 1043 NOT_PRODUCT(const Type* oldtype = type_or_null(n)); 1044 // Do the transformation 1045 Node* nn = transform_old(n); 1046 NOT_PRODUCT(trace_PhaseIterGVN(n, nn, oldtype);) 1047 } else if (!n->is_top()) { 1048 remove_dead_node(n); 1049 } 1050 loop_count++; 1051 } 1052 NOT_PRODUCT(verify_PhaseIterGVN();) 1053 } 1054 1055 #ifdef ASSERT 1056 void PhaseIterGVN::verify_optimize() { 1057 if (is_verify_Value()) { 1058 ResourceMark rm; 1059 Unique_Node_List worklist; 1060 bool failure = false; 1061 // BFS all nodes, starting at root 1062 worklist.push(C->root()); 1063 for (uint j = 0; j < worklist.size(); ++j) { 1064 Node* n = worklist.at(j); 1065 failure |= verify_node_value(n); 1066 // traverse all inputs and outputs 1067 for (uint i = 0; i < n->req(); i++) { 1068 if (n->in(i) != nullptr) { 1069 worklist.push(n->in(i)); 1070 } 1071 } 1072 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1073 worklist.push(n->fast_out(i)); 1074 } 1075 } 1076 // If we get this assert, check why the reported nodes were not processed again in IGVN. 1077 // We should either make sure that these nodes are properly added back to the IGVN worklist 1078 // in PhaseIterGVN::add_users_to_worklist to update them again or add an exception 1079 // in the verification code above if that is not possible for some reason (like Load nodes). 1080 assert(!failure, "Missed optimization opportunity in PhaseIterGVN"); 1081 } 1082 } 1083 1084 // Check that type(n) == n->Value(), return true if we have a failure. 1085 // We have a list of exceptions, see detailed comments in code. 1086 // (1) Integer "widen" changes, but the range is the same. 1087 // (2) LoadNode performs deep traversals. Load is not notified for changes far away. 1088 // (3) CmpPNode performs deep traversals if it compares oopptr. CmpP is not notified for changes far away. 1089 bool PhaseIterGVN::verify_node_value(Node* n) { 1090 // If we assert inside type(n), because the type is still a null, then maybe 1091 // the node never went through gvn.transform, which would be a bug. 1092 const Type* told = type(n); 1093 const Type* tnew = n->Value(this); 1094 if (told == tnew) { 1095 return false; 1096 } 1097 // Exception (1) 1098 // Integer "widen" changes, but range is the same. 
1099 if (told->isa_integer(tnew->basic_type()) != nullptr) { // both either int or long 1100 const TypeInteger* t0 = told->is_integer(tnew->basic_type()); 1101 const TypeInteger* t1 = tnew->is_integer(tnew->basic_type()); 1102 if (t0->lo_as_long() == t1->lo_as_long() && 1103 t0->hi_as_long() == t1->hi_as_long()) { 1104 return false; // ignore integer widen 1105 } 1106 } 1107 // Exception (2) 1108 // LoadNode performs deep traversals. Load is not notified for changes far away. 1109 if (n->is_Load() && !told->singleton()) { 1110 // MemNode::can_see_stored_value looks up through many memory nodes, 1111 // which means we would need to notify modifications from far up in 1112 // the inputs all the way down to the LoadNode. We don't do that. 1113 return false; 1114 } 1115 // Exception (3) 1116 // CmpPNode performs deep traversals if it compares oopptr. CmpP is not notified for changes far away. 1117 if (n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) { 1118 // SubNode::Value 1119 // CmpPNode::sub 1120 // MemNode::detect_ptr_independence 1121 // MemNode::all_controls_dominate 1122 // We find all controls of a pointer load, and see if they dominate the control of 1123 // an allocation. If they all dominate, we know the allocation is after (independent) 1124 // of the pointer load, and we can say the pointers are different. For this we call 1125 // n->dominates(sub, nlist) to check if controls n of the pointer load dominate the 1126 // control sub of the allocation. The problems is that sometimes dominates answers 1127 // false conservatively, and later it can determine that it is indeed true. Loops with 1128 // Region heads can lead to giving up, whereas LoopNodes can be skipped easier, and 1129 // so the traversal becomes more powerful. This is difficult to remidy, we would have 1130 // to notify the CmpP of CFG updates. Luckily, we recompute CmpP::Value during CCP 1131 // after loop-opts, so that should take care of many of these cases. 1132 return false; 1133 } 1134 tty->cr(); 1135 tty->print_cr("Missed Value optimization:"); 1136 n->dump_bfs(1, 0, ""); 1137 tty->print_cr("Current type:"); 1138 told->dump_on(tty); 1139 tty->cr(); 1140 tty->print_cr("Optimized type:"); 1141 tnew->dump_on(tty); 1142 tty->cr(); 1143 return true; 1144 } 1145 #endif 1146 1147 /** 1148 * Register a new node with the optimizer. Update the types array, the def-use 1149 * info. Put on worklist. 1150 */ 1151 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) { 1152 set_type_bottom(n); 1153 _worklist.push(n); 1154 if (orig != nullptr) C->copy_node_notes_to(n, orig); 1155 return n; 1156 } 1157 1158 //------------------------------transform-------------------------------------- 1159 // Non-recursive: idealize Node 'n' with respect to its inputs and its value 1160 Node *PhaseIterGVN::transform( Node *n ) { 1161 // If brand new node, make space in type array, and give it a type. 
1162 ensure_type_or_null(n); 1163 if (type_or_null(n) == nullptr) { 1164 set_type_bottom(n); 1165 } 1166 1167 if (_delay_transform) { 1168 // Add the node to the worklist but don't optimize for now 1169 _worklist.push(n); 1170 return n; 1171 } 1172 1173 return transform_old(n); 1174 } 1175 1176 Node *PhaseIterGVN::transform_old(Node* n) { 1177 NOT_PRODUCT(set_transforms()); 1178 // Remove 'n' from hash table in case it gets modified 1179 _table.hash_delete(n); 1180 #ifdef ASSERT 1181 if (is_verify_def_use()) { 1182 assert(!_table.find_index(n->_idx), "found duplicate entry in table"); 1183 } 1184 #endif 1185 1186 // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool 1187 if (n->is_Cmp()) { 1188 add_users_to_worklist(n); 1189 } 1190 1191 // Apply the Ideal call in a loop until it no longer applies 1192 Node* k = n; 1193 DEBUG_ONLY(dead_loop_check(k);) 1194 DEBUG_ONLY(bool is_new = (k->outcnt() == 0);) 1195 C->remove_modified_node(k); 1196 Node* i = apply_ideal(k, /*can_reshape=*/true); 1197 assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes"); 1198 #ifndef PRODUCT 1199 verify_step(k); 1200 #endif 1201 1202 DEBUG_ONLY(uint loop_count = 1;) 1203 while (i != nullptr) { 1204 #ifdef ASSERT 1205 if (loop_count >= K + C->live_nodes()) { 1206 dump_infinite_loop_info(i, "PhaseIterGVN::transform_old"); 1207 } 1208 #endif 1209 assert((i->_idx >= k->_idx) || i->is_top(), "Idealize should return new nodes, use Identity to return old nodes"); 1210 // Made a change; put users of original Node on worklist 1211 add_users_to_worklist(k); 1212 // Replacing root of transform tree? 1213 if (k != i) { 1214 // Make users of old Node now use new. 1215 subsume_node(k, i); 1216 k = i; 1217 } 1218 DEBUG_ONLY(dead_loop_check(k);) 1219 // Try idealizing again 1220 DEBUG_ONLY(is_new = (k->outcnt() == 0);) 1221 C->remove_modified_node(k); 1222 i = apply_ideal(k, /*can_reshape=*/true); 1223 assert(i != k || is_new || (i->outcnt() > 0), "don't return dead nodes"); 1224 #ifndef PRODUCT 1225 verify_step(k); 1226 #endif 1227 DEBUG_ONLY(loop_count++;) 1228 } 1229 1230 // If brand new node, make space in type array. 1231 ensure_type_or_null(k); 1232 1233 // See what kind of values 'k' takes on at runtime 1234 const Type* t = k->Value(this); 1235 assert(t != nullptr, "value sanity"); 1236 1237 // Since I just called 'Value' to compute the set of run-time values 1238 // for this Node, and 'Value' is non-local (and therefore expensive) I'll 1239 // cache Value. Later requests for the local phase->type of this Node can 1240 // use the cached Value instead of suffering with 'bottom_type'. 1241 if (type_or_null(k) != t) { 1242 #ifndef PRODUCT 1243 inc_new_values(); 1244 set_progress(); 1245 #endif 1246 set_type(k, t); 1247 // If k is a TypeNode, capture any more-precise type permanently into Node 1248 k->raise_bottom_type(t); 1249 // Move users of node to worklist 1250 add_users_to_worklist(k); 1251 } 1252 // If 'k' computes a constant, replace it with a constant 1253 if (t->singleton() && !k->is_Con()) { 1254 NOT_PRODUCT(set_progress();) 1255 Node* con = makecon(t); // Make a constant 1256 add_users_to_worklist(k); 1257 subsume_node(k, con); // Everybody using k now uses con 1258 return con; 1259 } 1260 1261 // Now check for Identities 1262 i = k->Identity(this); // Look for a nearby replacement 1263 if (i != k) { // Found? Return replacement! 
1264 NOT_PRODUCT(set_progress();) 1265 add_users_to_worklist(k); 1266 subsume_node(k, i); // Everybody using k now uses i 1267 return i; 1268 } 1269 1270 // Global Value Numbering 1271 i = hash_find_insert(k); // Check for pre-existing node 1272 if (i && (i != k)) { 1273 // Return the pre-existing node if it isn't dead 1274 NOT_PRODUCT(set_progress();) 1275 add_users_to_worklist(k); 1276 subsume_node(k, i); // Everybody using k now uses i 1277 return i; 1278 } 1279 1280 // Return Idealized original 1281 return k; 1282 } 1283 1284 //---------------------------------saturate------------------------------------ 1285 const Type* PhaseIterGVN::saturate(const Type* new_type, const Type* old_type, 1286 const Type* limit_type) const { 1287 return new_type->narrow(old_type); 1288 } 1289 1290 //------------------------------remove_globally_dead_node---------------------- 1291 // Kill a globally dead Node. All uses are also globally dead and are 1292 // aggressively trimmed. 1293 void PhaseIterGVN::remove_globally_dead_node( Node *dead ) { 1294 enum DeleteProgress { 1295 PROCESS_INPUTS, 1296 PROCESS_OUTPUTS 1297 }; 1298 ResourceMark rm; 1299 Node_Stack stack(32); 1300 stack.push(dead, PROCESS_INPUTS); 1301 1302 while (stack.is_nonempty()) { 1303 dead = stack.node(); 1304 if (dead->Opcode() == Op_SafePoint) { 1305 dead->as_SafePoint()->disconnect_from_root(this); 1306 } 1307 uint progress_state = stack.index(); 1308 assert(dead != C->root(), "killing root, eh?"); 1309 assert(!dead->is_top(), "add check for top when pushing"); 1310 NOT_PRODUCT( set_progress(); ) 1311 if (progress_state == PROCESS_INPUTS) { 1312 // After following inputs, continue to outputs 1313 stack.set_index(PROCESS_OUTPUTS); 1314 if (!dead->is_Con()) { // Don't kill cons but uses 1315 bool recurse = false; 1316 // Remove from hash table 1317 _table.hash_delete( dead ); 1318 // Smash all inputs to 'dead', isolating him completely 1319 for (uint i = 0; i < dead->req(); i++) { 1320 Node *in = dead->in(i); 1321 if (in != nullptr && in != C->top()) { // Points to something? 1322 int nrep = dead->replace_edge(in, nullptr, this); // Kill edges 1323 assert((nrep > 0), "sanity"); 1324 if (in->outcnt() == 0) { // Made input go dead? 1325 stack.push(in, PROCESS_INPUTS); // Recursively remove 1326 recurse = true; 1327 } else if (in->outcnt() == 1 && 1328 in->has_special_unique_user()) { 1329 _worklist.push(in->unique_out()); 1330 } else if (in->outcnt() <= 2 && dead->is_Phi()) { 1331 if (in->Opcode() == Op_Region) { 1332 _worklist.push(in); 1333 } else if (in->is_Store()) { 1334 DUIterator_Fast imax, i = in->fast_outs(imax); 1335 _worklist.push(in->fast_out(i)); 1336 i++; 1337 if (in->outcnt() == 2) { 1338 _worklist.push(in->fast_out(i)); 1339 i++; 1340 } 1341 assert(!(i < imax), "sanity"); 1342 } 1343 } else { 1344 BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(this, in); 1345 } 1346 if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory && 1347 in->is_Proj() && in->in(0) != nullptr && in->in(0)->is_Initialize()) { 1348 // A Load that directly follows an InitializeNode is 1349 // going away. The Stores that follow are candidates 1350 // again to be captured by the InitializeNode. 
1351 for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) { 1352 Node *n = in->fast_out(j); 1353 if (n->is_Store()) { 1354 _worklist.push(n); 1355 } 1356 } 1357 } 1358 } // if (in != nullptr && in != C->top()) 1359 } // for (uint i = 0; i < dead->req(); i++) 1360 if (recurse) { 1361 continue; 1362 } 1363 } // if (!dead->is_Con()) 1364 } // if (progress_state == PROCESS_INPUTS) 1365 1366 // Aggressively kill globally dead uses 1367 // (Rather than pushing all the outs at once, we push one at a time, 1368 // plus the parent to resume later, because of the indefinite number 1369 // of edge deletions per loop trip.) 1370 if (dead->outcnt() > 0) { 1371 // Recursively remove output edges 1372 stack.push(dead->raw_out(0), PROCESS_INPUTS); 1373 } else { 1374 // Finished disconnecting all input and output edges. 1375 stack.pop(); 1376 // Remove dead node from iterative worklist 1377 _worklist.remove(dead); 1378 C->remove_useless_node(dead); 1379 } 1380 } // while (stack.is_nonempty()) 1381 } 1382 1383 //------------------------------subsume_node----------------------------------- 1384 // Remove users from node 'old' and add them to node 'nn'. 1385 void PhaseIterGVN::subsume_node( Node *old, Node *nn ) { 1386 if (old->Opcode() == Op_SafePoint) { 1387 old->as_SafePoint()->disconnect_from_root(this); 1388 } 1389 assert( old != hash_find(old), "should already been removed" ); 1390 assert( old != C->top(), "cannot subsume top node"); 1391 // Copy debug or profile information to the new version: 1392 C->copy_node_notes_to(nn, old); 1393 // Move users of node 'old' to node 'nn' 1394 for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) { 1395 Node* use = old->last_out(i); // for each use... 1396 // use might need re-hashing (but it won't if it's a new node) 1397 rehash_node_delayed(use); 1398 // Update use-def info as well 1399 // We remove all occurrences of old within use->in, 1400 // so as to avoid rehashing any node more than once. 1401 // The hash table probe swamps any outer loop overhead. 1402 uint num_edges = 0; 1403 for (uint jmax = use->len(), j = 0; j < jmax; j++) { 1404 if (use->in(j) == old) { 1405 use->set_req(j, nn); 1406 ++num_edges; 1407 } 1408 } 1409 i -= num_edges; // we deleted 1 or more copies of this edge 1410 } 1411 1412 // Search for instance field data PhiNodes in the same region pointing to the old 1413 // memory PhiNode and update their instance memory ids to point to the new node. 
1414 if (old->is_Phi() && old->as_Phi()->type()->has_memory() && old->in(0) != nullptr) { 1415 Node* region = old->in(0); 1416 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { 1417 PhiNode* phi = region->fast_out(i)->isa_Phi(); 1418 if (phi != nullptr && phi->inst_mem_id() == (int)old->_idx) { 1419 phi->set_inst_mem_id((int)nn->_idx); 1420 } 1421 } 1422 } 1423 1424 // Smash all inputs to 'old', isolating him completely 1425 Node *temp = new Node(1); 1426 temp->init_req(0,nn); // Add a use to nn to prevent him from dying 1427 remove_dead_node( old ); 1428 temp->del_req(0); // Yank bogus edge 1429 if (nn != nullptr && nn->outcnt() == 0) { 1430 _worklist.push(nn); 1431 } 1432 #ifndef PRODUCT 1433 if (is_verify_def_use()) { 1434 for ( int i = 0; i < _verify_window_size; i++ ) { 1435 if ( _verify_window[i] == old ) 1436 _verify_window[i] = nn; 1437 } 1438 } 1439 #endif 1440 temp->destruct(this); // reuse the _idx of this little guy 1441 } 1442 1443 void PhaseIterGVN::replace_in_uses(Node* n, Node* m) { 1444 assert(n != nullptr, "sanity"); 1445 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1446 Node* u = n->fast_out(i); 1447 if (u != n) { 1448 rehash_node_delayed(u); 1449 int nb = u->replace_edge(n, m); 1450 --i, imax -= nb; 1451 } 1452 } 1453 assert(n->outcnt() == 0, "all uses must be deleted"); 1454 } 1455 1456 //------------------------------add_users_to_worklist-------------------------- 1457 void PhaseIterGVN::add_users_to_worklist0( Node *n ) { 1458 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1459 _worklist.push(n->fast_out(i)); // Push on worklist 1460 } 1461 } 1462 1463 // Return counted loop Phi if as a counted loop exit condition, cmp 1464 // compares the induction variable with n 1465 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) { 1466 for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) { 1467 Node* bol = cmp->fast_out(i); 1468 for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) { 1469 Node* iff = bol->fast_out(i2); 1470 if (iff->is_BaseCountedLoopEnd()) { 1471 BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd(); 1472 if (cle->limit() == n) { 1473 PhiNode* phi = cle->phi(); 1474 if (phi != nullptr) { 1475 return phi; 1476 } 1477 } 1478 } 1479 } 1480 } 1481 return nullptr; 1482 } 1483 1484 void PhaseIterGVN::add_users_to_worklist( Node *n ) { 1485 add_users_to_worklist0(n); 1486 1487 // Move users of node to worklist 1488 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1489 Node* use = n->fast_out(i); // Get use 1490 1491 if( use->is_Multi() || // Multi-definer? Push projs on worklist 1492 use->is_Store() ) // Enable store/load same address 1493 add_users_to_worklist0(use); 1494 1495 // If we changed the receiver type to a call, we need to revisit 1496 // the Catch following the call. It's looking for a non-null 1497 // receiver to know when to enable the regular fall-through path 1498 // in addition to the NullPtrException path. 
1499 if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) { 1500 Node* p = use->as_CallDynamicJava()->proj_out_or_null(TypeFunc::Control); 1501 if (p != nullptr) { 1502 add_users_to_worklist0(p); 1503 } 1504 } 1505 1506 uint use_op = use->Opcode(); 1507 if(use->is_Cmp()) { // Enable CMP/BOOL optimization 1508 add_users_to_worklist(use); // Put Bool on worklist 1509 if (use->outcnt() > 0) { 1510 Node* bol = use->raw_out(0); 1511 if (bol->outcnt() > 0) { 1512 Node* iff = bol->raw_out(0); 1513 if (iff->outcnt() == 2) { 1514 // Look for the 'is_x2logic' pattern: "x ? : 0 : 1" and put the 1515 // phi merging either 0 or 1 onto the worklist 1516 Node* ifproj0 = iff->raw_out(0); 1517 Node* ifproj1 = iff->raw_out(1); 1518 if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) { 1519 Node* region0 = ifproj0->raw_out(0); 1520 Node* region1 = ifproj1->raw_out(0); 1521 if( region0 == region1 ) 1522 add_users_to_worklist0(region0); 1523 } 1524 } 1525 } 1526 } 1527 if (use_op == Op_CmpI || use_op == Op_CmpL) { 1528 Node* phi = countedloop_phi_from_cmp(use->as_Cmp(), n); 1529 if (phi != nullptr) { 1530 // Input to the cmp of a loop exit check has changed, thus 1531 // the loop limit may have changed, which can then change the 1532 // range values of the trip-count Phi. 1533 _worklist.push(phi); 1534 } 1535 } 1536 if (use_op == Op_CmpI) { 1537 Node* cmp = use; 1538 Node* in1 = cmp->in(1); 1539 Node* in2 = cmp->in(2); 1540 // Notify CmpI / If pattern from CastIINode::Value (left pattern). 1541 // Must also notify if in1 is modified and possibly turns into X (right pattern). 1542 // 1543 // in1 in2 in1 in2 1544 // | | | | 1545 // +--- | --+ | | 1546 // | | | | | 1547 // CmpINode | CmpINode 1548 // | | | 1549 // BoolNode | BoolNode 1550 // | | OR | 1551 // IfNode | IfNode 1552 // | | | 1553 // IfProj | IfProj X 1554 // | | | | 1555 // CastIINode CastIINode 1556 // 1557 if (in1 != in2) { // if they are equal, the CmpI can fold them away 1558 if (in1 == n) { 1559 // in1 modified -> could turn into X -> do traversal based on right pattern. 1560 for (DUIterator_Fast i2max, i2 = cmp->fast_outs(i2max); i2 < i2max; i2++) { 1561 Node* bol = cmp->fast_out(i2); // For each Bool 1562 if (bol->is_Bool()) { 1563 for (DUIterator_Fast i3max, i3 = bol->fast_outs(i3max); i3 < i3max; i3++) { 1564 Node* iff = bol->fast_out(i3); // For each If 1565 if (iff->is_If()) { 1566 for (DUIterator_Fast i4max, i4 = iff->fast_outs(i4max); i4 < i4max; i4++) { 1567 Node* if_proj = iff->fast_out(i4); // For each IfProj 1568 assert(if_proj->is_IfProj(), "If only has IfTrue and IfFalse as outputs"); 1569 for (DUIterator_Fast i5max, i5 = if_proj->fast_outs(i5max); i5 < i5max; i5++) { 1570 Node* castii = if_proj->fast_out(i5); // For each CastII 1571 if (castii->is_CastII() && 1572 castii->as_CastII()->carry_dependency()) { 1573 _worklist.push(castii); 1574 } 1575 } 1576 } 1577 } 1578 } 1579 } 1580 } 1581 } else { 1582 // Only in2 modified -> can assume X == in2 (left pattern). 1583 assert(n == in2, "only in2 modified"); 1584 // Find all CastII with input in1. 1585 for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) { 1586 Node* castii = in1->fast_out(j); 1587 if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) { 1588 // Find If. 
                if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
                  Node* ifnode = castii->in(0)->in(0);
                  // Check that the If connects to the cmp
                  if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
                    _worklist.push(castii);
                  }
                }
              }
            }
          }
        }
      }
    }

    // Inline type nodes can have other inline types as users. If an input gets
    // updated, make sure that inline type users get a chance for optimization.
    if (use->is_InlineType()) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->is_InlineType())
          _worklist.push(u);
      }
    }
    // If changed Cast input, notify down for Phi and Sub - both do "uncast"
    // Patterns:
    // ConstraintCast+ -> Sub
    // ConstraintCast+ -> Phi
    if (use->is_ConstraintCast()) {
      auto push_phi_or_sub_uses_to_worklist = [&](Node* n){
        if (n->is_Phi() || n->is_Sub()) {
          _worklist.push(n);
        }
      };
      ConstraintCastNode::visit_uncasted_uses(use, push_phi_or_sub_uses_to_worklist);
    }
    // If changed LShift inputs, check RShift users for useless sign-ext
    if( use_op == Op_LShiftI ) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->Opcode() == Op_RShiftI)
          _worklist.push(u);
      }
    }
    // If changed LShift inputs, check And users for shift and mask (And) operation
    if (use_op == Op_LShiftI || use_op == Op_LShiftL) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->Opcode() == Op_AndI || u->Opcode() == Op_AndL) {
          _worklist.push(u);
        }
      }
    }
    // If changed AddI/SubI inputs, check CmpU for range check optimization.
    if (use_op == Op_AddI || use_op == Op_SubI) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->is_Cmp() && (u->Opcode() == Op_CmpU)) {
          _worklist.push(u);
        }
      }
    }
    // If changed AddP inputs, check Stores for loop invariant
    if( use_op == Op_AddP ) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->is_Mem())
          _worklist.push(u);
      }
    }
    // If changed initialization activity, check dependent Stores
    if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
      InitializeNode* init = use->as_Allocate()->initialization();
      if (init != nullptr) {
        Node* imem = init->proj_out_or_null(TypeFunc::Memory);
        if (imem != nullptr) add_users_to_worklist0(imem);
      }
    }
    // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
    // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
    // to guarantee the change is not missed.
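    // As with the call case above, the shape is roughly
    //   AllocateArray -> Proj(Control) -> Catch -> CatchProj(fall-through) / CatchProj(exception),
    // so pushing the users of the control projection re-enqueues the Catch.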
    if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
      Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
      if (p != nullptr) {
        add_users_to_worklist0(p);
      }
    }

    if (use_op == Op_Initialize) {
      Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
      if (imem != nullptr) add_users_to_worklist0(imem);
    }
    if (use_op == Op_CastP2X) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->Opcode() == Op_AndX) {
          _worklist.push(u);
        }
      }
    }
    // Loading the java mirror from a Klass requires two loads and the type
    // of the mirror load depends on the type of 'n'. See LoadNode::Value().
    //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    bool has_load_barrier_nodes = bs->has_load_barrier_nodes();

    if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        const Type* ut = u->bottom_type();
        if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
          if (has_load_barrier_nodes) {
            // Search for load barriers behind the load
            for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
              Node* b = u->fast_out(i3);
              if (bs->is_gc_barrier_node(b)) {
                _worklist.push(b);
              }
            }
          }
          _worklist.push(u);
        }
      }
    }

    // Give CallStaticJavaNode::remove_useless_allocation a chance to run
    if (use->is_Region()) {
      Node* c = use;
      do {
        c = c->unique_ctrl_out_or_null();
      } while (c != nullptr && c->is_Region());
      if (c != nullptr && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
        _worklist.push(c);
      }
    }
    if (use->Opcode() == Op_OpaqueZeroTripGuard) {
      assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
      if (use->outcnt() == 1) {
        Node* cmp = use->unique_out();
        _worklist.push(cmp);
      }
    }
  }
}

/**
 * Remove the speculative part of all types that we know of
 */
void PhaseIterGVN::remove_speculative_types() {
  assert(UseTypeSpeculation, "speculation is off");
  for (uint i = 0; i < _types.Size(); i++) {
    const Type* t = _types.fast_lookup(i);
    if (t != nullptr) {
      _types.map(i, t->remove_speculative());
    }
  }
  _table.check_no_speculative_types();
}

// Check if the type of a divisor of a Div or Mod node includes zero.
bool PhaseIterGVN::no_dependent_zero_check(Node* n) const {
  switch (n->Opcode()) {
    case Op_DivI:
    case Op_ModI: {
      // Type of divisor includes 0?
      if (type(n->in(2)) == Type::TOP) {
        // 'n' is dead. Treat as if zero check is still there to avoid any further optimizations.
        return false;
      }
      const TypeInt* type_divisor = type(n->in(2))->is_int();
      return (type_divisor->_hi < 0 || type_divisor->_lo > 0);
    }
    case Op_DivL:
    case Op_ModL: {
      // Type of divisor includes 0?
      if (type(n->in(2)) == Type::TOP) {
        // 'n' is dead. Treat as if zero check is still there to avoid any further optimizations.
        return false;
      }
      const TypeLong* type_divisor = type(n->in(2))->is_long();
      return (type_divisor->_hi < 0 || type_divisor->_lo > 0);
    }
  }
  return true;
}

//=============================================================================
#ifndef PRODUCT
uint PhaseCCP::_total_invokes = 0;
uint PhaseCCP::_total_constants = 0;
#endif
//------------------------------PhaseCCP---------------------------------------
// Conditional Constant Propagation, a la Wegman & Zadeck
PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
  NOT_PRODUCT( clear_constants(); )
  assert( _worklist.size() == 0, "" );
  analyze();
}

#ifndef PRODUCT
//------------------------------~PhaseCCP--------------------------------------
PhaseCCP::~PhaseCCP() {
  inc_invokes();
  _total_constants += count_constants();
}
#endif


#ifdef ASSERT
void PhaseCCP::verify_type(Node* n, const Type* tnew, const Type* told) {
  if (tnew->meet(told) != tnew->remove_speculative()) {
    n->dump(1);
    tty->print("told = "); told->dump(); tty->cr();
    tty->print("tnew = "); tnew->dump(); tty->cr();
    fatal("Not monotonic");
  }
  assert(!told->isa_int() || !tnew->isa_int() || told->is_int()->_widen <= tnew->is_int()->_widen, "widen increases");
  assert(!told->isa_long() || !tnew->isa_long() || told->is_long()->_widen <= tnew->is_long()->_widen, "widen increases");
}
#endif //ASSERT

// In this analysis, all types are initially set to TOP. We iteratively call Value() on all nodes of the graph until
// we reach a fixed-point (i.e. no types change anymore). We start with a list that only contains the root node. Each time
// a new type is set, we push all uses of that node back to the worklist (in some cases, we also push grandchildren
// or nodes even further down back to the worklist because their type could change as a result of the current type
// change).
void PhaseCCP::analyze() {
  // Initialize all types to TOP, optimistic analysis
  for (uint i = 0; i < C->unique(); i++) {
    _types.map(i, Type::TOP);
  }

  // CCP worklist is placed on a local arena, so that we can allow ResourceMarks on "Compile::current()->resource_arena()".
  // We also do not want to put the worklist on "Compile::current()->comp_arena()", as that one only gets de-allocated after
  // Compile is over. The local arena gets de-allocated at the end of its scope.
  ResourceArea local_arena(mtCompiler);
  Unique_Node_List worklist(&local_arena);
  DEBUG_ONLY(Unique_Node_List worklist_verify(&local_arena);)

  // Push root onto worklist
  worklist.push(C->root());

  assert(_root_and_safepoints.size() == 0, "must be empty (unused)");
  _root_and_safepoints.push(C->root());

  // Pull from worklist; compute new value; push changes out.
  // This loop is the meat of CCP.
  while (worklist.size() != 0) {
    Node* n = fetch_next_node(worklist);
    DEBUG_ONLY(worklist_verify.push(n);)
    if (n->is_SafePoint()) {
      // Make sure safepoints are processed by PhaseCCP::transform even if they are
      // not reachable from the bottom. Otherwise, infinite loops would be removed.
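      // These safepoints become additional traversal roots in PhaseCCP::transform() below; see the
      // comment there about loops whose exit test is proven to never succeed.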
      _root_and_safepoints.push(n);
    }
    const Type* new_type = n->Value(this);
    if (new_type != type(n)) {
      DEBUG_ONLY(verify_type(n, new_type, type(n));)
      dump_type_and_node(n, new_type);
      set_type(n, new_type);
      push_child_nodes_to_worklist(worklist, n);
    }
  }
  DEBUG_ONLY(verify_analyze(worklist_verify);)
}

#ifdef ASSERT
// For every node n on verify list, check if type(n) == n->Value()
// We have a list of exceptions, see comments in verify_node_value.
void PhaseCCP::verify_analyze(Unique_Node_List& worklist_verify) {
  bool failure = false;
  while (worklist_verify.size()) {
    Node* n = worklist_verify.pop();
    failure |= verify_node_value(n);
  }
  // If we get this assert, check why the reported nodes were not processed again in CCP.
  // We should either make sure that these nodes are properly added back to the CCP worklist
  // in PhaseCCP::push_child_nodes_to_worklist() to update their type or add an exception
  // in the verification code above if that is not possible for some reason (like Load nodes).
  assert(!failure, "Missed optimization opportunity in PhaseCCP");
}
#endif

// Fetch next node from worklist to be examined in this iteration.
Node* PhaseCCP::fetch_next_node(Unique_Node_List& worklist) {
  if (StressCCP) {
    return worklist.remove(C->random() % worklist.size());
  } else {
    return worklist.pop();
  }
}

#ifndef PRODUCT
void PhaseCCP::dump_type_and_node(const Node* n, const Type* t) {
  if (TracePhaseCCP) {
    t->dump();
    do {
      tty->print("\t");
    } while (tty->position() < 16);
    n->dump();
  }
}
#endif

// We need to propagate the type change of 'n' to all its uses. Depending on the kind of node, additional nodes
// (grandchildren or even further down) need to be revisited as their types could also be improved as a result
// of the new type of 'n'. Push these nodes to the worklist.
void PhaseCCP::push_child_nodes_to_worklist(Unique_Node_List& worklist, Node* n) const {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    push_if_not_bottom_type(worklist, use);
    push_more_uses(worklist, n, use);
  }
}

void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
  if (n->bottom_type() != type(n)) {
    worklist.push(n);
  }
}

// For some nodes, we need to propagate the type change to grandchildren or even further down.
// Add them back to the worklist.
void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
  push_phis(worklist, use);
  push_catch(worklist, use);
  push_cmpu(worklist, use);
  push_counted_loop_phi(worklist, parent, use);
  push_cast(worklist, use);
  push_loadp(worklist, use);
  push_and(worklist, parent, use);
  push_cast_ii(worklist, parent, use);
  push_opaque_zero_trip_guard(worklist, use);
}


// We must recheck Phis too if use is a Region.
void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
  if (use->is_Region()) {
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      push_if_not_bottom_type(worklist, use->fast_out(i));
    }
  }
}

// If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
// non-null receiver to know when to enable the regular fall-through path in addition to the NullPointerException path.
// Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {
  if (use->is_Call()) {
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* proj = use->fast_out(i);
      if (proj->is_Proj() && proj->as_Proj()->_con == TypeFunc::Control) {
        Node* catch_node = proj->find_out_with(Op_Catch);
        if (catch_node != nullptr) {
          worklist.push(catch_node);
        }
      }
    }
  }
}

// CmpU nodes can get their type information from two nodes up in the graph (instead of from the nodes immediately
// above). Make sure they are added to the worklist if nodes they depend on are updated since they could be missed
// and get wrong types otherwise.
void PhaseCCP::push_cmpu(Unique_Node_List& worklist, const Node* use) const {
  uint use_op = use->Opcode();
  if (use_op == Op_AddI || use_op == Op_SubI) {
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* cmpu = use->fast_out(i);
      if (cmpu->Opcode() == Op_CmpU) {
        // Got a CmpU which might need the new type information from node n.
        push_if_not_bottom_type(worklist, cmpu);
      }
    }
  }
}

// If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
// See PhiNode::Value().
void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
  uint use_op = use->Opcode();
  if (use_op == Op_CmpI || use_op == Op_CmpL) {
    PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
    if (phi != nullptr) {
      worklist.push(phi);
    }
  }
}

void PhaseCCP::push_cast(Unique_Node_List& worklist, const Node* use) {
  uint use_op = use->Opcode();
  if (use_op == Op_CastP2X) {
    for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
      Node* u = use->fast_out(i2);
      if (u->Opcode() == Op_AndX) {
        worklist.push(u);
      }
    }
  }
}

// Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
// See LoadNode::Value().
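// The shape being matched is the one already documented in PhaseIterGVN::add_users_to_worklist() above:
//   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))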
void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
  BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
  bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();

  if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* loadp = use->fast_out(i);
      const Type* ut = loadp->bottom_type();
      if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
        if (has_load_barrier_nodes) {
          // Search for load barriers behind the load
          push_load_barrier(worklist, barrier_set, loadp);
        }
        worklist.push(loadp);
      }
    }
  }
}

void PhaseCCP::push_load_barrier(Unique_Node_List& worklist, const BarrierSetC2* barrier_set, const Node* use) {
  for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
    Node* barrier_node = use->fast_out(i);
    if (barrier_set->is_gc_barrier_node(barrier_node)) {
      worklist.push(barrier_node);
    }
  }
}

// AndI/L::Value() optimizes patterns similar to (v << 2) & 3 to zero if they are bitwise disjoint.
// Add the AndI/L nodes back to the worklist to re-apply Value() in case the shift value changed.
// Pattern: parent -> LShift (use) -> ConstraintCast* -> And
void PhaseCCP::push_and(Unique_Node_List& worklist, const Node* parent, const Node* use) const {
  uint use_op = use->Opcode();
  if ((use_op == Op_LShiftI || use_op == Op_LShiftL)
      && use->in(2) == parent) { // is shift value (right-hand side of LShift)
    auto push_and_uses_to_worklist = [&](Node* n){
      uint opc = n->Opcode();
      if (opc == Op_AndI || opc == Op_AndL) {
        push_if_not_bottom_type(worklist, n);
      }
    };
    ConstraintCastNode::visit_uncasted_uses(use, push_and_uses_to_worklist);
  }
}

// CastII::Value() optimizes CmpI/If patterns if the right input of the CmpI has a constant type. If the CastII input is
// the same node as the left input into the CmpI node, the type of the CastII node can be improved accordingly. Add the
// CastII node back to the worklist to re-apply Value() to either not miss this optimization or to undo it because it
// cannot be applied anymore. We could have optimized the type of the CastII before but now the type of the right input
// of the CmpI (i.e. 'parent') is no longer constant. The type of the CastII must be widened in this case.
void PhaseCCP::push_cast_ii(Unique_Node_List& worklist, const Node* parent, const Node* use) const {
  if (use->Opcode() == Op_CmpI && use->in(2) == parent) {
    Node* other_cmp_input = use->in(1);
    for (DUIterator_Fast imax, i = other_cmp_input->fast_outs(imax); i < imax; i++) {
      Node* cast_ii = other_cmp_input->fast_out(i);
      if (cast_ii->is_CastII()) {
        push_if_not_bottom_type(worklist, cast_ii);
      }
    }
  }
}

void PhaseCCP::push_opaque_zero_trip_guard(Unique_Node_List& worklist, const Node* use) const {
  if (use->Opcode() == Op_OpaqueZeroTripGuard) {
    push_if_not_bottom_type(worklist, use->unique_out());
  }
}

//------------------------------do_transform-----------------------------------
// Top level driver for the recursive transformer
void PhaseCCP::do_transform() {
  // Correct leaves of new-space Nodes; they point to old-space.
  C->set_root( transform(C->root())->as_Root() );
  assert( C->top(),  "missing TOP node" );
  assert( C->root(), "missing root" );
}

//------------------------------transform--------------------------------------
// Given a Node in old-space, clone him into new-space.
// Convert any of his old-space children into new-space children.
Node *PhaseCCP::transform( Node *n ) {
  assert(n->is_Root(), "traversal must start at root");
  assert(_root_and_safepoints.member(n), "root (n) must be in list");

  ResourceMark rm;
  // Map: old node idx -> node after CCP (or nullptr if not yet transformed or useless).
  Node_List node_map;
  // Pre-allocate to avoid frequent realloc
  GrowableArray <Node *> transform_stack(C->live_nodes() >> 1);
  // track all visited nodes, so that we can remove the complement
  Unique_Node_List useful;

  // Initialize the traversal.
  // This CCP pass may prove that no exit test for a loop ever succeeds (i.e. the loop is infinite). In that case,
  // the logic below doesn't follow any path from Root to the loop body: there's at least one such path but it's proven
  // never taken (its type is TOP). As a consequence the node on the exit path that's input to Root (let's call it n) is
  // replaced by the top node and the inputs of that node n are not enqueued for further processing. If CCP only works
  // through the graph from Root, this causes the loop body to never be processed here even when it's not dead (that
  // is reachable from Root following its uses). To prevent that issue, transform() starts walking the graph from Root
  // and all safepoints.
  for (uint i = 0; i < _root_and_safepoints.size(); ++i) {
    Node* nn = _root_and_safepoints.at(i);
    Node* new_node = node_map[nn->_idx];
    assert(new_node == nullptr, "");
    new_node = transform_once(nn);    // Check for constant
    node_map.map(nn->_idx, new_node); // Flag as having been cloned
    transform_stack.push(new_node);   // Process children of cloned node
    useful.push(new_node);
  }

  while (transform_stack.is_nonempty()) {
    Node* clone = transform_stack.pop();
    uint cnt = clone->req();
    for( uint i = 0; i < cnt; i++ ) {            // For all inputs do
      Node *input = clone->in(i);
      if( input != nullptr ) {                   // Ignore nulls
        Node *new_input = node_map[input->_idx]; // Check for cloned input node
        if( new_input == nullptr ) {
          new_input = transform_once(input);     // Check for constant
          node_map.map( input->_idx, new_input );// Flag as having been cloned
          transform_stack.push(new_input);       // Process children of cloned node
          useful.push(new_input);
        }
        assert( new_input == clone->in(i), "insanity check");
      }
    }
  }

  // The above transformation might lead to subgraphs becoming unreachable from the
  // bottom while still being reachable from the top. As a result, nodes in that
  // subgraph are not transformed and their bottom types are not updated, leading to
  // an inconsistency between bottom_type() and type(). In rare cases, LoadNodes in
  // such a subgraph might be re-enqueued for IGVN indefinitely by MemNode::Ideal_common
  // because their address type is inconsistent. Therefore, we aggressively remove
  // all useless nodes here even before PhaseIdealLoop::build_loop_late gets a chance
  // to remove them anyway.
  if (C->cached_top_node()) {
    useful.push(C->cached_top_node());
  }
  C->update_dead_node_list(useful);
  remove_useless_nodes(useful.member_set());
  _worklist.remove_useless_nodes(useful.member_set());
  C->disconnect_useless_nodes(useful, _worklist);

  Node* new_root = node_map[n->_idx];
  assert(new_root->is_Root(), "transformed root node must be a root node");
  return new_root;
}

//------------------------------transform_once---------------------------------
// For PhaseCCP, transformation is IDENTITY unless Node computed a constant.
Node *PhaseCCP::transform_once( Node *n ) {
  const Type *t = type(n);
  // Constant?  Use constant Node instead
  if( t->singleton() ) {
    Node *nn = n;               // Default is to return the original constant
    if( t == Type::TOP ) {
      // cache my top node on the Compile instance
      if( C->cached_top_node() == nullptr || C->cached_top_node()->in(0) == nullptr ) {
        C->set_cached_top_node(ConNode::make(Type::TOP));
        set_type(C->top(), Type::TOP);
      }
      nn = C->top();
    }
    if( !n->is_Con() ) {
      if( t != Type::TOP ) {
        nn = makecon(t);        // ConNode::make(t);
        NOT_PRODUCT( inc_constants(); )
      } else if( n->is_Region() ) { // Unreachable region
        // Note: nn == C->top()
        n->set_req(0, nullptr); // Cut self-reference
        bool progress = true;
        uint max = n->outcnt();
        DUIterator i;
        while (progress) {
          progress = false;
          // Eagerly remove dead phis to avoid creating phi copies.
          for (i = n->outs(); n->has_out(i); i++) {
            Node* m = n->out(i);
            if (m->is_Phi()) {
              assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
              replace_node(m, nn);
              if (max != n->outcnt()) {
                progress = true;
                i = n->refresh_out_pos(i);
                max = n->outcnt();
              }
            }
          }
        }
      }
      replace_node(n,nn);       // Update DefUse edges for new constant
    }
    return nn;
  }

  // If x is a TypeNode, capture any more-precise type permanently into Node
  if (t != n->bottom_type()) {
    hash_delete(n);             // changing bottom type may force a rehash
    n->raise_bottom_type(t);
    _worklist.push(n);          // n re-enters the hash table via the worklist
  }

  // TEMPORARY fix to ensure that 2nd GVN pass eliminates null checks
  switch( n->Opcode() ) {
  case Op_CallStaticJava:       // Give post-parse call devirtualization a chance
  case Op_CallDynamicJava:
  case Op_FastLock:             // Revisit FastLocks for lock coarsening
  case Op_If:
  case Op_CountedLoopEnd:
  case Op_Region:
  case Op_Loop:
  case Op_CountedLoop:
  case Op_Conv2B:
  case Op_Opaque1:
    _worklist.push(n);
    break;
  default:
    break;
  }

  return n;
}

//---------------------------------saturate------------------------------------
const Type* PhaseCCP::saturate(const Type* new_type, const Type* old_type,
                               const Type* limit_type) const {
  const Type* wide_type = new_type->widen(old_type, limit_type);
  if (wide_type != new_type) {   // did we widen?
    // If so, we may have widened beyond the limit type.  Clip it back down.
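    // Illustrative example (assumed values, not taken from this code): if new_type is int:0..3 and
    // widen() jumps ahead to a much wider range, filter(limit_type) intersects the result back into
    // limit_type (say int:0..100), so saturation never exceeds the limit.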
    new_type = wide_type->filter(limit_type);
  }
  return new_type;
}

//------------------------------print_statistics-------------------------------
#ifndef PRODUCT
void PhaseCCP::print_statistics() {
  tty->print_cr("CCP: %d constants found: %d", _total_invokes, _total_constants);
}
#endif


//=============================================================================
#ifndef PRODUCT
uint PhasePeephole::_total_peepholes = 0;
#endif
//------------------------------PhasePeephole----------------------------------
// Peephole optimization pass: scan each basic block for machine-node peephole opportunities.
PhasePeephole::PhasePeephole( PhaseRegAlloc *regalloc, PhaseCFG &cfg )
  : PhaseTransform(Peephole), _regalloc(regalloc), _cfg(cfg) {
  NOT_PRODUCT( clear_peepholes(); )
}

#ifndef PRODUCT
//------------------------------~PhasePeephole---------------------------------
PhasePeephole::~PhasePeephole() {
  _total_peepholes += count_peepholes();
}
#endif

//------------------------------transform--------------------------------------
Node *PhasePeephole::transform( Node *n ) {
  ShouldNotCallThis();
  return nullptr;
}

//------------------------------do_transform-----------------------------------
void PhasePeephole::do_transform() {
  bool method_name_not_printed = true;

  // Examine each basic block
  for (uint block_number = 1; block_number < _cfg.number_of_blocks(); ++block_number) {
    Block* block = _cfg.get_block(block_number);
    bool block_not_printed = true;

    for (bool progress = true; progress;) {
      progress = false;
      // block->end_idx() not valid after PhaseRegAlloc
      uint end_index = block->number_of_nodes();
      for( uint instruction_index = end_index - 1; instruction_index > 0; --instruction_index ) {
        Node *n = block->get_node(instruction_index);
        if( n->is_Mach() ) {
          MachNode *m = n->as_Mach();
          // check for peephole opportunities
          int result = m->peephole(block, instruction_index, &_cfg, _regalloc);
          if( result != -1 ) {
#ifndef PRODUCT
            if( PrintOptoPeephole ) {
              // Print method, first time only
              if( C->method() && method_name_not_printed ) {
                C->method()->print_short_name(); tty->cr();
                method_name_not_printed = false;
              }
              // Print this block
              if( Verbose && block_not_printed) {
                tty->print_cr("in block");
                block->dump();
                block_not_printed = false;
              }
              // Print the peephole number
              tty->print_cr("peephole number: %d", result);
            }
            inc_peepholes();
#endif
            // Set progress, start again
            progress = true;
            break;
          }
        }
      }
    }
  }
}

//------------------------------print_statistics-------------------------------
#ifndef PRODUCT
void PhasePeephole::print_statistics() {
  tty->print_cr("Peephole: peephole rules applied: %d", _total_peepholes);
}
#endif


//=============================================================================
//------------------------------set_req_X--------------------------------------
void Node::set_req_X( uint i, Node *n, PhaseIterGVN *igvn ) {
  assert( is_not_dead(n), "can not use dead node");
  assert( igvn->hash_find(this) != this, "Need to remove from hash before changing edges" );
  Node *old = in(i);
  set_req(i, n);

  // old goes dead?
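  // The outcnt() cases below are heuristics: now that 'old' has lost a use, re-enqueue the nodes
  // most likely to simplify (e.g. a Store whose remaining users may fold, or a Region that may
  // collapse) instead of unconditionally revisiting everything.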
  if( old ) {
    switch (old->outcnt()) {
    case 0:
      // Put into the worklist to kill later. We do not kill it now because the
      // recursive kill will delete the current node (this) if dead-loop exists
      if (!old->is_top())
        igvn->_worklist.push( old );
      break;
    case 1:
      if( old->is_Store() || old->has_special_unique_user() )
        igvn->add_users_to_worklist( old );
      break;
    case 2:
      if( old->is_Store() )
        igvn->add_users_to_worklist( old );
      if( old->Opcode() == Op_Region )
        igvn->_worklist.push(old);
      break;
    case 3:
      if( old->Opcode() == Op_Region ) {
        igvn->_worklist.push(old);
        igvn->add_users_to_worklist( old );
      }
      break;
    default:
      break;
    }

    BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(igvn, old);
  }
}

void Node::set_req_X(uint i, Node *n, PhaseGVN *gvn) {
  PhaseIterGVN* igvn = gvn->is_IterGVN();
  if (igvn == nullptr) {
    set_req(i, n);
    return;
  }
  set_req_X(i, n, igvn);
}

//-------------------------------replace_by-----------------------------------
// Using def-use info, replace one node for another.  Follow the def-use info
// to all users of the OLD node.  Then make all uses point to the NEW node.
void Node::replace_by(Node *new_node) {
  assert(!is_top(), "top node has no DU info");
  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
    Node* use = last_out(i);
    uint uses_found = 0;
    for (uint j = 0; j < use->len(); j++) {
      if (use->in(j) == this) {
        if (j < use->req())
          use->set_req(j, new_node);
        else use->set_prec(j, new_node);
        uses_found++;
      }
    }
    i -= uses_found;    // we deleted 1 or more copies of this edge
  }
}

//=============================================================================
//-----------------------------------------------------------------------------
void Type_Array::grow( uint i ) {
  if( !_max ) {
    _max = 1;
    _types = (const Type**)_a->Amalloc( _max * sizeof(Type*) );
    _types[0] = nullptr;
  }
  uint old = _max;
  _max = next_power_of_2(i);
  _types = (const Type**)_a->Arealloc( _types, old*sizeof(Type*),_max*sizeof(Type*));
  memset( &_types[old], 0, (_max-old)*sizeof(Type*) );
}

//------------------------------dump-------------------------------------------
#ifndef PRODUCT
void Type_Array::dump() const {
  uint max = Size();
  for( uint i = 0; i < max; i++ ) {
    if( _types[i] != nullptr ) {
      tty->print(" %d\t== ", i); _types[i]->dump(); tty->cr();
    }
  }
}
#endif