1 /*
   2  * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "opto/block.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/idealGraphPrinter.hpp"
  35 #include "opto/loopnode.hpp"
  36 #include "opto/machnode.hpp"
  37 #include "opto/opcodes.hpp"
  38 #include "opto/phaseX.hpp"
  39 #include "opto/regalloc.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "utilities/macros.hpp"
  42 #include "utilities/powerOfTwo.hpp"
  43 
  44 //=============================================================================
  45 #define NODE_HASH_MINIMUM_SIZE    255
  46 
  47 //------------------------------NodeHash---------------------------------------
  48 NodeHash::NodeHash(Arena *arena, uint est_max_size) :
  49   _a(arena),
  50   _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
  51   _inserts(0), _insert_limit( insert_limit() ),
  52   _table( NEW_ARENA_ARRAY( _a , Node* , _max ) )
  53 #ifndef PRODUCT
  54   , _grows(0),_look_probes(0), _lookup_hits(0), _lookup_misses(0),
  55   _insert_probes(0), _delete_probes(0), _delete_hits(0), _delete_misses(0),
  56    _total_inserts(0), _total_insert_probes(0)
  57 #endif
  58 {
  59   // _sentinel must be in the current node space
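        // The sentinel is used as a tombstone: hash_delete() overwrites removed entries
        // with it so that existing probe chains are not broken.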
  60   _sentinel = new ProjNode(nullptr, TypeFunc::Control);
  61   memset(_table,0,sizeof(Node*)*_max);
  62 }
  63 
  64 //------------------------------hash_find--------------------------------------
  65 // Find in hash table
  66 Node *NodeHash::hash_find( const Node *n ) {
  67   // ((Node*)n)->set_hash( n->hash() );
  68   uint hash = n->hash();
  69   if (hash == Node::NO_HASH) {
  70     NOT_PRODUCT( _lookup_misses++ );
  71     return nullptr;
  72   }
  73   uint key = hash & (_max-1);
  74   uint stride = key | 0x01;
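        // _max is always a power of two (see round_up), so an odd stride is relatively
        // prime to it and the probe sequence key, key+stride, key+2*stride, ... visits
        // every slot (mod _max) before repeating.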
  75   NOT_PRODUCT( _look_probes++ );
  76   Node *k = _table[key];        // Get hashed value
  77   if( !k ) {                    // ?Miss?
  78     NOT_PRODUCT( _lookup_misses++ );
  79     return nullptr;             // Miss!
  80   }
  81 
  82   int op = n->Opcode();
  83   uint req = n->req();
  84   while( 1 ) {                  // While probing hash table
  85     if( k->req() == req &&      // Same count of inputs
  86         k->Opcode() == op ) {   // Same Opcode
  87       for( uint i=0; i<req; i++ )
  88         if( n->in(i)!=k->in(i)) // Different inputs?
  89           goto collision;       // "goto" is a speed hack...
  90       if( n->cmp(*k) ) {        // Check for any special bits
  91         NOT_PRODUCT( _lookup_hits++ );
  92         return k;               // Hit!
  93       }
  94     }
  95   collision:
  96     NOT_PRODUCT( _look_probes++ );
  97     key = (key + stride/*7*/) & (_max-1); // Stride through table with relative prime
  98     k = _table[key];            // Get hashed value
  99     if( !k ) {                  // ?Miss?
 100       NOT_PRODUCT( _lookup_misses++ );
 101       return nullptr;           // Miss!
 102     }
 103   }
 104   ShouldNotReachHere();
 105   return nullptr;
 106 }
 107 
 108 //------------------------------hash_find_insert-------------------------------
 109 // Find in hash table, insert if not already present
 110 // Used to preserve unique entries in hash table
 111 Node *NodeHash::hash_find_insert( Node *n ) {
 112   // n->set_hash( );
 113   uint hash = n->hash();
 114   if (hash == Node::NO_HASH) {
 115     NOT_PRODUCT( _lookup_misses++ );
 116     return nullptr;
 117   }
 118   uint key = hash & (_max-1);
  119   uint stride = key | 0x01;     // stride must be relatively prime to table size
 120   uint first_sentinel = 0;      // replace a sentinel if seen.
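        // If the probe sequence passes a sentinel (a deleted slot) before reaching an
        // empty slot, the new node is inserted into that sentinel slot instead.
        // (A sentinel in slot 0 is never reused, since first_sentinel == 0 also means
        // "none seen yet"; that only wastes the slot, it does not affect correctness.)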
 121   NOT_PRODUCT( _look_probes++ );
 122   Node *k = _table[key];        // Get hashed value
 123   if( !k ) {                    // ?Miss?
 124     NOT_PRODUCT( _lookup_misses++ );
 125     _table[key] = n;            // Insert into table!
 126     debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
 127     check_grow();               // Grow table if insert hit limit
 128     return nullptr;             // Miss!
 129   }
 130   else if( k == _sentinel ) {
 131     first_sentinel = key;      // Can insert here
 132   }
 133 
 134   int op = n->Opcode();
 135   uint req = n->req();
 136   while( 1 ) {                  // While probing hash table
 137     if( k->req() == req &&      // Same count of inputs
 138         k->Opcode() == op ) {   // Same Opcode
 139       for( uint i=0; i<req; i++ )
 140         if( n->in(i)!=k->in(i)) // Different inputs?
 141           goto collision;       // "goto" is a speed hack...
 142       if( n->cmp(*k) ) {        // Check for any special bits
 143         NOT_PRODUCT( _lookup_hits++ );
 144         return k;               // Hit!
 145       }
 146     }
 147   collision:
 148     NOT_PRODUCT( _look_probes++ );
 149     key = (key + stride) & (_max-1); // Stride through table w/ relative prime
 150     k = _table[key];            // Get hashed value
 151     if( !k ) {                  // ?Miss?
 152       NOT_PRODUCT( _lookup_misses++ );
 153       key = (first_sentinel == 0) ? key : first_sentinel; // ?saw sentinel?
 154       _table[key] = n;          // Insert into table!
 155       debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
 156       check_grow();             // Grow table if insert hit limit
 157       return nullptr;           // Miss!
 158     }
 159     else if( first_sentinel == 0 && k == _sentinel ) {
 160       first_sentinel = key;    // Can insert here
 161     }
 162 
 163   }
 164   ShouldNotReachHere();
 165   return nullptr;
 166 }
 167 
 168 //------------------------------hash_insert------------------------------------
 169 // Insert into hash table
 170 void NodeHash::hash_insert( Node *n ) {
 171   // // "conflict" comments -- print nodes that conflict
 172   // bool conflict = false;
 173   // n->set_hash();
 174   uint hash = n->hash();
 175   if (hash == Node::NO_HASH) {
 176     return;
 177   }
 178   check_grow();
 179   uint key = hash & (_max-1);
 180   uint stride = key | 0x01;
 181 
 182   while( 1 ) {                  // While probing hash table
 183     NOT_PRODUCT( _insert_probes++ );
 184     Node *k = _table[key];      // Get hashed value
 185     if( !k || (k == _sentinel) ) break;       // Found a slot
 186     assert( k != n, "already inserted" );
 187     // if( PrintCompilation && PrintOptoStatistics && Verbose ) { tty->print("  conflict: "); k->dump(); conflict = true; }
 188     key = (key + stride) & (_max-1); // Stride through table w/ relative prime
 189   }
 190   _table[key] = n;              // Insert into table!
 191   debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
 192   // if( conflict ) { n->dump(); }
 193 }
 194 
 195 //------------------------------hash_delete------------------------------------
 196 // Replace in hash table with sentinel
 197 bool NodeHash::hash_delete( const Node *n ) {
 198   Node *k;
 199   uint hash = n->hash();
 200   if (hash == Node::NO_HASH) {
 201     NOT_PRODUCT( _delete_misses++ );
 202     return false;
 203   }
 204   uint key = hash & (_max-1);
 205   uint stride = key | 0x01;
 206   debug_only( uint counter = 0; );
 207   for( ; /* (k != nullptr) && (k != _sentinel) */; ) {
 208     debug_only( counter++ );
 209     NOT_PRODUCT( _delete_probes++ );
 210     k = _table[key];            // Get hashed value
 211     if( !k ) {                  // Miss?
 212       NOT_PRODUCT( _delete_misses++ );
 213       return false;             // Miss! Not in chain
 214     }
 215     else if( n == k ) {
 216       NOT_PRODUCT( _delete_hits++ );
 217       _table[key] = _sentinel;  // Hit! Label as deleted entry
 218       debug_only(((Node*)n)->exit_hash_lock()); // Unlock the node upon removal from table.
 219       return true;
 220     }
 221     else {
 222       // collision: move through table with prime offset
 223       key = (key + stride/*7*/) & (_max-1);
 224       assert( counter <= _insert_limit, "Cycle in hash-table");
 225     }
 226   }
 227   ShouldNotReachHere();
 228   return false;
 229 }
 230 
 231 //------------------------------round_up---------------------------------------
 232 // Round up to nearest power of 2
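      // For example, x = 255: add 25% slop -> 318, round up -> 512 (never below 16).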
 233 uint NodeHash::round_up(uint x) {
 234   x += (x >> 2);                  // Add 25% slop
 235   return MAX2(16U, round_up_power_of_2(x));
 236 }
 237 
 238 //------------------------------grow-------------------------------------------
 239 // Grow _table to next power of 2 and insert old entries
 240 void  NodeHash::grow() {
 241   // Record old state
 242   uint   old_max   = _max;
 243   Node **old_table = _table;
 244   // Construct new table with twice the space
 245 #ifndef PRODUCT
 246   _grows++;
 247   _total_inserts       += _inserts;
 248   _total_insert_probes += _insert_probes;
 249   _insert_probes   = 0;
 250 #endif
 251   _inserts         = 0;
 252   _max     = _max << 1;
 253   _table   = NEW_ARENA_ARRAY( _a , Node* , _max ); // (Node**)_a->Amalloc( _max * sizeof(Node*) );
 254   memset(_table,0,sizeof(Node*)*_max);
 255   _insert_limit = insert_limit();
 256   // Insert old entries into the new table
 257   for( uint i = 0; i < old_max; i++ ) {
 258     Node *m = *old_table++;
 259     if( !m || m == _sentinel ) continue;
 260     debug_only(m->exit_hash_lock()); // Unlock the node upon removal from old table.
 261     hash_insert(m);
 262   }
 263 }
 264 
 265 //------------------------------clear------------------------------------------
 266 // Clear all entries in _table to null but keep storage
 267 void  NodeHash::clear() {
 268 #ifdef ASSERT
 269   // Unlock all nodes upon removal from table.
 270   for (uint i = 0; i < _max; i++) {
 271     Node* n = _table[i];
 272     if (!n || n == _sentinel)  continue;
 273     n->exit_hash_lock();
 274   }
 275 #endif
 276 
 277   memset( _table, 0, _max * sizeof(Node*) );
 278 }
 279 
 280 //-----------------------remove_useless_nodes----------------------------------
 281 // Remove useless nodes from value table,
 282 // implementation does not depend on hash function
 283 void NodeHash::remove_useless_nodes(VectorSet &useful) {
 284 
  285   // Dead nodes in the hash table inherited from GVN should not replace
  286   // existing nodes; remove them.
 287   uint max = size();
 288   Node *sentinel_node = sentinel();
 289   for( uint i = 0; i < max; ++i ) {
 290     Node *n = at(i);
 291     if(n != nullptr && n != sentinel_node && !useful.test(n->_idx)) {
 292       debug_only(n->exit_hash_lock()); // Unlock the node when removed
 293       _table[i] = sentinel_node;       // Replace with placeholder
 294     }
 295   }
 296 }
 297 
 298 
 299 void NodeHash::check_no_speculative_types() {
 300 #ifdef ASSERT
 301   uint max = size();
 302   Unique_Node_List live_nodes;
 303   Compile::current()->identify_useful_nodes(live_nodes);
 304   Node *sentinel_node = sentinel();
 305   for (uint i = 0; i < max; ++i) {
 306     Node *n = at(i);
 307     if (n != nullptr &&
 308         n != sentinel_node &&
 309         n->is_Type() &&
 310         live_nodes.member(n)) {
 311       TypeNode* tn = n->as_Type();
 312       const Type* t = tn->type();
 313       const Type* t_no_spec = t->remove_speculative();
 314       assert(t == t_no_spec, "dead node in hash table or missed node during speculative cleanup");
 315     }
 316   }
 317 #endif
 318 }
 319 
 320 #ifndef PRODUCT
 321 //------------------------------dump-------------------------------------------
 322 // Dump statistics for the hash table
 323 void NodeHash::dump() {
 324   _total_inserts       += _inserts;
 325   _total_insert_probes += _insert_probes;
 326   if (PrintCompilation && PrintOptoStatistics && Verbose && (_inserts > 0)) {
 327     if (WizardMode) {
 328       for (uint i=0; i<_max; i++) {
 329         if (_table[i])
 330           tty->print("%d/%d/%d ",i,_table[i]->hash()&(_max-1),_table[i]->_idx);
 331       }
 332     }
 333     tty->print("\nGVN Hash stats:  %d grows to %d max_size\n", _grows, _max);
 334     tty->print("  %d/%d (%8.1f%% full)\n", _inserts, _max, (double)_inserts/_max*100.0);
 335     tty->print("  %dp/(%dh+%dm) (%8.2f probes/lookup)\n", _look_probes, _lookup_hits, _lookup_misses, (double)_look_probes/(_lookup_hits+_lookup_misses));
 336     tty->print("  %dp/%di (%8.2f probes/insert)\n", _total_insert_probes, _total_inserts, (double)_total_insert_probes/_total_inserts);
 337     // sentinels increase lookup cost, but not insert cost
 338     assert((_lookup_misses+_lookup_hits)*4+100 >= _look_probes, "bad hash function");
 339     assert( _inserts+(_inserts>>3) < _max, "table too full" );
 340     assert( _inserts*3+100 >= _insert_probes, "bad hash function" );
 341   }
 342 }
 343 
 344 Node *NodeHash::find_index(uint idx) { // For debugging
 345   // Find an entry by its index value
 346   for( uint i = 0; i < _max; i++ ) {
 347     Node *m = _table[i];
 348     if( !m || m == _sentinel ) continue;
 349     if( m->_idx == (uint)idx ) return m;
 350   }
 351   return nullptr;
 352 }
 353 #endif
 354 
 355 #ifdef ASSERT
 356 NodeHash::~NodeHash() {
 357   // Unlock all nodes upon destruction of table.
 358   if (_table != (Node**)badAddress)  clear();
 359 }
 360 #endif
 361 
 362 
 363 //=============================================================================
 364 //------------------------------PhaseRemoveUseless-----------------------------
  365 // Use a breadth-first walk to collect the useful nodes reachable from root.
 366 PhaseRemoveUseless::PhaseRemoveUseless(PhaseGVN* gvn, Unique_Node_List& worklist, PhaseNumber phase_num) : Phase(phase_num) {
 367   C->print_method(PHASE_BEFORE_REMOVEUSELESS, 3);
 368   // Implementation requires an edge from root to each SafePointNode
 369   // at a backward branch. Inserted in add_safepoint().
 370 
 371   // Identify nodes that are reachable from below, useful.
 372   C->identify_useful_nodes(_useful);
 373   // Update dead node list
 374   C->update_dead_node_list(_useful);
 375 
 376   // Remove all useless nodes from PhaseValues' recorded types
 377   // Must be done before disconnecting nodes to preserve hash-table-invariant
 378   gvn->remove_useless_nodes(_useful.member_set());
 379 
 380   // Remove all useless nodes from future worklist
 381   worklist.remove_useless_nodes(_useful.member_set());
 382 
 383   // Disconnect 'useless' nodes that are adjacent to useful nodes
 384   C->disconnect_useless_nodes(_useful, worklist);
 385 }
 386 
 387 //=============================================================================
 388 //------------------------------PhaseRenumberLive------------------------------
 389 // First, remove useless nodes (equivalent to identifying live nodes).
 390 // Then, renumber live nodes.
 391 //
 392 // The set of live nodes is returned by PhaseRemoveUseless in the _useful structure.
 393 // If the number of live nodes is 'x' (where 'x' == _useful.size()), then the
 394 // PhaseRenumberLive updates the node ID of each node (the _idx field) with a unique
 395 // value in the range [0, x).
 396 //
 397 // At the end of the PhaseRenumberLive phase, the compiler's count of unique nodes is
 398 // updated to 'x' and the list of dead nodes is reset (as there are no dead nodes).
 399 //
 400 // The PhaseRenumberLive phase updates two data structures with the new node IDs.
  401 // (1) The "worklist" is "C->igvn_worklist()", which collects the nodes that need to
  402 //     be processed by IGVN after the useless nodes have been removed.
 403 // (2) Type information "gvn->types()" (same as "C->types()") maps every node ID to
 404 //     the node's type. The mapping is updated to use the new node IDs as well. We
 405 //     create a new map, and swap it with the old one.
 406 //
 407 // Other data structures used by the compiler are not updated. The hash table for value
  408 // numbering ("C->node_hash()", referenced by PhaseValues::_table) is not updated because
 409 // computing the hash values is not based on node IDs.
 410 PhaseRenumberLive::PhaseRenumberLive(PhaseGVN* gvn,
 411                                      Unique_Node_List& worklist,
 412                                      PhaseNumber phase_num) :
 413   PhaseRemoveUseless(gvn, worklist, Remove_Useless_And_Renumber_Live),
 414   _new_type_array(C->comp_arena()),
 415   _old2new_map(C->unique(), C->unique(), -1),
 416   _is_pass_finished(false),
 417   _live_node_count(C->live_nodes())
 418 {
 419   assert(RenumberLiveNodes, "RenumberLiveNodes must be set to true for node renumbering to take place");
 420   assert(C->live_nodes() == _useful.size(), "the number of live nodes must match the number of useful nodes");
 421   assert(_delayed.size() == 0, "should be empty");
 422   assert(&worklist == C->igvn_worklist(), "reference still same as the one from Compile");
 423   assert(&gvn->types() == C->types(), "reference still same as that from Compile");
 424 
 425   GrowableArray<Node_Notes*>* old_node_note_array = C->node_note_array();
 426   if (old_node_note_array != nullptr) {
 427     int new_size = (_useful.size() >> 8) + 1; // The node note array uses blocks, see C->_log2_node_notes_block_size
 428     new_size = MAX2(8, new_size);
 429     C->set_node_note_array(new (C->comp_arena()) GrowableArray<Node_Notes*> (C->comp_arena(), new_size, 0, nullptr));
 430     C->grow_node_notes(C->node_note_array(), new_size);
 431   }
 432 
 433   assert(worklist.is_subset_of(_useful), "only useful nodes should still be in the worklist");
 434 
 435   // Iterate over the set of live nodes.
 436   for (uint current_idx = 0; current_idx < _useful.size(); current_idx++) {
 437     Node* n = _useful.at(current_idx);
 438 
 439     const Type* type = gvn->type_or_null(n);
 440     _new_type_array.map(current_idx, type);
 441 
 442     assert(_old2new_map.at(n->_idx) == -1, "already seen");
 443     _old2new_map.at_put(n->_idx, current_idx);
 444 
 445     if (old_node_note_array != nullptr) {
 446       Node_Notes* nn = C->locate_node_notes(old_node_note_array, n->_idx);
 447       C->set_node_notes_at(current_idx, nn);
 448     }
 449 
 450     n->set_idx(current_idx); // Update node ID.
 451 
 452     if (update_embedded_ids(n) < 0) {
 453       _delayed.push(n); // has embedded IDs; handle later
 454     }
 455   }
 456 
  457   // The VectorSet in the Unique_Node_List worklist must be recomputed, since node IDs have changed.
 458   worklist.recompute_idx_set();
 459 
 460   assert(_live_node_count == _useful.size(), "all live nodes must be processed");
 461 
 462   _is_pass_finished = true; // pass finished; safe to process delayed updates
 463 
 464   while (_delayed.size() > 0) {
 465     Node* n = _delayed.pop();
 466     int no_of_updates = update_embedded_ids(n);
 467     assert(no_of_updates > 0, "should be updated");
 468   }
 469 
 470   // Replace the compiler's type information with the updated type information.
 471   gvn->types().swap(_new_type_array);
 472 
 473   // Update the unique node count of the compilation to the number of currently live nodes.
 474   C->set_unique(_live_node_count);
 475 
 476   // Set the dead node count to 0 and reset dead node list.
 477   C->reset_dead_node_list();
 478 }
 479 
 480 int PhaseRenumberLive::new_index(int old_idx) {
 481   assert(_is_pass_finished, "not finished");
 482   if (_old2new_map.at(old_idx) == -1) { // absent
 483     // Allocate a placeholder to preserve uniqueness
 484     _old2new_map.at_put(old_idx, _live_node_count);
 485     _live_node_count++;
 486   }
 487   return _old2new_map.at(old_idx);
 488 }
 489 
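      // Update node IDs embedded in a node or its type: a Phi's _inst_id/_inst_mem_id and
      // the instance id of a known-instance oop pointer type. Returns the number of updates
      // performed, or -1 if the update must be delayed until all live nodes have been
      // renumbered (the old-to-new mapping is not complete before then).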
 490 int PhaseRenumberLive::update_embedded_ids(Node* n) {
 491   int no_of_updates = 0;
 492   if (n->is_Phi()) {
 493     PhiNode* phi = n->as_Phi();
 494     if (phi->_inst_id != -1) {
 495       if (!_is_pass_finished) {
 496         return -1; // delay
 497       }
 498       int new_idx = new_index(phi->_inst_id);
 499       assert(new_idx != -1, "");
 500       phi->_inst_id = new_idx;
 501       no_of_updates++;
 502     }
 503     if (phi->_inst_mem_id != -1) {
 504       if (!_is_pass_finished) {
 505         return -1; // delay
 506       }
 507       int new_idx = new_index(phi->_inst_mem_id);
 508       assert(new_idx != -1, "");
 509       phi->_inst_mem_id = new_idx;
 510       no_of_updates++;
 511     }
 512   }
 513 
 514   const Type* type = _new_type_array.fast_lookup(n->_idx);
 515   if (type != nullptr && type->isa_oopptr() && type->is_oopptr()->is_known_instance()) {
 516     if (!_is_pass_finished) {
 517         return -1; // delay
 518     }
 519     int old_idx = type->is_oopptr()->instance_id();
 520     int new_idx = new_index(old_idx);
 521     const Type* new_type = type->is_oopptr()->with_instance_id(new_idx);
 522     _new_type_array.map(n->_idx, new_type);
 523     no_of_updates++;
 524   }
 525 
 526   return no_of_updates;
 527 }
 528 
 529 void PhaseValues::init_con_caches() {
 530   memset(_icons,0,sizeof(_icons));
 531   memset(_lcons,0,sizeof(_lcons));
 532   memset(_zcons,0,sizeof(_zcons));
 533 }
 534 
 535 //--------------------------------find_int_type--------------------------------
 536 const TypeInt* PhaseValues::find_int_type(Node* n) {
 537   if (n == nullptr)  return nullptr;
  538   // Call type_or_null(n) to determine the node's type, since we might be in
  539   // the parse phase and calling n->Value() may return the wrong type.
 540   // (For example, a phi node at the beginning of loop parsing is not ready.)
 541   const Type* t = type_or_null(n);
 542   if (t == nullptr)  return nullptr;
 543   return t->isa_int();
 544 }
 545 
 546 
 547 //-------------------------------find_long_type--------------------------------
 548 const TypeLong* PhaseValues::find_long_type(Node* n) {
 549   if (n == nullptr)  return nullptr;
 550   // (See comment above on type_or_null.)
 551   const Type* t = type_or_null(n);
 552   if (t == nullptr)  return nullptr;
 553   return t->isa_long();
 554 }
 555 
 556 //------------------------------~PhaseValues-----------------------------------
 557 #ifndef PRODUCT
 558 PhaseValues::~PhaseValues() {
 559   // Statistics for NodeHash
 560   _table.dump();
 561   // Statistics for value progress and efficiency
 562   if( PrintCompilation && Verbose && WizardMode ) {
 563     tty->print("\n%sValues: %d nodes ---> %d/%d (%d)",
 564       is_IterGVN() ? "Iter" : "    ", C->unique(), made_progress(), made_transforms(), made_new_values());
 565     if( made_transforms() != 0 ) {
 566       tty->print_cr("  ratio %f", made_progress()/(float)made_transforms() );
 567     } else {
 568       tty->cr();
 569     }
 570   }
 571 }
 572 #endif
 573 
 574 //------------------------------makecon----------------------------------------
 575 ConNode* PhaseValues::makecon(const Type* t) {
 576   assert(t->singleton(), "must be a constant");
 577   assert(!t->empty() || t == Type::TOP, "must not be vacuous range");
 578   switch (t->base()) {  // fast paths
 579   case Type::Half:
 580   case Type::Top:  return (ConNode*) C->top();
 581   case Type::Int:  return intcon( t->is_int()->get_con() );
 582   case Type::Long: return longcon( t->is_long()->get_con() );
 583   default:         break;
 584   }
 585   if (t->is_zero_type())
 586     return zerocon(t->basic_type());
 587   return uncached_makecon(t);
 588 }
 589 
 590 //--------------------------uncached_makecon-----------------------------------
 591 // Make an idealized constant - one of ConINode, ConPNode, etc.
 592 ConNode* PhaseValues::uncached_makecon(const Type *t) {
 593   assert(t->singleton(), "must be a constant");
 594   ConNode* x = ConNode::make(t);
 595   ConNode* k = (ConNode*)hash_find_insert(x); // Value numbering
 596   if (k == nullptr) {
 597     set_type(x, t);             // Missed, provide type mapping
 598     GrowableArray<Node_Notes*>* nna = C->node_note_array();
 599     if (nna != nullptr) {
 600       Node_Notes* loc = C->locate_node_notes(nna, x->_idx, true);
 601       loc->clear(); // do not put debug info on constants
 602     }
 603   } else {
 604     x->destruct(this);          // Hit, destroy duplicate constant
 605     x = k;                      // use existing constant
 606   }
 607   return x;
 608 }
 609 
 610 //------------------------------intcon-----------------------------------------
 611 // Fast integer constant.  Same as "transform(new ConINode(TypeInt::make(i)))"
 612 ConINode* PhaseValues::intcon(jint i) {
 613   // Small integer?  Check cache! Check that cached node is not dead
 614   if (i >= _icon_min && i <= _icon_max) {
 615     ConINode* icon = _icons[i-_icon_min];
 616     if (icon != nullptr && icon->in(TypeFunc::Control) != nullptr)
 617       return icon;
 618   }
 619   ConINode* icon = (ConINode*) uncached_makecon(TypeInt::make(i));
 620   assert(icon->is_Con(), "");
 621   if (i >= _icon_min && i <= _icon_max)
 622     _icons[i-_icon_min] = icon;   // Cache small integers
 623   return icon;
 624 }
 625 
 626 //------------------------------longcon----------------------------------------
 627 // Fast long constant.
 628 ConLNode* PhaseValues::longcon(jlong l) {
  629   // Small long?  Check cache! Check that cached node is not dead
 630   if (l >= _lcon_min && l <= _lcon_max) {
 631     ConLNode* lcon = _lcons[l-_lcon_min];
 632     if (lcon != nullptr && lcon->in(TypeFunc::Control) != nullptr)
 633       return lcon;
 634   }
 635   ConLNode* lcon = (ConLNode*) uncached_makecon(TypeLong::make(l));
 636   assert(lcon->is_Con(), "");
 637   if (l >= _lcon_min && l <= _lcon_max)
  638     _lcons[l-_lcon_min] = lcon;      // Cache small longs
 639   return lcon;
 640 }
 641 ConNode* PhaseValues::integercon(jlong l, BasicType bt) {
 642   if (bt == T_INT) {
 643     return intcon(checked_cast<jint>(l));
 644   }
 645   assert(bt == T_LONG, "not an integer");
 646   return longcon(l);
 647 }
 648 
 649 
 650 //------------------------------zerocon-----------------------------------------
 651 // Fast zero or null constant. Same as "transform(ConNode::make(Type::get_zero_type(bt)))"
 652 ConNode* PhaseValues::zerocon(BasicType bt) {
 653   assert((uint)bt <= _zcon_max, "domain check");
 654   ConNode* zcon = _zcons[bt];
 655   if (zcon != nullptr && zcon->in(TypeFunc::Control) != nullptr)
 656     return zcon;
 657   zcon = (ConNode*) uncached_makecon(Type::get_zero_type(bt));
 658   _zcons[bt] = zcon;
 659   return zcon;
 660 }
 661 
 662 
 663 
 664 //=============================================================================
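      // Give the GC barrier set's C2 support the first chance to idealize the node;
      // if it declines (returns null), fall back to the node's own Ideal().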
 665 Node* PhaseGVN::apply_ideal(Node* k, bool can_reshape) {
 666   Node* i = BarrierSet::barrier_set()->barrier_set_c2()->ideal_node(this, k, can_reshape);
 667   if (i == nullptr) {
 668     i = k->Ideal(this, can_reshape);
 669   }
 670   return i;
 671 }
 672 
 673 //------------------------------transform--------------------------------------
 674 // Return a node which computes the same function as this node, but
 675 // in a faster or cheaper fashion.
 676 Node* PhaseGVN::transform(Node* n) {
 677   NOT_PRODUCT( set_transforms(); )
 678 
 679   // Apply the Ideal call in a loop until it no longer applies
 680   Node* k = n;
 681   Node* i = apply_ideal(k, /*can_reshape=*/false);
 682   NOT_PRODUCT(uint loop_count = 1;)
 683   while (i != nullptr) {
 684     assert(i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes" );
 685     k = i;
 686 #ifdef ASSERT
 687     if (loop_count >= K + C->live_nodes()) {
 688       dump_infinite_loop_info(i, "PhaseGVN::transform");
 689     }
 690 #endif
 691     i = apply_ideal(k, /*can_reshape=*/false);
 692     NOT_PRODUCT(loop_count++;)
 693   }
 694   NOT_PRODUCT(if (loop_count != 0) { set_progress(); })
 695 
 696   // If brand new node, make space in type array.
 697   ensure_type_or_null(k);
 698 
 699   // Since I just called 'Value' to compute the set of run-time values
 700   // for this Node, and 'Value' is non-local (and therefore expensive) I'll
 701   // cache Value.  Later requests for the local phase->type of this Node can
 702   // use the cached Value instead of suffering with 'bottom_type'.
 703   const Type* t = k->Value(this); // Get runtime Value set
 704   assert(t != nullptr, "value sanity");
 705   if (type_or_null(k) != t) {
 706 #ifndef PRODUCT
 707     // Do not count initial visit to node as a transformation
 708     if (type_or_null(k) == nullptr) {
 709       inc_new_values();
 710       set_progress();
 711     }
 712 #endif
 713     set_type(k, t);
 714     // If k is a TypeNode, capture any more-precise type permanently into Node
 715     k->raise_bottom_type(t);
 716   }
 717 
 718   if (t->singleton() && !k->is_Con()) {
 719     NOT_PRODUCT(set_progress();)
 720     return makecon(t);          // Turn into a constant
 721   }
 722 
 723   // Now check for Identities
 724   i = k->Identity(this);        // Look for a nearby replacement
 725   if (i != k) {                 // Found? Return replacement!
 726     NOT_PRODUCT(set_progress();)
 727     return i;
 728   }
 729 
 730   // Global Value Numbering
 731   i = hash_find_insert(k);      // Insert if new
 732   if (i && (i != k)) {
 733     // Return the pre-existing node
 734     NOT_PRODUCT(set_progress();)
 735     return i;
 736   }
 737 
 738   // Return Idealized original
 739   return k;
 740 }
 741 
 742 bool PhaseGVN::is_dominator_helper(Node *d, Node *n, bool linear_only) {
 743   if (d->is_top() || (d->is_Proj() && d->in(0)->is_top())) {
 744     return false;
 745   }
 746   if (n->is_top() || (n->is_Proj() && n->in(0)->is_top())) {
 747     return false;
 748   }
 749   assert(d->is_CFG() && n->is_CFG(), "must have CFG nodes");
 750   int i = 0;
 751   while (d != n) {
 752     n = IfNode::up_one_dom(n, linear_only);
 753     i++;
 754     if (n == nullptr || i >= 100) {
 755       return false;
 756     }
 757   }
 758   return true;
 759 }
 760 
 761 #ifdef ASSERT
 762 //------------------------------dead_loop_check--------------------------------
 763 // Check for a simple dead loop when a data node references itself directly
  764 // or through another data node, excluding cons and phis.
 765 void PhaseGVN::dead_loop_check( Node *n ) {
 766   // Phi may reference itself in a loop
 767   if (n != nullptr && !n->is_dead_loop_safe() && !n->is_CFG()) {
 768     // Do 2 levels check and only data inputs.
 769     bool no_dead_loop = true;
 770     uint cnt = n->req();
 771     for (uint i = 1; i < cnt && no_dead_loop; i++) {
 772       Node *in = n->in(i);
 773       if (in == n) {
 774         no_dead_loop = false;
 775       } else if (in != nullptr && !in->is_dead_loop_safe()) {
 776         uint icnt = in->req();
 777         for (uint j = 1; j < icnt && no_dead_loop; j++) {
 778           if (in->in(j) == n || in->in(j) == in)
 779             no_dead_loop = false;
 780         }
 781       }
 782     }
 783     if (!no_dead_loop) n->dump_bfs(100,nullptr,"#");
 784     assert(no_dead_loop, "dead loop detected");
 785   }
 786 }
 787 
 788 
 789 /**
 790  * Dumps information that can help to debug the problem. A debug
 791  * build fails with an assert.
 792  */
 793 void PhaseGVN::dump_infinite_loop_info(Node* n, const char* where) {
 794   n->dump(4);
 795   assert(false, "infinite loop in %s", where);
 796 }
 797 #endif
 798 
 799 //=============================================================================
 800 //------------------------------PhaseIterGVN-----------------------------------
 801 // Initialize with previous PhaseIterGVN info; used by PhaseCCP
 802 PhaseIterGVN::PhaseIterGVN(PhaseIterGVN* igvn) : _delay_transform(igvn->_delay_transform),
 803                                                  _worklist(*C->igvn_worklist())
 804 {
 805   _iterGVN = true;
 806   assert(&_worklist == &igvn->_worklist, "sanity");
 807 }
 808 
 809 //------------------------------PhaseIterGVN-----------------------------------
 810 // Initialize with previous PhaseGVN info from Parser
 811 PhaseIterGVN::PhaseIterGVN(PhaseGVN* gvn) : _delay_transform(false),
 812                                             _worklist(*C->igvn_worklist())
 813 {
 814   _iterGVN = true;
 815   uint max;
 816 
 817   // Dead nodes in the hash table inherited from GVN were not treated as
 818   // roots during def-use info creation; hence they represent an invisible
 819   // use.  Clear them out.
 820   max = _table.size();
 821   for( uint i = 0; i < max; ++i ) {
 822     Node *n = _table.at(i);
 823     if(n != nullptr && n != _table.sentinel() && n->outcnt() == 0) {
 824       if( n->is_top() ) continue;
 825       // If remove_useless_nodes() has run, we expect no such nodes left.
 826       assert(false, "remove_useless_nodes missed this node");
 827       hash_delete(n);
 828     }
 829   }
 830 
 831   // Any Phis or Regions on the worklist probably had uses that could not
 832   // make more progress because the uses were made while the Phis and Regions
 833   // were in half-built states.  Put all uses of Phis and Regions on worklist.
 834   max = _worklist.size();
 835   for( uint j = 0; j < max; j++ ) {
 836     Node *n = _worklist.at(j);
 837     uint uop = n->Opcode();
 838     if( uop == Op_Phi || uop == Op_Region ||
 839         n->is_Type() ||
 840         n->is_Mem() )
 841       add_users_to_worklist(n);
 842   }
 843 }
 844 
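      // Fisher-Yates shuffle of the worklist, used under StressIGVN (see optimize())
      // to randomize the order in which nodes are pulled from the worklist.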
 845 void PhaseIterGVN::shuffle_worklist() {
 846   if (_worklist.size() < 2) return;
 847   for (uint i = _worklist.size() - 1; i >= 1; i--) {
 848     uint j = C->random() % (i + 1);
 849     swap(_worklist.adr()[i], _worklist.adr()[j]);
 850   }
 851 }
 852 
 853 #ifndef PRODUCT
 854 void PhaseIterGVN::verify_step(Node* n) {
 855   if (is_verify_def_use()) {
 856     ResourceMark rm;
 857     VectorSet visited;
 858     Node_List worklist;
 859 
 860     _verify_window[_verify_counter % _verify_window_size] = n;
 861     ++_verify_counter;
 862     if (C->unique() < 1000 || 0 == _verify_counter % (C->unique() < 10000 ? 10 : 100)) {
 863       ++_verify_full_passes;
 864       worklist.push(C->root());
 865       Node::verify(-1, visited, worklist);
 866       return;
 867     }
 868     for (int i = 0; i < _verify_window_size; i++) {
 869       Node* n = _verify_window[i];
 870       if (n == nullptr) {
 871         continue;
 872       }
 873       if (n->in(0) == NodeSentinel) { // xform_idom
 874         _verify_window[i] = n->in(1);
 875         --i;
 876         continue;
 877       }
 878       // Typical fanout is 1-2, so this call visits about 6 nodes.
 879       if (!visited.test_set(n->_idx)) {
 880         worklist.push(n);
 881       }
 882     }
 883     Node::verify(4, visited, worklist);
 884   }
 885 }
 886 
 887 void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype) {
 888   const Type* newtype = type_or_null(n);
 889   if (nn != n || oldtype != newtype) {
 890     C->print_method(PHASE_AFTER_ITER_GVN_STEP, 5, n);
 891   }
 892   if (TraceIterativeGVN) {
 893     uint wlsize = _worklist.size();
 894     if (nn != n) {
 895       // print old node
 896       tty->print("< ");
 897       if (oldtype != newtype && oldtype != nullptr) {
 898         oldtype->dump();
 899       }
 900       do { tty->print("\t"); } while (tty->position() < 16);
 901       tty->print("<");
 902       n->dump();
 903     }
 904     if (oldtype != newtype || nn != n) {
 905       // print new node and/or new type
 906       if (oldtype == nullptr) {
 907         tty->print("* ");
 908       } else if (nn != n) {
 909         tty->print("> ");
 910       } else {
 911         tty->print("= ");
 912       }
 913       if (newtype == nullptr) {
 914         tty->print("null");
 915       } else {
 916         newtype->dump();
 917       }
 918       do { tty->print("\t"); } while (tty->position() < 16);
 919       nn->dump();
 920     }
 921     if (Verbose && wlsize < _worklist.size()) {
 922       tty->print("  Push {");
 923       while (wlsize != _worklist.size()) {
 924         Node* pushed = _worklist.at(wlsize++);
 925         tty->print(" %d", pushed->_idx);
 926       }
 927       tty->print_cr(" }");
 928     }
 929     if (nn != n) {
 930       // ignore n, it might be subsumed
 931       verify_step((Node*) nullptr);
 932     }
 933   }
 934 }
 935 
 936 void PhaseIterGVN::init_verifyPhaseIterGVN() {
 937   _verify_counter = 0;
 938   _verify_full_passes = 0;
 939   for (int i = 0; i < _verify_window_size; i++) {
 940     _verify_window[i] = nullptr;
 941   }
 942 #ifdef ASSERT
 943   // Verify that all modified nodes are on _worklist
 944   Unique_Node_List* modified_list = C->modified_nodes();
 945   while (modified_list != nullptr && modified_list->size()) {
 946     Node* n = modified_list->pop();
 947     if (!n->is_Con() && !_worklist.member(n)) {
 948       n->dump();
 949       fatal("modified node is not on IGVN._worklist");
 950     }
 951   }
 952 #endif
 953 }
 954 
 955 void PhaseIterGVN::verify_PhaseIterGVN() {
 956 #ifdef ASSERT
 957   // Verify nodes with changed inputs.
 958   Unique_Node_List* modified_list = C->modified_nodes();
 959   while (modified_list != nullptr && modified_list->size()) {
 960     Node* n = modified_list->pop();
 961     if (!n->is_Con()) { // skip Con nodes
 962       n->dump();
 963       fatal("modified node was not processed by IGVN.transform_old()");
 964     }
 965   }
 966 #endif
 967 
 968   C->verify_graph_edges();
 969   if (is_verify_def_use() && PrintOpto) {
 970     if (_verify_counter == _verify_full_passes) {
 971       tty->print_cr("VerifyIterativeGVN: %d transforms and verify passes",
 972                     (int) _verify_full_passes);
 973     } else {
 974       tty->print_cr("VerifyIterativeGVN: %d transforms, %d full verify passes",
 975                   (int) _verify_counter, (int) _verify_full_passes);
 976     }
 977   }
 978 
 979 #ifdef ASSERT
 980   if (modified_list != nullptr) {
 981     while (modified_list->size() > 0) {
 982       Node* n = modified_list->pop();
 983       n->dump();
 984       assert(false, "VerifyIterativeGVN: new modified node was added");
 985     }
 986   }
 987 
 988   verify_optimize();
 989 #endif
 990 }
 991 #endif /* PRODUCT */
 992 
 993 #ifdef ASSERT
 994 /**
 995  * Dumps information that can help to debug the problem. A debug
 996  * build fails with an assert.
 997  */
 998 void PhaseIterGVN::dump_infinite_loop_info(Node* n, const char* where) {
 999   n->dump(4);
1000   _worklist.dump();
1001   assert(false, "infinite loop in %s", where);
1002 }
1003 
1004 /**
1005  * Prints out information about IGVN if the 'verbose' option is used.
1006  */
1007 void PhaseIterGVN::trace_PhaseIterGVN_verbose(Node* n, int num_processed) {
1008   if (TraceIterativeGVN && Verbose) {
1009     tty->print("  Pop ");
1010     n->dump();
1011     if ((num_processed % 100) == 0) {
1012       _worklist.print_set();
1013     }
1014   }
1015 }
1016 #endif /* ASSERT */
1017 
1018 void PhaseIterGVN::optimize() {
1019   DEBUG_ONLY(uint num_processed  = 0;)
1020   NOT_PRODUCT(init_verifyPhaseIterGVN();)
1021   NOT_PRODUCT(C->reset_igv_phase_iter(PHASE_AFTER_ITER_GVN_STEP);)
1022   C->print_method(PHASE_BEFORE_ITER_GVN, 3);
1023   if (StressIGVN) {
1024     shuffle_worklist();
1025   }
1026 
1027   uint loop_count = 0;
1028   // Pull from worklist and transform the node. If the node has changed,
1029   // update edge info and put uses on worklist.
1030   while(_worklist.size()) {
1031     if (C->check_node_count(NodeLimitFudgeFactor * 2, "Out of nodes")) {
1032       C->print_method(PHASE_AFTER_ITER_GVN, 3);
1033       return;
1034     }
1035     Node* n  = _worklist.pop();
1036     if (loop_count >= K * C->live_nodes()) {
1037       DEBUG_ONLY(dump_infinite_loop_info(n, "PhaseIterGVN::optimize");)
1038       C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize");
1039       C->print_method(PHASE_AFTER_ITER_GVN, 3);
1040       return;
1041     }
1042     DEBUG_ONLY(trace_PhaseIterGVN_verbose(n, num_processed++);)
1043     if (n->outcnt() != 0) {
1044       NOT_PRODUCT(const Type* oldtype = type_or_null(n));
1045       // Do the transformation
1046       Node* nn = transform_old(n);
1047       NOT_PRODUCT(trace_PhaseIterGVN(n, nn, oldtype);)
1048     } else if (!n->is_top()) {
1049       remove_dead_node(n);
1050     }
1051     loop_count++;
1052   }
1053   NOT_PRODUCT(verify_PhaseIterGVN();)
1054   C->print_method(PHASE_AFTER_ITER_GVN, 3);
1055 }
1056 
1057 #ifdef ASSERT
1058 void PhaseIterGVN::verify_optimize() {
1059   if (is_verify_Value()) {
1060     ResourceMark rm;
1061     Unique_Node_List worklist;
1062     bool failure = false;
1063     // BFS all nodes, starting at root
1064     worklist.push(C->root());
1065     for (uint j = 0; j < worklist.size(); ++j) {
1066       Node* n = worklist.at(j);
1067       failure |= verify_node_value(n);
1068       // traverse all inputs and outputs
1069       for (uint i = 0; i < n->req(); i++) {
1070         if (n->in(i) != nullptr) {
1071           worklist.push(n->in(i));
1072         }
1073       }
1074       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1075         worklist.push(n->fast_out(i));
1076       }
1077     }
1078     // If we get this assert, check why the reported nodes were not processed again in IGVN.
1079     // We should either make sure that these nodes are properly added back to the IGVN worklist
1080     // in PhaseIterGVN::add_users_to_worklist to update them again or add an exception
1081     // in the verification code above if that is not possible for some reason (like Load nodes).
1082     assert(!failure, "Missed optimization opportunity in PhaseIterGVN");
1083   }
1084 }
1085 
1086 // Check that type(n) == n->Value(), return true if we have a failure.
1087 // We have a list of exceptions, see detailed comments in code.
1088 // (1) Integer "widen" changes, but the range is the same.
1089 // (2) LoadNode performs deep traversals. Load is not notified for changes far away.
1090 // (3) CmpPNode performs deep traversals if it compares oopptr. CmpP is not notified for changes far away.
1091 bool PhaseIterGVN::verify_node_value(Node* n) {
 1092   // If we assert inside type(n), because the type is still null, then maybe
1093   // the node never went through gvn.transform, which would be a bug.
1094   const Type* told = type(n);
1095   const Type* tnew = n->Value(this);
1096   if (told == tnew) {
1097     return false;
1098   }
1099   // Exception (1)
1100   // Integer "widen" changes, but range is the same.
1101   if (told->isa_integer(tnew->basic_type()) != nullptr) { // both either int or long
1102     const TypeInteger* t0 = told->is_integer(tnew->basic_type());
1103     const TypeInteger* t1 = tnew->is_integer(tnew->basic_type());
1104     if (t0->lo_as_long() == t1->lo_as_long() &&
1105         t0->hi_as_long() == t1->hi_as_long()) {
1106       return false; // ignore integer widen
1107     }
1108   }
1109   // Exception (2)
1110   // LoadNode performs deep traversals. Load is not notified for changes far away.
1111   if (n->is_Load() && !told->singleton()) {
1112     // MemNode::can_see_stored_value looks up through many memory nodes,
1113     // which means we would need to notify modifications from far up in
1114     // the inputs all the way down to the LoadNode. We don't do that.
1115     return false;
1116   }
1117   // Exception (3)
1118   // CmpPNode performs deep traversals if it compares oopptr. CmpP is not notified for changes far away.
1119   if (n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) {
1120     // SubNode::Value
1121     // CmpPNode::sub
1122     // MemNode::detect_ptr_independence
1123     // MemNode::all_controls_dominate
1124     // We find all controls of a pointer load, and see if they dominate the control of
1125     // an allocation. If they all dominate, we know the allocation is after (independent)
1126     // of the pointer load, and we can say the pointers are different. For this we call
1127     // n->dominates(sub, nlist) to check if controls n of the pointer load dominate the
 1128     // control sub of the allocation. The problem is that dominates sometimes answers
 1129     // false conservatively, only to determine later that it is indeed true. Loops with
 1130     // Region heads can lead to giving up, whereas LoopNodes can be skipped more easily,
 1131     // so the traversal becomes more powerful. This is difficult to remedy; we would have
1132     // to notify the CmpP of CFG updates. Luckily, we recompute CmpP::Value during CCP
1133     // after loop-opts, so that should take care of many of these cases.
1134     return false;
1135   }
1136   tty->cr();
1137   tty->print_cr("Missed Value optimization:");
1138   n->dump_bfs(3, nullptr, "");
1139   tty->print_cr("Current type:");
1140   told->dump_on(tty);
1141   tty->cr();
1142   tty->print_cr("Optimized type:");
1143   tnew->dump_on(tty);
1144   tty->cr();
1145   return true;
1146 }
1147 #endif
1148 
1149 /**
1150  * Register a new node with the optimizer.  Update the types array, the def-use
1151  * info.  Put on worklist.
1152  */
1153 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1154   set_type_bottom(n);
1155   _worklist.push(n);
1156   if (orig != nullptr)  C->copy_node_notes_to(n, orig);
1157   return n;
1158 }
1159 
1160 //------------------------------transform--------------------------------------
1161 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1162 Node *PhaseIterGVN::transform( Node *n ) {
1163   // If brand new node, make space in type array, and give it a type.
1164   ensure_type_or_null(n);
1165   if (type_or_null(n) == nullptr) {
1166     set_type_bottom(n);
1167   }
1168 
1169   if (_delay_transform) {
1170     // Add the node to the worklist but don't optimize for now
1171     _worklist.push(n);
1172     return n;
1173   }
1174 
1175   return transform_old(n);
1176 }
1177 
1178 Node *PhaseIterGVN::transform_old(Node* n) {
1179   NOT_PRODUCT(set_transforms());
1180   // Remove 'n' from hash table in case it gets modified
1181   _table.hash_delete(n);
1182 #ifdef ASSERT
1183   if (is_verify_def_use()) {
1184     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1185   }
1186 #endif
1187 
1188   // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
1189   if (n->is_Cmp()) {
1190     add_users_to_worklist(n);
1191   }
1192 
1193   // Apply the Ideal call in a loop until it no longer applies
1194   Node* k = n;
1195   DEBUG_ONLY(dead_loop_check(k);)
1196   DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
1197   C->remove_modified_node(k);
1198   Node* i = apply_ideal(k, /*can_reshape=*/true);
1199   assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
1200 #ifndef PRODUCT
1201   verify_step(k);
1202 #endif
1203 
1204   DEBUG_ONLY(uint loop_count = 1;)
1205   while (i != nullptr) {
1206 #ifdef ASSERT
1207     if (loop_count >= K + C->live_nodes()) {
1208       dump_infinite_loop_info(i, "PhaseIterGVN::transform_old");
1209     }
1210 #endif
1211     assert((i->_idx >= k->_idx) || i->is_top(), "Idealize should return new nodes, use Identity to return old nodes");
1212     // Made a change; put users of original Node on worklist
1213     add_users_to_worklist(k);
1214     // Replacing root of transform tree?
1215     if (k != i) {
1216       // Make users of old Node now use new.
1217       subsume_node(k, i);
1218       k = i;
1219     }
1220     DEBUG_ONLY(dead_loop_check(k);)
1221     // Try idealizing again
1222     DEBUG_ONLY(is_new = (k->outcnt() == 0);)
1223     C->remove_modified_node(k);
1224     i = apply_ideal(k, /*can_reshape=*/true);
1225     assert(i != k || is_new || (i->outcnt() > 0), "don't return dead nodes");
1226 #ifndef PRODUCT
1227     verify_step(k);
1228 #endif
1229     DEBUG_ONLY(loop_count++;)
1230   }
1231 
1232   // If brand new node, make space in type array.
1233   ensure_type_or_null(k);
1234 
1235   // See what kind of values 'k' takes on at runtime
1236   const Type* t = k->Value(this);
1237   assert(t != nullptr, "value sanity");
1238 
1239   // Since I just called 'Value' to compute the set of run-time values
1240   // for this Node, and 'Value' is non-local (and therefore expensive) I'll
1241   // cache Value.  Later requests for the local phase->type of this Node can
1242   // use the cached Value instead of suffering with 'bottom_type'.
1243   if (type_or_null(k) != t) {
1244 #ifndef PRODUCT
1245     inc_new_values();
1246     set_progress();
1247 #endif
1248     set_type(k, t);
1249     // If k is a TypeNode, capture any more-precise type permanently into Node
1250     k->raise_bottom_type(t);
1251     // Move users of node to worklist
1252     add_users_to_worklist(k);
1253   }
1254   // If 'k' computes a constant, replace it with a constant
1255   if (t->singleton() && !k->is_Con()) {
1256     NOT_PRODUCT(set_progress();)
1257     Node* con = makecon(t);     // Make a constant
1258     add_users_to_worklist(k);
1259     subsume_node(k, con);       // Everybody using k now uses con
1260     return con;
1261   }
1262 
1263   // Now check for Identities
1264   i = k->Identity(this);      // Look for a nearby replacement
1265   if (i != k) {                // Found? Return replacement!
1266     NOT_PRODUCT(set_progress();)
1267     add_users_to_worklist(k);
1268     subsume_node(k, i);       // Everybody using k now uses i
1269     return i;
1270   }
1271 
1272   // Global Value Numbering
1273   i = hash_find_insert(k);      // Check for pre-existing node
1274   if (i && (i != k)) {
1275     // Return the pre-existing node if it isn't dead
1276     NOT_PRODUCT(set_progress();)
1277     add_users_to_worklist(k);
1278     subsume_node(k, i);       // Everybody using k now uses i
1279     return i;
1280   }
1281 
1282   // Return Idealized original
1283   return k;
1284 }
1285 
1286 //---------------------------------saturate------------------------------------
1287 const Type* PhaseIterGVN::saturate(const Type* new_type, const Type* old_type,
1288                                    const Type* limit_type) const {
1289   return new_type->narrow(old_type);
1290 }
1291 
1292 //------------------------------remove_globally_dead_node----------------------
1293 // Kill a globally dead Node.  All uses are also globally dead and are
1294 // aggressively trimmed.
1295 void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
1296   enum DeleteProgress {
1297     PROCESS_INPUTS,
1298     PROCESS_OUTPUTS
1299   };
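        // Nodes are deleted iteratively with an explicit work stack rather than by
        // recursion, so long chains of dying nodes cannot overflow the native stack.
        // Each stack entry records whether we are still disconnecting the node's
        // inputs (PROCESS_INPUTS) or already removing its outputs (PROCESS_OUTPUTS).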
1300   ResourceMark rm;
1301   Node_Stack stack(32);
1302   stack.push(dead, PROCESS_INPUTS);
1303 
1304   while (stack.is_nonempty()) {
1305     dead = stack.node();
1306     if (dead->Opcode() == Op_SafePoint) {
1307       dead->as_SafePoint()->disconnect_from_root(this);
1308     }
1309     uint progress_state = stack.index();
1310     assert(dead != C->root(), "killing root, eh?");
1311     assert(!dead->is_top(), "add check for top when pushing");
1312     NOT_PRODUCT( set_progress(); )
1313     if (progress_state == PROCESS_INPUTS) {
1314       // After following inputs, continue to outputs
1315       stack.set_index(PROCESS_OUTPUTS);
 1316     if (!dead->is_Con()) { // Don't kill constants, just their uses
1317         bool recurse = false;
1318         // Remove from hash table
1319         _table.hash_delete( dead );
1320         // Smash all inputs to 'dead', isolating him completely
1321         for (uint i = 0; i < dead->req(); i++) {
1322           Node *in = dead->in(i);
1323           if (in != nullptr && in != C->top()) {  // Points to something?
1324             int nrep = dead->replace_edge(in, nullptr, this);  // Kill edges
1325             assert((nrep > 0), "sanity");
1326             if (in->outcnt() == 0) { // Made input go dead?
1327               stack.push(in, PROCESS_INPUTS); // Recursively remove
1328               recurse = true;
1329             } else if (in->outcnt() == 1 &&
1330                        in->has_special_unique_user()) {
1331               _worklist.push(in->unique_out());
1332             } else if (in->outcnt() <= 2 && dead->is_Phi()) {
1333               if (in->Opcode() == Op_Region) {
1334                 _worklist.push(in);
1335               } else if (in->is_Store()) {
1336                 DUIterator_Fast imax, i = in->fast_outs(imax);
1337                 _worklist.push(in->fast_out(i));
1338                 i++;
1339                 if (in->outcnt() == 2) {
1340                   _worklist.push(in->fast_out(i));
1341                   i++;
1342                 }
1343                 assert(!(i < imax), "sanity");
1344               }
1345             } else {
1346               BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(this, in);
1347             }
1348             if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
1349                 in->is_Proj() && in->in(0) != nullptr && in->in(0)->is_Initialize()) {
1350               // A Load that directly follows an InitializeNode is
1351               // going away. The Stores that follow are candidates
1352               // again to be captured by the InitializeNode.
1353               for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
1354                 Node *n = in->fast_out(j);
1355                 if (n->is_Store()) {
1356                   _worklist.push(n);
1357                 }
1358               }
1359             }
1360           } // if (in != nullptr && in != C->top())
1361         } // for (uint i = 0; i < dead->req(); i++)
1362         if (recurse) {
1363           continue;
1364         }
1365       } // if (!dead->is_Con())
1366     } // if (progress_state == PROCESS_INPUTS)
1367 
1368     // Aggressively kill globally dead uses
1369     // (Rather than pushing all the outs at once, we push one at a time,
1370     // plus the parent to resume later, because of the indefinite number
1371     // of edge deletions per loop trip.)
1372     if (dead->outcnt() > 0) {
1373       // Recursively remove output edges
1374       stack.push(dead->raw_out(0), PROCESS_INPUTS);
1375     } else {
1376       // Finished disconnecting all input and output edges.
1377       stack.pop();
1378       // Remove dead node from iterative worklist
1379       _worklist.remove(dead);
1380       C->remove_useless_node(dead);
1381     }
1382   } // while (stack.is_nonempty())
1383 }
1384 
1385 //------------------------------subsume_node-----------------------------------
1386 // Remove users from node 'old' and add them to node 'nn'.
1387 void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
1388   if (old->Opcode() == Op_SafePoint) {
1389     old->as_SafePoint()->disconnect_from_root(this);
1390   }
1391   assert( old != hash_find(old), "should already been removed" );
1392   assert( old != C->top(), "cannot subsume top node");
1393   // Copy debug or profile information to the new version:
1394   C->copy_node_notes_to(nn, old);
1395   // Move users of node 'old' to node 'nn'
1396   for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
1397     Node* use = old->last_out(i);  // for each use...
1398     // use might need re-hashing (but it won't if it's a new node)
1399     rehash_node_delayed(use);
1400     // Update use-def info as well
1401     // We remove all occurrences of old within use->in,
1402     // so as to avoid rehashing any node more than once.
1403     // The hash table probe swamps any outer loop overhead.
1404     uint num_edges = 0;
1405     for (uint jmax = use->len(), j = 0; j < jmax; j++) {
1406       if (use->in(j) == old) {
1407         use->set_req(j, nn);
1408         ++num_edges;
1409       }
1410     }
1411     i -= num_edges;    // we deleted 1 or more copies of this edge
1412   }
1413 
1414   // Search for instance field data PhiNodes in the same region pointing to the old
1415   // memory PhiNode and update their instance memory ids to point to the new node.
1416   if (old->is_Phi() && old->as_Phi()->type()->has_memory() && old->in(0) != nullptr) {
1417     Node* region = old->in(0);
1418     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1419       PhiNode* phi = region->fast_out(i)->isa_Phi();
1420       if (phi != nullptr && phi->inst_mem_id() == (int)old->_idx) {
1421         phi->set_inst_mem_id((int)nn->_idx);
1422       }
1423     }
1424   }
1425 
1426   // Smash all inputs to 'old', isolating him completely
1427   Node *temp = new Node(1);
1428   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1429   remove_dead_node( old );
1430   temp->del_req(0);         // Yank bogus edge
1431   if (nn != nullptr && nn->outcnt() == 0) {
1432     _worklist.push(nn);
1433   }
1434 #ifndef PRODUCT
1435   if (is_verify_def_use()) {
1436     for ( int i = 0; i < _verify_window_size; i++ ) {
1437       if ( _verify_window[i] == old )
1438         _verify_window[i] = nn;
1439     }
1440   }
1441 #endif
1442   temp->destruct(this);     // reuse the _idx of this little guy
1443 }
1444 
1445 void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
1446   assert(n != nullptr, "sanity");
1447   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1448     Node* u = n->fast_out(i);
1449     if (u != n) {
1450       rehash_node_delayed(u);
1451       int nb = u->replace_edge(n, m);
1452       --i, imax -= nb;
1453     }
1454   }
1455   assert(n->outcnt() == 0, "all uses must be deleted");
1456 }
1457 
1458 //------------------------------add_users_to_worklist--------------------------
1459 void PhaseIterGVN::add_users_to_worklist0(Node* n, Unique_Node_List& worklist) {
1460   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1461     worklist.push(n->fast_out(i));  // Push on worklist
1462   }
1463 }
1464 
1465 // Return the counted loop's induction Phi if 'cmp' is used as a counted loop
1466 // exit condition that compares the induction variable with n (the loop limit)
1467 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1468   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1469     Node* bol = cmp->fast_out(i);
1470     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1471       Node* iff = bol->fast_out(i2);
1472       if (iff->is_BaseCountedLoopEnd()) {
1473         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1474         if (cle->limit() == n) {
1475           PhiNode* phi = cle->phi();
1476           if (phi != nullptr) {
1477             return phi;
1478           }
1479         }
1480       }
1481     }
1482   }
1483   return nullptr;
1484 }
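// A rough sketch of the shape matched above (simplified; names illustrative):
//
//    trip count     n (== cle->limit())
//          \         /
//         CmpI/CmpL (cmp)
//              |
//            Bool
//              |
//    BaseCountedLoopEnd (cle)   -->  cle->phi() is the Phi returned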
1485 
1486 void PhaseIterGVN::add_users_to_worklist(Node *n) {
1487   add_users_to_worklist0(n, _worklist);
1488 
1489   Unique_Node_List& worklist = _worklist;
1490   // Move users of node to worklist
1491   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1492     Node* use = n->fast_out(i); // Get use
1493     add_users_of_use_to_worklist(n, use, worklist);
1494   }
1495 }
1496 
1497 void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_List& worklist) {
1498   if(use->is_Multi() ||      // Multi-definer?  Push projs on worklist
1499       use->is_Store() )       // Enable store/load same address
1500     add_users_to_worklist0(use, worklist);
1501 
1502   // If we changed the receiver type of a call, we need to revisit
1503   // the Catch node following the call.  It's looking for a non-null
1504   // receiver to know when to enable the regular fall-through path
1505   // in addition to the null-pointer exception path.
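  // A simplified sketch of the nodes involved (only the relevant projection and
  // catch path is shown; illustrative, not exhaustive):
  //
  //   CallDynamicJava --> Proj(Control) --> Catch --> CatchProj (fall-through)
  //                                               \-> CatchProj (exception)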
1506   if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
1507     Node* p = use->as_CallDynamicJava()->proj_out_or_null(TypeFunc::Control);
1508     if (p != nullptr) {
1509       add_users_to_worklist0(p, worklist);
1510     }
1511   }
1512 
1513   // AndLNode::Ideal folds GraphKit::mark_word_test patterns. Give it a chance to run.
1514   if (n->is_Load() && use->is_Phi()) {
1515     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1516       Node* u = use->fast_out(i);
1517       if (u->Opcode() == Op_AndL) {
1518         worklist.push(u);
1519       }
1520     }
1521   }
1522 
1523   uint use_op = use->Opcode();
1524   if(use->is_Cmp()) {       // Enable CMP/BOOL optimization
1525     add_users_to_worklist0(use, worklist); // Put Bool on worklist
1526     if (use->outcnt() > 0) {
1527       Node* bol = use->raw_out(0);
1528       if (bol->outcnt() > 0) {
1529         Node* iff = bol->raw_out(0);
1530         if (iff->outcnt() == 2) {
1531           // Look for the 'is_x2logic' pattern: "x ? 0 : 1" and put the
1532           // Phi merging either 0 or 1 onto the worklist
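          // Rough sketch of the shape probed below (fan-out simplified,
          // illustrative only):
          //
          //        Cmp (use)
          //          |
          //        Bool
          //          |
          //         If
          //        /  \
          //   IfProj    IfProj
          //        \    /
          //        Region
          //          |
          //     Phi(..., 0, 1)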
1533           Node* ifproj0 = iff->raw_out(0);
1534           Node* ifproj1 = iff->raw_out(1);
1535           if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
1536             Node* region0 = ifproj0->raw_out(0);
1537             Node* region1 = ifproj1->raw_out(0);
1538             if( region0 == region1 )
1539               add_users_to_worklist0(region0, worklist);
1540           }
1541         }
1542       }
1543     }
1544     if (use_op == Op_CmpI || use_op == Op_CmpL) {
1545       Node* phi = countedloop_phi_from_cmp(use->as_Cmp(), n);
1546       if (phi != nullptr) {
1547         // Input to the cmp of a loop exit check has changed, thus
1548         // the loop limit may have changed, which can then change the
1549         // range values of the trip-count Phi.
1550         worklist.push(phi);
1551       }
1552     }
1553     if (use_op == Op_CmpI) {
1554       Node* cmp = use;
1555       Node* in1 = cmp->in(1);
1556       Node* in2 = cmp->in(2);
1557       // Notify CmpI / If pattern from CastIINode::Value (left pattern).
1558       // Must also notify if in1 is modified and possibly turns into X (right pattern).
1559       //
1560       // in1  in2                   in1  in2
1561       //  |    |                     |    |
1562       //  +--- | --+                 |    |
1563       //  |    |   |                 |    |
1564       // CmpINode  |                CmpINode
1565       //    |      |                   |
1566       // BoolNode  |                BoolNode
1567       //    |      |        OR         |
1568       //  IfNode   |                 IfNode
1569       //    |      |                   |
1570       //  IfProj   |                 IfProj   X
1571       //    |      |                   |      |
1572       //   CastIINode                 CastIINode
1573       //
1574       if (in1 != in2) { // if they are equal, the CmpI can fold them away
1575         if (in1 == n) {
1576           // in1 modified -> could turn into X -> do traversal based on right pattern.
1577           for (DUIterator_Fast i2max, i2 = cmp->fast_outs(i2max); i2 < i2max; i2++) {
1578             Node* bol = cmp->fast_out(i2); // For each Bool
1579             if (bol->is_Bool()) {
1580               for (DUIterator_Fast i3max, i3 = bol->fast_outs(i3max); i3 < i3max; i3++) {
1581                 Node* iff = bol->fast_out(i3); // For each If
1582                 if (iff->is_If()) {
1583                   for (DUIterator_Fast i4max, i4 = iff->fast_outs(i4max); i4 < i4max; i4++) {
1584                     Node* if_proj = iff->fast_out(i4); // For each IfProj
1585                     assert(if_proj->is_IfProj(), "If only has IfTrue and IfFalse as outputs");
1586                     for (DUIterator_Fast i5max, i5 = if_proj->fast_outs(i5max); i5 < i5max; i5++) {
1587                       Node* castii = if_proj->fast_out(i5); // For each CastII
1588                       if (castii->is_CastII() &&
1589                           castii->as_CastII()->carry_dependency()) {
1590                         worklist.push(castii);
1591                       }
1592                     }
1593                   }
1594                 }
1595               }
1596             }
1597           }
1598         } else {
1599           // Only in2 modified -> can assume X == in2 (left pattern).
1600           assert(n == in2, "only in2 modified");
1601           // Find all CastII with input in1.
1602           for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
1603             Node* castii = in1->fast_out(j);
1604             if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
1605               // Find If.
1606               if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
1607                 Node* ifnode = castii->in(0)->in(0);
1608                 // Check that the If connects to the cmp
1609                 if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
1610                   worklist.push(castii);
1611                 }
1612               }
1613             }
1614           }
1615         }
1616       }
1617     }
1618   }
1619 
1620   // Inline type nodes can have other inline types as users. If an input gets
1621   // updated, make sure that inline type users get a chance for optimization.
1622   if (use->is_InlineType()) {
1623     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1624       Node* u = use->fast_out(i2);
1625       if (u->is_InlineType())
1626         worklist.push(u);
1627     }
1628   }
1629   // If changed Cast input, notify down for Phi, Sub, and Xor - all do "uncast"
1630   // Patterns:
1631   // ConstraintCast+ -> Sub
1632   // ConstraintCast+ -> Phi
1633   // ConstraintCast+ -> Xor
1634   if (use->is_ConstraintCast()) {
1635     auto push_the_uses_to_worklist = [&](Node* n){
1636       if (n->is_Phi() || n->is_Sub() || n->Opcode() == Op_XorI || n->Opcode() == Op_XorL) {
1637         worklist.push(n);
1638       }
1639     };
1640     auto is_boundary = [](Node* n){ return !n->is_ConstraintCast(); };
1641     use->visit_uses(push_the_uses_to_worklist, is_boundary);
1642   }
1643   // If changed LShift inputs, check RShift users for useless sign-ext
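  // (e.g. the sign-extension idiom "(x << 16) >> 16"; once the left shift's input
  // is known to fit, the RShiftI may become redundant -- illustrative example only)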
1644   if( use_op == Op_LShiftI ) {
1645     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1646       Node* u = use->fast_out(i2);
1647       if (u->Opcode() == Op_RShiftI)
1648         worklist.push(u);
1649     }
1650   }
1651   // If changed LShift inputs, check And users for shift and mask (And) operation
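  // (e.g. "(v << 2) & 3" can fold to zero because the shifted bits and the mask
  // are disjoint; see the AndI/L::Value() notes further below -- illustrative only)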
1652   if (use_op == Op_LShiftI || use_op == Op_LShiftL) {
1653     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1654       Node* u = use->fast_out(i2);
1655       if (u->Opcode() == Op_AndI || u->Opcode() == Op_AndL) {
1656         worklist.push(u);
1657       }
1658     }
1659   }
1660   // If changed AddI/SubI inputs, check CmpU for range check optimization.
1661   if (use_op == Op_AddI || use_op == Op_SubI) {
1662     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1663       Node* u = use->fast_out(i2);
1664       if (u->is_Cmp() && (u->Opcode() == Op_CmpU)) {
1665         worklist.push(u);
1666       }
1667     }
1668   }
1669   // If changed AddP inputs, check Stores for loop invariant
1670   if( use_op == Op_AddP ) {
1671     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1672       Node* u = use->fast_out(i2);
1673       if (u->is_Mem())
1674         worklist.push(u);
1675     }
1676   }
1677   // If changed initialization activity, check dependent Stores
1678   if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1679     InitializeNode* init = use->as_Allocate()->initialization();
1680     if (init != nullptr) {
1681       Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1682       if (imem != nullptr) add_users_to_worklist0(imem, worklist);
1683     }
1684   }
1685   // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
1686   // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
1687   // to guarantee the change is not missed.
1688   if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
1689     Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
1690     if (p != nullptr) {
1691       add_users_to_worklist0(p, worklist);
1692     }
1693   }
1694 
1695   if (use_op == Op_Initialize) {
1696     Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1697     if (imem != nullptr) add_users_to_worklist0(imem, worklist);
1698   }
1699   // Loading the java mirror from a Klass requires two loads and the type
1700   // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1701   //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1702   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1703   bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1704 
1705   if (use_op == Op_CastP2X) {
1706     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1707       Node* u = use->fast_out(i2);
1708       if (u->Opcode() == Op_AndX) {
1709         worklist.push(u);
1710       }
1711     }
1712   }
1713   if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1714     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1715       Node* u = use->fast_out(i2);
1716       const Type* ut = u->bottom_type();
1717       if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1718         if (has_load_barrier_nodes) {
1719           // Search for load barriers behind the load
1720           for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1721             Node* b = u->fast_out(i3);
1722             if (bs->is_gc_barrier_node(b)) {
1723               worklist.push(b);
1724             }
1725           }
1726         }
1727         worklist.push(u);
1728       }
1729     }
1730   }
1731   // Give CallStaticJavaNode::remove_useless_allocation a chance to run
1732   if (use->is_Region()) {
1733     Node* c = use;
1734     do {
1735       c = c->unique_ctrl_out_or_null();
1736     } while (c != nullptr && c->is_Region());
1737     if (c != nullptr && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
1738       worklist.push(c);
1739     }
1740   }
1741   if (use->Opcode() == Op_OpaqueZeroTripGuard) {
1742     assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
1743     if (use->outcnt() == 1) {
1744       Node* cmp = use->unique_out();
1745       worklist.push(cmp);
1746     }
1747   }
1748 }
1749 
1750 /**
1751  * Remove the speculative part of all types that we know of
1752  */
1753 void PhaseIterGVN::remove_speculative_types()  {
1754   assert(UseTypeSpeculation, "speculation is off");
1755   for (uint i = 0; i < _types.Size(); i++)  {
1756     const Type* t = _types.fast_lookup(i);
1757     if (t != nullptr) {
1758       _types.map(i, t->remove_speculative());
1759     }
1760   }
1761   _table.check_no_speculative_types();
1762 }
1763 
1764 // Check if the type of the divisor of a Div or Mod node excludes zero, i.e. no zero check is needed.
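// For instance (illustrative ranges): a divisor typed int:[1..maxint] or
// int:[minint..-1] can never be zero, so this returns true; a divisor whose
// range straddles zero, e.g. int:[-1..1], makes this return false.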
1765 bool PhaseIterGVN::no_dependent_zero_check(Node* n) const {
1766   switch (n->Opcode()) {
1767     case Op_DivI:
1768     case Op_ModI: {
1769       // Type of divisor includes 0?
1770       if (type(n->in(2)) == Type::TOP) {
1771         // 'n' is dead. Treat as if zero check is still there to avoid any further optimizations.
1772         return false;
1773       }
1774       const TypeInt* type_divisor = type(n->in(2))->is_int();
1775       return (type_divisor->_hi < 0 || type_divisor->_lo > 0);
1776     }
1777     case Op_DivL:
1778     case Op_ModL: {
1779       // Type of divisor includes 0?
1780       if (type(n->in(2)) == Type::TOP) {
1781         // 'n' is dead. Treat as if zero check is still there to avoid any further optimizations.
1782         return false;
1783       }
1784       const TypeLong* type_divisor = type(n->in(2))->is_long();
1785       return (type_divisor->_hi < 0 || type_divisor->_lo > 0);
1786     }
1787   }
1788   return true;
1789 }
1790 
1791 //=============================================================================
1792 #ifndef PRODUCT
1793 uint PhaseCCP::_total_invokes   = 0;
1794 uint PhaseCCP::_total_constants = 0;
1795 #endif
1796 //------------------------------PhaseCCP---------------------------------------
1797 // Conditional Constant Propagation, ala Wegman & Zadeck
1798 PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
1799   NOT_PRODUCT( clear_constants(); )
1800   assert( _worklist.size() == 0, "" );
1801   analyze();
1802 }
1803 
1804 #ifndef PRODUCT
1805 //------------------------------~PhaseCCP--------------------------------------
1806 PhaseCCP::~PhaseCCP() {
1807   inc_invokes();
1808   _total_constants += count_constants();
1809 }
1810 #endif
1811 
1812 
1813 #ifdef ASSERT
1814 void PhaseCCP::verify_type(Node* n, const Type* tnew, const Type* told) {
1815   if (tnew->meet(told) != tnew->remove_speculative()) {
1816     n->dump(3);
1817     tty->print("told = "); told->dump(); tty->cr();
1818     tty->print("tnew = "); tnew->dump(); tty->cr();
1819     fatal("Not monotonic");
1820   }
1821   assert(!told->isa_int() || !tnew->isa_int() || told->is_int()->_widen <= tnew->is_int()->_widen, "widen increases");
1822   assert(!told->isa_long() || !tnew->isa_long() || told->is_long()->_widen <= tnew->is_long()->_widen, "widen increases");
1823 }
1824 #endif //ASSERT
1825 
1826 // In this analysis, all types are initially set to TOP. We iteratively call Value() on all nodes of the graph until
1827 // we reach a fixed-point (i.e. no types change anymore). We start with a list that only contains the root node. Each time
1828 // a new type is set, we push all uses of that node back to the worklist (in some cases, we also push grandchildren
1829 // or nodes even further down back to the worklist because their type could change as a result of the current type
1830 // change).
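//
// Roughly, the fixed-point loop below does (pseudo-code sketch, simplified):
//
//   worklist = { root }
//   while (worklist not empty) {
//     n = pick a node from worklist
//     t = n->Value(this)
//     if (t != type(n)) { set_type(n, t); push uses of n (and, for some node
//                         kinds, grandchildren -- see push_more_uses) }
//   }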
1831 void PhaseCCP::analyze() {
1832   // Initialize all types to TOP, optimistic analysis
1833   for (uint i = 0; i < C->unique(); i++)  {
1834     _types.map(i, Type::TOP);
1835   }
1836 
1837   // CCP worklist is placed on a local arena, so that we can allow ResourceMarks on "Compile::current()->resource_arena()".
1838   // We also do not want to put the worklist on "Compile::current()->comp_arena()", as that one only gets de-allocated after
1839   // Compile is over. The local arena gets de-allocated at the end of its scope.
1840   ResourceArea local_arena(mtCompiler);
1841   Unique_Node_List worklist(&local_arena);
1842   DEBUG_ONLY(Unique_Node_List worklist_verify(&local_arena);)
1843 
1844   // Push root onto worklist
1845   worklist.push(C->root());
1846 
1847   assert(_root_and_safepoints.size() == 0, "must be empty (unused)");
1848   _root_and_safepoints.push(C->root());
1849 
1850   // Pull from worklist; compute new value; push changes out.
1851   // This loop is the meat of CCP.
1852   while (worklist.size() != 0) {
1853     Node* n = fetch_next_node(worklist);
1854     DEBUG_ONLY(worklist_verify.push(n);)
1855     if (n->is_SafePoint()) {
1856       // Make sure safepoints are processed by PhaseCCP::transform even if they are
1857       // not reachable from the bottom. Otherwise, infinite loops would be removed.
1858       _root_and_safepoints.push(n);
1859     }
1860     const Type* new_type = n->Value(this);
1861     if (new_type != type(n)) {
1862       DEBUG_ONLY(verify_type(n, new_type, type(n));)
1863       dump_type_and_node(n, new_type);
1864       set_type(n, new_type);
1865       push_child_nodes_to_worklist(worklist, n);
1866     }
1867   }
1868   DEBUG_ONLY(verify_analyze(worklist_verify);)
1869 }
1870 
1871 #ifdef ASSERT
1872 // For every node n on verify list, check if type(n) == n->Value()
1873 // We have a list of exceptions, see comments in verify_node_value.
1874 void PhaseCCP::verify_analyze(Unique_Node_List& worklist_verify) {
1875   bool failure = false;
1876   while (worklist_verify.size()) {
1877     Node* n = worklist_verify.pop();
1878     failure |= verify_node_value(n);
1879   }
1880   // If we get this assert, check why the reported nodes were not processed again in CCP.
1881   // We should either make sure that these nodes are properly added back to the CCP worklist
1882   // in PhaseCCP::push_child_nodes_to_worklist() to update their type or add an exception
1883   // in the verification code above if that is not possible for some reason (like Load nodes).
1884   assert(!failure, "PhaseCCP not at fixpoint: analysis result may be unsound.");
1885 }
1886 #endif
1887 
1888 // Fetch next node from worklist to be examined in this iteration.
1889 Node* PhaseCCP::fetch_next_node(Unique_Node_List& worklist) {
1890   if (StressCCP) {
1891     return worklist.remove(C->random() % worklist.size());
1892   } else {
1893     return worklist.pop();
1894   }
1895 }
1896 
1897 #ifndef PRODUCT
1898 void PhaseCCP::dump_type_and_node(const Node* n, const Type* t) {
1899   if (TracePhaseCCP) {
1900     t->dump();
1901     do {
1902       tty->print("\t");
1903     } while (tty->position() < 16);
1904     n->dump();
1905   }
1906 }
1907 #endif
1908 
1909 // We need to propagate the type change of 'n' to all its uses. Depending on the kind of node, additional nodes
1910 // (grandchildren or even further down) need to be revisited as their types could also be improved as a result
1911 // of the new type of 'n'. Push these nodes to the worklist.
1912 void PhaseCCP::push_child_nodes_to_worklist(Unique_Node_List& worklist, Node* n) const {
1913   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1914     Node* use = n->fast_out(i);
1915     push_if_not_bottom_type(worklist, use);
1916     push_more_uses(worklist, n, use);
1917   }
1918 }
1919 
1920 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
1921   if (n->bottom_type() != type(n)) {
1922     worklist.push(n);
1923   }
1924 }
1925 
1926 // For some nodes, we need to propagate the type change to grandchildren or even further down.
1927 // Add them back to the worklist.
1928 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
1929   push_phis(worklist, use);
1930   push_catch(worklist, use);
1931   push_cmpu(worklist, use);
1932   push_counted_loop_phi(worklist, parent, use);
1933   push_cast(worklist, use);
1934   push_loadp(worklist, use);
1935   push_and(worklist, parent, use);
1936   push_cast_ii(worklist, parent, use);
1937   push_opaque_zero_trip_guard(worklist, use);
1938 }
1939 
1940 
1941 // We must recheck Phis too if use is a Region.
1942 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
1943   if (use->is_Region()) {
1944     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1945       push_if_not_bottom_type(worklist, use->fast_out(i));
1946     }
1947   }
1948 }
1949 
1950 // If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
1951 // non-null receiver to know when to enable the regular fall-through path in addition to the null-pointer exception path.
1952 // The same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
1953 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {
1954   if (use->is_Call()) {
1955     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1956       Node* proj = use->fast_out(i);
1957       if (proj->is_Proj() && proj->as_Proj()->_con == TypeFunc::Control) {
1958         Node* catch_node = proj->find_out_with(Op_Catch);
1959         if (catch_node != nullptr) {
1960           worklist.push(catch_node);
1961         }
1962       }
1963     }
1964   }
1965 }
1966 
1967 // CmpU nodes can get their type information from two nodes up in the graph (instead of from the nodes immediately
1968 // above). Make sure they are added to the worklist if nodes they depend on are updated since they could be missed
1969 // and get wrong types otherwise.
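// Illustrative sketch: in a range-check shape like CmpU(AddI(i, #k), length),
// CmpU::Value() may consult the type of 'i' itself (two levels up) rather than
// only the immediate AddI, so a type change of 'i' must re-enqueue the CmpU too.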
1970 void PhaseCCP::push_cmpu(Unique_Node_List& worklist, const Node* use) const {
1971   uint use_op = use->Opcode();
1972   if (use_op == Op_AddI || use_op == Op_SubI) {
1973     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1974       Node* cmpu = use->fast_out(i);
1975       const uint cmpu_opcode = cmpu->Opcode();
1976       if (cmpu_opcode == Op_CmpU || cmpu_opcode == Op_CmpU3) {
1977         // Got a CmpU or CmpU3 which might need the new type information from node n.
1978         push_if_not_bottom_type(worklist, cmpu);
1979       }
1980     }
1981   }
1982 }
1983 
1984 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
1985 // See PhiNode::Value().
1986 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
1987   uint use_op = use->Opcode();
1988   if (use_op == Op_CmpI || use_op == Op_CmpL) {
1989     PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
1990     if (phi != nullptr) {
1991       worklist.push(phi);
1992     }
1993   }
1994 }
1995 
1996 void PhaseCCP::push_cast(Unique_Node_List& worklist, const Node* use) {
1997   uint use_op = use->Opcode();
1998   if (use_op == Op_CastP2X) {
1999     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
2000       Node* u = use->fast_out(i2);
2001       if (u->Opcode() == Op_AndX) {
2002         worklist.push(u);
2003       }
2004     }
2005   }
2006 }
2007 
2008 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
2009 // See LoadNode::Value().
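// Same chain as sketched for IGVN above:
//   LoadBarrier?(LoadP(LoadP(AddP(klass, #java_mirror))))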
2010 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
2011   BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
2012   bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
2013 
2014   if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
2015     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
2016       Node* loadp = use->fast_out(i);
2017       const Type* ut = loadp->bottom_type();
2018       if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
2019         if (has_load_barrier_nodes) {
2020           // Search for load barriers behind the load
2021           push_load_barrier(worklist, barrier_set, loadp);
2022         }
2023         worklist.push(loadp);
2024       }
2025     }
2026   }
2027 }
2028 
2029 void PhaseCCP::push_load_barrier(Unique_Node_List& worklist, const BarrierSetC2* barrier_set, const Node* use) {
2030   for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
2031     Node* barrier_node = use->fast_out(i);
2032     if (barrier_set->is_gc_barrier_node(barrier_node)) {
2033       worklist.push(barrier_node);
2034     }
2035   }
2036 }
2037 
2038 // AndI/L::Value() optimizes patterns similar to (v << 2) & 3 to zero if they are bitwise disjoint.
2039 // Add the AndI/L nodes back to the worklist to re-apply Value() in case the shift value changed.
2040 // Pattern: parent -> LShift (use) -> (ConstraintCast | ConvI2L)* -> And
2041 void PhaseCCP::push_and(Unique_Node_List& worklist, const Node* parent, const Node* use) const {
2042   uint use_op = use->Opcode();
2043   if ((use_op == Op_LShiftI || use_op == Op_LShiftL)
2044       && use->in(2) == parent) { // is shift value (right-hand side of LShift)
2045     auto push_and_uses_to_worklist = [&](Node* n){
2046       uint opc = n->Opcode();
2047       if (opc == Op_AndI || opc == Op_AndL) {
2048         push_if_not_bottom_type(worklist, n);
2049       }
2050     };
2051     auto is_boundary = [](Node* n) {
2052       return !(n->is_ConstraintCast() || n->Opcode() == Op_ConvI2L);
2053     };
2054     use->visit_uses(push_and_uses_to_worklist, is_boundary);
2055   }
2056 }
2057 
2058 // CastII::Value() optimizes CmpI/If patterns if the right input of the CmpI has a constant type. If the CastII input is
2059 // the same node as the left input into the CmpI node, the type of the CastII node can be improved accordingly. Add the
2060 // CastII node back to the worklist to re-apply Value() to either not miss this optimization or to undo it because it
2061 // cannot be applied anymore. We could have optimized the type of the CastII before but now the type of the right input
2062 // of the CmpI (i.e. 'parent') is no longer constant. The type of the CastII must be widened in this case.
2063 void PhaseCCP::push_cast_ii(Unique_Node_List& worklist, const Node* parent, const Node* use) const {
2064   if (use->Opcode() == Op_CmpI && use->in(2) == parent) {
2065     Node* other_cmp_input = use->in(1);
2066     for (DUIterator_Fast imax, i = other_cmp_input->fast_outs(imax); i < imax; i++) {
2067       Node* cast_ii = other_cmp_input->fast_out(i);
2068       if (cast_ii->is_CastII()) {
2069         push_if_not_bottom_type(worklist, cast_ii);
2070       }
2071     }
2072   }
2073 }
2074 
2075 void PhaseCCP::push_opaque_zero_trip_guard(Unique_Node_List& worklist, const Node* use) const {
2076   if (use->Opcode() == Op_OpaqueZeroTripGuard) {
2077     push_if_not_bottom_type(worklist, use->unique_out());
2078   }
2079 }
2080 
2081 //------------------------------do_transform-----------------------------------
2082 // Top level driver for the recursive transformer
2083 void PhaseCCP::do_transform() {
2084   // Transform the whole graph, starting from the root.
2085   C->set_root( transform(C->root())->as_Root() );
2086   assert( C->top(),  "missing TOP node" );
2087   assert( C->root(), "missing root" );
2088 }
2089 
2090 //------------------------------transform--------------------------------------
2091 // Walk the graph from the root (and all safepoints), applying transform_once
2092 // to each reachable node and collecting the set of still-useful nodes.
2093 Node *PhaseCCP::transform( Node *n ) {
2094   assert(n->is_Root(), "traversal must start at root");
2095   assert(_root_and_safepoints.member(n), "root (n) must be in list");
2096 
2097   ResourceMark rm;
2098   // Map: old node idx -> node after CCP (or nullptr if not yet transformed or useless).
2099   Node_List node_map;
2100   // Pre-allocate to avoid frequent realloc
2101   GrowableArray <Node *> transform_stack(C->live_nodes() >> 1);
2102   // track all visited nodes, so that we can remove the complement
2103   Unique_Node_List useful;
2104 
2105   // Initialize the traversal.
2106   // This CCP pass may prove that no exit test for a loop ever succeeds (i.e. the loop is infinite). In that case,
2107   // the logic below doesn't follow any path from Root to the loop body: there's at least one such path but it's proven
2108   // never taken (its type is TOP). As a consequence the node on the exit path that's input to Root (let's call it n) is
2109   // replaced by the top node and the inputs of that node n are not enqueued for further processing. If CCP only works
2110   // through the graph from Root, this causes the loop body to never be processed here even when it's not dead (that
2111   // is reachable from Root following its uses). To prevent that issue, transform() starts walking the graph from Root
2112   // and all safepoints.
2113   for (uint i = 0; i < _root_and_safepoints.size(); ++i) {
2114     Node* nn = _root_and_safepoints.at(i);
2115     Node* new_node = node_map[nn->_idx];
2116     assert(new_node == nullptr, "");
2117     new_node = transform_once(nn);  // Check for constant
2118     node_map.map(nn->_idx, new_node); // Flag as having been cloned
2119     transform_stack.push(new_node); // Process children of cloned node
2120     useful.push(new_node);
2121   }
2122 
2123   while (transform_stack.is_nonempty()) {
2124     Node* clone = transform_stack.pop();
2125     uint cnt = clone->req();
2126     for( uint i = 0; i < cnt; i++ ) {          // For all inputs do
2127       Node *input = clone->in(i);
2128       if( input != nullptr ) {                 // Ignore nulls
2129         Node *new_input = node_map[input->_idx]; // Check for cloned input node
2130         if( new_input == nullptr ) {
2131           new_input = transform_once(input);   // Check for constant
2132           node_map.map( input->_idx, new_input );// Flag as having been cloned
2133           transform_stack.push(new_input);     // Process children of cloned node
2134           useful.push(new_input);
2135         }
2136         assert( new_input == clone->in(i), "insanity check");
2137       }
2138     }
2139   }
2140 
2141   // The above transformation might lead to subgraphs becoming unreachable from the
2142   // bottom while still being reachable from the top. As a result, nodes in that
2143   // subgraph are not transformed and their bottom types are not updated, leading to
2144   // an inconsistency between bottom_type() and type(). In rare cases, LoadNodes in
2145   // such a subgraph, might be re-enqueued for IGVN indefinitely by MemNode::Ideal_common
2146   // because their address type is inconsistent. Therefore, we aggressively remove
2147   // all useless nodes here even before PhaseIdealLoop::build_loop_late gets a chance
2148   // to remove them anyway.
2149   if (C->cached_top_node()) {
2150     useful.push(C->cached_top_node());
2151   }
2152   C->update_dead_node_list(useful);
2153   remove_useless_nodes(useful.member_set());
2154   _worklist.remove_useless_nodes(useful.member_set());
2155   C->disconnect_useless_nodes(useful, _worklist);
2156 
2157   Node* new_root = node_map[n->_idx];
2158   assert(new_root->is_Root(), "transformed root node must be a root node");
2159   return new_root;
2160 }
2161 
2162 //------------------------------transform_once---------------------------------
2163 // For PhaseCCP, transformation is IDENTITY unless Node computed a constant.
2164 Node *PhaseCCP::transform_once( Node *n ) {
2165   const Type *t = type(n);
2166   // Constant?  Use constant Node instead
2167   if( t->singleton() ) {
2168     Node *nn = n;               // Default is to return the original constant
2169     if( t == Type::TOP ) {
2170       // cache my top node on the Compile instance
2171       if( C->cached_top_node() == nullptr || C->cached_top_node()->in(0) == nullptr ) {
2172         C->set_cached_top_node(ConNode::make(Type::TOP));
2173         set_type(C->top(), Type::TOP);
2174       }
2175       nn = C->top();
2176     }
2177     if( !n->is_Con() ) {
2178       if( t != Type::TOP ) {
2179         nn = makecon(t);        // ConNode::make(t);
2180         NOT_PRODUCT( inc_constants(); )
2181       } else if( n->is_Region() ) { // Unreachable region
2182         // Note: nn == C->top()
2183         n->set_req(0, nullptr);     // Cut self-reference
2184         bool progress = true;
2185         uint max = n->outcnt();
2186         DUIterator i;
2187         while (progress) {
2188           progress = false;
2189           // Eagerly remove dead phis to avoid creating phi copies.
2190           for (i = n->outs(); n->has_out(i); i++) {
2191             Node* m = n->out(i);
2192             if (m->is_Phi()) {
2193               assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
2194               replace_node(m, nn);
2195               if (max != n->outcnt()) {
2196                 progress = true;
2197                 i = n->refresh_out_pos(i);
2198                 max = n->outcnt();
2199               }
2200             }
2201           }
2202         }
2203       }
2204       replace_node(n,nn);       // Update DefUse edges for new constant
2205     }
2206     return nn;
2207   }
2208 
2209   // If n is a TypeNode, capture any more-precise type permanently into the node
2210   if (t != n->bottom_type()) {
2211     hash_delete(n);             // changing bottom type may force a rehash
2212     n->raise_bottom_type(t);
2213     _worklist.push(n);          // n re-enters the hash table via the worklist
2214   }
2215 
2216   // TEMPORARY fix to ensure that 2nd GVN pass eliminates null checks
2217   switch( n->Opcode() ) {
2218   case Op_CallStaticJava:  // Give post-parse call devirtualization a chance
2219   case Op_CallDynamicJava:
2220   case Op_FastLock:        // Revisit FastLocks for lock coarsening
2221   case Op_If:
2222   case Op_CountedLoopEnd:
2223   case Op_Region:
2224   case Op_Loop:
2225   case Op_CountedLoop:
2226   case Op_Conv2B:
2227   case Op_Opaque1:
2228     _worklist.push(n);
2229     break;
2230   default:
2231     break;
2232   }
2233 
2234   return  n;
2235 }
2236 
2237 //---------------------------------saturate------------------------------------
2238 const Type* PhaseCCP::saturate(const Type* new_type, const Type* old_type,
2239                                const Type* limit_type) const {
2240   const Type* wide_type = new_type->widen(old_type, limit_type);
2241   if (wide_type != new_type) {          // did we widen?
2242     // If so, we may have widened beyond the limit type.  Clip it back down.
2243     new_type = wide_type->filter(limit_type);
2244   }
2245   return new_type;
2246 }
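// Illustrative only (made-up ranges): widening new_type int:[0..6] against
// old_type int:[0..3] may overshoot limit_type int:[0..100]; the filter call
// above then clips the widened type back inside int:[0..100].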
2247 
2248 //------------------------------print_statistics-------------------------------
2249 #ifndef PRODUCT
2250 void PhaseCCP::print_statistics() {
2251   tty->print_cr("CCP: %d  constants found: %d", _total_invokes, _total_constants);
2252 }
2253 #endif
2254 
2255 
2256 //=============================================================================
2257 #ifndef PRODUCT
2258 uint PhasePeephole::_total_peepholes = 0;
2259 #endif
2260 //------------------------------PhasePeephole----------------------------------
2261 // Machine-dependent peephole optimization, run after register allocation
2262 PhasePeephole::PhasePeephole( PhaseRegAlloc *regalloc, PhaseCFG &cfg )
2263   : PhaseTransform(Peephole), _regalloc(regalloc), _cfg(cfg) {
2264   NOT_PRODUCT( clear_peepholes(); )
2265 }
2266 
2267 #ifndef PRODUCT
2268 //------------------------------~PhasePeephole---------------------------------
2269 PhasePeephole::~PhasePeephole() {
2270   _total_peepholes += count_peepholes();
2271 }
2272 #endif
2273 
2274 //------------------------------transform--------------------------------------
2275 Node *PhasePeephole::transform( Node *n ) {
2276   ShouldNotCallThis();
2277   return nullptr;
2278 }
2279 
2280 //------------------------------do_transform-----------------------------------
2281 void PhasePeephole::do_transform() {
2282   bool method_name_not_printed = true;
2283 
2284   // Examine each basic block
2285   for (uint block_number = 1; block_number < _cfg.number_of_blocks(); ++block_number) {
2286     Block* block = _cfg.get_block(block_number);
2287     bool block_not_printed = true;
2288 
2289     for (bool progress = true; progress;) {
2290       progress = false;
2291       // block->end_idx() not valid after PhaseRegAlloc
2292       uint end_index = block->number_of_nodes();
2293       for( uint instruction_index = end_index - 1; instruction_index > 0; --instruction_index ) {
2294         Node     *n = block->get_node(instruction_index);
2295         if( n->is_Mach() ) {
2296           MachNode *m = n->as_Mach();
2297           // check for peephole opportunities
2298           int result = m->peephole(block, instruction_index, &_cfg, _regalloc);
2299           if( result != -1 ) {
2300 #ifndef PRODUCT
2301             if( PrintOptoPeephole ) {
2302               // Print method, first time only
2303               if( C->method() && method_name_not_printed ) {
2304                 C->method()->print_short_name(); tty->cr();
2305                 method_name_not_printed = false;
2306               }
2307               // Print this block
2308               if( Verbose && block_not_printed) {
2309                 tty->print_cr("in block");
2310                 block->dump();
2311                 block_not_printed = false;
2312               }
2313               // Print the peephole number
2314               tty->print_cr("peephole number: %d", result);
2315             }
2316             inc_peepholes();
2317 #endif
2318             // Set progress, start again
2319             progress = true;
2320             break;
2321           }
2322         }
2323       }
2324     }
2325   }
2326 }
2327 
2328 //------------------------------print_statistics-------------------------------
2329 #ifndef PRODUCT
2330 void PhasePeephole::print_statistics() {
2331   tty->print_cr("Peephole: peephole rules applied: %d",  _total_peepholes);
2332 }
2333 #endif
2334 
2335 
2336 //=============================================================================
2337 //------------------------------set_req_X--------------------------------------
2338 void Node::set_req_X( uint i, Node *n, PhaseIterGVN *igvn ) {
2339   assert( is_not_dead(n), "can not use dead node");
2340 #ifdef ASSERT
2341   if (igvn->hash_find(this) == this) {
2342     tty->print_cr("Need to remove from hash before changing edges");
2343     this->dump(1);
2344     tty->print_cr("Set at i = %d", i);
2345     n->dump();
2346     assert(false, "Need to remove from hash before changing edges");
2347   }
2348 #endif
2349   Node *old = in(i);
2350   set_req(i, n);
2351 
2352   // old goes dead?
2353   if( old ) {
2354     switch (old->outcnt()) {
2355     case 0:
2356       // Put it on the worklist to kill later. We do not kill it now because the
2357       // recursive kill would delete the current node (this) if a dead loop exists
2358       if (!old->is_top())
2359         igvn->_worklist.push( old );
2360       break;
2361     case 1:
2362       if( old->is_Store() || old->has_special_unique_user() )
2363         igvn->add_users_to_worklist( old );
2364       break;
2365     case 2:
2366       if( old->is_Store() )
2367         igvn->add_users_to_worklist( old );
2368       if( old->Opcode() == Op_Region )
2369         igvn->_worklist.push(old);
2370       break;
2371     case 3:
2372       if( old->Opcode() == Op_Region ) {
2373         igvn->_worklist.push(old);
2374         igvn->add_users_to_worklist( old );
2375       }
2376       break;
2377     default:
2378       break;
2379     }
2380 
2381     BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(igvn, old);
2382   }
2383 }
2384 
2385 void Node::set_req_X(uint i, Node *n, PhaseGVN *gvn) {
2386   PhaseIterGVN* igvn = gvn->is_IterGVN();
2387   if (igvn == nullptr) {
2388     set_req(i, n);
2389     return;
2390   }
2391   set_req_X(i, n, igvn);
2392 }
2393 
2394 //-------------------------------replace_by-----------------------------------
2395 // Using def-use info, replace one node with another.  Follow the def-use info
2396 // to all users of the OLD node.  Then make all uses point to the NEW node.
2397 void Node::replace_by(Node *new_node) {
2398   assert(!is_top(), "top node has no DU info");
2399   for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
2400     Node* use = last_out(i);
2401     uint uses_found = 0;
2402     for (uint j = 0; j < use->len(); j++) {
2403       if (use->in(j) == this) {
2404         if (j < use->req())
2405               use->set_req(j, new_node);
2406         else  use->set_prec(j, new_node);
2407         uses_found++;
2408       }
2409     }
2410     i -= uses_found;    // we deleted 1 or more copies of this edge
2411   }
2412 }
2413 
2414 //=============================================================================
2415 //-----------------------------------------------------------------------------
2416 void Type_Array::grow( uint i ) {
2417   assert(_a == Compile::current()->comp_arena(), "Should be allocated in comp_arena");
2418   if( !_max ) {
2419     _max = 1;
2420     _types = (const Type**)_a->Amalloc( _max * sizeof(Type*) );
2421     _types[0] = nullptr;
2422   }
2423   uint old = _max;
2424   _max = next_power_of_2(i);
2425   _types = (const Type**)_a->Arealloc( _types, old*sizeof(Type*),_max*sizeof(Type*));
2426   memset( &_types[old], 0, (_max-old)*sizeof(Type*) );
2427 }
2428 
2429 //------------------------------dump-------------------------------------------
2430 #ifndef PRODUCT
2431 void Type_Array::dump() const {
2432   uint max = Size();
2433   for( uint i = 0; i < max; i++ ) {
2434     if( _types[i] != nullptr ) {
2435       tty->print("  %d\t== ", i); _types[i]->dump(); tty->cr();
2436     }
2437   }
2438 }
2439 #endif