1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "libadt/vectset.hpp"
  29 #include "memory/allocation.inline.hpp"
  30 #include "memory/resourceArea.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/cfgnode.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/loopnode.hpp"
  35 #include "opto/machnode.hpp"
  36 #include "opto/matcher.hpp"
  37 #include "opto/node.hpp"
  38 #include "opto/opcodes.hpp"
  39 #include "opto/regmask.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/type.hpp"
  42 #include "utilities/copy.hpp"
  43 #include "utilities/macros.hpp"
  44 
  45 class RegMask;
  46 // #include "phase.hpp"
  47 class PhaseTransform;
  48 class PhaseGVN;
  49 
// Sentinel value marking an index that is not a machine register.
const uint Node::NotAMachineReg = 0xffff0000;
  52 
  53 #ifndef PRODUCT
  54 extern int nodes_created;
  55 #endif
  56 #ifdef __clang__
  57 #pragma clang diagnostic push
  58 #pragma GCC diagnostic ignored "-Wuninitialized"
  59 #endif
  60 
  61 #ifdef ASSERT
  62 
//--------------------------verify_construction--------------------------------
  64 // Set a breakpoint here to identify where a particular node index is built.
  65 void Node::verify_construction() {
  66   _debug_orig = NULL;
  67   int old_debug_idx = Compile::debug_idx();
  68   int new_debug_idx = old_debug_idx+1;
  69   if (new_debug_idx > 0) {
  70     // Arrange that the lowest five decimal digits of _debug_idx
  71     // will repeat those of _idx. In case this is somehow pathological,
  72     // we continue to assign negative numbers (!) consecutively.
  73     const int mod = 100000;
  74     int bump = (int)(_idx - new_debug_idx) % mod;
  75     if (bump < 0)  bump += mod;
  76     assert(bump >= 0 && bump < mod, "");
  77     new_debug_idx += bump;
  78   }
  79   Compile::set_debug_idx(new_debug_idx);
  80   set_debug_idx( new_debug_idx );
  81   assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
  assert(Compile::current()->live_nodes() < Compile::current()->max_node_limit(), "Live Node limit exceeded");
  83   if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
  84     tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
  85     BREAKPOINT;
  86   }
  87 #if OPTO_DU_ITERATOR_ASSERT
  88   _last_del = NULL;
  89   _del_tick = 0;
  90 #endif
  91   _hash_lock = 0;
  92 }
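
// Worked example of the scheme above (all numbers hypothetical): if the
// compilation's debug counter stands at 4200000 and a node is created with
// _idx == 3741, the node receives _debug_idx == 4203741, whose low five
// decimal digits repeat _idx.  In a debug build, -XX:BreakAtNode=4203741 then
// stops at exactly this construction, while -XX:BreakAtNode=3741 stops at
// node 3741 of every compilation.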
  93 
  94 
  95 // #ifdef ASSERT ...
  96 
  97 #if OPTO_DU_ITERATOR_ASSERT
  98 void DUIterator_Common::sample(const Node* node) {
  99   _vdui     = VerifyDUIterators;
 100   _node     = node;
 101   _outcnt   = node->_outcnt;
 102   _del_tick = node->_del_tick;
 103   _last     = NULL;
 104 }
 105 
 106 void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
 107   assert(_node     == node, "consistent iterator source");
 108   assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
 109 }
 110 
 111 void DUIterator_Common::verify_resync() {
 112   // Ensure that the loop body has just deleted the last guy produced.
 113   const Node* node = _node;
 114   // Ensure that at least one copy of the last-seen edge was deleted.
 115   // Note:  It is OK to delete multiple copies of the last-seen edge.
 116   // Unfortunately, we have no way to verify that all the deletions delete
 117   // that same edge.  On this point we must use the Honor System.
 118   assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
 119   assert(node->_last_del == _last, "must have deleted the edge just produced");
 120   // We liked this deletion, so accept the resulting outcnt and tick.
 121   _outcnt   = node->_outcnt;
 122   _del_tick = node->_del_tick;
 123 }
 124 
 125 void DUIterator_Common::reset(const DUIterator_Common& that) {
 126   if (this == &that)  return;  // ignore assignment to self
 127   if (!_vdui) {
 128     // We need to initialize everything, overwriting garbage values.
 129     _last = that._last;
 130     _vdui = that._vdui;
 131   }
 132   // Note:  It is legal (though odd) for an iterator over some node x
 133   // to be reassigned to iterate over another node y.  Some doubly-nested
 134   // progress loops depend on being able to do this.
 135   const Node* node = that._node;
 136   // Re-initialize everything, except _last.
 137   _node     = node;
 138   _outcnt   = node->_outcnt;
 139   _del_tick = node->_del_tick;
 140 }
 141 
 142 void DUIterator::sample(const Node* node) {
 143   DUIterator_Common::sample(node);      // Initialize the assertion data.
 144   _refresh_tick = 0;                    // No refreshes have happened, as yet.
 145 }
 146 
 147 void DUIterator::verify(const Node* node, bool at_end_ok) {
 148   DUIterator_Common::verify(node, at_end_ok);
 149   assert(_idx      <  node->_outcnt + (uint)at_end_ok, "idx in range");
 150 }
 151 
 152 void DUIterator::verify_increment() {
 153   if (_refresh_tick & 1) {
 154     // We have refreshed the index during this loop.
 155     // Fix up _idx to meet asserts.
 156     if (_idx > _outcnt)  _idx = _outcnt;
 157   }
 158   verify(_node, true);
 159 }
 160 
 161 void DUIterator::verify_resync() {
 162   // Note:  We do not assert on _outcnt, because insertions are OK here.
 163   DUIterator_Common::verify_resync();
 164   // Make sure we are still in sync, possibly with no more out-edges:
 165   verify(_node, true);
 166 }
 167 
 168 void DUIterator::reset(const DUIterator& that) {
 169   if (this == &that)  return;  // self assignment is always a no-op
 170   assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
 171   assert(that._idx          == 0, "assign only the result of Node::outs()");
 172   assert(_idx               == that._idx, "already assigned _idx");
 173   if (!_vdui) {
 174     // We need to initialize everything, overwriting garbage values.
 175     sample(that._node);
 176   } else {
 177     DUIterator_Common::reset(that);
 178     if (_refresh_tick & 1) {
 179       _refresh_tick++;                  // Clear the "was refreshed" flag.
 180     }
 181     assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
 182   }
 183 }
 184 
 185 void DUIterator::refresh() {
 186   DUIterator_Common::sample(_node);     // Re-fetch assertion data.
 187   _refresh_tick |= 1;                   // Set the "was refreshed" flag.
 188 }
 189 
 190 void DUIterator::verify_finish() {
 191   // If the loop has killed the node, do not require it to re-run.
 192   if (_node->_outcnt == 0)  _refresh_tick &= ~1;
 193   // If this assert triggers, it means that a loop used refresh_out_pos
 194   // to re-synch an iteration index, but the loop did not correctly
 195   // re-run itself, using a "while (progress)" construct.
 196   // This iterator enforces the rule that you must keep trying the loop
 197   // until it "runs clean" without any need for refreshing.
 198   assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
 199 }
 200 
 201 
 202 void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
 203   DUIterator_Common::verify(node, at_end_ok);
 204   Node** out    = node->_out;
 205   uint   cnt    = node->_outcnt;
 206   assert(cnt == _outcnt, "no insertions allowed");
 207   assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
 208   // This last check is carefully designed to work for NO_OUT_ARRAY.
 209 }
 210 
 211 void DUIterator_Fast::verify_limit() {
 212   const Node* node = _node;
 213   verify(node, true);
 214   assert(_outp == node->_out + node->_outcnt, "limit still correct");
 215 }
 216 
 217 void DUIterator_Fast::verify_resync() {
 218   const Node* node = _node;
 219   if (_outp == node->_out + _outcnt) {
 220     // Note that the limit imax, not the pointer i, gets updated with the
 221     // exact count of deletions.  (For the pointer it's always "--i".)
 222     assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
 223     // This is a limit pointer, with a name like "imax".
 224     // Fudge the _last field so that the common assert will be happy.
 225     _last = (Node*) node->_last_del;
 226     DUIterator_Common::verify_resync();
 227   } else {
 228     assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
 229     // A normal internal pointer.
 230     DUIterator_Common::verify_resync();
 231     // Make sure we are still in sync, possibly with no more out-edges:
 232     verify(node, true);
 233   }
 234 }
 235 
 236 void DUIterator_Fast::verify_relimit(uint n) {
 237   const Node* node = _node;
 238   assert((int)n > 0, "use imax -= n only with a positive count");
 239   // This must be a limit pointer, with a name like "imax".
 240   assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
 241   // The reported number of deletions must match what the node saw.
 242   assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
 243   // Fudge the _last field so that the common assert will be happy.
 244   _last = (Node*) node->_last_del;
 245   DUIterator_Common::verify_resync();
 246 }
 247 
 248 void DUIterator_Fast::reset(const DUIterator_Fast& that) {
 249   assert(_outp              == that._outp, "already assigned _outp");
 250   DUIterator_Common::reset(that);
 251 }
 252 
 253 void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
 254   // at_end_ok means the _outp is allowed to underflow by 1
 255   _outp += at_end_ok;
 256   DUIterator_Fast::verify(node, at_end_ok);  // check _del_tick, etc.
 257   _outp -= at_end_ok;
 258   assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
 259 }
 260 
 261 void DUIterator_Last::verify_limit() {
 262   // Do not require the limit address to be resynched.
 263   //verify(node, true);
 264   assert(_outp == _node->_out, "limit still correct");
 265 }
 266 
 267 void DUIterator_Last::verify_step(uint num_edges) {
 268   assert((int)num_edges > 0, "need non-zero edge count for loop progress");
 269   _outcnt   -= num_edges;
 270   _del_tick += num_edges;
 271   // Make sure we are still in sync, possibly with no more out-edges:
 272   const Node* node = _node;
 273   verify(node, true);
 274   assert(node->_last_del == _last, "must have deleted the edge just produced");
 275 }
 276 
 277 #endif //OPTO_DU_ITERATOR_ASSERT
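
// The patterns these assertions guard look roughly like this (a sketch; the
// authoritative macros and comments live in node.hpp):
//
//   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
//     Node* use = n->fast_out(i);    // n's out-array must not change here
//   }
//
//   for (DUIterator i = n->outs(); n->has_out(i); i++) {
//     Node* use = n->out(i);
//     // edge deletions are tolerated here, but the loop must then be re-run
//     // until it completes without refreshing (see verify_finish above)
//   }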
 278 
 279 
 280 #endif //ASSERT
 281 
 282 
 283 // This constant used to initialize _out may be any non-null value.
 284 // The value NULL is reserved for the top node only.
 285 #define NO_OUT_ARRAY ((Node**)-1)
 286 
 287 // Out-of-line code from node constructors.
 288 // Executed only when extra debug info. is being passed around.
 289 static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
 290   C->set_node_notes_at(idx, nn);
 291 }
 292 
 293 // Shared initialization code.
 294 inline int Node::Init(int req) {
 295   Compile* C = Compile::current();
 296   int idx = C->next_unique();
 297 
 298   // Allocate memory for the necessary number of edges.
 299   if (req > 0) {
 300     // Allocate space for _in array to have double alignment.
 301     _in = (Node **) ((char *) (C->node_arena()->Amalloc_D(req * sizeof(void*))));
 302   }
 303   // If there are default notes floating around, capture them:
 304   Node_Notes* nn = C->default_node_notes();
 305   if (nn != NULL)  init_node_notes(C, idx, nn);
 306 
 307   // Note:  At this point, C is dead,
 308   // and we begin to initialize the new Node.
 309 
 310   _cnt = _max = req;
 311   _outcnt = _outmax = 0;
 312   _class_id = Class_Node;
 313   _flags = 0;
 314   _out = NO_OUT_ARRAY;
 315   return idx;
 316 }
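
// Init() returns the new index instead of storing it so that the constructors
// below can initialize _idx in their initializer lists while still sharing
// this allocation code.  Note that _in is only allocated here; each
// constructor fills in its own required edges.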
 317 
 318 //------------------------------Node-------------------------------------------
 319 // Create a Node, with a given number of required edges.
 320 Node::Node(uint req)
 321   : _idx(Init(req))
 322 #ifdef ASSERT
 323   , _parse_idx(_idx)
 324 #endif
 325 {
 326   assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
 327   debug_only( verify_construction() );
 328   NOT_PRODUCT(nodes_created++);
 329   if (req == 0) {
 330     _in = NULL;
 331   } else {
 332     Node** to = _in;
 333     for(uint i = 0; i < req; i++) {
 334       to[i] = NULL;
 335     }
 336   }
 337 }
 338 
 339 //------------------------------Node-------------------------------------------
 340 Node::Node(Node *n0)
 341   : _idx(Init(1))
 342 #ifdef ASSERT
 343   , _parse_idx(_idx)
 344 #endif
 345 {
 346   debug_only( verify_construction() );
 347   NOT_PRODUCT(nodes_created++);
 348   assert( is_not_dead(n0), "can not use dead node");
 349   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
 350 }
 351 
 352 //------------------------------Node-------------------------------------------
 353 Node::Node(Node *n0, Node *n1)
 354   : _idx(Init(2))
 355 #ifdef ASSERT
 356   , _parse_idx(_idx)
 357 #endif
 358 {
 359   debug_only( verify_construction() );
 360   NOT_PRODUCT(nodes_created++);
 361   assert( is_not_dead(n0), "can not use dead node");
 362   assert( is_not_dead(n1), "can not use dead node");
 363   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
 364   _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
 365 }
 366 
 367 //------------------------------Node-------------------------------------------
 368 Node::Node(Node *n0, Node *n1, Node *n2)
 369   : _idx(Init(3))
 370 #ifdef ASSERT
 371   , _parse_idx(_idx)
 372 #endif
 373 {
 374   debug_only( verify_construction() );
 375   NOT_PRODUCT(nodes_created++);
 376   assert( is_not_dead(n0), "can not use dead node");
 377   assert( is_not_dead(n1), "can not use dead node");
 378   assert( is_not_dead(n2), "can not use dead node");
 379   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
 380   _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
 381   _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
 382 }
 383 
 384 //------------------------------Node-------------------------------------------
 385 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
 386   : _idx(Init(4))
 387 #ifdef ASSERT
 388   , _parse_idx(_idx)
 389 #endif
 390 {
 391   debug_only( verify_construction() );
 392   NOT_PRODUCT(nodes_created++);
 393   assert( is_not_dead(n0), "can not use dead node");
 394   assert( is_not_dead(n1), "can not use dead node");
 395   assert( is_not_dead(n2), "can not use dead node");
 396   assert( is_not_dead(n3), "can not use dead node");
 397   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
 398   _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
 399   _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
 400   _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
 401 }
 402 
 403 //------------------------------Node-------------------------------------------
 404 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
 405   : _idx(Init(5))
 406 #ifdef ASSERT
 407   , _parse_idx(_idx)
 408 #endif
 409 {
 410   debug_only( verify_construction() );
 411   NOT_PRODUCT(nodes_created++);
 412   assert( is_not_dead(n0), "can not use dead node");
 413   assert( is_not_dead(n1), "can not use dead node");
 414   assert( is_not_dead(n2), "can not use dead node");
 415   assert( is_not_dead(n3), "can not use dead node");
 416   assert( is_not_dead(n4), "can not use dead node");
 417   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
 418   _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
 419   _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
 420   _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
 421   _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
 422 }
 423 
 424 //------------------------------Node-------------------------------------------
 425 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
 426                      Node *n4, Node *n5)
 427   : _idx(Init(6))
 428 #ifdef ASSERT
 429   , _parse_idx(_idx)
 430 #endif
 431 {
 432   debug_only( verify_construction() );
 433   NOT_PRODUCT(nodes_created++);
 434   assert( is_not_dead(n0), "can not use dead node");
 435   assert( is_not_dead(n1), "can not use dead node");
 436   assert( is_not_dead(n2), "can not use dead node");
 437   assert( is_not_dead(n3), "can not use dead node");
 438   assert( is_not_dead(n4), "can not use dead node");
 439   assert( is_not_dead(n5), "can not use dead node");
 440   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
 441   _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
 442   _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
 443   _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
 444   _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
 445   _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
 446 }
 447 
 448 //------------------------------Node-------------------------------------------
 449 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
 450                      Node *n4, Node *n5, Node *n6)
 451   : _idx(Init(7))
 452 #ifdef ASSERT
 453   , _parse_idx(_idx)
 454 #endif
 455 {
 456   debug_only( verify_construction() );
 457   NOT_PRODUCT(nodes_created++);
 458   assert( is_not_dead(n0), "can not use dead node");
 459   assert( is_not_dead(n1), "can not use dead node");
 460   assert( is_not_dead(n2), "can not use dead node");
 461   assert( is_not_dead(n3), "can not use dead node");
 462   assert( is_not_dead(n4), "can not use dead node");
 463   assert( is_not_dead(n5), "can not use dead node");
 464   assert( is_not_dead(n6), "can not use dead node");
 465   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
 466   _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
 467   _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
 468   _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
 469   _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
 470   _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
 471   _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
 472 }
 473 
 474 #ifdef __clang__
 475 #pragma clang diagnostic pop
 476 #endif
 477 
 478 
 479 //------------------------------clone------------------------------------------
 480 // Clone a Node.
 481 Node *Node::clone() const {
 482   Compile* C = Compile::current();
 483   uint s = size_of();           // Size of inherited Node
 484   Node *n = (Node*)C->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
 485   Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
 486   // Set the new input pointer array
 487   n->_in = (Node**)(((char*)n)+s);
 488   // Cannot share the old output pointer array, so kill it
 489   n->_out = NO_OUT_ARRAY;
 490   // And reset the counters to 0
 491   n->_outcnt = 0;
 492   n->_outmax = 0;
 493   // Unlock this guy, since he is not in any hash table.
 494   debug_only(n->_hash_lock = 0);
 495   // Walk the old node's input list to duplicate its edges
 496   uint i;
 497   for( i = 0; i < len(); i++ ) {
 498     Node *x = in(i);
 499     n->_in[i] = x;
 500     if (x != NULL) x->add_out(n);
 501   }
 502   if (is_macro())
 503     C->add_macro_node(n);
 504   if (is_expensive())
 505     C->add_expensive_node(n);
 506   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 507   bs->register_potential_barrier_node(n);
 508   // If the cloned node is a range check dependent CastII, add it to the list.
 509   CastIINode* cast = n->isa_CastII();
 510   if (cast != NULL && cast->has_range_check()) {
 511     C->add_range_check_cast(cast);
 512   }
 513   if (n->Opcode() == Op_Opaque4) {
 514     C->add_opaque4_node(n);
 515   }
 516 
 517   n->set_idx(C->next_unique()); // Get new unique index as well
 518   debug_only( n->verify_construction() );
 519   NOT_PRODUCT(nodes_created++);
 520   // Do not patch over the debug_idx of a clone, because it makes it
 521   // impossible to break on the clone's moment of creation.
 522   //debug_only( n->set_debug_idx( debug_idx() ) );
 523 
 524   C->copy_node_notes_to(n, (Node*) this);
 525 
 526   // MachNode clone
 527   uint nopnds;
 528   if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
 529     MachNode *mach  = n->as_Mach();
 530     MachNode *mthis = this->as_Mach();
 531     // Get address of _opnd_array.
 532     // It should be the same offset since it is the clone of this node.
 533     MachOper **from = mthis->_opnds;
 534     MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
 535                     pointer_delta((const void*)from,
 536                                   (const void*)(&mthis->_opnds), 1));
 537     mach->_opnds = to;
 538     for ( uint i = 0; i < nopnds; ++i ) {
 539       to[i] = from[i]->clone();
 540     }
 541   }
 542   // cloning CallNode may need to clone JVMState
 543   if (n->is_Call()) {
 544     n->as_Call()->clone_jvms(C);
 545   }
 546   if (n->is_SafePoint()) {
 547     n->as_SafePoint()->clone_replaced_nodes();
 548   }
 549   if (n->is_Load()) {
 550     n->as_Load()->copy_barrier_info(this);
 551   }
 552   if (n->is_ValueTypeBase()) {
 553     C->add_value_type(n);
 554   }
 555   return n;                     // Return the clone
 556 }
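
// Typical use of clone() (a sketch, not tied to a particular caller): take a
// private copy of a possibly shared node before editing it, e.g.
//   Node* copy = n->clone();
//   copy->set_req(1, new_in);          // edit the copy, not the shared original
//   copy = phase->transform(copy);     // register the new node with the phase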
 557 
 558 //---------------------------setup_is_top--------------------------------------
 559 // Call this when changing the top node, to reassert the invariants
 560 // required by Node::is_top.  See Compile::set_cached_top_node.
 561 void Node::setup_is_top() {
 562   if (this == (Node*)Compile::current()->top()) {
 563     // This node has just become top.  Kill its out array.
 564     _outcnt = _outmax = 0;
 565     _out = NULL;                           // marker value for top
 566     assert(is_top(), "must be top");
 567   } else {
 568     if (_out == NULL)  _out = NO_OUT_ARRAY;
 569     assert(!is_top(), "must not be top");
 570   }
 571 }
 572 
 573 //------------------------------~Node------------------------------------------
 574 // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
 575 void Node::destruct() {
 576   // Eagerly reclaim unique Node numberings
 577   Compile* compile = Compile::current();
 578   if ((uint)_idx+1 == compile->unique()) {
 579     compile->set_unique(compile->unique()-1);
 580   }
 581   // Clear debug info:
 582   Node_Notes* nn = compile->node_notes_at(_idx);
 583   if (nn != NULL)  nn->clear();
 584   // Walk the input array, freeing the corresponding output edges
 585   _cnt = _max;  // forget req/prec distinction
 586   uint i;
 587   for( i = 0; i < _max; i++ ) {
 588     set_req(i, NULL);
 589     //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
 590   }
 591   assert(outcnt() == 0, "deleting a node must not leave a dangling use");
 592   // See if the input array was allocated just prior to the object
 593   int edge_size = _max*sizeof(void*);
 594   int out_edge_size = _outmax*sizeof(void*);
 595   char *edge_end = ((char*)_in) + edge_size;
 596   char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
 597   int node_size = size_of();
 598 
 599   // Free the output edge array
 600   if (out_edge_size > 0) {
 601     compile->node_arena()->Afree(out_array, out_edge_size);
 602   }
 603 
 604   // Free the input edge array and the node itself
 605   if( edge_end == (char*)this ) {
 606     // It was; free the input array and object all in one hit
 607 #ifndef ASSERT
 608     compile->node_arena()->Afree(_in,edge_size+node_size);
 609 #endif
 610   } else {
 611     // Free just the input array
 612     compile->node_arena()->Afree(_in,edge_size);
 613 
 614     // Free just the object
 615 #ifndef ASSERT
 616     compile->node_arena()->Afree(this,node_size);
 617 #endif
 618   }
 619   if (is_macro()) {
 620     compile->remove_macro_node(this);
 621   }
 622   if (is_expensive()) {
 623     compile->remove_expensive_node(this);
 624   }
 625   CastIINode* cast = isa_CastII();
 626   if (cast != NULL && cast->has_range_check()) {
 627     compile->remove_range_check_cast(cast);
 628   }
 629   if (Opcode() == Op_Opaque4) {
 630     compile->remove_opaque4_node(this);
 631   }
 632   if (is_ValueTypeBase()) {
 633     compile->remove_value_type(this);
 634   }
 635 
 636   if (is_SafePoint()) {
 637     as_SafePoint()->delete_replaced_nodes();
 638   }
 639   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 640   bs->unregister_potential_barrier_node(this);
 641 #ifdef ASSERT
 642   // We will not actually delete the storage, but we'll make the node unusable.
 643   *(address*)this = badAddress;  // smash the C++ vtbl, probably
 644   _in = _out = (Node**) badAddress;
 645   _max = _cnt = _outmax = _outcnt = 0;
 646   compile->remove_modified_node(this);
 647 #endif
 648 }
 649 
 650 //------------------------------grow-------------------------------------------
 651 // Grow the input array, making space for more edges
 652 void Node::grow( uint len ) {
 653   Arena* arena = Compile::current()->node_arena();
 654   uint new_max = _max;
 655   if( new_max == 0 ) {
 656     _max = 4;
 657     _in = (Node**)arena->Amalloc(4*sizeof(Node*));
 658     Node** to = _in;
 659     to[0] = NULL;
 660     to[1] = NULL;
 661     to[2] = NULL;
 662     to[3] = NULL;
 663     return;
 664   }
 665   while( new_max <= len ) new_max <<= 1; // Find next power-of-2
 666   // Trimming to limit allows a uint8 to handle up to 255 edges.
 667   // Previously I was using only powers-of-2 which peaked at 128 edges.
 668   //if( new_max >= limit ) new_max = limit-1;
 669   _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
 670   Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
 671   _max = new_max;               // Record new max length
 672   // This assertion makes sure that Node::_max is wide enough to
 673   // represent the numerical value of new_max.
 674   assert(_max == new_max && _max > len, "int width of _max is too small");
 675 }
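
// For example, a node with _max == 4 that needs room for index 9 grows to
// _max == 16: the loop above doubles 4 -> 8 -> 16, stopping at the first
// power of two strictly greater than the requested length.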
 676 
 677 //-----------------------------out_grow----------------------------------------
// Grow the output array, making space for more edges
 679 void Node::out_grow( uint len ) {
 680   assert(!is_top(), "cannot grow a top node's out array");
 681   Arena* arena = Compile::current()->node_arena();
 682   uint new_max = _outmax;
 683   if( new_max == 0 ) {
 684     _outmax = 4;
 685     _out = (Node **)arena->Amalloc(4*sizeof(Node*));
 686     return;
 687   }
 688   while( new_max <= len ) new_max <<= 1; // Find next power-of-2
 689   // Trimming to limit allows a uint8 to handle up to 255 edges.
 690   // Previously I was using only powers-of-2 which peaked at 128 edges.
 691   //if( new_max >= limit ) new_max = limit-1;
 692   assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
 693   _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
 694   //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
 695   _outmax = new_max;               // Record new max length
 696   // This assertion makes sure that Node::_max is wide enough to
 697   // represent the numerical value of new_max.
 698   assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
 699 }
 700 
 701 #ifdef ASSERT
 702 //------------------------------is_dead----------------------------------------
 703 bool Node::is_dead() const {
  // Mach and pinch point nodes may look dead but are not.
 705   if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
 706     return false;
 707   for( uint i = 0; i < _max; i++ )
 708     if( _in[i] != NULL )
 709       return false;
 710   dump();
 711   return true;
 712 }
 713 #endif
 714 
 715 
 716 //------------------------------is_unreachable---------------------------------
 717 bool Node::is_unreachable(PhaseIterGVN &igvn) const {
 718   assert(!is_Mach(), "doesn't work with MachNodes");
 719   return outcnt() == 0 || igvn.type(this) == Type::TOP || (in(0) != NULL && in(0)->is_top());
 720 }
 721 
 722 //------------------------------add_req----------------------------------------
 723 // Add a new required input at the end
 724 void Node::add_req( Node *n ) {
 725   assert( is_not_dead(n), "can not use dead node");
 726 
 727   // Look to see if I can move precedence down one without reallocating
 728   if( (_cnt >= _max) || (in(_max-1) != NULL) )
 729     grow( _max+1 );
 730 
 731   // Find a precedence edge to move
 732   if( in(_cnt) != NULL ) {       // Next precedence edge is busy?
 733     uint i;
 734     for( i=_cnt; i<_max; i++ )
 735       if( in(i) == NULL )       // Find the NULL at end of prec edge list
 736         break;                  // There must be one, since we grew the array
 737     _in[i] = in(_cnt);          // Move prec over, making space for req edge
 738   }
 739   _in[_cnt++] = n;            // Stuff over old prec edge
 740   if (n != NULL) n->add_out((Node *)this);
 741 }
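
// Layout note for the code above: _in[0.._cnt) holds the required edges and
// _in[_cnt.._max) holds the precedence edges, packed toward the front with
// NULLs at the end.  add_req() therefore displaces at most one precedence
// edge (the one currently at _in[_cnt]) into the first free slot before
// claiming _in[_cnt] for the new required input.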
 742 
 743 //---------------------------add_req_batch-------------------------------------
// Add 'm' copies of a new required input at the end
 745 void Node::add_req_batch( Node *n, uint m ) {
 746   assert( is_not_dead(n), "can not use dead node");
 747   // check various edge cases
 748   if ((int)m <= 1) {
 749     assert((int)m >= 0, "oob");
 750     if (m != 0)  add_req(n);
 751     return;
 752   }
 753 
 754   // Look to see if I can move precedence down one without reallocating
 755   if( (_cnt+m) > _max || _in[_max-m] )
 756     grow( _max+m );
 757 
 758   // Find a precedence edge to move
 759   if( _in[_cnt] != NULL ) {     // Next precedence edge is busy?
 760     uint i;
 761     for( i=_cnt; i<_max; i++ )
 762       if( _in[i] == NULL )      // Find the NULL at end of prec edge list
 763         break;                  // There must be one, since we grew the array
 764     // Slide all the precs over by m positions (assume #prec << m).
 765     Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
 766   }
 767 
 768   // Stuff over the old prec edges
 769   for(uint i=0; i<m; i++ ) {
 770     _in[_cnt++] = n;
 771   }
 772 
 773   // Insert multiple out edges on the node.
 774   if (n != NULL && !n->is_top()) {
 775     for(uint i=0; i<m; i++ ) {
 776       n->add_out((Node *)this);
 777     }
 778   }
 779 }
 780 
 781 //------------------------------del_req----------------------------------------
 782 // Delete the required edge and compact the edge array
 783 void Node::del_req( uint idx ) {
 784   assert( idx < _cnt, "oob");
 785   assert( !VerifyHashTableKeys || _hash_lock == 0,
 786           "remove node from hash table before modifying it");
 787   // First remove corresponding def-use edge
 788   Node *n = in(idx);
 789   if (n != NULL) n->del_out((Node *)this);
 790   _in[idx] = in(--_cnt); // Compact the array
 791   // Avoid spec violation: Gap in prec edges.
 792   close_prec_gap_at(_cnt);
 793   Compile::current()->record_modified_node(this);
 794 }
 795 
 796 //------------------------------del_req_ordered--------------------------------
 797 // Delete the required edge and compact the edge array with preserved order
 798 void Node::del_req_ordered( uint idx ) {
 799   assert( idx < _cnt, "oob");
 800   assert( !VerifyHashTableKeys || _hash_lock == 0,
 801           "remove node from hash table before modifying it");
 802   // First remove corresponding def-use edge
 803   Node *n = in(idx);
 804   if (n != NULL) n->del_out((Node *)this);
 805   if (idx < --_cnt) {    // Not last edge ?
 806     Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx)*sizeof(Node*)));
 807   }
 808   // Avoid spec violation: Gap in prec edges.
 809   close_prec_gap_at(_cnt);
 810   Compile::current()->record_modified_node(this);
 811 }
 812 
 813 //------------------------------ins_req----------------------------------------
// Insert a new required input at position 'idx'
 815 void Node::ins_req( uint idx, Node *n ) {
 816   assert( is_not_dead(n), "can not use dead node");
 817   add_req(NULL);                // Make space
 818   assert( idx < _max, "Must have allocated enough space");
 819   // Slide over
 820   if(_cnt-idx-1 > 0) {
 821     Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
 822   }
 823   _in[idx] = n;                            // Stuff over old required edge
 824   if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge
 825 }
 826 
 827 //-----------------------------find_edge---------------------------------------
 828 int Node::find_edge(Node* n) {
 829   for (uint i = 0; i < len(); i++) {
 830     if (_in[i] == n)  return i;
 831   }
 832   return -1;
 833 }
 834 
 835 //----------------------------replace_edge-------------------------------------
 836 int Node::replace_edge(Node* old, Node* neww) {
 837   if (old == neww)  return 0;  // nothing to do
 838   uint nrep = 0;
 839   for (uint i = 0; i < len(); i++) {
 840     if (in(i) == old) {
 841       if (i < req()) {
 842         set_req(i, neww);
 843       } else {
 844         assert(find_prec_edge(neww) == -1, "spec violation: duplicated prec edge (node %d -> %d)", _idx, neww->_idx);
 845         set_prec(i, neww);
 846       }
 847       nrep++;
 848     }
 849   }
 850   return nrep;
 851 }
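
// Typical use (a sketch): redirect one known user from an old value to its
// replacement, e.g.
//   use->replace_edge(old_val, new_val);
// This rewrites every matching input slot of 'use' and keeps the reciprocal
// def-use edges consistent through set_req()/set_prec().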
 852 
 853 /**
 * Replace input edges in the range [start, end) that point to the 'old' node.
 855  */
 856 int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end) {
 857   if (old == neww)  return 0;  // nothing to do
 858   uint nrep = 0;
 859   for (int i = start; i < end; i++) {
 860     if (in(i) == old) {
 861       set_req(i, neww);
 862       nrep++;
 863     }
 864   }
 865   return nrep;
 866 }
 867 
 868 //-------------------------disconnect_inputs-----------------------------------
 869 // NULL out all inputs to eliminate incoming Def-Use edges.
 870 // Return the number of edges between 'n' and 'this'
 871 int Node::disconnect_inputs(Node *n, Compile* C) {
 872   int edges_to_n = 0;
 873 
 874   uint cnt = req();
 875   for( uint i = 0; i < cnt; ++i ) {
 876     if( in(i) == 0 ) continue;
 877     if( in(i) == n ) ++edges_to_n;
 878     set_req(i, NULL);
 879   }
 880   // Remove precedence edges if any exist
 881   // Note: Safepoints may have precedence edges, even during parsing
 882   if( (req() != len()) && (in(req()) != NULL) ) {
 883     uint max = len();
 884     for( uint i = 0; i < max; ++i ) {
 885       if( in(i) == 0 ) continue;
 886       if( in(i) == n ) ++edges_to_n;
 887       set_prec(i, NULL);
 888     }
 889   }
 890 
 891   // Node::destruct requires all out edges be deleted first
 892   // debug_only(destruct();)   // no reuse benefit expected
 893   if (edges_to_n == 0) {
 894     C->record_dead_node(_idx);
 895   }
 896   return edges_to_n;
 897 }
 898 
 899 //-----------------------------uncast---------------------------------------
 900 // %%% Temporary, until we sort out CheckCastPP vs. CastPP.
 901 // Strip away casting.  (It is depth-limited.)
 902 // Optionally, keep casts with dependencies.
 903 Node* Node::uncast(bool keep_deps) const {
 904   // Should be inline:
 905   //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
 906   if (is_ConstraintCast()) {
 907     return uncast_helper(this, keep_deps);
 908   } else {
 909     return (Node*) this;
 910   }
 911 }
 912 
// Find an out (use) of the current node whose opcode matches the given opcode.
 914 Node* Node::find_out_with(int opcode) {
 915   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 916     Node* use = fast_out(i);
 917     if (use->Opcode() == opcode) {
 918       return use;
 919     }
 920   }
 921   return NULL;
 922 }
 923 
 924 // Return true if the current node has an out that matches opcode.
 925 bool Node::has_out_with(int opcode) {
 926   return (find_out_with(opcode) != NULL);
 927 }
 928 
 929 // Return true if the current node has an out that matches any of the opcodes.
 930 bool Node::has_out_with(int opcode1, int opcode2, int opcode3, int opcode4) {
 931   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 932       int opcode = fast_out(i)->Opcode();
 933       if (opcode == opcode1 || opcode == opcode2 || opcode == opcode3 || opcode == opcode4) {
 934         return true;
 935       }
 936   }
 937   return false;
 938 }
 939 
 940 
 941 //---------------------------uncast_helper-------------------------------------
 942 Node* Node::uncast_helper(const Node* p, bool keep_deps) {
 943 #ifdef ASSERT
 944   uint depth_count = 0;
 945   const Node* orig_p = p;
 946 #endif
 947 
 948   while (true) {
 949 #ifdef ASSERT
 950     if (depth_count >= K) {
 951       orig_p->dump(4);
 952       if (p != orig_p)
 953         p->dump(1);
 954     }
 955     assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
 956 #endif
 957     if (p == NULL || p->req() != 2) {
 958       break;
 959     } else if (p->is_ConstraintCast()) {
 960       if (keep_deps && p->as_ConstraintCast()->carry_dependency()) {
 961         break; // stop at casts with dependencies
 962       }
 963       p = p->in(1);
 964     } else {
 965       break;
 966     }
 967   }
 968   return (Node*) p;
 969 }
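
// For example, given (CastPP (CheckCastPP p)), uncast() walks through both
// casts and returns p; with keep_deps == true the walk stops early at any
// ConstraintCast whose carry_dependency() is true.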
 970 
 971 //------------------------------add_prec---------------------------------------
 972 // Add a new precedence input.  Precedence inputs are unordered, with
 973 // duplicates removed and NULLs packed down at the end.
 974 void Node::add_prec( Node *n ) {
 975   assert( is_not_dead(n), "can not use dead node");
 976 
 977   // Check for NULL at end
 978   if( _cnt >= _max || in(_max-1) )
 979     grow( _max+1 );
 980 
  // Find the end of the precedence edge list, bailing out on a duplicate
 982   uint i = _cnt;
 983   while( in(i) != NULL ) {
 984     if (in(i) == n) return; // Avoid spec violation: duplicated prec edge.
 985     i++;
 986   }
 987   _in[i] = n;                                // Stuff prec edge over NULL
 988   if ( n != NULL) n->add_out((Node *)this);  // Add mirror edge
 989 
 990 #ifdef ASSERT
 991   while ((++i)<_max) { assert(_in[i] == NULL, "spec violation: Gap in prec edges (node %d)", _idx); }
 992 #endif
 993 }
 994 
 995 //------------------------------rm_prec----------------------------------------
 996 // Remove a precedence input.  Precedence inputs are unordered, with
 997 // duplicates removed and NULLs packed down at the end.
 998 void Node::rm_prec( uint j ) {
 999   assert(j < _max, "oob: i=%d, _max=%d", j, _max);
1000   assert(j >= _cnt, "not a precedence edge");
1001   if (_in[j] == NULL) return;   // Avoid spec violation: Gap in prec edges.
1002   _in[j]->del_out((Node *)this);
1003   close_prec_gap_at(j);
1004 }
1005 
1006 //------------------------------size_of----------------------------------------
1007 uint Node::size_of() const { return sizeof(*this); }
1008 
1009 //------------------------------ideal_reg--------------------------------------
1010 uint Node::ideal_reg() const { return 0; }
1011 
1012 //------------------------------jvms-------------------------------------------
1013 JVMState* Node::jvms() const { return NULL; }
1014 
1015 #ifdef ASSERT
//------------------------------verify_jvms------------------------------------
1017 bool Node::verify_jvms(const JVMState* using_jvms) const {
1018   for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
1019     if (jvms == using_jvms)  return true;
1020   }
1021   return false;
1022 }
1023 
1024 //------------------------------init_NodeProperty------------------------------
1025 void Node::init_NodeProperty() {
1026   assert(_max_classes <= max_jushort, "too many NodeProperty classes");
1027   assert(_max_flags <= max_jushort, "too many NodeProperty flags");
1028 }
1029 #endif
1030 
1031 //------------------------------format-----------------------------------------
1032 // Print as assembly
1033 void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
1034 //------------------------------emit-------------------------------------------
1035 // Emit bytes starting at parameter 'ptr'.
1036 void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
1037 //------------------------------size-------------------------------------------
1038 // Size of instruction in bytes
1039 uint Node::size(PhaseRegAlloc *ra_) const { return 0; }
1040 
1041 //------------------------------CFG Construction-------------------------------
1042 // Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root,
1043 // Goto and Return.
1044 const Node *Node::is_block_proj() const { return 0; }
1045 
1046 // Minimum guaranteed type
1047 const Type *Node::bottom_type() const { return Type::BOTTOM; }
1048 
1049 
1050 //------------------------------raise_bottom_type------------------------------
// Raise the recorded type of this Node to a more precise ('higher') type.
1052 void Node::raise_bottom_type(const Type* new_type) {
1053   if (is_Type()) {
1054     TypeNode *n = this->as_Type();
1055     if (VerifyAliases) {
1056       assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
1057     }
1058     n->set_type(new_type);
1059   } else if (is_Load()) {
1060     LoadNode *n = this->as_Load();
1061     if (VerifyAliases) {
1062       assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
1063     }
1064     n->set_type(new_type);
1065   }
1066 }
1067 
1068 //------------------------------Identity---------------------------------------
1069 // Return a node that the given node is equivalent to.
1070 Node* Node::Identity(PhaseGVN* phase) {
1071   return this;                  // Default to no identities
1072 }
1073 
1074 //------------------------------Value------------------------------------------
1075 // Compute a new Type for a node using the Type of the inputs.
1076 const Type* Node::Value(PhaseGVN* phase) const {
1077   return bottom_type();         // Default to worst-case Type
1078 }
1079 
1080 //------------------------------Ideal------------------------------------------
1081 //
1082 // 'Idealize' the graph rooted at this Node.
1083 //
1084 // In order to be efficient and flexible there are some subtle invariants
1085 // these Ideal calls need to hold.  Running with '+VerifyIterativeGVN' checks
// these invariants, although it's too slow to have on by default.  If you are
1087 // hacking an Ideal call, be sure to test with +VerifyIterativeGVN!
1088 //
// The Ideal call almost arbitrarily reshapes the graph rooted at the 'this'
1090 // pointer.  If ANY change is made, it must return the root of the reshaped
1091 // graph - even if the root is the same Node.  Example: swapping the inputs
1092 // to an AddINode gives the same answer and same root, but you still have to
1093 // return the 'this' pointer instead of NULL.
1094 //
1095 // You cannot return an OLD Node, except for the 'this' pointer.  Use the
// Identity call to return an old Node; basically, if Identity can find
// another Node, have the Ideal call make no change and return NULL.
1098 // Example: AddINode::Ideal must check for add of zero; in this case it
1099 // returns NULL instead of doing any graph reshaping.
1100 //
1101 // You cannot modify any old Nodes except for the 'this' pointer.  Due to
1102 // sharing there may be other users of the old Nodes relying on their current
1103 // semantics.  Modifying them will break the other users.
1104 // Example: when reshape "(X+3)+4" into "X+7" you must leave the Node for
1105 // "X+3" unchanged in case it is shared.
1106 //
1107 // If you modify the 'this' pointer's inputs, you should use
1108 // 'set_req'.  If you are making a new Node (either as the new root or
1109 // some new internal piece) you may use 'init_req' to set the initial
1110 // value.  You can make a new Node with either 'new' or 'clone'.  In
1111 // either case, def-use info is correctly maintained.
1112 //
1113 // Example: reshape "(X+3)+4" into "X+7":
1114 //    set_req(1, in(1)->in(1));
1115 //    set_req(2, phase->intcon(7));
1116 //    return this;
1117 // Example: reshape "X*4" into "X<<2"
1118 //    return new LShiftINode(in(1), phase->intcon(2));
1119 //
1120 // You must call 'phase->transform(X)' on any new Nodes X you make, except
// for the returned root node.  Example: reshape "X*31" into "(X<<5)-X".
//    Node *shift=phase->transform(new LShiftINode(in(1),phase->intcon(5)));
//    return new SubINode(shift, in(1));
1124 //
1125 // When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
1126 // These forms are faster than 'phase->transform(new ConNode())' and Do
1127 // The Right Thing with def-use info.
1128 //
1129 // You cannot bury the 'this' Node inside of a graph reshape.  If the reshaped
1130 // graph uses the 'this' Node it must be the root.  If you want a Node with
1131 // the same Opcode as the 'this' pointer use 'clone'.
1132 //
1133 Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
1134   return NULL;                  // Default to being Ideal already
1135 }
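
// A minimal Ideal() following the rules above (an illustrative sketch only;
// "HypotheticalAddNode" is not a real node class): fold "(X + con1) + con2"
// into "X + (con1 + con2)".
//
//   Node* HypotheticalAddNode::Ideal(PhaseGVN* phase, bool can_reshape) {
//     Node* add = in(1);
//     const TypeInt* t2 = phase->type(in(2))->isa_int();
//     if (add != NULL && add->Opcode() == Op_AddI &&
//         t2 != NULL && t2->is_con()) {
//       const TypeInt* t1 = phase->type(add->in(2))->isa_int();
//       if (t1 != NULL && t1->is_con()) {
//         set_req(1, add->in(1));                      // keep 'this' as the root
//         set_req(2, phase->intcon(t1->get_con() + t2->get_con()));
//         return this;                                 // a change was made
//       }
//     }
//     return NULL;                                     // no change
//   }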
1136 
1137 // Some nodes have specific Ideal subgraph transformations only if they are
1138 // unique users of specific nodes. Such nodes should be put on IGVN worklist
1139 // for the transformations to happen.
1140 bool Node::has_special_unique_user() const {
1141   assert(outcnt() == 1, "match only for unique out");
1142   Node* n = unique_out();
1143   int op  = Opcode();
1144   if (this->is_Store()) {
1145     // Condition for back-to-back stores folding.
1146     return n->Opcode() == op && n->in(MemNode::Memory) == this;
1147   } else if (this->is_Load() || this->is_DecodeN() || this->is_Phi()) {
1148     // Condition for removing an unused LoadNode or DecodeNNode from the MemBarAcquire precedence input
1149     return n->Opcode() == Op_MemBarAcquire;
1150   } else if (op == Op_AddL) {
1151     // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
1152     return n->Opcode() == Op_ConvL2I && n->in(1) == this;
1153   } else if (op == Op_SubI || op == Op_SubL) {
1154     // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y)
1155     return n->Opcode() == op && n->in(2) == this;
1156   } else if (is_If() && (n->is_IfFalse() || n->is_IfTrue())) {
1157     // See IfProjNode::Identity()
1158     return true;
1159   } else {
1160     return BarrierSet::barrier_set()->barrier_set_c2()->has_special_unique_user(this);
1161   }
}
1163 
1164 //--------------------------find_exact_control---------------------------------
// Skip chains of Proj and CatchProj nodes. Check for Null and Top.
1166 Node* Node::find_exact_control(Node* ctrl) {
1167   if (ctrl == NULL && this->is_Region())
1168     ctrl = this->as_Region()->is_copy();
1169 
1170   if (ctrl != NULL && ctrl->is_CatchProj()) {
1171     if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
1172       ctrl = ctrl->in(0);
1173     if (ctrl != NULL && !ctrl->is_top())
1174       ctrl = ctrl->in(0);
1175   }
1176 
1177   if (ctrl != NULL && ctrl->is_Proj())
1178     ctrl = ctrl->in(0);
1179 
1180   return ctrl;
1181 }
1182 
1183 //--------------------------dominates------------------------------------------
1184 // Helper function for MemNode::all_controls_dominate().
// Check if 'this' control node dominates or is equal to 'sub' control node.
// We already know that if any path back to Root or Start reaches 'this',
// then all paths do, so this is a simple search for one example,
1188 // not an exhaustive search for a counterexample.
1189 bool Node::dominates(Node* sub, Node_List &nlist) {
1190   assert(this->is_CFG(), "expecting control");
1191   assert(sub != NULL && sub->is_CFG(), "expecting control");
1192 
1193   // detect dead cycle without regions
1194   int iterations_without_region_limit = DominatorSearchLimit;
1195 
1196   Node* orig_sub = sub;
1197   Node* dom      = this;
1198   bool  met_dom  = false;
1199   nlist.clear();
1200 
1201   // Walk 'sub' backward up the chain to 'dom', watching for regions.
1202   // After seeing 'dom', continue up to Root or Start.
1203   // If we hit a region (backward split point), it may be a loop head.
1204   // Keep going through one of the region's inputs.  If we reach the
1205   // same region again, go through a different input.  Eventually we
1206   // will either exit through the loop head, or give up.
1207   // (If we get confused, break out and return a conservative 'false'.)
1208   while (sub != NULL) {
1209     if (sub->is_top())  break; // Conservative answer for dead code.
1210     if (sub == dom) {
1211       if (nlist.size() == 0) {
1212         // No Region nodes except loops were visited before and the EntryControl
1213         // path was taken for loops: it did not walk in a cycle.
1214         return true;
1215       } else if (met_dom) {
1216         break;          // already met before: walk in a cycle
1217       } else {
1218         // Region nodes were visited. Continue walk up to Start or Root
1219         // to make sure that it did not walk in a cycle.
1220         met_dom = true; // first time meet
1221         iterations_without_region_limit = DominatorSearchLimit; // Reset
      }
1223     }
1224     if (sub->is_Start() || sub->is_Root()) {
1225       // Success if we met 'dom' along a path to Start or Root.
1226       // We assume there are no alternative paths that avoid 'dom'.
1227       // (This assumption is up to the caller to ensure!)
1228       return met_dom;
1229     }
1230     Node* up = sub->in(0);
1231     // Normalize simple pass-through regions and projections:
1232     up = sub->find_exact_control(up);
1233     // If sub == up, we found a self-loop.  Try to push past it.
1234     if (sub == up && sub->is_Loop()) {
1235       // Take loop entry path on the way up to 'dom'.
1236       up = sub->in(1); // in(LoopNode::EntryControl);
1237     } else if (sub == up && sub->is_Region() && sub->req() != 3) {
1238       // Always take in(1) path on the way up to 'dom' for clone regions
1239       // (with only one input) or regions which merge > 2 paths
1240       // (usually used to merge fast/slow paths).
1241       up = sub->in(1);
1242     } else if (sub == up && sub->is_Region()) {
1243       // Try both paths for Regions with 2 input paths (it may be a loop head).
      // This may give a conservative 'false' answer when we cannot tell
      // which of the region's inputs is the entry path.
1246       iterations_without_region_limit = DominatorSearchLimit; // Reset
1247 
1248       bool region_was_visited_before = false;
1249       // Was this Region node visited before?
1250       // If so, we have reached it because we accidentally took a
1251       // loop-back edge from 'sub' back into the body of the loop,
1252       // and worked our way up again to the loop header 'sub'.
1253       // So, take the first unexplored path on the way up to 'dom'.
1254       for (int j = nlist.size() - 1; j >= 0; j--) {
1255         intptr_t ni = (intptr_t)nlist.at(j);
1256         Node* visited = (Node*)(ni & ~1);
1257         bool  visited_twice_already = ((ni & 1) != 0);
1258         if (visited == sub) {
1259           if (visited_twice_already) {
1260             // Visited 2 paths, but still stuck in loop body.  Give up.
1261             return false;
1262           }
1263           // The Region node was visited before only once.
1264           // (We will repush with the low bit set, below.)
1265           nlist.remove(j);
1266           // We will find a new edge and re-insert.
1267           region_was_visited_before = true;
1268           break;
1269         }
1270       }
1271 
1272       // Find an incoming edge which has not been seen yet; walk through it.
1273       assert(up == sub, "");
1274       uint skip = region_was_visited_before ? 1 : 0;
1275       for (uint i = 1; i < sub->req(); i++) {
1276         Node* in = sub->in(i);
1277         if (in != NULL && !in->is_top() && in != sub) {
1278           if (skip == 0) {
1279             up = in;
1280             break;
1281           }
1282           --skip;               // skip this nontrivial input
1283         }
1284       }
1285 
      // Set bit 0 of the pushed entry to indicate that both paths have been taken.
1287       nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0)));
1288     }
1289 
1290     if (up == sub) {
1291       break;    // some kind of tight cycle
1292     }
1293     if (up == orig_sub && met_dom) {
1294       // returned back after visiting 'dom'
1295       break;    // some kind of cycle
1296     }
1297     if (--iterations_without_region_limit < 0) {
1298       break;    // dead cycle
1299     }
1300     sub = up;
1301   }
1302 
1303   // Did not meet Root or Start node in pred. chain.
1304   // Conservative answer for dead code.
1305   return false;
1306 }
1307 
//------------------------------kill_dead_code---------------------------------
1309 // This control node is dead.  Follow the subgraph below it making everything
1310 // using it dead as well.  This will happen normally via the usual IterGVN
1311 // worklist but this call is more efficient.  Do not update use-def info
1312 // inside the dead region, just at the borders.
1313 static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
1314   // Con's are a popular node to re-hit in the hash table again.
1315   if( dead->is_Con() ) return;
1316 
1317   // Can't put ResourceMark here since igvn->_worklist uses the same arena
1318   // for verify pass with +VerifyOpto and we add/remove elements in it here.
1319   Node_List  nstack(Thread::current()->resource_area());
1320 
1321   Node *top = igvn->C->top();
1322   nstack.push(dead);
1323   bool has_irreducible_loop = igvn->C->has_irreducible_loop();
1324 
1325   while (nstack.size() > 0) {
1326     dead = nstack.pop();
1327     if (dead->Opcode() == Op_SafePoint) {
1328       dead->as_SafePoint()->disconnect_from_root(igvn);
1329     }
1330     if (dead->outcnt() > 0) {
1331       // Keep dead node on stack until all uses are processed.
1332       nstack.push(dead);
1333       // For all Users of the Dead...    ;-)
1334       for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) {
1335         Node* use = dead->last_out(k);
1336         igvn->hash_delete(use);       // Yank from hash table prior to mod
1337         if (use->in(0) == dead) {     // Found another dead node
1338           assert (!use->is_Con(), "Control for Con node should be Root node.");
1339           use->set_req(0, top);       // Cut dead edge to prevent processing
1340           nstack.push(use);           // the dead node again.
1341         } else if (!has_irreducible_loop && // Backedge could be alive in irreducible loop
1342                    use->is_Loop() && !use->is_Root() &&       // Don't kill Root (RootNode extends LoopNode)
1343                    use->in(LoopNode::EntryControl) == dead) { // Dead loop if its entry is dead
1344           use->set_req(LoopNode::EntryControl, top);          // Cut dead edge to prevent processing
1345           use->set_req(0, top);       // Cut self edge
1346           nstack.push(use);
1347         } else {                      // Else found a not-dead user
1348           // Dead if all inputs are top or null
1349           bool dead_use = !use->is_Root(); // Keep empty graph alive
1350           for (uint j = 1; j < use->req(); j++) {
1351             Node* in = use->in(j);
1352             if (in == dead) {         // Turn all dead inputs into TOP
1353               use->set_req(j, top);
1354             } else if (in != NULL && !in->is_top()) {
1355               dead_use = false;
1356             }
1357           }
1358           if (dead_use) {
1359             if (use->is_Region()) {
1360               use->set_req(0, top);   // Cut self edge
1361             }
1362             nstack.push(use);
1363           } else {
1364             igvn->_worklist.push(use);
1365           }
1366         }
1367         // Refresh the iterator, since any number of kills might have happened.
1368         k = dead->last_outs(kmin);
1369       }
1370     } else { // (dead->outcnt() == 0)
1371       // Done with outputs.
1372       igvn->hash_delete(dead);
1373       igvn->_worklist.remove(dead);
1374       igvn->C->remove_modified_node(dead);
1375       igvn->set_type(dead, Type::TOP);
1376       if (dead->is_macro()) {
1377         igvn->C->remove_macro_node(dead);
1378       }
1379       if (dead->is_expensive()) {
1380         igvn->C->remove_expensive_node(dead);
1381       }
1382       CastIINode* cast = dead->isa_CastII();
1383       if (cast != NULL && cast->has_range_check()) {
1384         igvn->C->remove_range_check_cast(cast);
1385       }
1386       if (dead->Opcode() == Op_Opaque4) {
1387         igvn->C->remove_opaque4_node(dead);
1388       }
1389       if (dead->is_ValueTypeBase()) {
1390         igvn->C->remove_value_type(dead);
1391       }
1392       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1393       bs->unregister_potential_barrier_node(dead);
1394       igvn->C->record_dead_node(dead->_idx);
1395       // Kill all inputs to the dead guy
1396       for (uint i=0; i < dead->req(); i++) {
1397         Node *n = dead->in(i);      // Get input to dead guy
1398         if (n != NULL && !n->is_top()) { // Input is valid?
1399           dead->set_req(i, top);    // Smash input away
1400           if (n->outcnt() == 0) {   // Input also goes dead?
1401             if (!n->is_Con())
1402               nstack.push(n);       // Clear it out as well
1403           } else if (n->outcnt() == 1 &&
1404                      n->has_special_unique_user()) {
1405             igvn->add_users_to_worklist( n );
1406           } else if (n->outcnt() <= 2 && n->is_Store()) {
1407             // Push store's uses on worklist to enable folding optimization for
1408             // store/store and store/load to the same address.
1409             // The restriction (outcnt() <= 2) is the same as in set_req_X()
1410             // and remove_globally_dead_node().
1411             igvn->add_users_to_worklist( n );
1412           } else {
1413             BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(igvn, n);
1414           }
1415         }
1416       }
1417     } // (dead->outcnt() == 0)
1418   }   // while (nstack.size() > 0) for outputs
1419   return;
1420 }
1421 
1422 //------------------------------remove_dead_region-----------------------------
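     // If this node's control input has died, aggressively kill the dead subgraph
     // (IterGVN only); if control is a single-entry (copy) Region, re-point control
     // to the Region's surviving predecessor and report progress.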
1423 bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
1424   Node *n = in(0);
1425   if( !n ) return false;
1426   // Lost control into this guy?  I.e., it became unreachable?
1427   // Aggressively kill all unreachable code.
1428   if (can_reshape && n->is_top()) {
1429     kill_dead_code(this, phase->is_IterGVN());
1430     return false; // Node is dead.
1431   }
1432 
1433   if( n->is_Region() && n->as_Region()->is_copy() ) {
1434     Node *m = n->nonnull_req();
1435     set_req(0, m);
1436     return true;
1437   }
1438   return false;
1439 }
1440 
1441 //------------------------------hash-------------------------------------------
1442 // Hash function over Nodes.
1443 uint Node::hash() const {
1444   uint sum = 0;
1445   for( uint i=0; i<_cnt; i++ )  // Add in all inputs
1446     sum = (sum<<1)-(uintptr_t)in(i);        // Ignore embedded NULLs
1447   return (sum>>2) + _cnt + Opcode();
1448 }
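     // The hash mixes the input pointers with the input count and the opcode, so
     // two nodes with the same opcode and identical inputs (e.g. two AddI nodes
     // over the same operands) hash alike and can be commoned by GVN, with cmp()
     // below checking any node-specific state.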
1449 
1450 //------------------------------cmp--------------------------------------------
1451 // Compare special parts of simple Nodes
1452 bool Node::cmp( const Node &n ) const {
1453   return true;                  // Must be same
1454 }
1455 
1456 //------------------------------rematerialize-----------------------------------
1457 // Should we clone rather than spill this instruction?
1458 bool Node::rematerialize() const {
1459   if ( is_Mach() )
1460     return this->as_Mach()->rematerialize();
1461   else
1462     return (_flags & Flag_rematerialize) != 0;
1463 }
1464 
1465 //------------------------------needs_anti_dependence_check---------------------
1466 // Nodes which use memory without consuming it, and hence need anti-dependence checks.
1467 bool Node::needs_anti_dependence_check() const {
1468   if (req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0) {
1469     return false;
1470   }
1471   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1472   if (!bs->needs_anti_dependence_check(this)) {
1473     return false;
1474   }
1475   return in(1)->bottom_type()->has_memory();
1476 }
1477 
1478 // Get the integer type carried by a ConNode (or other TypeNode such as a CastIINode).
1479 // Return NULL if there is no apparent integer type here.
1480 const TypeInt* Node::find_int_type() const {
1481   if (this->is_Type()) {
1482     return this->as_Type()->type()->isa_int();
1483   } else if (this->is_Con()) {
1484     assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
1485     return this->bottom_type()->isa_int();
1486   }
1487   return NULL;
1488 }
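     // For example, a ConI node built for the constant 7 is a TypeNode whose type()
     // is the singleton TypeInt::make(7); that is what find_int_type() returns for it.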
1489 
1490 // Get a pointer constant from a ConstNode.
1491 // Returns the constant if it is a pointer ConstNode
1492 intptr_t Node::get_ptr() const {
1493   assert( Opcode() == Op_ConP, "" );
1494   return ((ConPNode*)this)->type()->is_ptr()->get_con();
1495 }
1496 
1497 // Get a narrow oop constant from a ConNNode.
1498 intptr_t Node::get_narrowcon() const {
1499   assert( Opcode() == Op_ConN, "" );
1500   return ((ConNNode*)this)->type()->is_narrowoop()->get_con();
1501 }
1502 
1503 // Get the long type carried by a ConNode (or other TypeNode).
1504 // Return NULL if there is no apparent long type here.
1505 const TypeLong* Node::find_long_type() const {
1506   if (this->is_Type()) {
1507     return this->as_Type()->type()->isa_long();
1508   } else if (this->is_Con()) {
1509     assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
1510     return this->bottom_type()->isa_long();
1511   }
1512   return NULL;
1513 }
1514 
1515 
1516 /**
1517  * Return a ptr type for nodes which should have it.
1518  */
1519 const TypePtr* Node::get_ptr_type() const {
1520   const TypePtr* tp = this->bottom_type()->make_ptr();
1521 #ifdef ASSERT
1522   if (tp == NULL) {
1523     this->dump(1);
1524     assert((tp != NULL), "unexpected node type");
1525   }
1526 #endif
1527   return tp;
1528 }
1529 
1530 // Get a double constant from a ConstNode.
1531 // Returns the constant if it is a double ConstNode
1532 jdouble Node::getd() const {
1533   assert( Opcode() == Op_ConD, "" );
1534   return ((ConDNode*)this)->type()->is_double_constant()->getd();
1535 }
1536 
1537 // Get a float constant from a ConstNode.
1538 // Returns the constant if it is a float ConstNode
1539 jfloat Node::getf() const {
1540   assert( Opcode() == Op_ConF, "" );
1541   return ((ConFNode*)this)->type()->is_float_constant()->getf();
1542 }
1543 
1544 #ifndef PRODUCT
1545 
1546 //------------------------------find------------------------------------------
1547 // Find a neighbor of this Node with the given _idx.
1548 // If idx is negative, search for its absolute value, following both _in and _out edges.
1549 static void find_recur(Compile* C,  Node* &result, Node *n, int idx, bool only_ctrl,
1550                         VectorSet* old_space, VectorSet* new_space ) {
1551   int node_idx = (idx >= 0) ? idx : -idx;
1552   if (NotANode(n))  return;  // Gracefully handle NULL, -1, 0xabababab, etc.
1553   // Contained in new_space or old_space?   Check old_arena first since it's mostly empty.
1554   VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space;
1555   if( v->test(n->_idx) ) return;
1556   if( (int)n->_idx == node_idx
1557       debug_only(|| n->debug_idx() == node_idx) ) {
1558     if (result != NULL)
1559       tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n",
1560                  (uintptr_t)result, (uintptr_t)n, node_idx);
1561     result = n;
1562   }
1563   v->set(n->_idx);
1564   for( uint i=0; i<n->len(); i++ ) {
1565     if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue;
1566     find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space );
1567   }
1568   // Search along forward edges also:
1569   if (idx < 0 && !only_ctrl) {
1570     for( uint j=0; j<n->outcnt(); j++ ) {
1571       find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space );
1572     }
1573   }
1574 #ifdef ASSERT
1575   // Search along debug_orig edges last, checking for cycles
1576   Node* orig = n->debug_orig();
1577   if (orig != NULL) {
1578     do {
1579       if (NotANode(orig))  break;
1580       find_recur(C, result, orig, idx, only_ctrl, old_space, new_space );
1581       orig = orig->debug_orig();
1582     } while (orig != NULL && orig != n->debug_orig());
1583   }
1584 #endif //ASSERT
1585 }
1586 
1587 // call this from debugger:
1588 Node* find_node(Node* n, int idx) {
1589   return n->find(idx);
1590 }
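     // Illustrative debugger usage (assuming 'n' is some live Node*):
     //   (gdb) call find_node(n, 42)     // search n's neighborhood for _idx == 42
     //   (gdb) call find_node(n, -42)    // a negative idx also follows def-use (output) edges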
1591 
1592 //------------------------------find-------------------------------------------
1593 Node* Node::find(int idx) const {
1594   ResourceArea *area = Thread::current()->resource_area();
1595   VectorSet old_space(area), new_space(area);
1596   Node* result = NULL;
1597   find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space );
1598   return result;
1599 }
1600 
1601 //------------------------------find_ctrl--------------------------------------
1602 // Find an ancestor of this node in the control history with the given _idx.
1603 Node* Node::find_ctrl(int idx) const {
1604   ResourceArea *area = Thread::current()->resource_area();
1605   VectorSet old_space(area), new_space(area);
1606   Node* result = NULL;
1607   find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space );
1608   return result;
1609 }
1610 #endif
1611 
1612 
1613 
1614 #ifndef PRODUCT
1615 
1616 // -----------------------------Name-------------------------------------------
1617 extern const char *NodeClassNames[];
1618 const char *Node::Name() const { return NodeClassNames[Opcode()]; }
1619 
1620 static bool is_disconnected(const Node* n) {
1621   for (uint i = 0; i < n->req(); i++) {
1622     if (n->in(i) != NULL)  return false;
1623   }
1624   return true;
1625 }
1626 
1627 #ifdef ASSERT
1628 static void dump_orig(Node* orig, outputStream *st) {
1629   Compile* C = Compile::current();
1630   if (NotANode(orig)) orig = NULL;
1631   if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
1632   if (orig == NULL) return;
1633   st->print(" !orig=");
1634   Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
1635   if (NotANode(fast)) fast = NULL;
1636   while (orig != NULL) {
1637     bool discon = is_disconnected(orig);  // if discon, print [123] else 123
1638     if (discon) st->print("[");
1639     if (!Compile::current()->node_arena()->contains(orig))
1640       st->print("o");
1641     st->print("%d", orig->_idx);
1642     if (discon) st->print("]");
1643     orig = orig->debug_orig();
1644     if (NotANode(orig)) orig = NULL;
1645     if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
1646     if (orig != NULL) st->print(",");
1647     if (fast != NULL) {
1648       // Step fast twice for each single step of orig:
1649       fast = fast->debug_orig();
1650       if (NotANode(fast)) fast = NULL;
1651       if (fast != NULL && fast != orig) {
1652         fast = fast->debug_orig();
1653         if (NotANode(fast)) fast = NULL;
1654       }
1655       if (fast == orig) {
1656         st->print("...");
1657         break;
1658       }
1659     }
1660   }
1661 }
1662 
1663 void Node::set_debug_orig(Node* orig) {
1664   _debug_orig = orig;
1665   if (BreakAtNode == 0)  return;
1666   if (NotANode(orig))  orig = NULL;
1667   int trip = 10;
1668   while (orig != NULL) {
1669     if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
1670       tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
1671                     this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
1672       BREAKPOINT;
1673     }
1674     orig = orig->debug_orig();
1675     if (NotANode(orig))  orig = NULL;
1676     if (trip-- <= 0)  break;
1677   }
1678 }
1679 #endif //ASSERT
1680 
1681 //------------------------------dump------------------------------------------
1682 // Dump a Node
1683 void Node::dump(const char* suffix, bool mark, outputStream *st) const {
1684   Compile* C = Compile::current();
1685   bool is_new = C->node_arena()->contains(this);
1686   C->_in_dump_cnt++;
1687   st->print("%c%d%s\t%s\t=== ", is_new ? ' ' : 'o', _idx, mark ? " >" : "", Name());
1688 
1689   // Dump the required and precedence inputs
1690   dump_req(st);
1691   dump_prec(st);
1692   // Dump the outputs
1693   dump_out(st);
1694 
1695   if (is_disconnected(this)) {
1696 #ifdef ASSERT
1697     st->print("  [%d]",debug_idx());
1698     dump_orig(debug_orig(), st);
1699 #endif
1700     st->cr();
1701     C->_in_dump_cnt--;
1702     return;                     // don't process dead nodes
1703   }
1704 
1705   if (C->clone_map().value(_idx) != 0) {
1706     C->clone_map().dump(_idx);
1707   }
1708   // Dump node-specific info
1709   dump_spec(st);
1710 #ifdef ASSERT
1711   // Dump the non-reset _debug_idx
1712   if (Verbose && WizardMode) {
1713     st->print("  [%d]",debug_idx());
1714   }
1715 #endif
1716 
1717   const Type *t = bottom_type();
1718 
1719   if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
1720     const TypeInstPtr  *toop = t->isa_instptr();
1721     const TypeKlassPtr *tkls = t->isa_klassptr();
1722     ciKlass*           klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
1723     if (klass && klass->is_loaded() && klass->is_interface()) {
1724       st->print("  Interface:");
1725     } else if (toop) {
1726       st->print("  Oop:");
1727     } else if (tkls) {
1728       st->print("  Klass:");
1729     }
1730     t->dump_on(st);
1731   } else if (t == Type::MEMORY) {
1732     st->print("  Memory:");
1733     MemNode::dump_adr_type(this, adr_type(), st);
1734   } else if (Verbose || WizardMode) {
1735     st->print("  Type:");
1736     if (t) {
1737       t->dump_on(st);
1738     } else {
1739       st->print("no type");
1740     }
1741   } else if (t != NULL && t->isa_vect() && this->is_MachSpillCopy()) {
1742     // Dump MachSpillcopy vector type.
1743     t->dump_on(st);
1744   }
1745   if (is_new) {
1746     debug_only(dump_orig(debug_orig(), st));
1747     Node_Notes* nn = C->node_notes_at(_idx);
1748     if (nn != NULL && !nn->is_clear()) {
1749       if (nn->jvms() != NULL) {
1750         st->print(" !jvms:");
1751         nn->jvms()->dump_spec(st);
1752       }
1753     }
1754   }
1755   if (suffix) st->print("%s", suffix);
1756   C->_in_dump_cnt--;
1757 }
1758 
1759 //------------------------------dump_req--------------------------------------
1760 void Node::dump_req(outputStream *st) const {
1761   // Dump the required input edges
1762   for (uint i = 0; i < req(); i++) {    // For all required inputs
1763     Node* d = in(i);
1764     if (d == NULL) {
1765       st->print("_ ");
1766     } else if (NotANode(d)) {
1767       st->print("NotANode ");  // uninitialized, sentinel, garbage, etc.
1768     } else {
1769       st->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
1770     }
1771   }
1772 }
1773 
1774 
1775 //------------------------------dump_prec-------------------------------------
1776 void Node::dump_prec(outputStream *st) const {
1777   // Dump the precedence edges
1778   int any_prec = 0;
1779   for (uint i = req(); i < len(); i++) {       // For all precedence inputs
1780     Node* p = in(i);
1781     if (p != NULL) {
1782       if (!any_prec++) st->print(" |");
1783       if (NotANode(p)) { st->print("NotANode "); continue; }
1784       st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
1785     }
1786   }
1787 }
1788 
1789 //------------------------------dump_out--------------------------------------
1790 void Node::dump_out(outputStream *st) const {
1791   // Delimit the output edges
1792   st->print(" [[");
1793   // Dump the output edges
1794   for (uint i = 0; i < _outcnt; i++) {    // For all outputs
1795     Node* u = _out[i];
1796     if (u == NULL) {
1797       st->print("_ ");
1798     } else if (NotANode(u)) {
1799       st->print("NotANode ");
1800     } else {
1801       st->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
1802     }
1803   }
1804   st->print("]] ");
1805 }
1806 
1807 //----------------------------collect_nodes_i----------------------------------
1808 // Collects nodes from an Ideal graph, starting from a given start node and
1809 // moving in a given direction until a certain depth (distance from the start
1810 // node) is reached. Duplicates are ignored.
1811 // Arguments:
1812 //   nstack:        the nodes are collected into this array.
1813 //   start:         the node at which to start collecting.
1814 //   direction:     if this is a positive number, collect input nodes; if it is
1815 //                  a negative number, collect output nodes.
1816 //   depth:         collect nodes up to this distance from the start node.
1817 //   include_start: whether to include the start node in the result collection.
1818 //   only_ctrl:     whether to regard control edges only during traversal.
1819 //   only_data:     whether to regard data edges only during traversal.
1820 static void collect_nodes_i(GrowableArray<Node*> *nstack, const Node* start, int direction, uint depth, bool include_start, bool only_ctrl, bool only_data) {
1821   Node* s = (Node*) start; // remove const
1822   nstack->append(s);
1823   int begin = 0;
1824   int end = 0;
1825   for(uint i = 0; i < depth; i++) {
1826     end = nstack->length();
1827     for(int j = begin; j < end; j++) {
1828       Node* tp  = nstack->at(j);
1829       uint limit = direction > 0 ? tp->len() : tp->outcnt();
1830       for(uint k = 0; k < limit; k++) {
1831         Node* n = direction > 0 ? tp->in(k) : tp->raw_out(k);
1832 
1833         if (NotANode(n))  continue;
1834         // do not recurse through top or the root (would reach unrelated stuff)
1835         if (n->is_Root() || n->is_top()) continue;
1836         if (only_ctrl && !n->is_CFG()) continue;
1837         if (only_data && n->is_CFG()) continue;
1838 
1839         bool on_stack = nstack->contains(n);
1840         if (!on_stack) {
1841           nstack->append(n);
1842         }
1843       }
1844     }
1845     begin = end;
1846   }
1847   if (!include_start) {
1848     nstack->remove(s);
1849   }
1850 }
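     // Illustrative use (hypothetical, 'n' being an arbitrary live node): collect
     // the data inputs of 'n' up to two levels away, excluding 'n' itself:
     //   GrowableArray<Node*> list(Compile::current()->live_nodes());
     //   collect_nodes_i(&list, n, /*direction*/ 1, /*depth*/ 2,
     //                   /*include_start*/ false, /*only_ctrl*/ false, /*only_data*/ true);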
1851 
1852 //------------------------------dump_nodes-------------------------------------
1853 static void dump_nodes(const Node* start, int d, bool only_ctrl) {
1854   if (NotANode(start)) return;
1855 
1856   GrowableArray <Node *> nstack(Compile::current()->live_nodes());
1857   collect_nodes_i(&nstack, start, d, (uint) ABS(d), true, only_ctrl, false);
1858 
1859   int end = nstack.length();
1860   if (d > 0) {
1861     for(int j = end-1; j >= 0; j--) {
1862       nstack.at(j)->dump();
1863     }
1864   } else {
1865     for(int j = 0; j < end; j++) {
1866       nstack.at(j)->dump();
1867     }
1868   }
1869 }
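     // With a positive depth, dump_nodes() prints the collected inputs deepest-first,
     // so definitions appear above their uses; with a negative depth the outputs are
     // printed in discovery order.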
1870 
1871 //------------------------------dump-------------------------------------------
1872 void Node::dump(int d) const {
1873   dump_nodes(this, d, false);
1874 }
1875 
1876 //------------------------------dump_ctrl--------------------------------------
1877 // Dump a Node's control history to depth
1878 void Node::dump_ctrl(int d) const {
1879   dump_nodes(this, d, true);
1880 }
1881 
1882 //-----------------------------dump_comp---------------------------------------
1883 void Node::dump_comp() const {
1884   this->dump_comp("\n");
1885 }
1886 
1887 //-----------------------------dump_comp---------------------------------------
1888 // Dump a Node in compact representation, i.e., just print its name and index.
1889 // Nodes can specify additional specifics to print in compact representation by
1890 // implementing dump_compact_spec.
1891 void Node::dump_comp(const char* suffix, outputStream *st) const {
1892   Compile* C = Compile::current();
1893   C->_in_dump_cnt++;
1894   st->print("%s(%d)", Name(), _idx);
1895   this->dump_compact_spec(st);
1896   if (suffix) {
1897     st->print("%s", suffix);
1898   }
1899   C->_in_dump_cnt--;
1900 }
1901 
1902 //----------------------------dump_related-------------------------------------
1903 // Dump a Node's related nodes - the notion of "related" depends on the Node at
1904 // hand and is determined by the implementation of the virtual method rel.
1905 void Node::dump_related() const {
1906   Compile* C = Compile::current();
1907   GrowableArray <Node *> in_rel(C->unique());
1908   GrowableArray <Node *> out_rel(C->unique());
1909   this->related(&in_rel, &out_rel, false);
1910   for (int i = in_rel.length() - 1; i >= 0; i--) {
1911     in_rel.at(i)->dump();
1912   }
1913   this->dump("\n", true);
1914   for (int i = 0; i < out_rel.length(); i++) {
1915     out_rel.at(i)->dump();
1916   }
1917 }
1918 
1919 //----------------------------dump_related-------------------------------------
1920 // Dump a Node's related nodes up to a given depth (distance from the start
1921 // node).
1922 // Arguments:
1923 //   d_in:  depth for input nodes.
1924 //   d_out: depth for output nodes (note: this is also a positive number).
1925 void Node::dump_related(uint d_in, uint d_out) const {
1926   Compile* C = Compile::current();
1927   GrowableArray <Node *> in_rel(C->unique());
1928   GrowableArray <Node *> out_rel(C->unique());
1929 
1930   // call collect_nodes_i directly
1931   collect_nodes_i(&in_rel, this, 1, d_in, false, false, false);
1932   collect_nodes_i(&out_rel, this, -1, d_out, false, false, false);
1933 
1934   for (int i = in_rel.length() - 1; i >= 0; i--) {
1935     in_rel.at(i)->dump();
1936   }
1937   this->dump("\n", true);
1938   for (int i = 0; i < out_rel.length(); i++) {
1939     out_rel.at(i)->dump();
1940   }
1941 }
1942 
1943 //------------------------dump_related_compact---------------------------------
1944 // Dump a Node's related nodes in compact representation. The notion of
1945 // "related" depends on the Node at hand and is determined by the implementation
1946 // of the virtual method rel.
1947 void Node::dump_related_compact() const {
1948   Compile* C = Compile::current();
1949   GrowableArray <Node *> in_rel(C->unique());
1950   GrowableArray <Node *> out_rel(C->unique());
1951   this->related(&in_rel, &out_rel, true);
1952   int n_in = in_rel.length();
1953   int n_out = out_rel.length();
1954 
1955   this->dump_comp(n_in == 0 ? "\n" : "  ");
1956   for (int i = 0; i < n_in; i++) {
1957     in_rel.at(i)->dump_comp(i == n_in - 1 ? "\n" : "  ");
1958   }
1959   for (int i = 0; i < n_out; i++) {
1960     out_rel.at(i)->dump_comp(i == n_out - 1 ? "\n" : "  ");
1961   }
1962 }
1963 
1964 //------------------------------related----------------------------------------
1965 // Collect a Node's related nodes. For CFG nodes, the default behaviour collects
1966 // the inputs and outputs at depth 1, including both control and data flow edges,
1967 // regardless of whether the presentation is compact or not. For data nodes, the
1968 // default is to collect the entire data input graph (only up to depth 1 if
1969 // compact), and the outputs up to depth 1.
1970 void Node::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
1971   if (this->is_CFG()) {
1972     collect_nodes_i(in_rel, this, 1, 1, false, false, false);
1973     collect_nodes_i(out_rel, this, -1, 1, false, false, false);
1974   } else {
1975     if (compact) {
1976       this->collect_nodes(in_rel, 1, false, true);
1977     } else {
1978       this->collect_nodes_in_all_data(in_rel, false);
1979     }
1980     this->collect_nodes(out_rel, -1, false, false);
1981   }
1982 }
1983 
1984 //---------------------------collect_nodes-------------------------------------
1985 // An entry point to the low-level node collection facility, to start from a
1986 // given node in the graph. The start node is by default not included in the
1987 // result.
1988 // Arguments:
1989 //   ns:   collect the nodes into this data structure.
1990 //   d:    the depth (distance from start node) to which nodes should be
1991 //         collected. A value >0 indicates input nodes, a value <0, output
1992 //         nodes.
1993 //   ctrl: include only control nodes.
1994 //   data: include only data nodes.
1995 void Node::collect_nodes(GrowableArray<Node*> *ns, int d, bool ctrl, bool data) const {
1996   if (ctrl && data) {
1997     // ignore nonsensical combination
1998     return;
1999   }
2000   collect_nodes_i(ns, this, d, (uint) ABS(d), false, ctrl, data);
2001 }
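     // For example (illustrative): n->collect_nodes(&list, -3, false, true) gathers
     // only the data nodes among the outputs of 'n', up to three levels away.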
2002 
2003 //--------------------------collect_nodes_in-----------------------------------
2004 static void collect_nodes_in(Node* start, GrowableArray<Node*> *ns, bool primary_is_data, bool collect_secondary) {
2005   // The maximum depth is determined using a BFS that visits all primary (data
2006   // or control) inputs and increments the depth at each level.
2007   uint d_in = 0;
2008   GrowableArray<Node*> nodes(Compile::current()->unique());
2009   nodes.push(start);
2010   int nodes_at_current_level = 1;
2011   int n_idx = 0;
2012   while (nodes_at_current_level > 0) {
2013     // Add all primary inputs reachable from the current level to the list, and
2014     // increase the depth if there were any.
2015     int nodes_at_next_level = 0;
2016     bool nodes_added = false;
2017     while (nodes_at_current_level > 0) {
2018       nodes_at_current_level--;
2019       Node* current = nodes.at(n_idx++);
2020       for (uint i = 0; i < current->len(); i++) {
2021         Node* n = current->in(i);
2022         if (NotANode(n)) {
2023           continue;
2024         }
2025         if ((primary_is_data && n->is_CFG()) || (!primary_is_data && !n->is_CFG())) {
2026           continue;
2027         }
2028         if (!nodes.contains(n)) {
2029           nodes.push(n);
2030           nodes_added = true;
2031           nodes_at_next_level++;
2032         }
2033       }
2034     }
2035     if (nodes_added) {
2036       d_in++;
2037     }
2038     nodes_at_current_level = nodes_at_next_level;
2039   }
2040   start->collect_nodes(ns, d_in, !primary_is_data, primary_is_data);
2041   if (collect_secondary) {
2042     // Now, iterate over the secondary nodes in ns and add the respective
2043     // boundary reachable from them.
2044     GrowableArray<Node*> sns(Compile::current()->unique());
2045     for (GrowableArrayIterator<Node*> it = ns->begin(); it != ns->end(); ++it) {
2046       Node* n = *it;
2047       n->collect_nodes(&sns, 1, primary_is_data, !primary_is_data);
2048       for (GrowableArrayIterator<Node*> d = sns.begin(); d != sns.end(); ++d) {
2049         ns->append_if_missing(*d);
2050       }
2051       sns.clear();
2052     }
2053   }
2054 }
2055 
2056 //---------------------collect_nodes_in_all_data-------------------------------
2057 // Collect the entire data input graph. Include the control boundary if
2058 // requested.
2059 // Arguments:
2060 //   ns:   collect the nodes into this data structure.
2061 //   ctrl: if true, include the control boundary.
2062 void Node::collect_nodes_in_all_data(GrowableArray<Node*> *ns, bool ctrl) const {
2063   collect_nodes_in((Node*) this, ns, true, ctrl);
2064 }
2065 
2066 //--------------------------collect_nodes_in_all_ctrl--------------------------
2067 // Collect the entire control input graph. Include the data boundary if
2068 // requested.
2069 //   ns:   collect the nodes into this data structure.
2070 //   data: if true, include the data boundary.
2071 void Node::collect_nodes_in_all_ctrl(GrowableArray<Node*> *ns, bool data) const {
2072   collect_nodes_in((Node*) this, ns, false, data);
2073 }
2074 
2075 //------------------collect_nodes_out_all_ctrl_boundary------------------------
2076 // Collect the entire output graph until hitting control node boundaries, and
2077 // include those.
2078 void Node::collect_nodes_out_all_ctrl_boundary(GrowableArray<Node*> *ns) const {
2079   // Perform a BFS and stop at control nodes.
2080   GrowableArray<Node*> nodes(Compile::current()->unique());
2081   nodes.push((Node*) this);
2082   while (nodes.length() > 0) {
2083     Node* current = nodes.pop();
2084     if (NotANode(current)) {
2085       continue;
2086     }
2087     ns->append_if_missing(current);
2088     if (!current->is_CFG()) {
2089       for (DUIterator i = current->outs(); current->has_out(i); i++) {
2090         nodes.push(current->out(i));
2091       }
2092     }
2093   }
2094   ns->remove((Node*) this);
2095 }
2096 
2097 // VERIFICATION CODE
2098 // For each input edge to a node (i.e., for each Use-Def edge), verify that
2099 // there is a corresponding Def-Use edge.
2100 //------------------------------verify_edges-----------------------------------
2101 void Node::verify_edges(Unique_Node_List &visited) {
2102   uint i, j, idx;
2103   int  cnt;
2104   Node *n;
2105 
2106   // Recursive termination test
2107   if (visited.member(this))  return;
2108   visited.push(this);
2109 
2110   // Walk over all input edges, checking for correspondence
2111   for( i = 0; i < len(); i++ ) {
2112     n = in(i);
2113     if (n != NULL && !n->is_top()) {
2114       // Count instances of (Node *)this
2115       cnt = 0;
2116       for (idx = 0; idx < n->_outcnt; idx++ ) {
2117         if (n->_out[idx] == (Node *)this)  cnt++;
2118       }
2119       assert( cnt > 0,"Failed to find Def-Use edge." );
2120       // Check for duplicate edges
2121       // walk the input array downcounting the input edges to n
2122       for( j = 0; j < len(); j++ ) {
2123         if( in(j) == n ) cnt--;
2124       }
2125       assert( cnt == 0,"Mismatched edge count.");
2126     } else if (n == NULL) {
2127       assert(i >= req() || i == 0 || is_Region() || is_Phi(), "only regions or phis have null data edges");
2128     } else {
2129       assert(n->is_top(), "sanity");
2130       // Nothing to check.
2131     }
2132   }
2133   // Recursive walk over all input edges
2134   for( i = 0; i < len(); i++ ) {
2135     n = in(i);
2136     if( n != NULL )
2137       in(i)->verify_edges(visited);
2138   }
2139 }
2140 
2141 //------------------------------verify_recur-----------------------------------
2142 static const Node *unique_top = NULL;
2143 
2144 void Node::verify_recur(const Node *n, int verify_depth,
2145                         VectorSet &old_space, VectorSet &new_space) {
2146   if ( verify_depth == 0 )  return;
2147   if (verify_depth > 0)  --verify_depth;
2148 
2149   Compile* C = Compile::current();
2150 
2151   // Contained in new_space or old_space?
2152   VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space;
2153   // Check for visited in the proper space.  Numberings are not unique
2154   // across spaces so we need a separate VectorSet for each space.
2155   if( v->test_set(n->_idx) ) return;
2156 
2157   if (n->is_Con() && n->bottom_type() == Type::TOP) {
2158     if (C->cached_top_node() == NULL)
2159       C->set_cached_top_node((Node*)n);
2160     assert(C->cached_top_node() == n, "TOP node must be unique");
2161   }
2162 
2163   for( uint i = 0; i < n->len(); i++ ) {
2164     Node *x = n->in(i);
2165     if (!x || x->is_top()) continue;
2166 
2167     // Verify my input has a def-use edge to me
2168     if (true /*VerifyDefUse*/) {
2169       // Count use-def edges from n to x
2170       int cnt = 0;
2171       for( uint j = 0; j < n->len(); j++ )
2172         if( n->in(j) == x )
2173           cnt++;
2174       // Count def-use edges from x to n
2175       uint max = x->_outcnt;
2176       for( uint k = 0; k < max; k++ )
2177         if (x->_out[k] == n)
2178           cnt--;
2179       assert( cnt == 0, "mismatched def-use edge counts" );
2180     }
2181 
2182     verify_recur(x, verify_depth, old_space, new_space);
2183   }
2184 
2185 }
2186 
2187 //------------------------------verify-----------------------------------------
2188 // Check Def-Use info for my subgraph
2189 void Node::verify() const {
2190   Compile* C = Compile::current();
2191   Node* old_top = C->cached_top_node();
2192   ResourceMark rm;
2193   ResourceArea *area = Thread::current()->resource_area();
2194   VectorSet old_space(area), new_space(area);
2195   verify_recur(this, -1, old_space, new_space);
2196   C->set_cached_top_node(old_top);
2197 }
2198 #endif
2199 
2200 
2201 //------------------------------walk-------------------------------------------
2202 // Graph walk, with both pre-order and post-order functions
2203 void Node::walk(NFunc pre, NFunc post, void *env) {
2204   VectorSet visited(Thread::current()->resource_area()); // Setup for local walk
2205   walk_(pre, post, env, visited);
2206 }
2207 
2208 void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) {
2209   if( visited.test_set(_idx) ) return;
2210   pre(*this,env);               // Call the pre-order walk function
2211   for( uint i=0; i<_max; i++ )
2212     if( in(i) )                 // Input exists and is not walked?
2213       in(i)->walk_(pre,post,env,visited); // Walk it with pre & post functions
2214   post(*this,env);              // Call the post-order walk function
2215 }
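     // Illustrative use (hypothetical visitor): count every node reachable from
     // 'root' through input edges, using Node::nop below as the post-order function.
     //   static void count_node(Node &n, void *env) { (*(uint*)env)++; }
     //   ...
     //   uint cnt = 0;
     //   root->walk(count_node, Node::nop, &cnt);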
2216 
2217 void Node::nop(Node &, void*) {}
2218 
2219 //------------------------------Registers--------------------------------------
2220 // Do we Match on this edge index or not?  Generally false for Control
2221 // and true for everything else.  Weird for calls & returns.
2222 uint Node::match_edge(uint idx) const {
2223   return idx;                   // True for other than index 0 (control)
2224 }
2225 
2226 static RegMask _not_used_at_all;
2227 // Register classes are defined for specific machines
2228 const RegMask &Node::out_RegMask() const {
2229   ShouldNotCallThis();
2230   return _not_used_at_all;
2231 }
2232 
2233 const RegMask &Node::in_RegMask(uint) const {
2234   ShouldNotCallThis();
2235   return _not_used_at_all;
2236 }
2237 
2238 //=============================================================================
2239 //-----------------------------------------------------------------------------
2240 void Node_Array::reset( Arena *new_arena ) {
2241   _a->Afree(_nodes,_max*sizeof(Node*));
2242   _max   = 0;
2243   _nodes = NULL;
2244   _a     = new_arena;
2245 }
2246 
2247 //------------------------------clear------------------------------------------
2248 // Clear all entries in _nodes to NULL but keep storage
2249 void Node_Array::clear() {
2250   Copy::zero_to_bytes( _nodes, _max*sizeof(Node*) );
2251 }
2252 
2253 //-----------------------------------------------------------------------------
2254 void Node_Array::grow( uint i ) {
2255   if( !_max ) {
2256     _max = 1;
2257     _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) );
2258     _nodes[0] = NULL;
2259   }
2260   uint old = _max;
2261   while( i >= _max ) _max <<= 1;        // Double to fit
2262   _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*));
2263   Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) );
2264 }
2265 
2266 //-----------------------------------------------------------------------------
2267 void Node_Array::insert( uint i, Node *n ) {
2268   if( _nodes[_max-1] ) grow(_max);      // Get more space if full
2269   Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i+1], ((_max-i-1)*sizeof(Node*)));
2270   _nodes[i] = n;
2271 }
2272 
2273 //-----------------------------------------------------------------------------
2274 void Node_Array::remove( uint i ) {
2275   Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i+1], (HeapWord*)&_nodes[i], ((_max-i-1)*sizeof(Node*)));
2276   _nodes[_max-1] = NULL;
2277 }
2278 
2279 //-----------------------------------------------------------------------------
2280 void Node_Array::sort( C_sort_func_t func) {
2281   qsort( _nodes, _max, sizeof( Node* ), func );
2282 }
2283 
2284 //-----------------------------------------------------------------------------
2285 void Node_Array::dump() const {
2286 #ifndef PRODUCT
2287   for( uint i = 0; i < _max; i++ ) {
2288     Node *nn = _nodes[i];
2289     if( nn != NULL ) {
2290       tty->print("%5d--> ",i); nn->dump();
2291     }
2292   }
2293 #endif
2294 }
2295 
2296 //--------------------------is_iteratively_computed------------------------------
2297 // Operation appears to be iteratively computed (such as an induction variable)
2298 // It is possible for this operation to return false for a loop-varying
2299 // value, if it appears (by local graph inspection) to be computed by a simple conditional.
2300 bool Node::is_iteratively_computed() {
2301   if (ideal_reg()) { // does operation have a result register?
2302     for (uint i = 1; i < req(); i++) {
2303       Node* n = in(i);
2304       if (n != NULL && n->is_Phi()) {
2305         for (uint j = 1; j < n->req(); j++) {
2306           if (n->in(j) == this) {
2307             return true;
2308           }
2309         }
2310       }
2311     }
2312   }
2313   return false;
2314 }
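     // For example, the increment AddI of a counted loop feeds a loop Phi whose back
     // edge is that same AddI, so is_iteratively_computed() reports true for the AddI.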
2315 
2316 //--------------------------find_similar------------------------------
2317 // Return a node with opcode "opc" and the same inputs as "this" if one can
2318 // be found; otherwise return NULL.
2319 Node* Node::find_similar(int opc) {
2320   if (req() >= 2) {
2321     Node* def = in(1);
2322     if (def && def->outcnt() >= 2) {
2323       for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) {
2324         Node* use = def->fast_out(i);
2325         if (use != this &&
2326             use->Opcode() == opc &&
2327             use->req() == req()) {
2328           uint j;
2329           for (j = 0; j < use->req(); j++) {
2330             if (use->in(j) != in(j)) {
2331               break;
2332             }
2333           }
2334           if (j == use->req()) {
2335             return use;
2336           }
2337         }
2338       }
2339     }
2340   }
2341   return NULL;
2342 }
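     // For example (illustrative), this->find_similar(this->Opcode()) probes for an
     // already-existing duplicate of 'this' over exactly the same inputs.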
2343 
2344 
2345 //--------------------------unique_ctrl_out------------------------------
2346 // Return the unique control out if only one. Null if none or more than one.
2347 Node* Node::unique_ctrl_out() const {
2348   Node* found = NULL;
2349   for (uint i = 0; i < outcnt(); i++) {
2350     Node* use = raw_out(i);
2351     if (use->is_CFG() && use != this) {
2352       if (found != NULL) return NULL;
2353       found = use;
2354     }
2355   }
2356   return found;
2357 }
2358 
2359 void Node::ensure_control_or_add_prec(Node* c) {
2360   if (in(0) == NULL) {
2361     set_req(0, c);
2362   } else if (in(0) != c) {
2363     add_prec(c);
2364   }
2365 }
2366 
2367 //=============================================================================
2368 //------------------------------yank-------------------------------------------
2369 // Find and remove
2370 void Node_List::yank( Node *n ) {
2371   uint i;
2372   for( i = 0; i < _cnt; i++ )
2373     if( _nodes[i] == n )
2374       break;
2375 
2376   if( i < _cnt )
2377     _nodes[i] = _nodes[--_cnt];
2378 }
2379 
2380 //------------------------------dump-------------------------------------------
2381 void Node_List::dump() const {
2382 #ifndef PRODUCT
2383   for( uint i = 0; i < _cnt; i++ )
2384     if( _nodes[i] ) {
2385       tty->print("%5d--> ",i);
2386       _nodes[i]->dump();
2387     }
2388 #endif
2389 }
2390 
2391 void Node_List::dump_simple() const {
2392 #ifndef PRODUCT
2393   for( uint i = 0; i < _cnt; i++ )
2394     if( _nodes[i] ) {
2395       tty->print(" %d", _nodes[i]->_idx);
2396     } else {
2397       tty->print(" NULL");
2398     }
2399 #endif
2400 }
2401 
2402 //=============================================================================
2403 //------------------------------remove-----------------------------------------
2404 void Unique_Node_List::remove( Node *n ) {
2405   if( _in_worklist[n->_idx] ) {
2406     for( uint i = 0; i < size(); i++ )
2407       if( _nodes[i] == n ) {
2408         map(i,Node_List::pop());
2409         _in_worklist >>= n->_idx;
2410         return;
2411       }
2412     ShouldNotReachHere();
2413   }
2414 }
2415 
2416 //-----------------------remove_useless_nodes----------------------------------
2417 // Remove useless nodes from worklist
2418 void Unique_Node_List::remove_useless_nodes(VectorSet &useful) {
2419 
2420   for( uint i = 0; i < size(); ++i ) {
2421     Node *n = at(i);
2422     assert( n != NULL, "Did not expect null entries in worklist");
2423     if( ! useful.test(n->_idx) ) {
2424       _in_worklist >>= n->_idx;
2425       map(i,Node_List::pop());
2426       // Node *replacement = Node_List::pop();
2427       // if( i != size() ) { // Check if removing last entry
2428       //   _nodes[i] = replacement;
2429       // }
2430       --i;  // Visit popped node
2431       // If it was last entry, loop terminates since size() was also reduced
2432     }
2433   }
2434 }
2435 
2436 //=============================================================================
2437 void Node_Stack::grow() {
2438   size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top
2439   size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode));
2440   size_t max = old_max << 1;             // max * 2
2441   _inodes = REALLOC_ARENA_ARRAY(_a, INode, _inodes, old_max, max);
2442   _inode_max = _inodes + max;
2443   _inode_top = _inodes + old_top;        // restore _top
2444 }
2445 
2446 // Node_Stack can also serve as a map from an index to a node (linear search).
2447 Node* Node_Stack::find(uint idx) const {
2448   uint sz = size();
2449   for (uint i=0; i < sz; i++) {
2450     if (idx == index_at(i) )
2451       return node_at(i);
2452   }
2453   return NULL;
2454 }
2455 
2456 //=============================================================================
2457 uint TypeNode::size_of() const { return sizeof(*this); }
2458 #ifndef PRODUCT
2459 void TypeNode::dump_spec(outputStream *st) const {
2460   if( !Verbose && !WizardMode ) {
2461     // standard dump does this in Verbose and WizardMode
2462     st->print(" #"); _type->dump_on(st);
2463   }
2464 }
2465 
2466 void TypeNode::dump_compact_spec(outputStream *st) const {
2467   st->print("#");
2468   _type->dump_on(st);
2469 }
2470 #endif
2471 uint TypeNode::hash() const {
2472   return Node::hash() + _type->hash();
2473 }
2474 bool TypeNode::cmp( const Node &n ) const
2475 { return !Type::cmp( _type, ((TypeNode&)n)._type ); }
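     // Because _type participates in hash() and cmp(), two type nodes over the same
     // inputs but carrying different types (e.g. two CastII nodes with different
     // ranges) remain distinct under GVN.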
2476 const Type *TypeNode::bottom_type() const { return _type; }
2477 const Type* TypeNode::Value(PhaseGVN* phase) const { return _type; }
2478 
2479 //------------------------------ideal_reg--------------------------------------
2480 uint TypeNode::ideal_reg() const {
2481   return _type->ideal_reg();
2482 }