1 /*
   2  * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2024, 2025, Alibaba Group Holding Limited. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef SHARE_OPTO_NODE_HPP
  27 #define SHARE_OPTO_NODE_HPP
  28 
  29 #include "libadt/vectset.hpp"
  30 #include "opto/compile.hpp"
  31 #include "opto/type.hpp"
  32 #include "utilities/copy.hpp"
  33 
  34 // Portions of code courtesy of Clifford Click
  35 
  36 // Optimization - Graph Style
  37 
  38 
  39 class AbstractLockNode;
  40 class AddNode;
  41 class AddPNode;
  42 class AliasInfo;
  43 class AllocateArrayNode;
  44 class AllocateNode;
  45 class ArrayCopyNode;
  46 class BaseCountedLoopNode;
  47 class BaseCountedLoopEndNode;
  48 class BlackholeNode;
  49 class Block;
  50 class BoolNode;
  51 class BoxLockNode;
  52 class CMoveNode;
  53 class CallDynamicJavaNode;
  54 class CallJavaNode;
  55 class CallLeafNode;
  56 class CallLeafNoFPNode;
  57 class CallLeafPureNode;
  58 class CallNode;
  59 class CallRuntimeNode;
  60 class CallStaticJavaNode;
  61 class CastFFNode;
  62 class CastHHNode;
  63 class CastDDNode;
  64 class CastVVNode;
  65 class CastIINode;
  66 class CastLLNode;
  67 class CastPPNode;
  68 class CatchNode;
  69 class CatchProjNode;
  70 class CheckCastPPNode;
  71 class ClearArrayNode;
  72 class CmpNode;
  73 class CodeBuffer;
  74 class ConstraintCastNode;
  75 class ConNode;
  76 class ConINode;
  77 class ConvertNode;
  78 class CompareAndSwapNode;
  79 class CompareAndExchangeNode;
  80 class CountedLoopNode;
  81 class CountedLoopEndNode;
  82 class DecodeNarrowPtrNode;
  83 class DecodeNNode;
  84 class DecodeNKlassNode;
  85 class EncodeNarrowPtrNode;
  86 class EncodePNode;
  87 class EncodePKlassNode;
  88 class FastLockNode;
  89 class FastUnlockNode;
  90 class FlatArrayCheckNode;
  91 class HaltNode;
  92 class IfNode;
  93 class IfProjNode;
  94 class IfFalseNode;
  95 class IfTrueNode;
  96 class InitializeNode;
  97 class JVMState;
  98 class JumpNode;
  99 class JumpProjNode;
 100 class LoadNode;
 101 class LoadStoreNode;
 102 class LoadStoreConditionalNode;
 103 class LockNode;
 104 class LongCountedLoopNode;
 105 class LongCountedLoopEndNode;
 106 class LoopNode;
 107 class LShiftNode;
 108 class MachBranchNode;
 109 class MachCallDynamicJavaNode;
 110 class MachCallJavaNode;
 111 class MachCallLeafNode;
 112 class MachCallNode;
 113 class MachCallRuntimeNode;
 114 class MachCallStaticJavaNode;
 115 class MachConstantBaseNode;
 116 class MachConstantNode;
 117 class MachGotoNode;
 118 class MachIfNode;
 119 class MachJumpNode;
 120 class MachNode;
 121 class MachNullCheckNode;
 122 class MachProjNode;
 123 class MachPrologNode;
 124 class MachReturnNode;
 125 class MachSafePointNode;
 126 class MachSpillCopyNode;
 127 class MachTempNode;
 128 class MachMergeNode;
 129 class MachMemBarNode;
 130 class MachVEPNode;
 131 class Matcher;
 132 class MemBarNode;
 133 class MemBarStoreStoreNode;
 134 class MemNode;
 135 class MergeMemNode;
 136 class MinMaxNode;
 137 class MoveNode;
 138 class MulNode;
 139 class MultiNode;
 140 class MultiBranchNode;
 141 class NarrowMemProjNode;
 142 class NegNode;
 143 class NegVNode;
 144 class NeverBranchNode;
 145 class Opaque1Node;
 146 class OpaqueLoopInitNode;
 147 class OpaqueLoopStrideNode;
 148 class OpaqueMultiversioningNode;
 149 class OpaqueConstantBoolNode;
 150 class OpaqueInitializedAssertionPredicateNode;
 151 class OpaqueTemplateAssertionPredicateNode;
 152 class OuterStripMinedLoopNode;
 153 class OuterStripMinedLoopEndNode;
 154 class Node;
 155 class Node_Array;
 156 class Node_List;
 157 class Node_Stack;
 158 class OopMap;
 159 class ParmNode;
 160 class ParsePredicateNode;
 161 class PCTableNode;
 162 class PhaseCCP;
 163 class PhaseGVN;
 164 class PhaseIdealLoop;
 165 class PhaseIterGVN;
 166 class PhaseRegAlloc;
 167 class PhaseTransform;
 168 class PhaseValues;
 169 class PhiNode;
 170 class Pipeline;
 171 class PopulateIndexNode;
 172 class ProjNode;
 173 class RangeCheckNode;
 174 class ReachabilityFenceNode;
 175 class ReductionNode;
 176 class RegMask;
 177 class RegionNode;
 178 class RootNode;
 179 class SafePointNode;
 180 class SafePointScalarObjectNode;
 181 class SafePointScalarMergeNode;
 182 class SaturatingVectorNode;
 183 class StartNode;
 184 class State;
 185 class StoreNode;
 186 class SubNode;
 187 class SubTypeCheckNode;
 188 class Type;
 189 class TypeNode;
 190 class UnlockNode;
 191 class InlineTypeNode;
 192 class LoadFlatNode;
 193 class StoreFlatNode;
 194 class VectorNode;
 195 class LoadVectorNode;
 196 class LoadVectorMaskedNode;
 197 class StoreVectorMaskedNode;
 198 class LoadVectorGatherNode;
 199 class LoadVectorGatherMaskedNode;
 200 class StoreVectorNode;
 201 class StoreVectorScatterNode;
 202 class StoreVectorScatterMaskedNode;
 203 class VerifyVectorAlignmentNode;
 204 class VectorMaskCmpNode;
 205 class VectorUnboxNode;
 206 class VectorSet;
 207 class VectorReinterpretNode;
 208 class ShiftVNode;
 209 class MulVLNode;
 210 class ExpandVNode;
 211 class CompressVNode;
 212 class CompressMNode;
 213 class C2_MacroAssembler;
 214 
 215 
 216 #ifndef OPTO_DU_ITERATOR_ASSERT
 217 #ifdef ASSERT
 218 #define OPTO_DU_ITERATOR_ASSERT 1
 219 #else
 220 #define OPTO_DU_ITERATOR_ASSERT 0
 221 #endif
 222 #endif //OPTO_DU_ITERATOR_ASSERT
 223 
 224 #if OPTO_DU_ITERATOR_ASSERT
 225 class DUIterator;
 226 class DUIterator_Fast;
 227 class DUIterator_Last;
 228 #else
 229 typedef uint   DUIterator;
 230 typedef Node** DUIterator_Fast;
 231 typedef Node** DUIterator_Last;
 232 #endif
 233 
 234 typedef ResizeableHashTable<Node*, Node*, AnyObj::RESOURCE_AREA, mtCompiler> OrigToNewHashtable;
 235 
 236 // Node Sentinel
 237 #define NodeSentinel (Node*)-1
 238 
 239 // Unknown count frequency
 240 #define COUNT_UNKNOWN (-1.0f)
 241 
 242 //------------------------------Node-------------------------------------------
 243 // Nodes define actions in the program.  They create values, which have types.
 244 // They are both vertices in a directed graph and program primitives.  Nodes
 245 // are labeled; the label is the "opcode", the primitive function in the lambda
 246 // calculus sense that gives meaning to the Node.  Node inputs are ordered (so
 247 // that "a-b" is different from "b-a").  The inputs to a Node are the inputs to
 248 // the Node's function.  These inputs also define a Type equation for the Node.
 249 // Solving these Type equations amounts to doing dataflow analysis.
 250 // Control and data are uniformly represented in the graph.  Finally, Nodes
 251 // have a unique dense integer index which is used to index into side arrays
 252 // whenever I have phase-specific information.
 253 
 254 class Node {
 255 
 256   // Lots of restrictions on cloning Nodes
 257   NONCOPYABLE(Node);
 258 
 259 public:
 260   friend class Compile;
 261   #if OPTO_DU_ITERATOR_ASSERT
 262   friend class DUIterator_Common;
 263   friend class DUIterator;
 264   friend class DUIterator_Fast;
 265   friend class DUIterator_Last;
 266   #endif
 267 
 268   // Because Nodes come and go, I define an Arena of Node structures to pull
 269   // from.  This should allow fast access to node creation & deletion.  This
 270   // field is a local cache of a value defined in some "program fragment" of
 271   // which these Nodes are just a part.
 272 
 273   inline void* operator new(size_t x) throw() {
 274     Compile* C = Compile::current();
 275     Node* n = (Node*)C->node_arena()->AmallocWords(x);
 276     return (void*)n;
 277   }
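       // Illustrative sketch: 'new AddINode(a, b)' therefore draws its storage
       // from Compile::current()->node_arena(); nodes are not individually
       // freed (see destruct() below for eager reclamation).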
 278 
 279   // Delete is a NOP
 280   void operator delete( void *ptr ) {}
 281   // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
 282   void destruct(PhaseValues* phase);
 283 
 284   // Create a new Node.  Required is the number of inputs required for
 285   // semantic correctness.
 286   Node( uint required );
 287 
 288   // Create a new Node with given input edges.
 289   // This version requires use of the "edge-count" new.
 290   // E.g.  new (C,3) FooNode( C, nullptr, left, right );
 291   Node( Node *n0 );
 292   Node( Node *n0, Node *n1 );
 293   Node( Node *n0, Node *n1, Node *n2 );
 294   Node( Node *n0, Node *n1, Node *n2, Node *n3 );
 295   Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
 296   Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
 297   Node( Node *n0, Node *n1, Node *n2, Node *n3,
 298             Node *n4, Node *n5, Node *n6 );
 299 
 300   // Clone an inherited Node given only the base Node type.
 301   Node* clone() const;
 302 
 303   // Clone a Node, immediately supplying one or two new edges.
 304   // The first and second arguments, if non-null, replace in(1) and in(2),
 305   // respectively.
 306   Node* clone_with_data_edge(Node* in1, Node* in2 = nullptr) const {
 307     Node* nn = clone();
 308     if (in1 != nullptr)  nn->set_req(1, in1);
 309     if (in2 != nullptr)  nn->set_req(2, in2);
 310     return nn;
 311   }
 312 
 313 private:
 314   // Shared setup for the above constructors.
 315   // Handles all interactions with Compile::current.
 316   // Puts initial values in all Node fields except _idx.
 317   // Returns the initial value for _idx, which cannot
 318   // be initialized by assignment.
 319   inline int Init(int req);
 320 
 321 //----------------- input edge handling
 322 protected:
 323   friend class PhaseCFG;        // Access to address of _in array elements
 324   Node **_in;                   // Array of use-def references to Nodes
 325   Node **_out;                  // Array of def-use references to Nodes
 326 
 327   // Input edges are split into two categories.  Required edges are required
 328   // for semantic correctness; order is important and nulls are allowed.
 329   // Precedence edges are used to help determine execution order and are
 330   // added, e.g., for scheduling purposes.  They are unordered and not
 331   // duplicated; they have no embedded nulls.  Edges from 0 to _cnt-1
 332   // are required, from _cnt to _max-1 are precedence edges.
 333   node_idx_t _cnt;              // Total number of required Node inputs.
 334 
 335   node_idx_t _max;              // Actual length of input array.
 336 
 337   // Output edges are an unordered list of def-use edges which exactly
 338   // correspond to required input edges which point from other nodes
 339   // to this one.  Thus the count of the output edges is the number of
 340   // users of this node.
 341   node_idx_t _outcnt;           // Total number of Node outputs.
 342 
 343   node_idx_t _outmax;           // Actual length of output array.
 344 
 345   // Grow the actual input array to the next larger power-of-2 bigger than len.
 346   void grow( uint len );
 347   // Grow the output array to the next larger power-of-2 bigger than len.
 348   void out_grow( uint len );
 349   // Resize input or output array to grow it to the next larger power-of-2
 350   // bigger than len.
 351   void resize_array(Node**& array, node_idx_t& max_size, uint len, bool needs_clearing);
 352 
 353 public:
 354   // Each Node is assigned a unique small/dense number. This number is used
 355   // to index into auxiliary arrays of data and bit vectors.
 356   // The value of _idx can be changed using the set_idx() method.
 357   //
 358   // The PhaseRenumberLive phase renumbers nodes based on liveness information.
 359   // Therefore, it updates the value of the _idx field. The parse-time _idx is
 360   // preserved in _parse_idx.
 361   node_idx_t _idx;
 362   DEBUG_ONLY(const node_idx_t _parse_idx;)
 363   // IGV node identifier. Two nodes, possibly in different compilation phases,
 364   // have the same IGV identifier if (and only if) they are the very same node
 365   // (same memory address) or one is "derived" from the other (by e.g.
 366   // renumbering or matching). This identifier makes it possible to follow the
 367   // entire lifetime of a node in IGV even if its C2 identifier (_idx) changes.
 368   NOT_PRODUCT(node_idx_t _igv_idx;)
 369 
 370   // Get the (read-only) number of input edges
 371   uint req() const { return _cnt; }
 372   uint len() const { return _max; }
 373   // Get the (read-only) number of output edges
 374   uint outcnt() const { return _outcnt; }
 375 
 376 #if OPTO_DU_ITERATOR_ASSERT
 377   // Iterate over the out-edges of this node.  Deletions are illegal.
 378   inline DUIterator outs() const;
 379   // Use this when the out array might have changed to suppress asserts.
 380   inline DUIterator& refresh_out_pos(DUIterator& i) const;
 381   // Does the node have an out at this position?  (Used for iteration.)
 382   inline bool has_out(DUIterator& i) const;
 383   inline Node*    out(DUIterator& i) const;
 384   // Iterate over the out-edges of this node.  All changes are illegal.
 385   inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
 386   inline Node*    fast_out(DUIterator_Fast& i) const;
 387   // Iterate over the out-edges of this node, deleting one at a time.
 388   inline DUIterator_Last last_outs(DUIterator_Last& min) const;
 389   inline Node*    last_out(DUIterator_Last& i) const;
 390   // The inline bodies of all these methods are after the iterator definitions.
 391 #else
 392   // Iterate over the out-edges of this node.  Deletions are illegal.
 393   // This iteration uses integral indexes, to decouple from array reallocations.
 394   DUIterator outs() const  { return 0; }
 395   // Use this when the out array might have changed to suppress asserts.
 396   DUIterator refresh_out_pos(DUIterator i) const { return i; }
 397 
 398   // Reference to the i'th output Node.  Error if out of bounds.
 399   Node*    out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
 400   // Does the node have an out at this position?  (Used for iteration.)
 401   bool has_out(DUIterator i) const { return i < _outcnt; }
 402 
 403   // Iterate over the out-edges of this node.  All changes are illegal.
 404   // This iteration uses a pointer internal to the out array.
 405   DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
 406     Node** out = _out;
 407     // Assign a limit pointer to the reference argument:
 408     max = out + (ptrdiff_t)_outcnt;
 409     // Return the base pointer:
 410     return out;
 411   }
 412   Node*    fast_out(DUIterator_Fast i) const  { return *i; }
 413   // Iterate over the out-edges of this node, deleting one at a time.
 414   // This iteration uses a pointer internal to the out array.
 415   DUIterator_Last last_outs(DUIterator_Last& min) const {
 416     Node** out = _out;
 417     // Assign a limit pointer to the reference argument:
 418     min = out;
 419     // Return the pointer to the start of the iteration:
 420     return out + (ptrdiff_t)_outcnt - 1;
 421   }
 422   Node*    last_out(DUIterator_Last i) const  { return *i; }
 423 #endif
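       // Typical iteration idioms over the out-edges (illustrative sketches; 'n'
       // is some Node* and 'm' one of its uses):
       //
       //   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       //     Node* m = n->fast_out(i);  // no changes to the out array allowed here
       //   }
       //
       //   for (DUIterator i = n->outs(); n->has_out(i); i++) {
       //     Node* m = n->out(i);       // deletions from the out array are illegal here
       //   }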
 424 
 425   // Reference to the i'th input Node.  Error if out of bounds.
 426   Node* in(uint i) const { assert(i < _max, "oob: i=%d, _max=%d", i, _max); return _in[i]; }
 427   // Reference to the i'th input Node.  null if out of bounds.
 428   Node* lookup(uint i) const { return ((i < _max) ? _in[i] : nullptr); }
 429   // Reference to the i'th output Node.  Error if out of bounds.
 430   // Use this accessor sparingly.  We are trying to use iterators instead.
 431   Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
 432   // Return the unique out edge.
 433   Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; }
 434 
 435   // In some cases, a node n is only used by a single use, but the use may use
 436   // n once or multiple times:
 437   //   use = ConvF2I(this)
 438   //   use = AddI(this, this)
 439   Node* unique_multiple_edges_out_or_null() const;
 440 
 441   // Delete out edge at position 'i' by moving last out edge to position 'i'
 442   void  raw_del_out(uint i) {
 443     assert(i < _outcnt,"oob");
 444     assert(_outcnt > 0,"oob");
 445     #if OPTO_DU_ITERATOR_ASSERT
 446     // Record that a change happened here.
 447     DEBUG_ONLY(_last_del = _out[i]; ++_del_tick);
 448     #endif
 449     _out[i] = _out[--_outcnt];
 450     // Smash the old edge so it can't be used accidentally.
 451     DEBUG_ONLY(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
 452   }
 453 
 454 #ifdef ASSERT
 455   bool is_dead() const;
 456   static bool is_not_dead(const Node* n);
 457   bool is_reachable_from_root() const;
 458 #endif
 459   // Check whether node has become unreachable
 460   bool is_unreachable(PhaseIterGVN &igvn) const;
 461 
 462   // Does the node have any immediate non-debug uses?
 463   bool has_non_debug_uses() const;
 464 
 465   // Set a required input edge, also updates corresponding output edge
 466   void add_req( Node *n ); // Append a NEW required input
 467   void add_req( Node *n0, Node *n1 ) {
 468     add_req(n0); add_req(n1); }
 469   void add_req( Node *n0, Node *n1, Node *n2 ) {
 470     add_req(n0); add_req(n1); add_req(n2); }
 471   void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
 472   void del_req( uint idx ); // Delete required edge & compact
 473   void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
 474   void ins_req( uint i, Node *n ); // Insert a NEW required input
 475   void set_req( uint i, Node *n ) {
 476     assert( is_not_dead(n), "can not use dead node");
 477     assert( i < _cnt, "oob: i=%d, _cnt=%d", i, _cnt);
 478     assert( !VerifyHashTableKeys || _hash_lock == 0,
 479             "remove node from hash table before modifying it");
 480     Node** p = &_in[i];    // cache this._in, across the del_out call
 481     if (*p != nullptr)  (*p)->del_out((Node *)this);
 482     (*p) = n;
 483     if (n != nullptr)      n->add_out((Node *)this);
 484     Compile::current()->record_modified_node(this);
 485   }
 486   // Light version of set_req() to init inputs after node creation.
 487   void init_req( uint i, Node *n ) {
 488     assert( (i == 0 && this == n) ||
 489             is_not_dead(n), "can not use dead node");
 490     assert( i < _cnt, "oob");
 491     assert( !VerifyHashTableKeys || _hash_lock == 0,
 492             "remove node from hash table before modifying it");
 493     assert( _in[i] == nullptr, "sanity");
 494     _in[i] = n;
 495     if (n != nullptr)      n->add_out((Node *)this);
 496     Compile::current()->record_modified_node(this);
 497   }
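       // Illustrative sketch: init_req() fills a slot that is still null right
       // after construction, while set_req() rewires an existing input and keeps
       // the def-use info of both the old and the new input consistent:
       //   n->init_req(1, left);      // during node setup
       //   if (n->in(2) != right) {
       //     n->set_req(2, right);    // later rewiring
       //   }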
 498   // Find first occurrence of n among my edges:
 499   int find_edge(Node* n);
 500   int find_prec_edge(Node* n) {
 501     for (uint i = req(); i < len(); i++) {
 502       if (_in[i] == n) return i;
 503       if (_in[i] == nullptr) {
 504         DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == nullptr, "Gap in prec edges!"); )
 505         break;
 506       }
 507     }
 508     return -1;
 509   }
 510   int replace_edge(Node* old, Node* neww, PhaseGVN* gvn = nullptr);
 511   int replace_edges_in_range(Node* old, Node* neww, int start, int end, PhaseGVN* gvn);
 512   // null out all inputs to eliminate incoming Def-Use edges.
 513   void disconnect_inputs(Compile* C);
 514 
 515   // Quickly, return true if and only if I am Compile::current()->top().
 516   bool is_top() const {
 517     assert((this == (Node*) Compile::current()->top()) == (_out == nullptr), "");
 518     return (_out == nullptr);
 519   }
 520   // Reaffirm invariants for is_top.  (Only from Compile::set_cached_top_node.)
 521   void setup_is_top();
 522 
 523   // Strip away casting.  (It is depth-limited.)
 524   Node* uncast(bool keep_deps = false) const;
 525   // Return whether two Nodes are equivalent, after stripping casting.
 526   bool eqv_uncast(const Node* n, bool keep_deps = false) const {
 527     return (this->uncast(keep_deps) == n->uncast(keep_deps));
 528   }
 529 
 530   // Find an out of the current node that matches the given opcode.
 531   Node* find_out_with(int opcode);
 532   // Return true if the current node has an out that matches opcode.
 533   bool has_out_with(int opcode);
 534   // Return true if the current node has an out that matches any of the opcodes.
 535   bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4);
 536 
 537 private:
 538   static Node* uncast_helper(const Node* n, bool keep_deps);
 539 
 540   // Add an output edge to the end of the list
 541   void add_out( Node *n ) {
 542     if (is_top())  return;
 543     if( _outcnt == _outmax ) out_grow(_outcnt);
 544     _out[_outcnt++] = n;
 545   }
 546   // Delete an output edge
 547   void del_out( Node *n ) {
 548     if (is_top())  return;
 549     Node** outp = &_out[_outcnt];
 550     // Find and remove n
 551     do {
 552       assert(outp > _out, "Missing Def-Use edge");
 553     } while (*--outp != n);
 554     *outp = _out[--_outcnt];
 555     // Smash the old edge so it can't be used accidentally.
 556     DEBUG_ONLY(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
 557     // Record that a change happened here.
 558     #if OPTO_DU_ITERATOR_ASSERT
 559     DEBUG_ONLY(_last_del = n; ++_del_tick);
 560     #endif
 561   }
 562   // Close gap after removing edge.
 563   void close_prec_gap_at(uint gap) {
 564     assert(_cnt <= gap && gap < _max, "no valid prec edge");
 565     uint i = gap;
 566     Node *last = nullptr;
 567     for (; i < _max-1; ++i) {
 568       Node *next = _in[i+1];
 569       if (next == nullptr) break;
 570       last = next;
 571     }
 572     _in[gap] = last;  // Move last slot to empty one.
 573     _in[i] = nullptr; // null out last slot.
 574   }
 575 
 576 public:
 577   // Globally replace this node by a given new node, updating all uses.
 578   void replace_by(Node* new_node);
 579   // Globally replace this node by a given new node, updating all uses
 580   // and cutting input edges of old node.
 581   void subsume_by(Node* new_node, Compile* c) {
 582     replace_by(new_node);
 583     disconnect_inputs(c);
 584   }
 585   void set_req_X(uint i, Node *n, PhaseIterGVN *igvn);
 586   void set_req_X(uint i, Node *n, PhaseGVN *gvn);
 587   // Find the one non-null required input.  RegionNode only
 588   Node *nonnull_req() const;
 589   // Add or remove precedence edges
 590   void add_prec( Node *n );
 591   void rm_prec( uint i );
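       // Illustrative sketch: a precedence edge only constrains scheduling, e.g.
       //   second->add_prec(first);   // 'second' must not be scheduled before 'first'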
 592 
 593   // Note: prec(i) will not necessarily point to n if edge already exists.
 594   void set_prec( uint i, Node *n ) {
 595     assert(i < _max, "oob: i=%d, _max=%d", i, _max);
 596     assert(is_not_dead(n), "can not use dead node");
 597     assert(i >= _cnt, "not a precedence edge");
 598     // Avoid spec violation: duplicated prec edge.
 599     if (_in[i] == n) return;
 600     if (n == nullptr || find_prec_edge(n) != -1) {
 601       rm_prec(i);
 602       return;
 603     }
 604     if (_in[i] != nullptr) _in[i]->del_out((Node *)this);
 605     _in[i] = n;
 606     n->add_out((Node *)this);
 607     Compile::current()->record_modified_node(this);
 608   }
 609 
 610   // Set this node's index, used by cisc_version to replace current node
 611   void set_idx(uint new_idx) {
 612     _idx = new_idx;
 613   }
 614   // Swap input edge order.  (Edge indexes i1 and i2 are usually 1 and 2.)
 615   void swap_edges(uint i1, uint i2) {
 616     DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
 617     // Def-Use info is unchanged
 618     Node* n1 = in(i1);
 619     Node* n2 = in(i2);
 620     _in[i1] = n2;
 621     _in[i2] = n1;
 622     // If this node is in the hash table, make sure it doesn't need a rehash.
 623     assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
 624     // Flip swapped edges flag.
 625     if (has_swapped_edges()) {
 626       remove_flag(Node::Flag_has_swapped_edges);
 627     } else {
 628       add_flag(Node::Flag_has_swapped_edges);
 629     }
 630   }
 631 
 632   // Iterators over input Nodes for a Node X are written as:
 633   // for( i = 0; i < X.req(); i++ ) ... X[i] ...
 634   // NOTE: Required edges can contain embedded null pointers.
 635 
 636 //----------------- Other Node Properties
 637 
 638   // Generate class IDs for (some) ideal nodes so that it is possible to determine
 639   // the type of a node using a non-virtual method call (the method is_<Node>() below).
 640   //
 641   // A class ID of an ideal node is a set of bits. In a class ID, a single bit determines
 642   // the type of the node the ID represents; another subset of an ID's bits are reserved
 643   // for the superclasses of the node represented by the ID.
 644   //
 645   // By design, if A is a subtype of B, A.is_B() returns true and B.is_A()
 646   // returns false. A.is_A() returns true.
 647   //
 648   // If two classes, A and B, have the same superclass, a different bit of A's class id
 649   // is reserved for A's type than for B's type. That bit is specified by the third
 650   // parameter in the macro DEFINE_CLASS_ID.
 651   //
 652   // By convention, classes with deeper hierarchy are declared first. Moreover,
 653   // classes with the same hierarchy depth are sorted by usage frequency.
 654   //
 655   // The query method masks the bits to cut off bits of subclasses and then compares
 656   // the result with the class id (see the macro DEFINE_CLASS_QUERY below).
 657   //
 658   //  Class_MachCall=30, ClassMask_MachCall=31
 659   // 12               8               4               0
 660   //  0   0   0   0   0   0   0   0   1   1   1   1   0
 661   //                                  |   |   |   |
 662   //                                  |   |   |   Bit_Mach=2
 663   //                                  |   |   Bit_MachReturn=4
 664   //                                  |   Bit_MachSafePoint=8
 665   //                                  Bit_MachCall=16
 666   //
 667   //  Class_CountedLoop=56, ClassMask_CountedLoop=63
 668   // 12               8               4               0
 669   //  0   0   0   0   0   0   0   1   1   1   0   0   0
 670   //                              |   |   |
 671   //                              |   |   Bit_Region=8
 672   //                              |   Bit_Loop=16
 673   //                              Bit_CountedLoop=32
 674 
 675   #define DEFINE_CLASS_ID(cl, supcl, subn) \
 676   Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
 677   Class_##cl = Class_##supcl + Bit_##cl , \
 678   ClassMask_##cl = ((Bit_##cl << 1) - 1) ,
 679 
 680   // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
 681   // so that its values fit into 32 bits.
 682   enum NodeClasses {
 683     Bit_Node   = 0x00000000,
 684     Class_Node = 0x00000000,
 685     ClassMask_Node = 0xFFFFFFFF,
 686 
 687     DEFINE_CLASS_ID(Multi, Node, 0)
 688       DEFINE_CLASS_ID(SafePoint, Multi, 0)
 689         DEFINE_CLASS_ID(Call,      SafePoint, 0)
 690           DEFINE_CLASS_ID(CallJava,         Call, 0)
 691             DEFINE_CLASS_ID(CallStaticJava,   CallJava, 0)
 692             DEFINE_CLASS_ID(CallDynamicJava,  CallJava, 1)
 693           DEFINE_CLASS_ID(CallRuntime,      Call, 1)
 694             DEFINE_CLASS_ID(CallLeaf,         CallRuntime, 0)
 695               DEFINE_CLASS_ID(CallLeafNoFP,     CallLeaf, 0)
 696               DEFINE_CLASS_ID(CallLeafPure,     CallLeaf, 1)
 697           DEFINE_CLASS_ID(Allocate,         Call, 2)
 698             DEFINE_CLASS_ID(AllocateArray,    Allocate, 0)
 699           DEFINE_CLASS_ID(AbstractLock,     Call, 3)
 700             DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
 701             DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
 702           DEFINE_CLASS_ID(ArrayCopy,        Call, 4)
 703         DEFINE_CLASS_ID(LoadFlat,  SafePoint, 1)
 704         DEFINE_CLASS_ID(StoreFlat, SafePoint, 2)
 705       DEFINE_CLASS_ID(MultiBranch, Multi, 1)
 706         DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
 707           DEFINE_CLASS_ID(Catch,       PCTable, 0)
 708           DEFINE_CLASS_ID(Jump,        PCTable, 1)
 709         DEFINE_CLASS_ID(If,          MultiBranch, 1)
 710           DEFINE_CLASS_ID(BaseCountedLoopEnd,     If, 0)
 711             DEFINE_CLASS_ID(CountedLoopEnd,       BaseCountedLoopEnd, 0)
 712             DEFINE_CLASS_ID(LongCountedLoopEnd,   BaseCountedLoopEnd, 1)
 713           DEFINE_CLASS_ID(RangeCheck,             If, 1)
 714           DEFINE_CLASS_ID(OuterStripMinedLoopEnd, If, 2)
 715           DEFINE_CLASS_ID(ParsePredicate,         If, 3)
 716         DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
 717       DEFINE_CLASS_ID(Start,       Multi, 2)
 718       DEFINE_CLASS_ID(MemBar,      Multi, 3)
 719         DEFINE_CLASS_ID(Initialize,       MemBar, 0)
 720         DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)
 721       DEFINE_CLASS_ID(Blackhole,   Multi, 4)
 722 
 723     DEFINE_CLASS_ID(Mach,  Node, 1)
 724       DEFINE_CLASS_ID(MachReturn, Mach, 0)
 725         DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
 726           DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
 727             DEFINE_CLASS_ID(MachCallJava,         MachCall, 0)
 728               DEFINE_CLASS_ID(MachCallStaticJava,   MachCallJava, 0)
 729               DEFINE_CLASS_ID(MachCallDynamicJava,  MachCallJava, 1)
 730             DEFINE_CLASS_ID(MachCallRuntime,      MachCall, 1)
 731               DEFINE_CLASS_ID(MachCallLeaf,         MachCallRuntime, 0)
 732       DEFINE_CLASS_ID(MachBranch, Mach, 1)
 733         DEFINE_CLASS_ID(MachIf,         MachBranch, 0)
 734         DEFINE_CLASS_ID(MachGoto,       MachBranch, 1)
 735         DEFINE_CLASS_ID(MachNullCheck,  MachBranch, 2)
 736       DEFINE_CLASS_ID(MachSpillCopy,    Mach, 2)
 737       DEFINE_CLASS_ID(MachTemp,         Mach, 3)
 738       DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
 739       DEFINE_CLASS_ID(MachConstant,     Mach, 5)
 740         DEFINE_CLASS_ID(MachJump,       MachConstant, 0)
 741       DEFINE_CLASS_ID(MachMerge,        Mach, 6)
 742       DEFINE_CLASS_ID(MachMemBar,       Mach, 7)
 743       DEFINE_CLASS_ID(MachProlog,       Mach, 8)
 744       DEFINE_CLASS_ID(MachVEP,          Mach, 9)
 745 
 746     DEFINE_CLASS_ID(Type,  Node, 2)
 747       DEFINE_CLASS_ID(Phi,   Type, 0)
 748       DEFINE_CLASS_ID(ConstraintCast, Type, 1)
 749         DEFINE_CLASS_ID(CastII, ConstraintCast, 0)
 750         DEFINE_CLASS_ID(CheckCastPP, ConstraintCast, 1)
 751         DEFINE_CLASS_ID(CastLL, ConstraintCast, 2)
 752         DEFINE_CLASS_ID(CastFF, ConstraintCast, 3)
 753         DEFINE_CLASS_ID(CastDD, ConstraintCast, 4)
 754         DEFINE_CLASS_ID(CastVV, ConstraintCast, 5)
 755         DEFINE_CLASS_ID(CastPP, ConstraintCast, 6)
 756         DEFINE_CLASS_ID(CastHH, ConstraintCast, 7)
 757       DEFINE_CLASS_ID(CMove, Type, 3)
 758       DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
 759       DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5)
 760         DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0)
 761         DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1)
 762       DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
 763         DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
 764         DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
 765       DEFINE_CLASS_ID(Vector, Type, 7)
 766         DEFINE_CLASS_ID(VectorMaskCmp, Vector, 0)
 767         DEFINE_CLASS_ID(VectorUnbox, Vector, 1)
 768         DEFINE_CLASS_ID(VectorReinterpret, Vector, 2)
 769         DEFINE_CLASS_ID(ShiftV, Vector, 3)
 770         DEFINE_CLASS_ID(CompressV, Vector, 4)
 771         DEFINE_CLASS_ID(ExpandV, Vector, 5)
 772         DEFINE_CLASS_ID(CompressM, Vector, 6)
 773         DEFINE_CLASS_ID(Reduction, Vector, 7)
 774         DEFINE_CLASS_ID(NegV, Vector, 8)
 775         DEFINE_CLASS_ID(SaturatingVector, Vector, 9)
 776         DEFINE_CLASS_ID(MulVL, Vector, 10)
 777       DEFINE_CLASS_ID(InlineType, Type, 8)
 778       DEFINE_CLASS_ID(Con, Type, 9)
 779           DEFINE_CLASS_ID(ConI, Con, 0)
 780       DEFINE_CLASS_ID(SafePointScalarMerge, Type, 10)
 781       DEFINE_CLASS_ID(Convert, Type, 11)
 782 
 783 
 784     DEFINE_CLASS_ID(Proj,  Node, 3)
 785       DEFINE_CLASS_ID(CatchProj, Proj, 0)
 786       DEFINE_CLASS_ID(JumpProj,  Proj, 1)
 787       DEFINE_CLASS_ID(IfProj,    Proj, 2)
 788         DEFINE_CLASS_ID(IfTrue,    IfProj, 0)
 789         DEFINE_CLASS_ID(IfFalse,   IfProj, 1)
 790       DEFINE_CLASS_ID(Parm,      Proj, 4)
 791       DEFINE_CLASS_ID(MachProj,  Proj, 5)
 792       DEFINE_CLASS_ID(NarrowMemProj, Proj, 6)
 793 
 794     DEFINE_CLASS_ID(Mem, Node, 4)
 795       DEFINE_CLASS_ID(Load, Mem, 0)
 796         DEFINE_CLASS_ID(LoadVector,  Load, 0)
 797           DEFINE_CLASS_ID(LoadVectorGather, LoadVector, 0)
 798           DEFINE_CLASS_ID(LoadVectorGatherMasked, LoadVector, 1)
 799           DEFINE_CLASS_ID(LoadVectorMasked, LoadVector, 2)
 800       DEFINE_CLASS_ID(Store, Mem, 1)
 801         DEFINE_CLASS_ID(StoreVector, Store, 0)
 802           DEFINE_CLASS_ID(StoreVectorScatter, StoreVector, 0)
 803           DEFINE_CLASS_ID(StoreVectorScatterMasked, StoreVector, 1)
 804           DEFINE_CLASS_ID(StoreVectorMasked, StoreVector, 2)
 805       DEFINE_CLASS_ID(LoadStore, Mem, 2)
 806         DEFINE_CLASS_ID(LoadStoreConditional, LoadStore, 0)
 807           DEFINE_CLASS_ID(CompareAndSwap, LoadStoreConditional, 0)
 808         DEFINE_CLASS_ID(CompareAndExchangeNode, LoadStore, 1)
 809 
 810     DEFINE_CLASS_ID(Region, Node, 5)
 811       DEFINE_CLASS_ID(Loop, Region, 0)
 812         DEFINE_CLASS_ID(Root,                Loop, 0)
 813         DEFINE_CLASS_ID(BaseCountedLoop,     Loop, 1)
 814           DEFINE_CLASS_ID(CountedLoop,       BaseCountedLoop, 0)
 815           DEFINE_CLASS_ID(LongCountedLoop,   BaseCountedLoop, 1)
 816         DEFINE_CLASS_ID(OuterStripMinedLoop, Loop, 2)
 817 
 818     DEFINE_CLASS_ID(Sub,   Node, 6)
 819       DEFINE_CLASS_ID(Cmp,   Sub, 0)
 820         DEFINE_CLASS_ID(FastLock,       Cmp, 0)
 821         DEFINE_CLASS_ID(FastUnlock,     Cmp, 1)
 822         DEFINE_CLASS_ID(SubTypeCheck,   Cmp, 2)
 823         DEFINE_CLASS_ID(FlatArrayCheck, Cmp, 3)
 824 
 825     DEFINE_CLASS_ID(MergeMem, Node, 7)
 826     DEFINE_CLASS_ID(Bool,     Node, 8)
 827     DEFINE_CLASS_ID(AddP,     Node, 9)
 828     DEFINE_CLASS_ID(BoxLock,  Node, 10)
 829     DEFINE_CLASS_ID(Add,      Node, 11)
 830       DEFINE_CLASS_ID(MinMax,      Add, 0)
 831     DEFINE_CLASS_ID(Mul,      Node, 12)
 832     DEFINE_CLASS_ID(ClearArray, Node, 14)
 833     DEFINE_CLASS_ID(Halt,     Node, 15)
 834     DEFINE_CLASS_ID(Opaque1,  Node, 16)
 835       DEFINE_CLASS_ID(OpaqueLoopInit, Opaque1, 0)
 836       DEFINE_CLASS_ID(OpaqueLoopStride, Opaque1, 1)
 837       DEFINE_CLASS_ID(OpaqueMultiversioning, Opaque1, 2)
 838     DEFINE_CLASS_ID(OpaqueConstantBool,  Node, 17)
 839     DEFINE_CLASS_ID(OpaqueInitializedAssertionPredicate,  Node, 18)
 840     DEFINE_CLASS_ID(OpaqueTemplateAssertionPredicate,  Node, 19)
 841     DEFINE_CLASS_ID(Move,     Node, 20)
 842     DEFINE_CLASS_ID(LShift,   Node, 21)
 843     DEFINE_CLASS_ID(Neg,      Node, 22)
 844     DEFINE_CLASS_ID(ReachabilityFence, Node, 23)
 845 
 846     _max_classes  = ClassMask_ReachabilityFence
 847   };
 848   #undef DEFINE_CLASS_ID
 849 
 850   // Flags are sorted by usage frequency.
 851   enum NodeFlags : uint64_t {
 852     Flag_is_Copy                     = 1ULL << 0, // should be first bit to avoid shift
 853     Flag_rematerialize               = 1ULL << 1,
 854     Flag_needs_anti_dependence_check = 1ULL << 2,
 855     Flag_is_macro                    = 1ULL << 3,
 856     Flag_is_Con                      = 1ULL << 4,
 857     Flag_is_cisc_alternate           = 1ULL << 5,
 858     Flag_is_dead_loop_safe           = 1ULL << 6,
 859     Flag_may_be_short_branch         = 1ULL << 7,
 860     Flag_avoid_back_to_back_before   = 1ULL << 8,
 861     Flag_avoid_back_to_back_after    = 1ULL << 9,
 862     Flag_has_call                    = 1ULL << 10,
 863     Flag_has_swapped_edges           = 1ULL << 11,
 864     Flag_is_scheduled                = 1ULL << 12,
 865     Flag_is_expensive                = 1ULL << 13,
 866     Flag_is_predicated_vector        = 1ULL << 14, // Marked on a vector node that has an additional
 867                                                    // mask input controlling the lane operations.
 868     Flag_for_post_loop_opts_igvn     = 1ULL << 15,
 869     Flag_for_merge_stores_igvn       = 1ULL << 16,
 870     Flag_is_removed_by_peephole      = 1ULL << 17,
 871     Flag_is_predicated_using_blend   = 1ULL << 18,
 872     _last_flag                       = Flag_is_predicated_using_blend
 873   };
 874 
 875   class PD;
 876 
 877 private:
 878   juint _class_id;
 879   juint _flags;
 880 
 881 #ifdef ASSERT
 882   static juint max_flags();
 883 #endif
 884 
 885 protected:
 886   // These methods should be called from constructors only.
 887   void init_class_id(juint c) {
 888     _class_id = c; // cast out const
 889   }
 890   void init_flags(uint fl) {
 891     assert(fl <= max_flags(), "invalid node flag");
 892     _flags |= fl;
 893   }
 894   void clear_flag(uint fl) {
 895     assert(fl <= max_flags(), "invalid node flag");
 896     _flags &= ~fl;
 897   }
 898 
 899 public:
 900   juint class_id() const { return _class_id; }
 901 
 902   juint flags() const { return _flags; }
 903 
 904   void add_flag(juint fl) { init_flags(fl); }
 905 
 906   void remove_flag(juint fl) { clear_flag(fl); }
 907 
 908   // Return a dense integer opcode number
 909   virtual int Opcode() const;
 910 
 911   // Virtual inherited Node size
 912   virtual uint size_of() const;
 913 
 914   // Other interesting Node properties
 915   #define DEFINE_CLASS_QUERY(type)                           \
 916   bool is_##type() const {                                   \
 917     return ((_class_id & ClassMask_##type) == Class_##type); \
 918   }                                                          \
 919   type##Node *as_##type() const {                            \
 920     assert(is_##type(), "invalid node class: %s", Name());   \
 921     return (type##Node*)this;                                \
 922   }                                                          \
 923   type##Node* isa_##type() const {                           \
 924     return (is_##type()) ? as_##type() : nullptr;            \
 925   }
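       // Usage sketch for the generated queries (illustrative):
       //   if (n->is_Load()) { LoadNode* ld = n->as_Load(); ... }  // checked cast
       //   LoadNode* ld = n->isa_Load();   // nullptr if 'n' is not a Load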
 926 
 927   DEFINE_CLASS_QUERY(AbstractLock)
 928   DEFINE_CLASS_QUERY(Add)
 929   DEFINE_CLASS_QUERY(AddP)
 930   DEFINE_CLASS_QUERY(Allocate)
 931   DEFINE_CLASS_QUERY(AllocateArray)
 932   DEFINE_CLASS_QUERY(ArrayCopy)
 933   DEFINE_CLASS_QUERY(BaseCountedLoop)
 934   DEFINE_CLASS_QUERY(BaseCountedLoopEnd)
 935   DEFINE_CLASS_QUERY(Blackhole)
 936   DEFINE_CLASS_QUERY(Bool)
 937   DEFINE_CLASS_QUERY(BoxLock)
 938   DEFINE_CLASS_QUERY(Call)
 939   DEFINE_CLASS_QUERY(CallDynamicJava)
 940   DEFINE_CLASS_QUERY(CallJava)
 941   DEFINE_CLASS_QUERY(CallLeaf)
 942   DEFINE_CLASS_QUERY(CallLeafNoFP)
 943   DEFINE_CLASS_QUERY(CallLeafPure)
 944   DEFINE_CLASS_QUERY(CallRuntime)
 945   DEFINE_CLASS_QUERY(CallStaticJava)
 946   DEFINE_CLASS_QUERY(Catch)
 947   DEFINE_CLASS_QUERY(CatchProj)
 948   DEFINE_CLASS_QUERY(CheckCastPP)
 949   DEFINE_CLASS_QUERY(CastII)
 950   DEFINE_CLASS_QUERY(CastLL)
 951   DEFINE_CLASS_QUERY(CastFF)
 952   DEFINE_CLASS_QUERY(ConI)
 953   DEFINE_CLASS_QUERY(CastPP)
 954   DEFINE_CLASS_QUERY(ConstraintCast)
 955   DEFINE_CLASS_QUERY(ClearArray)
 956   DEFINE_CLASS_QUERY(CMove)
 957   DEFINE_CLASS_QUERY(Cmp)
 958   DEFINE_CLASS_QUERY(Convert)
 959   DEFINE_CLASS_QUERY(CountedLoop)
 960   DEFINE_CLASS_QUERY(CountedLoopEnd)
 961   DEFINE_CLASS_QUERY(DecodeNarrowPtr)
 962   DEFINE_CLASS_QUERY(DecodeN)
 963   DEFINE_CLASS_QUERY(DecodeNKlass)
 964   DEFINE_CLASS_QUERY(EncodeNarrowPtr)
 965   DEFINE_CLASS_QUERY(EncodeP)
 966   DEFINE_CLASS_QUERY(EncodePKlass)
 967   DEFINE_CLASS_QUERY(FastLock)
 968   DEFINE_CLASS_QUERY(FastUnlock)
 969   DEFINE_CLASS_QUERY(FlatArrayCheck)
 970   DEFINE_CLASS_QUERY(Halt)
 971   DEFINE_CLASS_QUERY(If)
 972   DEFINE_CLASS_QUERY(RangeCheck)
 973   DEFINE_CLASS_QUERY(IfProj)
 974   DEFINE_CLASS_QUERY(IfFalse)
 975   DEFINE_CLASS_QUERY(IfTrue)
 976   DEFINE_CLASS_QUERY(Initialize)
 977   DEFINE_CLASS_QUERY(Jump)
 978   DEFINE_CLASS_QUERY(JumpProj)
 979   DEFINE_CLASS_QUERY(LongCountedLoop)
 980   DEFINE_CLASS_QUERY(LongCountedLoopEnd)
 981   DEFINE_CLASS_QUERY(Load)
 982   DEFINE_CLASS_QUERY(LoadStore)
 983   DEFINE_CLASS_QUERY(LoadStoreConditional)
 984   DEFINE_CLASS_QUERY(Lock)
 985   DEFINE_CLASS_QUERY(Loop)
 986   DEFINE_CLASS_QUERY(LShift)
 987   DEFINE_CLASS_QUERY(Mach)
 988   DEFINE_CLASS_QUERY(MachBranch)
 989   DEFINE_CLASS_QUERY(MachCall)
 990   DEFINE_CLASS_QUERY(MachCallDynamicJava)
 991   DEFINE_CLASS_QUERY(MachCallJava)
 992   DEFINE_CLASS_QUERY(MachCallLeaf)
 993   DEFINE_CLASS_QUERY(MachCallRuntime)
 994   DEFINE_CLASS_QUERY(MachCallStaticJava)
 995   DEFINE_CLASS_QUERY(MachConstantBase)
 996   DEFINE_CLASS_QUERY(MachConstant)
 997   DEFINE_CLASS_QUERY(MachGoto)
 998   DEFINE_CLASS_QUERY(MachIf)
 999   DEFINE_CLASS_QUERY(MachJump)
1000   DEFINE_CLASS_QUERY(MachNullCheck)
1001   DEFINE_CLASS_QUERY(MachProj)
1002   DEFINE_CLASS_QUERY(MachProlog)
1003   DEFINE_CLASS_QUERY(MachReturn)
1004   DEFINE_CLASS_QUERY(MachSafePoint)
1005   DEFINE_CLASS_QUERY(MachSpillCopy)
1006   DEFINE_CLASS_QUERY(MachTemp)
1007   DEFINE_CLASS_QUERY(MachMemBar)
1008   DEFINE_CLASS_QUERY(MachMerge)
1009   DEFINE_CLASS_QUERY(MachVEP)
1010   DEFINE_CLASS_QUERY(Mem)
1011   DEFINE_CLASS_QUERY(MemBar)
1012   DEFINE_CLASS_QUERY(MemBarStoreStore)
1013   DEFINE_CLASS_QUERY(MergeMem)
1014   DEFINE_CLASS_QUERY(MinMax)
1015   DEFINE_CLASS_QUERY(Move)
1016   DEFINE_CLASS_QUERY(Mul)
1017   DEFINE_CLASS_QUERY(Multi)
1018   DEFINE_CLASS_QUERY(MultiBranch)
1019   DEFINE_CLASS_QUERY(MulVL)
1020   DEFINE_CLASS_QUERY(NarrowMemProj)
1021   DEFINE_CLASS_QUERY(Neg)
1022   DEFINE_CLASS_QUERY(NegV)
1023   DEFINE_CLASS_QUERY(NeverBranch)
1024   DEFINE_CLASS_QUERY(Opaque1)
1025   DEFINE_CLASS_QUERY(OpaqueConstantBool)
1026   DEFINE_CLASS_QUERY(OpaqueInitializedAssertionPredicate)
1027   DEFINE_CLASS_QUERY(OpaqueTemplateAssertionPredicate)
1028   DEFINE_CLASS_QUERY(OpaqueLoopInit)
1029   DEFINE_CLASS_QUERY(OpaqueLoopStride)
1030   DEFINE_CLASS_QUERY(OpaqueMultiversioning)
1031   DEFINE_CLASS_QUERY(OuterStripMinedLoop)
1032   DEFINE_CLASS_QUERY(OuterStripMinedLoopEnd)
1033   DEFINE_CLASS_QUERY(Parm)
1034   DEFINE_CLASS_QUERY(ParsePredicate)
1035   DEFINE_CLASS_QUERY(PCTable)
1036   DEFINE_CLASS_QUERY(Phi)
1037   DEFINE_CLASS_QUERY(Proj)
1038   DEFINE_CLASS_QUERY(ReachabilityFence)
1039   DEFINE_CLASS_QUERY(Reduction)
1040   DEFINE_CLASS_QUERY(Region)
1041   DEFINE_CLASS_QUERY(Root)
1042   DEFINE_CLASS_QUERY(SafePoint)
1043   DEFINE_CLASS_QUERY(SafePointScalarObject)
1044   DEFINE_CLASS_QUERY(SafePointScalarMerge)
1045   DEFINE_CLASS_QUERY(Start)
1046   DEFINE_CLASS_QUERY(Store)
1047   DEFINE_CLASS_QUERY(Sub)
1048   DEFINE_CLASS_QUERY(SubTypeCheck)
1049   DEFINE_CLASS_QUERY(Type)
1050   DEFINE_CLASS_QUERY(InlineType)
1051   DEFINE_CLASS_QUERY(LoadFlat)
1052   DEFINE_CLASS_QUERY(StoreFlat)
1053   DEFINE_CLASS_QUERY(Vector)
1054   DEFINE_CLASS_QUERY(VectorMaskCmp)
1055   DEFINE_CLASS_QUERY(VectorUnbox)
1056   DEFINE_CLASS_QUERY(VectorReinterpret)
1057   DEFINE_CLASS_QUERY(CompressV)
1058   DEFINE_CLASS_QUERY(ExpandV)
1059   DEFINE_CLASS_QUERY(CompressM)
1060   DEFINE_CLASS_QUERY(LoadVector)
1061   DEFINE_CLASS_QUERY(LoadVectorGather)
1062   DEFINE_CLASS_QUERY(LoadVectorMasked)
1063   DEFINE_CLASS_QUERY(LoadVectorGatherMasked)
1064   DEFINE_CLASS_QUERY(StoreVector)
1065   DEFINE_CLASS_QUERY(StoreVectorScatter)
1066   DEFINE_CLASS_QUERY(StoreVectorMasked)
1067   DEFINE_CLASS_QUERY(StoreVectorScatterMasked)
1068   DEFINE_CLASS_QUERY(SaturatingVector)
1069   DEFINE_CLASS_QUERY(ShiftV)
1070   DEFINE_CLASS_QUERY(Unlock)
1071 
1072   #undef DEFINE_CLASS_QUERY
1073 
1074   // duplicate of is_MachSpillCopy()
1075   bool is_SpillCopy () const {
1076     return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
1077   }
1078 
1079   bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
1080   // The data node which is safe to leave in dead loop during IGVN optimization.
1081   bool is_dead_loop_safe() const;
1082 
1083   // is_Copy() returns copied edge index (0 or 1)
1084   uint is_Copy() const { return (_flags & Flag_is_Copy); }
1085 
1086   virtual bool is_CFG() const { return false; }
1087 
1088   // If this node is control-dependent on a test, can it be rerouted to a dominating equivalent
1089   // test? This means that the node can be executed safely as long as it happens after the test
1090   // that is its control input without worrying about the whole control flow. On the contrary, if
1091   // the node depends on a test that is not its control input, or if it depends on more than one
1092   // tests, then this method must return false.
1093   //
1094   // Pseudocode examples:
1095   // 1. if (y != 0) {
1096   //      x / y;
1097   //    }
1098   // The division depends only on the test y != 0 and can be executed anywhere y != 0 holds true.
1099   // As a result, depends_only_on_test returns true.
1100   // 2. if (y != 0) {
1101   //      if (x > 1) {
1102   //        x / y;
1103   //      }
1104   //    }
1105   // If the division x / y has its control input being the IfTrueNode of the test y != 0, then
1106   // depends_only_on_test returns true. Otherwise, if the division has its control input being the
1107   // IfTrueNode of the test x > 1, then depends_only_on_test returns false.
1108   // 3. if (y > z) {
1109   //      if (z > 0) {
1110   //        x / y
1111   //      }
1112   //    }
1113   // The division depends on both tests y > z and z > 0. As a result, depends_only_on_test returns
1114   // false.
1115   //
1116   // This method allows more freedom in certain nodes with regard to scheduling; for example, it
1117   // allows a node to float out of a loop together with its test.
1118   //
1119   // This method is pessimistic: it may return false even if the node satisfies the
1120   // requirements. However, it must return false if the node does not satisfy the requirements.
1121   // When a test is decomposed into multiple tests, all nodes that depend on the decomposed test
1122   // must be pinned at the lowest dominating test of those. For example, when a zero check of a
1123   // division is split through a region but the division itself is not, it must be pinned at the
1124   // merge point by returning false when calling this method.
1125   bool depends_only_on_test() const {
1126     if (is_CFG() || pinned()) {
1127       return false;
1128     }
1129     assert(in(0) != nullptr, "must have a control input");
1130     return depends_only_on_test_impl();
1131   }
1132 
1133   // Return a clone of the current node that's pinned. The current node must return true for
1134   // depends_only_on_test, and the returned node must return false. This method is called when the
1135   // node is disconnected from its test.
1136   //
1137   // Examples:
1138   // 1. for (int i = start; i <= limit; i++) {
1139   //      if (!rangecheck(i, a)) {
1140   //        trap;
1141   //      }
1142   //      a[i];
1143   //    }
1144   // Loop predication can then hoist the range check out of the loop:
1145   //    if (!rangecheck(start, a)) {
1146   //      trap;
1147   //    }
1148   //    if (!rangecheck(limit, a)) {
1149   //      trap;
1150   //    }
1151   //    for (int i = start; i <= limit; i++) {
1152   //      a[i];
1153   //    }
1154   // As the load a[i] now depends on both tests rangecheck(start, a) and rangecheck(limit, a), it
1155   // must be pinned at the lowest dominating test of those.
1156   //
1157   // 2. if (y > x) {
1158   //      if (x >= 0) {
1159   //        if (y != 0) {
1160   //          x / y;
1161   //        }
1162   //      }
1163   //    }
1164   // The test (y != 0) == true can be deduced from (y > x) == true and (x >= 0) == true, so we may
1165   // choose to elide it. In such cases, the division x / y now depends on both tests
1166   // (y > x) == true and (x >= 0) == true, so it must be pinned at the lowest dominating test of
1167   // those.
1168   //
1169   // 3. if (b) {
1170   //      ...
1171   //    } else {
1172   //      ...
1173   //    }
1174   //    if (y == 0) {
1175   //      trap;
1176   //    }
1177   //    x / y;
1178   // The division x / y depends only on the test (y == 0) == false, but if we split the test
1179   // through the merge point but not the division:
1180   //    if (b) {
1181   //      ...
1182   //      if (y == 0) {
1183   //        trap;
1184   //      }
1185   //    } else {
1186   //      ...
1187   //      if (y == 0) {
1188   //        trap;
1189   //      }
1190   //    }
1191   //    x / y;
1192   // The division now has as its control input the RegionNode merging the branches of if(b)
1193   // instead of a test that proves y != 0. As a result, it must be pinned at that node.
1194   //
1195   // There are cases where the node does not actually have a dependency on its control input. For
1196   // example, when we try to sink a LoadNode out of a loop in PhaseIdealLoop::try_sink_out_of_loop,
1197   // we clone the node so that all of the clones can be scheduled out of the loop. To prevent the
1198   // clones from being GVN-ed again, we add a control input for the node at the loop exit. For the
1199   // cases when the node provably does not depend on its control input, this method can return
1200   // nullptr.
1201   Node* pin_node_under_control() const {
1202     assert(depends_only_on_test(), "must be a depends_only_on_test node");
1203     Node* res = pin_node_under_control_impl();
1204     if (res == nullptr) {
1205       assert(is_Load(), "unexpected failure to pin for %s", Name());
1206       return nullptr;
1207     }
1208     assert(!res->depends_only_on_test(), "the result must not depends_only_on_test");
1209     assert(Opcode() == res->Opcode(), "pinning must result in the same kind of node %s - %s", Name(), res->Name());
1210     return res;
1211   }
1212 
1213 private:
1214   virtual bool depends_only_on_test_impl() const { assert(false, "%s", Name()); return false; }
1215   virtual Node* pin_node_under_control_impl() const { assert(false, "%s", Name()); return nullptr; }
1216 
1217 public:
1218   // When building basic blocks, I need to have a notion of block beginning
1219   // Nodes, next block selector Nodes (block enders), and next block
1220   // projections.  These calls need to work on their machine equivalents.  The
1221   // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
1222   bool is_block_start() const {
1223     if ( is_Region() )
1224       return this == (const Node*)in(0);
1225     else
1226       return is_Start();
1227   }
1228 
1229   // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
1230   // Goto and Return.  This call also returns the block ending Node.
1231   virtual const Node *is_block_proj() const;
1232 
1233   // The node is a "macro" node which needs to be expanded before matching
1234   bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
1235   // The node is expensive: the best control is set during loop opts
1236   bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != nullptr; }
1237   // The node's original edge position is swapped.
1238   bool has_swapped_edges() const { return (_flags & Flag_has_swapped_edges) != 0; }
1239 
1240   bool is_predicated_vector() const { return (_flags & Flag_is_predicated_vector) != 0; }
1241 
1242   bool is_predicated_using_blend() const { return (_flags & Flag_is_predicated_using_blend) != 0; }
1243 
1244   // Used in lcm to mark nodes that have been scheduled
1245   bool is_scheduled() const { return (_flags & Flag_is_scheduled) != 0; }
1246 
1247   bool for_post_loop_opts_igvn() const { return (_flags & Flag_for_post_loop_opts_igvn) != 0; }
1248   bool for_merge_stores_igvn() const { return (_flags & Flag_for_merge_stores_igvn) != 0; }
1249 
1250   // Is 'n' possibly a loop entry (i.e. a Parse Predicate projection)?
1251   static bool may_be_loop_entry(Node* n) {
1252     return n != nullptr && n->is_IfProj() && n->in(0)->is_ParsePredicate();
1253   }
1254 
1255 //----------------- Optimization
1256 
1257   // Get the worst-case Type output for this Node.
1258   virtual const class Type *bottom_type() const;
1259 
1260   // If we find a better type for a node, try to record it permanently.
1261   // Return true if this node actually changed.
1262   // Be sure to do the hash_delete game in the "rehash" variant.
1263   void raise_bottom_type(const Type* new_type);
1264 
1265   // Get the address type with which this node uses and/or defs memory,
1266   // or null if none.  The address type is conservatively wide.
1267   // Returns non-null for calls, membars, loads, stores, etc.
1268   // Returns TypePtr::BOTTOM if the node touches memory "broadly".
1269   virtual const class TypePtr *adr_type() const { return nullptr; }
1270 
1271   // Return an existing node which computes the same function as this node.
1272   // The optimistic combined algorithm requires this to return a Node which
1273   // is a small number of steps away (e.g., one of my inputs).
1274   virtual Node* Identity(PhaseGVN* phase);
1275 
1276   // Return the set of values this Node can take on at runtime.
1277   virtual const Type* Value(PhaseGVN* phase) const;
1278 
1279   // Return a node which is more "ideal" than the current node.
1280   // The invariants on this call are subtle.  If in doubt, read the
1281   // treatise in node.cpp above the default implementation AND TEST WITH
1282   // -XX:VerifyIterativeGVN=1
1283   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1284 
1285   // Some nodes have specific Ideal subgraph transformations only if they are
1286   // unique users of specific nodes. Such nodes should be put on IGVN worklist
1287   // for the transformations to happen.
1288   bool has_special_unique_user() const;
1289 
1290   // Skip Proj and CatchProj node chains. Check for Null and Top.
1291   Node* find_exact_control(Node* ctrl);
1292 
1293   // Results of the dominance analysis.
1294   enum class DomResult {
1295     NotDominate,         // 'this' node does not dominate 'sub'.
1296     Dominate,            // 'this' node dominates or is equal to 'sub'.
1297     EncounteredDeadCode  // Result is undefined due to encountering dead code.
1298   };
1299   // Check if 'this' node dominates or is equal to 'sub'.
1300   DomResult dominates(Node* sub, Node_List &nlist);
1301 
1302   bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
1303 public:
1304 
1305   // See if there is valid pipeline info
1306   static  const Pipeline *pipeline_class();
1307   virtual const Pipeline *pipeline() const;
1308 
1309   // Compute the latency from the def to this instruction of the ith input node
1310   uint latency(uint i);
1311 
1312   // Hash & compare functions, for pessimistic value numbering
1313 
1314   // If the hash function returns the special sentinel value NO_HASH,
1315   // the node is guaranteed never to compare equal to any other node.
1316   // If we accidentally generate a hash with value NO_HASH the node
1317   // won't go into the table and we'll lose a little optimization.
1318   static const uint NO_HASH = 0;
1319   virtual uint hash() const;
1320   virtual bool cmp( const Node &n ) const;
1321 
1322   // Operation appears to be iteratively computed (such as an induction variable)
1323   // It is possible for this operation to return false for a loop-varying
1324   // value, if it appears (by local graph inspection) to be computed by a simple conditional.
1325   bool is_iteratively_computed();
1326 
1327   // Determine if a node is a counted loop induction variable.
1328   // NOTE: The method is defined in "loopnode.cpp".
1329   bool is_cloop_ind_var() const;
1330 
1331   // Return a node with opcode "opc" and the same inputs as "this" if one can
1332   // be found; otherwise return null.
1333   Node* find_similar(int opc);
1334   bool has_same_inputs_as(const Node* other) const;
1335 
1336   // Return the unique control out if there is exactly one; null if none or more than one.
1337   Node* unique_ctrl_out_or_null() const;
1338   // Return the unique control out. Asserts if none or more than one control out.
1339   Node* unique_ctrl_out() const;
1340 
1341   // Set control or add control as precedence edge
1342   void ensure_control_or_add_prec(Node* c);
1343   void add_prec_from(Node* n);
1344 
1345   // Visit boundary uses of the node and apply a callback function to each.
1346   // Recursively traverse uses, stopping and applying the callback when
1347   // reaching a boundary node, as defined by is_boundary. If always_callback is
1348   // true, the callback is applied to all nodes seen, not only to the boundary
1349   // nodes. Note: the function definition appears after the complete type
1350   // definition of Node_List.
1351   template <typename Callback, typename Check>
1352   void visit_uses(Callback callback, Check is_boundary, bool always_callback = false) const;
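  // Example (a minimal sketch; 'n' is a Node* and the lambdas are illustrative):
  //   n->visit_uses([](Node* use) { /* act on the boundary use */ },
  //                 [](Node* use) { return use->is_CFG(); });
  // This walks the transitive uses of 'n' and invokes the callback on the first
  // CFG node reached along each use chain.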
1353 
1354   //----------------- Code Generation
1355 
1356   // Ideal register class for Matching.  Zero means unmatched instruction
1357   // (these are cloned instead of converted to machine nodes).
1358   virtual uint ideal_reg() const;
1359 
1360   static const uint NotAMachineReg;   // must be > max. machine register
1361 
1362   // Do we Match on this edge index or not?  Generally false for Control
1363   // and true for everything else.  Weird for calls & returns.
1364   virtual uint match_edge(uint idx) const;
1365 
1366   // Register class in which the output is returned
1367   virtual const RegMask &out_RegMask() const;
1368   // Register class in which the input is expected
1369   virtual const RegMask &in_RegMask(uint) const;
1370   // Should we clone rather than spill this instruction?
1371   bool rematerialize() const;
1372 
1373   // Return JVM State Object if this Node carries debug info, or null otherwise
1374   virtual JVMState* jvms() const;
1375 
1376   // Print as assembly
1377   virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
1378   // Emit bytes using C2_MacroAssembler
1379   virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
1380   // Size of instruction in bytes
1381   virtual uint size(PhaseRegAlloc *ra_) const;
1382 
1383   // Convenience function to extract an integer constant from a node.
1384   // If it is not an integer constant (either Con, CastII, or Mach),
1385   // return value_if_unknown.
1386   jint find_int_con(jint value_if_unknown) const {
1387     const TypeInt* t = find_int_type();
1388     return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
1389   }
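  // Example (sketch): for a hypothetical Node* n,
  //   jint val = n->find_int_con(-1);   // 42 if n is an int constant 42, else -1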
1390   // Return the constant, knowing it is an integer constant already
1391   jint get_int() const {
1392     const TypeInt* t = find_int_type();
1393     guarantee(t != nullptr, "must be con");
1394     return t->get_con();
1395   }
1396   // Here's where the work is done.  Can produce non-constant int types too.
1397   const TypeInt* find_int_type() const;
1398   const TypeInteger* find_integer_type(BasicType bt) const;
1399 
1400   // Same thing for long (and intptr_t, via type.hpp):
1401   jlong get_long() const {
1402     const TypeLong* t = find_long_type();
1403     guarantee(t != nullptr, "must be con");
1404     return t->get_con();
1405   }
1406   jlong find_long_con(jlong value_if_unknown) const {
1407     const TypeLong* t = find_long_type();
1408     return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
1409   }
1410   const TypeLong* find_long_type() const;
1411 
1412   jlong get_integer_as_long(BasicType bt) const {
1413     const TypeInteger* t = find_integer_type(bt);
1414     guarantee(t != nullptr && t->is_con(), "must be con");
1415     return t->get_con_as_long(bt);
1416   }
1417   jlong find_integer_as_long(BasicType bt, jlong value_if_unknown) const {
1418     const TypeInteger* t = find_integer_type(bt);
1419     if (t == nullptr || !t->is_con())  return value_if_unknown;
1420     return t->get_con_as_long(bt);
1421   }
1422   const TypePtr* get_ptr_type() const;
1423 
1424   // These guys are called by code generated by ADLC:
1425   intptr_t get_ptr() const;
1426   intptr_t get_narrowcon() const;
1427   jdouble getd() const;
1428   jfloat getf() const;
1429   jshort geth() const;
1430 
1431   // Nodes which are pinned into basic blocks
1432   virtual bool pinned() const { return false; }
1433 
1434   // Nodes which use memory without consuming it, hence need antidependences
1435   // More specifically, needs_anti_dependence_check returns true iff the node
1436   // (a) does a load, and (b) does not perform a store (except perhaps to a
1437   // stack slot or some other unaliased location).
1438   bool needs_anti_dependence_check() const;
1439 
1440   // Return which operand this instruction may cisc-spill. In other words,
1441   // return the operand position that can be converted from a register to a memory access.
1442   virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
1443   bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }
1444 
1445   // Whether this is a memory-writing machine node.
1446   bool is_memory_writer() const { return is_Mach() && bottom_type()->has_memory(); }
1447 
1448   // Whether this is a memory phi node
1449   bool is_memory_phi() const { return is_Phi() && bottom_type() == Type::MEMORY; }
1450 
1451   bool is_div_or_mod(BasicType bt) const;
1452 
1453   bool is_data_proj_of_pure_function(const Node* maybe_pure_function) const;
1454 
1455 //----------------- Printing, etc
1456 #ifndef PRODUCT
1457  public:
1458   Node* find(int idx, bool only_ctrl = false); // Search the graph for the given idx.
1459   Node* find_ctrl(int idx); // Search control ancestors for the given idx.
1460   void dump_bfs(const int max_distance, Node* target, const char* options, outputStream* st, const frame* fr = nullptr) const;
1461   void dump_bfs(const int max_distance, Node* target, const char* options) const; // directly to tty
1462   void dump_bfs(const int max_distance) const; // dump_bfs(max_distance, nullptr, nullptr)
1463   void dump_bfs(const int max_distance, Node* target, const char* options, void* sp, void* fp, void* pc) const;
1464   class DumpConfig {
1465    public:
1466     // overridden to implement coloring of node idx
1467     virtual void pre_dump(outputStream *st, const Node* n) = 0;
1468     virtual void post_dump(outputStream *st) = 0;
1469   };
1470   void dump_idx(bool align = false, outputStream* st = tty, DumpConfig* dc = nullptr) const;
1471   void dump_name(outputStream* st = tty, DumpConfig* dc = nullptr) const;
1472   void dump() const; // print node with newline
1473   void dump(const char* suffix, bool mark = false, outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print this node.
1474   void dump(int depth) const;        // Print this node, recursively to depth d
1475   void dump_ctrl(int depth) const;   // Print control nodes, to depth d
1476   void dump_comp() const;            // Print this node in compact representation.
1477   // Print this node in compact representation.
1478   void dump_comp(const char* suffix, outputStream *st = tty) const;
1479  private:
1480   virtual void dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;    // Print required-edge info
1481   virtual void dump_prec(outputStream* st = tty, DumpConfig* dc = nullptr) const;   // Print precedence-edge info
1482   virtual void dump_out(outputStream* st = tty, DumpConfig* dc = nullptr) const;    // Print the output edge info
1483  public:
1484   virtual void dump_spec(outputStream *st) const {};      // Print per-node info
1485   // Print compact per-node info
1486   virtual void dump_compact_spec(outputStream *st) const { dump_spec(st); }
1487 
1488   static void verify(int verify_depth, VectorSet& visited, Node_List& worklist);
1489 
1490   // This call defines a class-unique string used to identify class instances
1491   virtual const char *Name() const;
1492 
1493   void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
1494   static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; } // check if we are in a dump call
1495 #endif
1496 #ifdef ASSERT
1497   void verify_construction();
1498   bool verify_jvms(const JVMState* jvms) const;
1499 
1500   Node* _debug_orig;                   // Original version of this, if any.
1501   Node*  debug_orig() const            { return _debug_orig; }
1502   void   set_debug_orig(Node* orig);   // _debug_orig = orig
1503   void   dump_orig(outputStream *st, bool print_key = true) const;
1504 
1505   uint64_t _debug_idx;                 // Unique value assigned to every node.
1506   uint64_t debug_idx() const           { return _debug_idx; }
1507   void set_debug_idx(uint64_t debug_idx) { _debug_idx = debug_idx; }
1508 
1509   int        _hash_lock;               // Barrier to modifications of nodes in the hash table
1510   void  enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
1511   void   exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }
1512 
1513   static void init_NodeProperty();
1514 
1515   #if OPTO_DU_ITERATOR_ASSERT
1516   const Node* _last_del;               // The last deleted node.
1517   uint        _del_tick;               // Bumped when a deletion happens.
1518   #endif
1519 #endif
1520 };
1521 
1522 inline bool not_a_node(const Node* n) {
1523   if (n == nullptr)                return true;
1524   if (((intptr_t)n & 1) != 0)      return true;  // uninitialized, etc.
1525   if (*(address*)n == badAddress)  return true;  // kill by Node::destruct
1526   return false;
1527 }
1528 
1529 //-----------------------------------------------------------------------------
1530 // Iterators over DU info, and associated Node functions.
1531 
1532 #if OPTO_DU_ITERATOR_ASSERT
1533 
1534 // Common code for assertion checking on DU iterators.
1535 class DUIterator_Common {
1536 #ifdef ASSERT
1537  protected:
1538   bool         _vdui;               // cached value of VerifyDUIterators
1539   const Node*  _node;               // the node containing the _out array
1540   uint         _outcnt;             // cached node->_outcnt
1541   uint         _del_tick;           // cached node->_del_tick
1542   Node*        _last;               // last value produced by the iterator
1543 
1544   void sample(const Node* node);    // used by c'tor to set up for verifies
1545   void verify(const Node* node, bool at_end_ok = false);
1546   void verify_resync();
1547   void reset(const DUIterator_Common& that);
1548 
1549 // The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
1550   #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
1551 #else
1552   #define I_VDUI_ONLY(i,x) { }
1553 #endif //ASSERT
1554 };
1555 
1556 #define VDUI_ONLY(x)     I_VDUI_ONLY(*this, x)
1557 
1558 // Default DU iterator.  Allows appends onto the out array.
1559 // Allows deletion from the out array only at the current point.
1560 // Usage:
1561 //  for (DUIterator i = x->outs(); x->has_out(i); i++) {
1562 //    Node* y = x->out(i);
1563 //    ...
1564 //  }
1565 // Compiles in product mode to an unsigned integer index, which indexes
1566 // onto a repeatedly reloaded base pointer of x->_out.  The loop predicate
1567 // also reloads x->_outcnt.  If you delete, you must perform "--i" just
1568 // before continuing the loop.  You must delete only the last-produced
1569 // edge.  You must delete only a single copy of the last-produced edge,
1570 // or else you must delete all copies at once (the first time the edge
1571 // is produced by the iterator).
1572 class DUIterator : public DUIterator_Common {
1573   friend class Node;
1574 
1575   // This is the index which provides the product-mode behavior.
1576   // Whatever the product-mode version of the system does to the
1577   // DUI index is done to this index.  All other fields in
1578   // this class are used only for assertion checking.
1579   uint         _idx;
1580 
1581   #ifdef ASSERT
1582   uint         _refresh_tick;    // Records the refresh activity.
1583 
1584   void sample(const Node* node); // Initialize _refresh_tick etc.
1585   void verify(const Node* node, bool at_end_ok = false);
1586   void verify_increment();       // Verify an increment operation.
1587   void verify_resync();          // Verify that we can back up over a deletion.
1588   void verify_finish();          // Verify that the loop terminated properly.
1589   void refresh();                // Resample verification info.
1590   void reset(const DUIterator& that);  // Resample after assignment.
1591   #endif
1592 
1593   DUIterator(const Node* node, int dummy_to_avoid_conversion)
1594     { _idx = 0;                         DEBUG_ONLY(sample(node)); }
1595 
1596  public:
1597   // initialize to garbage; clear _vdui to disable asserts
1598   DUIterator()
1599     { /*initialize to garbage*/         DEBUG_ONLY(_vdui = false); }
1600 
1601   DUIterator(const DUIterator& that)
1602     { _idx = that._idx;                 DEBUG_ONLY(_vdui = false; reset(that)); }
1603 
1604   void operator++(int dummy_to_specify_postfix_op)
1605     { _idx++;                           VDUI_ONLY(verify_increment()); }
1606 
1607   void operator--()
1608     { VDUI_ONLY(verify_resync());       --_idx; }
1609 
1610   ~DUIterator()
1611     { VDUI_ONLY(verify_finish()); }
1612 
1613   void operator=(const DUIterator& that)
1614     { _idx = that._idx;                 DEBUG_ONLY(reset(that)); }
1615 };
1616 
1617 DUIterator Node::outs() const
1618   { return DUIterator(this, 0); }
1619 DUIterator& Node::refresh_out_pos(DUIterator& i) const
1620   { I_VDUI_ONLY(i, i.refresh());        return i; }
1621 bool Node::has_out(DUIterator& i) const
1622   { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
1623 Node*    Node::out(DUIterator& i) const
1624   { I_VDUI_ONLY(i, i.verify(this));     return DEBUG_ONLY(i._last=) _out[i._idx]; }
1625 
1626 
1627 // Faster DU iterator.  Disallows insertions into the out array.
1628 // Allows deletion from the out array only at the current point.
1629 // Usage:
1630 //  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
1631 //    Node* y = x->fast_out(i);
1632 //    ...
1633 //  }
1634 // Compiles in product mode to raw Node** pointer arithmetic, with
1635 // no reloading of pointers from the original node x.  If you delete,
1636 // you must perform "--i; --imax" just before continuing the loop.
1637 // If you delete multiple copies of the same edge, you must decrement
1638 // imax, but not i, multiple times:  "--i, imax -= num_edges".
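// A deletion sketch (assumes 'x' is a Node* and 'nn' a hypothetical replacement;
// replace_edge() rewrites every input edge of 'use' that pointed to 'x'):
//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//    Node* use = x->fast_out(i);
//    int num_edges = use->replace_edge(x, nn);
//    --i; imax -= num_edges;   // back up over the deleted edges
//  }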
1639 class DUIterator_Fast : public DUIterator_Common {
1640   friend class Node;
1641   friend class DUIterator_Last;
1642 
1643   // This is the pointer which provides the product-mode behavior.
1644   // Whatever the product-mode version of the system does to the
1645   // DUI pointer is done to this pointer.  All other fields in
1646   // this class are used only for assertion checking.
1647   Node**       _outp;
1648 
1649   #ifdef ASSERT
1650   void verify(const Node* node, bool at_end_ok = false);
1651   void verify_limit();
1652   void verify_resync();
1653   void verify_relimit(uint n);
1654   void reset(const DUIterator_Fast& that);
1655   #endif
1656 
1657   // Note:  offset must be signed, since -1 is sometimes passed
1658   DUIterator_Fast(const Node* node, ptrdiff_t offset)
1659     { _outp = node->_out + offset;      DEBUG_ONLY(sample(node)); }
1660 
1661  public:
1662   // initialize to garbage; clear _vdui to disable asserts
1663   DUIterator_Fast()
1664     { /*initialize to garbage*/         DEBUG_ONLY(_vdui = false); }
1665 
1666   DUIterator_Fast(const DUIterator_Fast& that)
1667     { _outp = that._outp;               DEBUG_ONLY(_vdui = false; reset(that)); }
1668 
1669   void operator++(int dummy_to_specify_postfix_op)
1670     { _outp++;                          VDUI_ONLY(verify(_node, true)); }
1671 
1672   void operator--()
1673     { VDUI_ONLY(verify_resync());       --_outp; }
1674 
1675   void operator-=(uint n)   // applied to the limit only
1676     { _outp -= n;           VDUI_ONLY(verify_relimit(n));  }
1677 
1678   bool operator<(DUIterator_Fast& limit) {
1679     I_VDUI_ONLY(*this, this->verify(_node, true));
1680     I_VDUI_ONLY(limit, limit.verify_limit());
1681     return _outp < limit._outp;
1682   }
1683 
1684   void operator=(const DUIterator_Fast& that)
1685     { _outp = that._outp;               DEBUG_ONLY(reset(that)); }
1686 };
1687 
1688 DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
1689   // Assign a limit pointer to the reference argument:
1690   imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
1691   // Return the base pointer:
1692   return DUIterator_Fast(this, 0);
1693 }
1694 Node* Node::fast_out(DUIterator_Fast& i) const {
1695   I_VDUI_ONLY(i, i.verify(this));
1696   return DEBUG_ONLY(i._last=) *i._outp;
1697 }
1698 
1699 
1700 // Faster DU iterator.  Requires each successive edge to be removed.
1701 // Does not allow insertion of any edges.
1702 // Usage:
1703 //  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
1704 //    Node* y = x->last_out(i);
1705 //    ...
1706 //  }
1707 // Compiles in product mode to raw Node** pointer arithmetic, with
1708 // no reloading of pointers from the original node x.
1709 class DUIterator_Last : private DUIterator_Fast {
1710   friend class Node;
1711 
1712   #ifdef ASSERT
1713   void verify(const Node* node, bool at_end_ok = false);
1714   void verify_limit();
1715   void verify_step(uint num_edges);
1716   #endif
1717 
1718   // Note:  offset must be signed, since -1 is sometimes passed
1719   DUIterator_Last(const Node* node, ptrdiff_t offset)
1720     : DUIterator_Fast(node, offset) { }
1721 
1722   void operator++(int dummy_to_specify_postfix_op) {} // do not use
1723   void operator<(int)                              {} // do not use
1724 
1725  public:
1726   // initialize to garbage
1727   DUIterator_Last() { }
1728 
1729   DUIterator_Last(const DUIterator_Last& that) = default;
1730 
1731   void operator--()
1732     { _outp--;              VDUI_ONLY(verify_step(1));  }
1733 
1734   void operator-=(uint n)
1735     { _outp -= n;           VDUI_ONLY(verify_step(n));  }
1736 
1737   bool operator>=(DUIterator_Last& limit) {
1738     I_VDUI_ONLY(*this, this->verify(_node, true));
1739     I_VDUI_ONLY(limit, limit.verify_limit());
1740     return _outp >= limit._outp;
1741   }
1742 
1743   DUIterator_Last& operator=(const DUIterator_Last& that) = default;
1744 };
1745 
1746 DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
1747   // Assign a limit pointer to the reference argument:
1748   imin = DUIterator_Last(this, 0);
1749   // Return the initial pointer:
1750   return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
1751 }
1752 Node* Node::last_out(DUIterator_Last& i) const {
1753   I_VDUI_ONLY(i, i.verify(this));
1754   return DEBUG_ONLY(i._last=) *i._outp;
1755 }
1756 
1757 #endif //OPTO_DU_ITERATOR_ASSERT
1758 
1759 #undef I_VDUI_ONLY
1760 #undef VDUI_ONLY
1761 
1762 // An Iterator that truly follows the iterator pattern.  Doesn't
1763 // support deletion but could be made to.
1764 //
1765 //   for (SimpleDUIterator i(n); i.has_next(); i.next()) {
1766 //     Node* m = i.get();
1767 //   }
1768 class SimpleDUIterator : public StackObj {
1769  private:
1770   Node* node;
1771   DUIterator_Fast imax;
1772   DUIterator_Fast i;
1773  public:
1774   SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
1775   bool has_next() { return i < imax; }
1776   void next() { i++; }
1777   Node* get() { return node->fast_out(i); }
1778 };
1779 
1780 
1781 //-----------------------------------------------------------------------------
1782 // Map dense integer indices to Nodes.  Uses classic doubling-array trick.
1783 // Abstractly provides an infinite array of Node*'s, initialized to null.
1784 // Note that the constructor just zeros things, and since I use Arena
1785 // allocation I do not need a destructor to reclaim storage.
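// Usage sketch (assuming an Arena* 'arena' and Node* values 'key' and 'val'):
//   Node_Array map(arena);
//   map.map(key->_idx, val);        // grows the backing array as needed
//   Node* prev = map[key->_idx];    // operator[] yields null for unmapped indices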
1786 class Node_Array : public AnyObj {
1787 protected:
1788   Arena* _a;                    // Arena to allocate in
1789   uint   _max;
1790   Node** _nodes;
1791   ReallocMark _nesting;         // Safety checks for arena reallocation
1792 
1793   // Grow array to required capacity
1794   void maybe_grow(uint i) {
1795     _nesting.check(_a); // Check if a potential reallocation in the arena is safe
1796     if (i >= _max) {
1797       grow(i);
1798     }
1799   }
1800   void grow(uint i);
1801 
1802 public:
1803   Node_Array(Arena* a, uint max = OptoNodeListSize) : _a(a), _max(max) {
1804     _nodes = NEW_ARENA_ARRAY(a, Node*, max);
1805     clear();
1806   }
1807   Node_Array() : Node_Array(Thread::current()->resource_area()) {}
1808 
1809   NONCOPYABLE(Node_Array);
1810   Node_Array& operator=(Node_Array&&) = delete;
1811   // Allow move constructor for && (e.g. capture return of function)
1812   Node_Array(Node_Array&&) = default;
1813 
1814   Node *operator[] ( uint i ) const // Lookup, or null for not mapped
1815   { return (i<_max) ? _nodes[i] : (Node*)nullptr; }
1816   Node* at(uint i) const { assert(i<_max,"oob"); return _nodes[i]; }
1817   Node** adr() { return _nodes; }
1818   // Extend the mapping: index i maps to Node *n.
1819   void map( uint i, Node *n ) { maybe_grow(i); _nodes[i] = n; }
1820   void insert( uint i, Node *n );
1821   void remove( uint i );        // Remove, preserving order
1822   // Clear all entries in _nodes to null but keep storage
1823   void clear() {
1824     Copy::zero_to_bytes(_nodes, _max * sizeof(Node*));
1825   }
1826 
1827   uint max() const { return _max; }
1828   void dump() const;
1829 };
1830 
1831 class Node_List : public Node_Array {
1832   uint _cnt;
1833 public:
1834   Node_List(uint max = OptoNodeListSize) : Node_Array(Thread::current()->resource_area(), max), _cnt(0) {}
1835   Node_List(Arena *a, uint max = OptoNodeListSize) : Node_Array(a, max), _cnt(0) {}
1836 
1837   NONCOPYABLE(Node_List);
1838   Node_List& operator=(Node_List&&) = delete;
1839   // Allow move constructor for && (e.g. capture return of function)
1840   Node_List(Node_List&&) = default;
1841 
1842   bool contains(const Node* n) const {
1843     for (uint e = 0; e < size(); e++) {
1844       if (at(e) == n) return true;
1845     }
1846     return false;
1847   }
1848   void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
1849   void remove( uint i ) { Node_Array::remove(i); _cnt--; }
1850   void push( Node *b ) { map(_cnt++,b); }
1851   void yank( Node *n );         // Find and remove
1852   Node *pop() { return _nodes[--_cnt]; }
1853   void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
1854   void copy(const Node_List& from) {
1855     if (from._max > _max) {
1856       grow(from._max);
1857     }
1858     _cnt = from._cnt;
1859     Copy::conjoint_words_to_higher((HeapWord*)&from._nodes[0], (HeapWord*)&_nodes[0], from._max * sizeof(Node*));
1860   }
1861 
1862   uint size() const { return _cnt; }
1863   void dump() const;
1864   void dump_simple() const;
1865 };
1866 
1867 // Definition must appear after complete type definition of Node_List
1868 template <typename Callback, typename Check>
1869 void Node::visit_uses(Callback callback, Check is_boundary, bool always_callback) const {
1870   ResourceMark rm;
1871   VectorSet visited;
1872   Node_List worklist;
1873 
1874   // The initial worklist consists of the direct uses
1875   for (DUIterator_Fast kmax, k = fast_outs(kmax); k < kmax; k++) {
1876     Node* out = fast_out(k);
1877     if (!visited.test_set(out->_idx)) { worklist.push(out); }
1878   }
1879 
1880   while (worklist.size() > 0) {
1881     Node* use = worklist.pop();
1882     bool boundary = is_boundary(use);
1883     if (boundary || always_callback) {
1884       callback(use);
1885     }
1886     if (!boundary) {
1887       // Not a boundary node, continue search
1888       for (DUIterator_Fast kmax, k = use->fast_outs(kmax); k < kmax; k++) {
1889         Node* out = use->fast_out(k);
1890         if (!visited.test_set(out->_idx)) { worklist.push(out); }
1891       }
1892     }
1893   }
1894 }
1895 
1896 
1897 //------------------------------Unique_Node_List-------------------------------
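// Worklist sketch (assuming a starting Node* 'root'): each node enters the list
// at most once, so this loop visits every transitively reachable use exactly once.
//   ResourceMark rm;
//   Unique_Node_List worklist;
//   worklist.push(root);
//   for (uint i = 0; i < worklist.size(); i++) {
//     Node* n = worklist.at(i);
//     worklist.push_outputs_of(n);  // duplicates are silently ignored
//   }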
1898 class Unique_Node_List : public Node_List {
1899   VectorSet _in_worklist;
1900   uint _clock_index;            // Index in list from which to pop next
1901 public:
1902   Unique_Node_List() : Node_List(), _clock_index(0) {}
1903   Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}
1904 
1905   NONCOPYABLE(Unique_Node_List);
1906   Unique_Node_List& operator=(Unique_Node_List&&) = delete;
1907   // Allow move constructor for && (e.g. capture return of function)
1908   Unique_Node_List(Unique_Node_List&&) = default;
1909 
1910   void remove( Node *n );
1911   bool member(const Node* n) const { return _in_worklist.test(n->_idx) != 0; }
1912   VectorSet& member_set(){ return _in_worklist; }
1913 
1914   void push(Node* b) {
1915     if( !_in_worklist.test_set(b->_idx) )
1916       Node_List::push(b);
1917   }
1918   void push_non_cfg_inputs_of(const Node* node) {
1919     for (uint i = 1; i < node->req(); i++) {
1920       Node* input = node->in(i);
1921       if (input != nullptr && !input->is_CFG()) {
1922         push(input);
1923       }
1924     }
1925   }
1926 
1927   void push_outputs_of(const Node* node) {
1928     for (DUIterator_Fast imax, i = node->fast_outs(imax); i < imax; i++) {
1929       Node* output = node->fast_out(i);
1930       push(output);
1931     }
1932   }
1933 
1934   Node *pop() {
1935     if( _clock_index >= size() ) _clock_index = 0;
1936     Node *b = at(_clock_index);
1937     map( _clock_index, Node_List::pop());
1938     if (size() != 0) _clock_index++; // Always start from 0
1939     _in_worklist.remove(b->_idx);
1940     return b;
1941   }
1942   Node *remove(uint i) {
1943     Node *b = Node_List::at(i);
1944     _in_worklist.remove(b->_idx);
1945     map(i,Node_List::pop());
1946     return b;
1947   }
1948   void yank(Node *n) {
1949     _in_worklist.remove(n->_idx);
1950     Node_List::yank(n);
1951   }
1952   void  clear() {
1953     _in_worklist.clear();        // Discards storage but grows automatically
1954     Node_List::clear();
1955     _clock_index = 0;
1956   }
1957   void ensure_empty() {
1958     assert(size() == 0, "must be empty");
1959     clear(); // just in case
1960   }
1961 
1962   // Used after parsing to remove useless nodes before Iterative GVN
1963   void remove_useless_nodes(VectorSet& useful);
1964 
1965   // If the idx of the Nodes change, we must recompute the VectorSet
1966   void recompute_idx_set() {
1967     _in_worklist.clear();
1968     for (uint i = 0; i < size(); i++) {
1969       Node* n = at(i);
1970       _in_worklist.set(n->_idx);
1971     }
1972   }
1973 
1974 #ifdef ASSERT
1975   bool is_subset_of(Unique_Node_List& other) {
1976     for (uint i = 0; i < size(); i++) {
1977       Node* n = at(i);
1978       if (!other.member(n)) {
1979         return false;
1980       }
1981     }
1982     return true;
1983   }
1984 #endif
1985 
1986   bool contains(const Node* n) const {
1987     fatal("use faster member() instead");
1988     return false;
1989   }
1990 
1991 #ifndef PRODUCT
1992   void print_set() const { _in_worklist.print(); }
1993 #endif
1994 };
1995 
1996 // Unique_Mixed_Node_List
1997 // unique: nodes are added only once
1998 // mixed: allow new and old nodes
1999 class Unique_Mixed_Node_List : public ResourceObj {
2000 public:
2001   Unique_Mixed_Node_List() : _visited_set(cmpkey, hashkey) {}
2002 
2003   void add(Node* node) {
2004     if (not_a_node(node)) {
2005       return; // Gracefully handle null, -1, 0xabababab, etc.
2006     }
2007     if (_visited_set[node] == nullptr) {
2008       _visited_set.Insert(node, node);
2009       _worklist.push(node);
2010     }
2011   }
2012 
2013   Node* operator[] (uint i) const {
2014     return _worklist[i];
2015   }
2016 
2017   size_t size() {
2018     return _worklist.size();
2019   }
2020 
2021 private:
2022   Dict _visited_set;
2023   Node_List _worklist;
2024 };
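// Usage sketch (assuming Node* values 'a' and 'b'; adding a node twice, or
// adding something that is not a valid node, is harmless):
//   Unique_Mixed_Node_List list;
//   list.add(a);
//   list.add(a);       // ignored: already present
//   list.add(b);
//   for (uint i = 0; i < list.size(); i++) { Node* n = list[i]; /* ... */ }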
2025 
2026 // Inline definition of Compile::record_for_igvn must be deferred to this point.
2027 inline void Compile::record_for_igvn(Node* n) {
2028   _igvn_worklist->push(n);
2029 }
2030 
2031 // Inline definition of Compile::remove_for_igvn must be deferred to this point.
2032 inline void Compile::remove_for_igvn(Node* n) {
2033   _igvn_worklist->remove(n);
2034 }
2035 
2036 //------------------------------Node_Stack-------------------------------------
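// Usage sketch for an iterative input-graph walk (assuming a start Node* 'root';
// real uses typically pair this with a VectorSet to avoid revisiting nodes):
//   ResourceMark rm;
//   Node_Stack stack(16);
//   stack.push(root, 0);
//   while (stack.is_nonempty()) {
//     Node* n = stack.node();
//     uint  j = stack.index();
//     if (j < n->req()) {
//       stack.set_index(j + 1);                          // resume here later
//       if (n->in(j) != nullptr) stack.push(n->in(j), 0);
//     } else {
//       stack.pop();                                     // all inputs visited
//     }
//   }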
2037 class Node_Stack {
2038 protected:
2039   struct INode {
2040     Node *node; // Processed node
2041     uint  indx; // Index of next node's child
2042   };
2043   INode *_inode_top; // tos, stack grows up
2044   INode *_inode_max; // End of _inodes == _inodes + _max
2045   INode *_inodes;    // Array storage for the stack
2046   Arena *_a;         // Arena to allocate in
2047   ReallocMark _nesting; // Safety checks for arena reallocation
2048 
2049   void maybe_grow() {
2050     _nesting.check(_a); // Check if a potential reallocation in the arena is safe
2051     if (_inode_top >= _inode_max) {
2052       grow();
2053     }
2054   }
2055   void grow();
2056 
2057 public:
2058   Node_Stack(int size) {
2059     size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
2060     _a = Thread::current()->resource_area();
2061     _inodes = NEW_ARENA_ARRAY( _a, INode, max );
2062     _inode_max = _inodes + max;
2063     _inode_top = _inodes - 1; // stack is empty
2064   }
2065 
2066   Node_Stack(Arena *a, int size) : _a(a) {
2067     size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
2068     _inodes = NEW_ARENA_ARRAY( _a, INode, max );
2069     _inode_max = _inodes + max;
2070     _inode_top = _inodes - 1; // stack is empty
2071   }
2072 
2073   void pop() {
2074     assert(_inode_top >= _inodes, "node stack underflow");
2075     --_inode_top;
2076   }
2077   void push(Node *n, uint i) {
2078     ++_inode_top;
2079     maybe_grow();
2080     INode *top = _inode_top; // optimization
2081     top->node = n;
2082     top->indx = i;
2083   }
2084   Node *node() const {
2085     return _inode_top->node;
2086   }
2087   Node* node_at(uint i) const {
2088     assert(_inodes + i <= _inode_top, "in range");
2089     return _inodes[i].node;
2090   }
2091   uint index() const {
2092     return _inode_top->indx;
2093   }
2094   uint index_at(uint i) const {
2095     assert(_inodes + i <= _inode_top, "in range");
2096     return _inodes[i].indx;
2097   }
2098   void set_node(Node *n) {
2099     _inode_top->node = n;
2100   }
2101   void set_index(uint i) {
2102     _inode_top->indx = i;
2103   }
2104   uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes,  sizeof(INode)); } // Max size
2105   uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes,  sizeof(INode)); } // Current size
2106   bool is_nonempty() const { return (_inode_top >= _inodes); }
2107   bool is_empty() const { return (_inode_top < _inodes); }
2108   void clear() { _inode_top = _inodes - 1; } // retain storage
2109 
2110   // Node_Stack is used to map nodes.
2111   Node* find(uint idx) const;
2112 
2113   NONCOPYABLE(Node_Stack);
2114 };
2115 
2116 
2117 //-----------------------------Node_Notes--------------------------------------
2118 // Debugging or profiling annotations loosely and sparsely associated
2119 // with some nodes.  See Compile::node_notes_at for the accessor.
2120 class Node_Notes {
2121   JVMState* _jvms;
2122 
2123 public:
2124   Node_Notes(JVMState* jvms = nullptr) {
2125     _jvms = jvms;
2126   }
2127 
2128   JVMState* jvms()            { return _jvms; }
2129   void  set_jvms(JVMState* x) {        _jvms = x; }
2130 
2131   // True if there is nothing here.
2132   bool is_clear() {
2133     return (_jvms == nullptr);
2134   }
2135 
2136   // Make there be nothing here.
2137   void clear() {
2138     _jvms = nullptr;
2139   }
2140 
2141   // Make a new, clean node notes.
2142   static Node_Notes* make(Compile* C) {
2143     Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
2144     nn->clear();
2145     return nn;
2146   }
2147 
2148   Node_Notes* clone(Compile* C) {
2149     Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
2150     (*nn) = (*this);
2151     return nn;
2152   }
2153 
2154   // Absorb any information from source.
2155   bool update_from(Node_Notes* source) {
2156     bool changed = false;
2157     if (source != nullptr) {
2158       if (source->jvms() != nullptr) {
2159         set_jvms(source->jvms());
2160         changed = true;
2161       }
2162     }
2163     return changed;
2164   }
2165 };
2166 
2167 // Inlined accessors for Compile::node_notes that require the preceding class:
2168 inline Node_Notes*
2169 Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
2170                            int idx, bool can_grow) {
2171   assert(idx >= 0, "oob");
2172   int block_idx = (idx >> _log2_node_notes_block_size);
2173   int grow_by = (block_idx - (arr == nullptr? 0: arr->length()));
2174   if (grow_by >= 0) {
2175     if (!can_grow) return nullptr;
2176     grow_node_notes(arr, grow_by + 1);
2177   }
2178   if (arr == nullptr) return nullptr;
2179   // (Every element of arr is a sub-array of length _node_notes_block_size.)
2180   return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
2181 }
2182 
2183 inline Node_Notes* Compile::node_notes_at(int idx) {
2184   return locate_node_notes(_node_note_array, idx, false);
2185 }
2186 
2187 inline bool
2188 Compile::set_node_notes_at(int idx, Node_Notes* value) {
2189   if (value == nullptr || value->is_clear())
2190     return false;  // nothing to write => write nothing
2191   Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
2192   assert(loc != nullptr, "");
2193   return loc->update_from(value);
2194 }
2195 
2196 
2197 //------------------------------TypeNode---------------------------------------
2198 // Node with a Type constant.
2199 class TypeNode : public Node {
2200 protected:
2201   virtual uint hash() const;    // Check the type
2202   virtual bool cmp( const Node &n ) const;
2203   virtual uint size_of() const; // Size is bigger
2204   const Type* const _type;
2205 public:
2206   void set_type(const Type* t) {
2207     assert(t != nullptr, "sanity");
2208     DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
2209     *(const Type**)&_type = t;   // cast away const-ness
2210     // If this node is in the hash table, make sure it doesn't need a rehash.
2211     assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
2212   }
2213   const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };
2214   TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
2215     init_class_id(Class_Type);
2216   }
2217   virtual const Type* Value(PhaseGVN* phase) const;
2218   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
2219   virtual const Type *bottom_type() const;
2220   virtual       uint  ideal_reg() const;
2221 
2222   void make_path_dead(PhaseIterGVN* igvn, PhaseIdealLoop* loop, Node* ctrl_use, uint j, const char* phase_str);
2223 #ifndef PRODUCT
2224   virtual void dump_spec(outputStream *st) const;
2225   virtual void dump_compact_spec(outputStream *st) const;
2226 #endif
2227   void make_paths_from_here_dead(PhaseIterGVN* igvn, PhaseIdealLoop* loop, const char* phase_str);
2228   void create_halt_path(PhaseIterGVN* igvn, Node* c, PhaseIdealLoop* loop, const char* phase_str) const;
2229 };
2230 
2231 #include "opto/opcodes.hpp"
2232 
2233 #define Op_IL(op) \
2234   inline int Op_ ## op(BasicType bt) { \
2235   assert(bt == T_INT || bt == T_LONG, "only for int or longs"); \
2236   if (bt == T_INT) { \
2237     return Op_## op ## I; \
2238   } \
2239   return Op_## op ## L; \
2240 }
2241 
2242 Op_IL(Add)
2243 Op_IL(And)
2244 Op_IL(Sub)
2245 Op_IL(Mul)
2246 Op_IL(URShift)
2247 Op_IL(LShift)
2248 Op_IL(RShift)
2249 Op_IL(Xor)
2250 Op_IL(Cmp)
2251 Op_IL(Div)
2252 Op_IL(Mod)
2253 Op_IL(UDiv)
2254 Op_IL(UMod)
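// For example, Op_Add(T_INT) yields Op_AddI and Op_Add(T_LONG) yields Op_AddL,
// so callers can pick the int or long flavor of an operation from a BasicType:
//   int add_opc = Op_Add(bt);   // bt must be T_INT or T_LONG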
2255 
2256 inline int Op_ConIL(BasicType bt) {
2257   assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2258   if (bt == T_INT) {
2259     return Op_ConI;
2260   }
2261   return Op_ConL;
2262 }
2263 
2264 inline int Op_Cmp_unsigned(BasicType bt) {
2265   assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2266   if (bt == T_INT) {
2267     return Op_CmpU;
2268   }
2269   return Op_CmpUL;
2270 }
2271 
2272 inline int Op_Cast(BasicType bt) {
2273   assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2274   if (bt == T_INT) {
2275     return Op_CastII;
2276   }
2277   return Op_CastLL;
2278 }
2279 
2280 inline int Op_DivIL(BasicType bt, bool is_unsigned) {
2281   assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2282   if (bt == T_INT) {
2283     if (is_unsigned) {
2284       return Op_UDivI;
2285     } else {
2286       return Op_DivI;
2287     }
2288   }
2289   if (is_unsigned) {
2290     return Op_UDivL;
2291   } else {
2292     return Op_DivL;
2293   }
2294 }
2295 
2296 inline int Op_DivModIL(BasicType bt, bool is_unsigned) {
2297   assert(bt == T_INT || bt == T_LONG, "only for int or longs");
2298   if (bt == T_INT) {
2299     if (is_unsigned) {
2300       return Op_UDivModI;
2301     } else {
2302       return Op_DivModI;
2303     }
2304   }
2305   if (is_unsigned) {
2306     return Op_UDivModL;
2307   } else {
2308     return Op_DivModL;
2309   }
2310 }
2311 
2312 // Interface to define actions that should be taken when running DataNodeBFS. Each user can extend this class to specify
2313 // a customized BFS.
2314 class BFSActions : public StackObj {
2315  public:
2316   // Should a node's inputs further be visited in the BFS traversal? By default, we visit all data inputs. Override this
2317   // method to provide a custom filter.
2318   virtual bool should_visit(Node* node) const {
2319     // By default, visit all inputs.
2320     return true;
2321   };
2322 
2323   // Is the visited node a target node that we are looking for in the BFS traversal? We do not visit its inputs further
2324   // but the BFS will continue to visit all unvisited nodes in the queue.
2325   virtual bool is_target_node(Node* node) const = 0;
2326 
2327   // Defines an action that should be taken when we visit a target node in the BFS traversal.
2328   // To give more freedom, we pass the direct child node to the target node such that
2329   // child->in(i) == target node. This allows to also directly replace the target node instead
2330   // of only updating its inputs.
2331   virtual void target_node_action(Node* child, uint i) = 0;
2332 };
2333 
2334 // Class to perform a BFS traversal on the data nodes from a given start node. The provided BFSActions guide which
2335 // data node's inputs should be further visited, which data nodes are target nodes and what to do with the target nodes.
2336 class DataNodeBFS : public StackObj {
2337   BFSActions& _bfs_actions;
2338 
2339  public:
2340   explicit DataNodeBFS(BFSActions& bfs_action) : _bfs_actions(bfs_action) {}
2341 
2342   // Run the BFS starting from 'start_node' and apply the actions provided to this class.
2343   void run(Node* start_node) {
2344     ResourceMark rm;
2345     Unique_Node_List _nodes_to_visit;
2346     _nodes_to_visit.push(start_node);
2347     for (uint i = 0; i < _nodes_to_visit.size(); i++) {
2348       Node* next = _nodes_to_visit[i];
2349       for (uint j = 1; j < next->req(); j++) {
2350         Node* input = next->in(j);
2351         if (_bfs_actions.is_target_node(input)) {
2352           assert(_bfs_actions.should_visit(input), "must also pass node filter");
2353           _bfs_actions.target_node_action(next, j);
2354         } else if (_bfs_actions.should_visit(input)) {
2355           _nodes_to_visit.push(input);
2356         }
2357       }
2358     }
2359   }
2360 };
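// Example (a minimal sketch of a custom BFSActions; all names below are
// illustrative and 'igvn' is assumed to be a valid PhaseIterGVN*):
//   class ReplaceTargetUses : public BFSActions {
//     Node* _target;
//     Node* _replacement;
//     PhaseIterGVN* _igvn;
//    public:
//     ReplaceTargetUses(Node* t, Node* r, PhaseIterGVN* igvn)
//       : _target(t), _replacement(r), _igvn(igvn) {}
//     bool should_visit(Node* node) const { return !node->is_CFG(); }
//     bool is_target_node(Node* node) const { return node == _target; }
//     void target_node_action(Node* child, uint i) {
//       _igvn->replace_input_of(child, i, _replacement);
//     }
//   };
//
//   ReplaceTargetUses actions(target, replacement, igvn);
//   DataNodeBFS bfs(actions);
//   bfs.run(start_node);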
2361 
2362 #endif // SHARE_OPTO_NODE_HPP