/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_CALLNODE_HPP
#define SHARE_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class NamedCounter;
class MultiNode;
class  SafePointNode;
class   CallNode;
class     CallJavaNode;
class       CallStaticJavaNode;
class       CallDynamicJavaNode;
class     CallRuntimeNode;
class       CallLeafNode;
class         CallLeafNoFPNode;
class         CallLeafVectorNode;
class     AllocateNode;
class       AllocateArrayNode;
class     AbstractLockNode;
class       LockNode;
class       UnlockNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
  virtual void  dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};
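
// A minimal usage sketch (assuming `gvn` is the current PhaseGVN and `start`
// is the method's StartNode): the parser materializes each incoming value as
// a ParmNode projecting the corresponding tuple slot off Start, e.g.
//
//   Node* ctl   = gvn.transform(new ParmNode(start, TypeFunc::Control));
//   Node* parm0 = gvn.transform(new ParmNode(start, TypeFunc::Parms + 0));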


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode(uint edges, Node* cntrl, Node* i_o, Node* memory, Node* frameptr, Node* retadr);
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
#endif
};


//------------------------------ForwardExceptionNode---------------------------
// Pop stack frame and jump to StubRoutines::forward_exception_entry()
class ForwardExceptionNode : public ReturnNode {
public:
  ForwardExceptionNode(Node* cntrl, Node* i_o, Node* memory, Node* frameptr, Node* retadr)
    : ReturnNode(TypeFunc::Parms, cntrl, i_o, memory, frameptr, retadr) {
  }

  virtual int Opcode() const;
};

//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false       -- do not reexecute
    Reexecute_True      =  1  // true        -- reexecute the bytecode
  } ReexecuteState; //Reexecute State

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  ciInstance*       _receiver_info; // Constant receiver instance for compiled lambda forms
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size);  // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute==Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
  bool              has_method() const { return _method != nullptr; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  ciInstance*    receiver_info() const { assert(has_method(), ""); return _receiver_info; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint        debug_depth()  const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors are stored as (boxNode, objNode) pairs
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }
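  // Worked example (illustrative): with monoff() == 10 and logMonitorEdges == 1,
  // monitor 0 occupies inputs 10 (box) and 11 (obj) and monitor 1 occupies
  // inputs 12 and 13, so monitor_box_offset(1) == 12, monitor_obj_offset(1) == 13,
  // and is_monitor_box(12) is true while is_monitor_box(13) is false.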

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode* map) { _map = map; }
  void              bind_map(SafePointNode* map); // set_map() and set_jvms() for the SafePointNode
  void              set_sp(uint sp) { _sp = sp; }
                    // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }
  void              set_receiver_info(ciInstance* recv) { assert(has_method() || recv == nullptr, ""); _receiver_info = recv; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;
  ciInstance* compute_receiver_info(ciMethod* callee) const;

#ifndef PRODUCT
  void      print_method_with_lineno(outputStream* st, bool show_name) const;
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};
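
// Example (a hedged sketch): printing the inline chain innermost-first by
// walking the caller links, assuming `jvms` belongs to a parsed Java method:
//
//   for (JVMState* s = jvms; s != nullptr; s = s->caller()) {
//     if (s->has_method()) {
//       tty->print_cr("%s @ bci %d", s->method()->name()->as_utf8(), s->bci());
//     }
//   }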

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  friend JVMState;
  friend class GraphKit;
  friend class LibraryCallKit;

  virtual bool           cmp( const Node &n ) const;
  virtual uint           size_of() const;       // Size is bigger

protected:
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
  const TypePtr*  _adr_type;  // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()
  bool            _has_ea_local_in_scope; // NoEscape or ArgEscape objects in JVM States

  void set_jvms(JVMState* s) {
    assert(s != nullptr, "assign null value to _jvms");
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (null):
                const TypePtr* adr_type = nullptr)
    : MultiNode( edges ),
      _jvms(jvms),
      _adr_type(adr_type),
      _has_ea_local_in_scope(false)
  {
    init_class_id(Class_SafePoint);
  }

  JVMState* jvms() const { return _jvms; }
  virtual bool needs_deep_clone_jvms(Compile* C) { return false; }
  void clone_jvms(Compile* C) {
    if (jvms() != nullptr) {
      if (needs_deep_clone_jvms(C)) {
        set_jvms(jvms()->clone_deep(C));
        jvms()->set_map_deep(this);
      } else {
        jvms()->clone_shallow(C)->bind_map(this);
      }
    }
  }

 private:
  void verify_input(const JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

 public:
  // Functionality from old debug nodes which has changed
  Node* local(const JVMState* jvms, uint idx) const {
    uint loc_idx = jvms->locoff() + idx;
    assert(jvms->is_loc(loc_idx), "not a local slot");
    verify_input(jvms, loc_idx);
    return in(loc_idx);
  }
  Node* stack(const JVMState* jvms, uint idx) const {
    uint stk_idx = jvms->stkoff() + idx;
    assert(jvms->is_stk(stk_idx), "not a stack slot");
    verify_input(jvms, stk_idx);
    return in(stk_idx);
  }
  Node* argument(const JVMState* jvms, uint idx) const {
    uint arg_idx = jvms->argoff() + idx;
    assert(jvms->is_stk(arg_idx), "not an argument slot");
    verify_input(jvms, arg_idx);
    return in(arg_idx);
  }
  Node* monitor_box(const JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    uint mon_box_idx = jvms->monitor_box_offset(idx);
    assert(jvms->is_monitor_box(mon_box_idx), "not a monitor box offset");
    return in(mon_box_idx);
  }
  Node* monitor_obj(const JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    uint mon_obj_idx = jvms->monitor_obj_offset(idx);
    assert(jvms->is_mon(mon_obj_idx) && !jvms->is_monitor_box(mon_obj_idx), "not a monitor obj offset");
    return in(mon_obj_idx);
  }

  void  set_local(const JVMState* jvms, uint idx, Node *c);

  void  set_stack(const JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(const JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;
  // Peek Operand Stacks, JVMS 2.6.2
  Node* peek_operand(uint off = 0) const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == nullptr; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != nullptr; }

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }
  void set_has_ea_local_in_scope(bool b) {
    _has_ea_local_in_scope = b;
  }
  bool has_ea_local_in_scope() const {
    return _has_ea_local_in_scope;
  }

  // A temporary storage for node edges.
  // Intended for a single use.
  class NodeEdgeTempStorage : public StackObj {
    friend class SafePointNode;

    PhaseIterGVN& _igvn;
    Node*         _node_hook;

#ifdef ASSERT
    enum State { state_initial, state_populated, state_processed };

    State _state; // monotonically transitions from initial to processed state.
#endif // ASSERT

    bool is_empty() const {
      return _node_hook == nullptr || _node_hook->req() == 1;
    }
    void push(Node* n) {
      assert(n != nullptr, "");
      if (_node_hook == nullptr) {
        _node_hook = new Node(nullptr);
      }
      _node_hook->add_req(n);
    }
    Node* pop() {
      assert(!is_empty(), "");
      int idx = _node_hook->req()-1;
      Node* r = _node_hook->in(idx);
      _node_hook->del_req(idx);
      assert(r != nullptr, "");
      return r;
    }

  public:
    NodeEdgeTempStorage(PhaseIterGVN &igvn) : _igvn(igvn), _node_hook(nullptr)
                                              DEBUG_ONLY(COMMA _state(state_initial)) {
      assert(is_empty(), "");
    }

    ~NodeEdgeTempStorage() {
      assert(_state == state_processed, "not processed");
      assert(is_empty(), "");
      if (_node_hook != nullptr) {
        _node_hook->destruct(&_igvn);
      }
    }

    void remove_edge_if_present(Node* n) {
      if (!is_empty()) {
        int idx = _node_hook->find_edge(n);
        if (idx > 0) {
          _node_hook->del_req(idx);
        }
      }
    }
  };

  void remove_non_debug_edges(NodeEdgeTempStorage& non_debug_edges);
  void restore_non_debug_edges(NodeEdgeTempStorage& non_debug_edges);

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type*    Value(PhaseGVN* phase) const;
  virtual const Type*    bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr* adr_type() const { return _adr_type; }
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node*          Identity(PhaseGVN* phase);
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
#endif
};
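
// Example (an illustrative sketch): reading the debug state captured by a
// map, assuming `sfpt` carries a valid JVMState for the innermost method:
//
//   JVMState* jvms = sfpt->jvms();
//   for (int i = 0; i < jvms->loc_size(); i++) {
//     Node* l = sfpt->local(jvms, i);  // may be top() for dead locals
//   }
//   for (uint i = 0; i < jvms->sp(); i++) {
//     Node* s = sfpt->stack(jvms, i);  // live expression-stack entries
//   }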

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.
class SafePointScalarObjectNode: public TypeNode {
  uint _first_index;              // First input edge relative index of a SafePoint node where
                                  // states of the scalarized object fields are collected.
  uint _depth;                    // Depth of the JVM state the _first_index field refers to
  uint _n_fields;                 // Number of non-static fields of the scalarized object.

  Node* _alloc;                   // Just for debugging purposes.

  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields);

  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != nullptr, "missed JVMS");
    return jvms->of_depth(_depth)->scloff() + _first_index;
  }
  uint n_fields()    const { return _n_fields; }

#ifdef ASSERT
  Node* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarMergeNode----------------------
//
// This class represents an allocation merge that is used as debug information
// and has had at least one of its inputs scalar replaced.
//
// The required inputs of this node, except the control, are pointers to
// SafePointScalarObjectNodes that describe scalarized inputs of the original
// allocation merge. The remaining properties of the class are described below.
//
// _merge_pointer_idx : index in the SafePointNode's input array where the
//   description of the _allocation merge_ starts. The index is zero based and
//   relative to the SafePoint's scloff. The two entries in the SafePointNode's
//   input array starting at '_merge_pointer_idx' are Phi nodes representing:
//
//   1) The original merge Phi. During rematerialization this input will only be
//   used if the "selector Phi" (see below) indicates that the execution of the
//   Phi took the path of a non scalarized input.
//
//   2) A "selector Phi". The output of this Phi will be '-1' if the execution
//   of the method exercised a non scalarized input of the original Phi.
//   Otherwise, the output will be >= 0, and its value plus one gives the index
//   in the SafePointScalarMergeNode's input array where the description of the
//   scalarized object that should be used is found.
//
// As an example, consider a Phi merging 3 inputs, of which the last 2 are
// scalar replaceable.
//
//    Phi(Region, NSR, SR, SR)
//
// During scalar replacement the SR inputs will be changed to null:
//
//    Phi(Region, NSR, nullptr, nullptr)
//
// A corresponding selector Phi will be created with a configuration like this:
//
//    Phi(Region, -1, 0, 1)
//
// During execution of the compiled method, if the execution reaches a Trap, the
// output of the selector Phi will tell if we need to rematerialize one of the
// scalar replaced inputs or if we should just use the pointer returned by the
// original Phi.
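//
// In pseudo-code, the deoptimization-time decision sketched above is:
//
//   if (selector == -1) {
//     // NSR path taken: use the object produced by the original merge Phi
//   } else {
//     // rematerialize from the scalarized-object description found at input
//     // (selector + 1) of the SafePointScalarMergeNode
//   }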

class SafePointScalarMergeNode: public TypeNode {
  int _merge_pointer_idx;         // This is the first input edge relative
                                  // index of a SafePoint node where metadata information relative
                                  // to restoring the merge is stored. The corresponding input
                                  // in the associated SafePoint will point to a Phi representing
                                  // potential non-scalar replaced objects.

  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;

public:
  SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx);

  virtual int            Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  virtual uint size_of() const { return sizeof(*this); }

  int merge_pointer_idx(JVMState* jvms) const {
    assert(jvms != nullptr, "JVMS reference is null.");
    return jvms->scloff() + _merge_pointer_idx;
  }

  int selector_idx(JVMState* jvms) const {
    assert(jvms != nullptr, "JVMS reference is null.");
    return jvms->scloff() + _merge_pointer_idx + 1;
  }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};

// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* exobj;
  uint nb_resproj;
  Node* resproj[1]; // at least one projection

  CallProjections(uint nbres) {
    fallthrough_proj      = nullptr;
    fallthrough_catchproj = nullptr;
    fallthrough_memproj   = nullptr;
    fallthrough_ioproj    = nullptr;
    catchall_catchproj    = nullptr;
    catchall_memproj      = nullptr;
    catchall_ioproj       = nullptr;
    exobj                 = nullptr;
    nb_resproj            = nbres;
    resproj[0]            = nullptr;
    for (uint i = 1; i < nb_resproj; i++) {
      resproj[i]          = nullptr;
    }
  }

};

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) const;

public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
  const char*     _name;        // Printable name, if _method is null

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
    : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(nullptr),
      _name(nullptr)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()         const { return _tf; }
  address  entry_point()       const { return _entry_point; }
  float    cnt()               const { return _cnt; }
  CallGenerator* generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type* bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual bool        cmp(const Node &n) const;
  virtual uint        size_of() const = 0;
  virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
  virtual Node*       match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
  virtual uint        ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool        guaranteed_safepoint()  { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. If the call is
  // late inlined, it also needs the full JVMState. So when cloning the
  // node the JVMState must be deep cloned. Default is to shallow clone.
  virtual bool needs_deep_clone_jvms(Compile* C) { return _generator != nullptr || C->needs_deep_clone_jvms(); }

  // Returns true if the call may modify n
  virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const;
  // Does this node have a use of n other than in debug information?
  bool                has_non_debug_use(const Node* n);
  bool                has_debug_use(const Node* n) const;
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or null if there is none.
  Node* result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple* r = tf()->range_sig();
    return (!tf()->returns_inline_type_as_fields() &&
            r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  CallProjections* extract_projections(bool separate_io_proj,
                                       bool do_asserts = true,
                                       bool allow_handlers = false) const;

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;
  bool is_call_to_multianewarray_stub() const;

  virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}

#ifndef PRODUCT
  virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
  virtual void        dump_spec(outputStream* st) const;
#endif
};
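
// Example (a hedged sketch of macro-expansion-style surgery), assuming `call`
// is about to be replaced and `igvn` is the current PhaseIterGVN; `new_mem`
// stands for whatever node now produces the call's memory effect (hypothetical):
//
//   CallProjections* projs = call->extract_projections(/*separate_io_proj=*/true);
//   if (projs->fallthrough_memproj != nullptr) {
//     igvn.replace_node(projs->fallthrough_memproj, new_mem);
//   }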


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  ciMethod* _method;               // Method being directly called
  bool    _optimized_virtual;
  bool    _override_symbolic_info; // Override symbolic call site info from bytecode
  bool    _arg_escape;             // ArgEscape in parameter list
public:
  CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method),
      _optimized_virtual(false),
      _override_symbolic_info(false),
      _arg_escape(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
  bool  override_symbolic_info() const     { return _override_symbolic_info; }
  void  set_arg_escape(bool f)             { _arg_escape = f; }
  bool  arg_escape() const                 { return _arg_escape; }
  void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
  void register_for_late_inline();

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
  virtual void  dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg);

public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
    : CallJavaNode(tf, addr, method) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    const TypeTuple *r = tf->range_sig();
    if (InlineTypeReturnedAsFields &&
        method != nullptr &&
        method->is_method_handle_intrinsic() &&
        r->cnt() > TypeFunc::Parms &&
        r->field_at(TypeFunc::Parms)->isa_oopptr() &&
        r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
      // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
    : CallJavaNode(tf, addr, nullptr) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _name = name;
  }

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  bool is_uncommon_trap() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != nullptr) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned (because it is a macro node).
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);
  }

  virtual int         Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
  virtual void        dump_compact_spec(outputStream *st) const;
#endif
};
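
// Example (illustrative): decoding an uncommon trap call site, assuming
// `call` is a CallStaticJavaNode:
//
//   if (call->is_uncommon_trap()) {
//     int request = call->uncommon_trap_request();
//     Deoptimization::DeoptReason reason =
//         Deoptimization::trap_request_reason(request);
//     // ... decide how this trap affects the optimization at hand ...
//   }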

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int vtable_index)
    : CallJavaNode(tf,addr,method), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned.
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return IncrementalInlineVirtual || CallNode::needs_deep_clone_jvms(C);
  }

  int _vtable_index;
  virtual int   Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type, JVMState* jvms = nullptr)
    : CallNode(tf, addr, adr_type, jvms)
  {
    init_class_id(Class_CallRuntime);
    _name = name;
  }

  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool        guaranteed_safepoint()  { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};
/* A pure function call: assumed not to be a safepoint, not to read or write memory, and not to
 * throw exceptions. It just takes parameters and returns a value, without side effects. It is
 * always correct to create such calls, or to remove them, if the result is not used.
 *
 * They still have a control input to allow easy lowering into other kinds of calls that require
 * a control, but this is more a technical than a moral constraint.
 *
 * Pure calls must have only control and data inputs and outputs: I/O, memory and so on must be
 * top. Nevertheless, pure calls are typically expensive math operations, so care must be taken
 * when letting the node float.
 */
class CallLeafPureNode : public CallLeafNode {
protected:
  bool is_unused() const;
  bool is_dead() const;
  TupleNode* make_tuple_of_input_state_and_top_return_values(const Compile* C) const;

public:
  CallLeafPureNode(const TypeFunc* tf, address addr, const char* name)
      : CallLeafNode(tf, addr, name, nullptr) {
    init_class_id(Class_CallLeafPure);
  }
  int Opcode() const override;
  Node* Ideal(PhaseGVN* phase, bool can_reshape) override;

  CallLeafPureNode* inline_call_leaf_pure_node(Node* control = nullptr) const;
};
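
// Illustrative note: because pure calls have no memory or I/O effects, IGVN
// may drop one whose result is never consumed. A sketch of the "unused" check
// (assumption: the result lives at the TypeFunc::Parms projection):
//
//   bool unused = call->proj_out_or_null(TypeFunc::Parms) == nullptr;
//   // if unused, Ideal() can replace the call with a tuple of its input
//   // state and top return values, detaching it from the graph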

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeafNoFP);
  }
  virtual int   Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------CallLeafVectorNode-------------------------------
// CallLeafNode but calling with vector calling convention instead.
class CallLeafVectorNode : public CallLeafNode {
private:
  uint _num_bits;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type, uint num_bits)
    : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
  {
  }
  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                        // type (maybe dynamic) of the obj.
    InitialTest,                      // slow-path test (may be constant)
    ALength,                          // array length (or TOP if none)
    ValidLengthTest,
    InlineType,                       // InlineTypeNode if this is an inline type allocation
    InitValue,                        // Init value for null-free inline type arrays
    RawInitValue,                     // Same as above but as raw machine word
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)
    fields[ValidLengthTest] = TypeInt::BOOL;
    fields[InlineType] = Type::BOTTOM;
    fields[InitValue] = TypeInstPtr::NOTNULL;
    fields[RawInitValue] = TypeX_X;

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test,
               InlineTypeNode* inline_type_node = nullptr);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool        guaranteed_safepoint()  { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseValues* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseValues* phase) {
    AllocateNode* allo = Ideal_allocation(ptr);
    return (allo == nullptr) ? nullptr : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape its thread, i.e. its
  // escape state is NoEscape or ArgEscape. InitializeNode._does_not_escape
  // is true when the allocation's escape state is NoEscape or ArgEscape.
  // If the allocation's InitializeNode is null, check the
  // AllocateNode._is_non_escaping flag instead.
  // AllocateNode._is_non_escaping is true when the escape state is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = nullptr;
    return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
  }

  // If the object doesn't escape in its <init> method and there is a memory
  // barrier inserted at the exit of the <init>, the memory barrier for the
  // new is not necessary. Invoke this method when the MemBar at the exit of
  // the initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);

  NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
};
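
// Example (a hedged sketch): recovering the allocation and its klass input
// from an oop-producing pointer, assuming `ptr` came out of new_instance:
//
//   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr);
//   if (alloc != nullptr) {
//     Node* klass_node = alloc->in(AllocateNode::KlassNode);
//     InitializeNode* init = alloc->initialization();  // may be null
//   }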

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
                    Node* initial_test, Node* count_val, Node* valid_length_test,
                    Node* init_value, Node* raw_init_value)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
    set_req(AllocateNode::ValidLengthTest, valid_length_test);
    init_req(AllocateNode::InitValue, init_value);
    init_req(AllocateNode::RawInitValue, raw_init_value);
  }
  virtual uint size_of() const { return sizeof(*this); }
  virtual int Opcode() const;

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
1240   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1241 
1242   // Pattern-match a possible usage of AllocateArrayNode.
1243   // Return null if no allocation is recognized.
1244   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1245     AllocateNode* allo = Ideal_allocation(ptr);
1246     return (allo == nullptr || !allo->is_AllocateArray())
1247            ? nullptr : allo->as_AllocateArray();
1248   }
1249 };
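
// Illustrative sketch (not part of the original header), assuming 'ptr',
// 'ary_type' and 'phase' come from the calling optimization: recovering a
// usable length node from a recognized array allocation.
//
//   if (AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr)) {
//     Node* len = alloc->make_ideal_length(ary_type, phase);
//     // 'len' is the array length, narrowed by a CastII when necessary
//   }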

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for a non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;

  static const char* _kind_names[Nested+1];

#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, nullptr, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = nullptr;
#endif
  }
  virtual int Opcode() const = 0;
  Node *   obj_node() const       { return in(TypeFunc::Parms + 0); }
  Node *   box_node() const       { return in(TypeFunc::Parms + 1); }
  Node *   fastlock_node() const  { return in(TypeFunc::Parms + 2); }
  void     set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  const char * kind_as_string() const;
  void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = nullptr) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested; set_eliminated_lock_counter(); }

  // Check that all locks/unlocks associated with an object come from balanced regions.
  // They can become unbalanced after coarsening optimization or on OSR entry.
  bool is_balanced();

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0  -  object to lock
//    1  -  a BoxLockNode
//    2  -  a FastLockNode
//
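// The inputs appear in the same order as the accessors on AbstractLockNode:
// obj_node() == in(TypeFunc::Parms + 0), box_node() == in(TypeFunc::Parms + 1),
// and fastlock_node() == in(TypeFunc::Parms + 2).
//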
class LockNode : public AbstractLockNode {
  static const TypeFunc* _lock_type_Type;
public:

  static inline const TypeFunc* lock_type() {
    assert(_lock_type_Type != nullptr, "should be initialized");
    return _lock_type_Type;
  }

  static void initialize_lock_Type() {
    assert(_lock_type_Type == nullptr, "should be called once");
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    _lock_type_Type = TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool        guaranteed_safepoint()  { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }

  bool is_nested_lock_region(); // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms;      // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(nullptr)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool        guaranteed_safepoint()  { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s;  // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return nullptr; }
#endif
};

//------------------------------PowDNode--------------------------------------
class PowDNode : public CallLeafPureNode {
  TupleNode* make_tuple_of_input_state_and_result(PhaseIterGVN* phase, Node* result, Node* control = nullptr);

public:
  PowDNode(Compile* C, Node* base, Node* exp);
  int Opcode() const override;
  const Type* Value(PhaseGVN* phase) const override;
  Node* Ideal(PhaseGVN* phase, bool can_reshape) override;

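  // Note: in the HotSpot type system a double occupies two adjacent argument
  // slots (its value plus a Type::HALF placeholder), which is why the exponent
  // input is at Parms + 2 rather than Parms + 1.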
  Node* base() const { return in(TypeFunc::Parms + 0); }
  Node* exp() const  { return in(TypeFunc::Parms + 2); }
};

#endif // SHARE_OPTO_CALLNODE_HPP