/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_CALLNODE_HPP
#define SHARE_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class NamedCounter;
class MultiNode;
class  SafePointNode;
class   CallNode;
class     CallJavaNode;
class       CallStaticJavaNode;
class       CallDynamicJavaNode;
class     CallRuntimeNode;
class       CallLeafNode;
class         CallLeafNoFPNode;
class         CallLeafVectorNode;
class     AllocateNode;
class       AllocateArrayNode;
class     AbstractLockNode;
class       LockNode;
class       UnlockNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
  virtual void  dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
  static  const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode(uint edges, Node* cntrl, Node* i_o, Node* memory, Node* frameptr, Node* retadr);
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
#endif
};


//------------------------------ForwardExceptionNode---------------------------
// Pop stack frame and jump to StubRoutines::forward_exception_entry()
class ForwardExceptionNode : public ReturnNode {
public:
  ForwardExceptionNode(Node* cntrl, Node* i_o, Node* memory, Node* frameptr, Node* retadr)
    : ReturnNode(TypeFunc::Parms, cntrl, i_o, memory, frameptr, retadr) {
  }

  virtual int Opcode() const;
};

//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
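//
// For example (an illustrative sketch): if method A inlines B, and B inlines
// C, then a safepoint inside C carries a chain of three JVMStates linked
// through caller():
//
//   (C, depth 3) --caller()--> (B, depth 2) --caller()--> (A, depth 1)
//
// of_depth(1) returns the root (A) state, and debug_depth() sums the
// debug_size() of all links in the chain.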
class JVMState : public ResourceObj {
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false       -- do not reexecute
    Reexecute_True      =  1  // true        -- reexecute the bytecode
  } ReexecuteState; // Reexecute state

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size);  // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }
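
  // A worked example with made-up numbers (illustrative only): a state with
  // 4 locals, a max expression stack of 2, 1 monitor, and no scalarized
  // objects, starting at locoff() == 5, has
  //
  //   locoff() == 5, stkoff() == 9, monoff() == 11, scloff() == endoff() == 13
  //
  // so loc_size() == 4, stk_size() == 2, and mon_size() == 2 (each monitor
  // takes a (boxNode, objNode) pair of edges).  argoff() == stkoff() + sp()
  // moves with the current stack depth.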

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute==Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
  bool              has_method() const { return _method != nullptr; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint        debug_depth()  const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors are stored as (boxNode, objNode) pairs
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }
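
  // For example (illustrative arithmetic): with monoff() == M, monitor idx
  // has its box edge at M + 2*idx and its object edge at M + 2*idx + 1,
  // since logMonitorEdges == 1; is_monitor_box() simply tests the low bit
  // of (off - monoff()).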

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode* map) { _map = map; }
  void              bind_map(SafePointNode* map); // set_map() and set_jvms() for the SafePointNode
  void              set_sp(uint sp) { _sp = sp; }
                    // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void      print_method_with_lineno(outputStream* st, bool show_name) const;
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  friend JVMState;
  friend class GraphKit;
  friend class LibraryCallKit;

  virtual bool           cmp( const Node &n ) const;
  virtual uint           size_of() const;       // Size is bigger

protected:
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
  const TypePtr*  _adr_type;  // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()
  bool            _has_ea_local_in_scope; // NoEscape or ArgEscape objects in JVM States

  void set_jvms(JVMState* s) {
    assert(s != nullptr, "cannot assign null to _jvms");
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (null):
                const TypePtr* adr_type = nullptr)
    : MultiNode( edges ),
      _jvms(jvms),
      _adr_type(adr_type),
      _has_ea_local_in_scope(false)
  {
    init_class_id(Class_SafePoint);
  }

  JVMState* jvms() const { return _jvms; }
  virtual bool needs_deep_clone_jvms(Compile* C) { return false; }
  void clone_jvms(Compile* C) {
    if (jvms() != nullptr) {
      if (needs_deep_clone_jvms(C)) {
        set_jvms(jvms()->clone_deep(C));
        jvms()->set_map_deep(this);
      } else {
        jvms()->clone_shallow(C)->bind_map(this);
      }
    }
  }

 private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

 public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;
  // Peek Operand Stacks, JVMS 2.6.2
  Node* peek_operand(uint off = 0) const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == nullptr; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != nullptr; }

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }
  void set_has_ea_local_in_scope(bool b) {
    _has_ea_local_in_scope = b;
  }
  bool has_ea_local_in_scope() const {
    return _has_ea_local_in_scope;
  }

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type*    Value(PhaseGVN* phase) const;
  virtual const Type*    bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr* adr_type() const { return _adr_type; }
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node*          Identity(PhaseGVN* phase);
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.
class SafePointScalarObjectNode: public TypeNode {
  uint _first_index;              // First input edge relative index of a SafePoint node where
                                  // states of the scalarized object fields are collected.
  uint _depth;                    // Depth of the JVM state the _first_index field refers to
  uint _n_fields;                 // Number of non-static fields of the scalarized object.

  Node* _alloc;                   // Just for debugging purposes.

  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp, Node* alloc, uint first_index, uint depth, uint n_fields);

  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != nullptr, "missed JVMS");
    return jvms->of_depth(_depth)->scloff() + _first_index;
  }
  uint n_fields()    const { return _n_fields; }

#ifdef ASSERT
  Node* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s" (the start indices of
  // their jvmstates may differ).  Produce and return a
  // SafePointScalarObjectNode that corresponds appropriately to "this" in
  // "new_call".  Assumes that "sosn_map" is a map, specific to the
  // translation of "s" to "new_call", mapping old SafePointScalarObjectNodes
  // to new ones, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarMergeNode----------------------
//
// This class represents an allocation merge that is used as debug information
// and has had at least one of its inputs scalar replaced.
//
// The required inputs of this node, except the control, are pointers to
// SafePointScalarObjectNodes that describe scalarized inputs of the original
// allocation merge. The other properties of the class are described below.
//
// _merge_pointer_idx : index in the SafePointNode's input array where the
//   description of the _allocation merge_ starts. The index is zero based and
//   relative to the SafePoint's scloff. The two entries in the SafePointNode's
//   input array starting at '_merge_pointer_idx' are Phi nodes representing:
//
//   1) The original merge Phi. During rematerialization this input will only be
//   used if the "selector Phi" (see below) indicates that the execution of the
//   Phi took the path of a non-scalarized input.
//
//   2) A "selector Phi". The output of this Phi will be '-1' if the execution
//   of the method exercised a non-scalarized input of the original Phi.
//   Otherwise, the output will be >= 0, and 'output + 1' is the index in the
//   SafePointScalarMergeNode input array where the description of the
//   scalarized object that should be used is found.
//
// As an example, consider a Phi merging 3 inputs, of which the last 2 are
// scalar replaceable.
//
//    Phi(Region, NSR, SR, SR)
//
// During scalar replacement the SR inputs will be changed to null:
//
//    Phi(Region, NSR, nullptr, nullptr)
//
// A corresponding selector Phi will be created with a configuration like this:
//
//    Phi(Region, -1, 0, 1)
//
// During execution of the compiled method, if the execution reaches a Trap, the
// output of the selector Phi will tell if we need to rematerialize one of the
// scalar replaced inputs or if we should just use the pointer returned by the
// original Phi.
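//
// That decision can be sketched as follows (illustrative pseudo-code only,
// not the actual deoptimization sources):
//
//    int sel = value_of(selector_phi);       // recovered from the frame
//    if (sel == -1) {
//      use value_of(original_merge_phi);     // the non-scalarized pointer
//    } else {
//      rematerialize(merge->in(sel + 1));    // a SafePointScalarObjectNode
//    }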

class SafePointScalarMergeNode: public TypeNode {
  int _merge_pointer_idx;         // This is the first input edge relative
                                  // index of a SafePoint node where metadata information relative
                                  // to restoring the merge is stored. The corresponding input
                                  // in the associated SafePoint will point to a Phi representing
                                  // potential non-scalar replaced objects.

  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;

public:
  SafePointScalarMergeNode(const TypeOopPtr* tp, int merge_pointer_idx);

  virtual int            Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  virtual uint size_of() const { return sizeof(*this); }

  int merge_pointer_idx(JVMState* jvms) const {
    assert(jvms != nullptr, "JVMS reference is null.");
    return jvms->scloff() + _merge_pointer_idx;
  }

  int selector_idx(JVMState* jvms) const {
    assert(jvms != nullptr, "JVMS reference is null.");
    return jvms->scloff() + _merge_pointer_idx + 1;
  }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s" (the start indices of
  // their jvmstates may differ).  Produce and return a
  // SafePointScalarMergeNode that corresponds appropriately to "this" in
  // "new_call".  Assumes that "sosn_map" is a map, specific to the
  // translation of "s" to "new_call", mapping old nodes to new ones, to
  // avoid multiple copies.
  SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};

// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};
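
// A typical use looks roughly like this (an illustrative sketch only; see
// the callers of CallNode::extract_projections() in macro expansion and
// late inlining for the real thing):
//
//   CallProjections projs;
//   call->extract_projections(&projs, true /*separate_io_proj*/);
//   // ... rewire projs.fallthrough_catchproj, projs.fallthrough_memproj,
//   //     projs.resproj, etc. to the replacement subgraph ...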

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);

public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
  const char*     _name;        // Printable name, if _method is null

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
    : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(nullptr),
      _name(nullptr)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()         const { return _tf; }
  address  entry_point()       const { return _entry_point; }
  float    cnt()               const { return _cnt; }
  CallGenerator* generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type* bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual bool        cmp(const Node &n) const;
  virtual uint        size_of() const = 0;
  virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
  virtual Node*       match(const ProjNode* proj, const Matcher* m);
  virtual uint        ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool        guaranteed_safepoint()  { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be deep cloned. Default is to shallow clone.
  virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }

  // Returns true if the call may modify n
  virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
  // Does this node have a use of n other than in debug information?
  bool                has_non_debug_use(Node* n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or null if there is none.
  Node* result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple* r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true) const;

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;

  virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}

#ifndef PRODUCT
  virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
  virtual void        dump_spec(outputStream* st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  ciMethod* _method;               // Method being direct called
  bool    _optimized_virtual;
  bool    _method_handle_invoke;
  bool    _override_symbolic_info; // Override symbolic call site info from bytecode
  bool    _arg_escape;             // ArgEscape in parameter list
public:
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _override_symbolic_info(false),
      _arg_escape(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)   { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const    { return _method_handle_invoke; }
  void  set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
  bool  override_symbolic_info() const     { return _override_symbolic_info; }
  void  set_arg_escape(bool f)             { _arg_escape = f; }
  bool  arg_escape() const                 { return _arg_escape; }
  void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
  void register_for_late_inline();

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
  virtual void  dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
    : CallJavaNode(tf, addr, method) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
    : CallJavaNode(tf, addr, nullptr) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _name = name;
  }

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  bool is_uncommon_trap() const;
  static int extract_uncommon_trap_request(const Node* call);
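  // (Illustrative note: the request code is the value packed by
  // Deoptimization::make_trap_request(reason, action); assuming the standard
  // encoding in deoptimization.hpp, it can be unpacked again with
  // Deoptimization::trap_request_reason() and
  // Deoptimization::trap_request_action().)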

  bool is_boxing_method() const {
    return is_macro() && (method() != nullptr) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned (because it is a macro node).
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);
  }

  virtual int         Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
  virtual void        dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int vtable_index)
    : CallJavaNode(tf,addr,method), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned.
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return IncrementalInlineVirtual || CallNode::needs_deep_clone_jvms(C);
  }

  int _vtable_index;
  virtual int   Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type, JVMState* jvms = nullptr)
    : CallNode(tf, addr, adr_type, jvms)
  {
    init_class_id(Class_CallRuntime);
    _name = name;
  }

  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool        guaranteed_safepoint()  { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

/* A pure function call: it is assumed not to be a safepoint, not to read or
 * write memory, and to raise no exceptions. It just takes parameters and
 * returns a value, without side effects. It is always correct to create such
 * calls, or to remove them, if the result is not used.
 *
 * They still have a control input to allow easy lowering into other kinds of
 * calls that require a control, but this is more a technical than a moral
 * constraint.
 *
 * Pure calls must have only control and data inputs and outputs: I/O, memory,
 * and so on must be top. Nevertheless, pure calls can typically be expensive
 * math operations, so care must be taken when letting the node float.
 */
class CallLeafPureNode : public CallLeafNode {
protected:
  bool is_unused() const;
  bool is_dead() const;
  TupleNode* make_tuple_of_input_state_and_top_return_values(const Compile* C) const;

public:
  CallLeafPureNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
      : CallLeafNode(tf, addr, name, adr_type) {
    init_class_id(Class_CallLeafPure);
  }
  int Opcode() const override;
  Node* Ideal(PhaseGVN* phase, bool can_reshape) override;
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeafNoFP);
  }
  virtual int   Opcode() const;
};

//------------------------------CallLeafVectorNode-------------------------------
// CallLeafNode but calling with vector calling convention instead.
class CallLeafVectorNode : public CallLeafNode {
private:
  uint _num_bits;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
                     const TypePtr* adr_type, uint num_bits)
    : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
  {
  }
  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
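//  For example (an illustrative sketch only, not code used by the compiler),
//  the two i_o projections could be told apart by checking that flag on the
//  matching out-edges:
//
//    for (DUIterator_Fast imax, i = alloc->fast_outs(imax); i < imax; i++) {
//      Node* use = alloc->fast_out(i);
//      if (use->is_Proj() && use->as_Proj()->_con == TypeFunc::I_O) {
//        bool on_exception_path = use->as_Proj()->_is_io_use;
//        // ...
//      }
//    }
//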
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                        // type (maybe dynamic) of the obj.
    InitialTest,                      // slow-path test (may be constant)
    ALength,                          // array length (or TOP if none)
    ValidLengthTest,
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)
    fields[ValidLengthTest] = TypeInt::BOOL;

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool        guaranteed_safepoint()  { return false; }

  // allocations do not modify their arguments
  virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseValues* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseValues* phase) {
    AllocateNode* allo = Ideal_allocation(ptr);
    return (allo == nullptr) ? nullptr : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape the thread, i.e. its escape
  // state is NoEscape or ArgEscape. InitializeNode._does_not_escape is true
  // when its allocation's escape state is NoEscape or ArgEscape. In case the
  // allocation's InitializeNode is null, check the
  // AllocateNode._is_non_escaping flag, which is true when the escape state
  // is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = nullptr;
    return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
  }

  // If the object doesn't escape in its <.init> method and there is a memory
  // barrier inserted at the exit of its <.init>, the memory barrier for the
  // new is not necessary. Invoke this method when a MemBar at the exit of
  // the initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);

  NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
                    Node* initial_test, Node* count_val, Node* valid_length_test)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,        count_val);
    set_req(AllocateNode::ValidLengthTest, valid_length_test);
  }
  virtual int Opcode() const;

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
    AllocateNode* allo = Ideal_allocation(ptr);
    return (allo == nullptr || !allo->is_AllocateArray())
           ? nullptr : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for a non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;

  static const char* _kind_names[Nested+1];

#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, nullptr, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = nullptr;
#endif
  }
  virtual int Opcode() const = 0;
  Node *   obj_node() const       {return in(TypeFunc::Parms + 0); }
  Node *   box_node() const       {return in(TypeFunc::Parms + 1); }
  Node *   fastlock_node() const  {return in(TypeFunc::Parms + 2); }
  void     set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  const char * kind_as_string() const;
  void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = nullptr) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested; set_eliminated_lock_counter(); }

  // Check that all locks/unlocks associated with object come from balanced regions.
  // They can become unbalanced after coarsening optimization or on OSR entry.
  bool is_balanced();

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase){ return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0  -  object to lock
//    1  -  a BoxLockNode
//    2  -  a FastLockNode
//
class LockNode : public AbstractLockNode {
  static const TypeFunc* _lock_type_Type;
public:

  static inline const TypeFunc* lock_type() {
    assert(_lock_type_Type != nullptr, "should be initialized");
    return _lock_type_Type;
  }

  static void initialize_lock_Type() {
    assert(_lock_type_Type == nullptr, "should be called once");
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    _lock_type_Type = TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool        guaranteed_safepoint()  { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }

  bool is_nested_lock_region(); // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms;      // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(nullptr)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool        guaranteed_safepoint()  { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s;  // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return nullptr; }
#endif
};
#endif // SHARE_OPTO_CALLNODE_HPP