/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_CALLNODE_HPP
#define SHARE_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class NamedCounter;
class MultiNode;
class  SafePointNode;
class   CallNode;
class     CallJavaNode;
class       CallStaticJavaNode;
class       CallDynamicJavaNode;
class     CallRuntimeNode;
class       CallLeafNode;
class         CallLeafNoFPNode;
class         CallLeafVectorNode;
class     AllocateNode;
class       AllocateArrayNode;
class     AbstractLockNode;
class       LockNode;
class       UnlockNode;
class FastLockNode;
//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
  virtual void  dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
#endif
};

//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty, DumpConfig* dc = nullptr) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};
//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
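//
// Illustrative sketch (not part of the original header): the youngest state
// of an inlined call chain is reached through a map's jvms(), and older
// states through caller(); 'map' is an assumed SafePointNode*.
//
//   for (JVMState* jvms = map->jvms(); jvms != nullptr; jvms = jvms->caller()) {
//     if (jvms->has_method()) {
//       tty->print_cr("depth %u, bci %d", jvms->depth(), jvms->bci());
//     }
//   }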
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false       -- do not reexecute
    Reexecute_True      =  1  // true        -- reexecute the bytecode
  } ReexecuteState; // Reexecute state

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size);  // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute==Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint        debug_depth()  const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }
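
  // Illustrative note (assumes logMonitorEdges == 1, as above): monitor idx
  // occupies two adjacent input edges on the map,
  //   monitor_box_offset(idx) == monoff() + 2*idx      (the BoxLock node)
  //   monitor_obj_offset(idx) == monoff() + 2*idx + 1  (the locked object)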

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode* map) { _map = map; }
  void              bind_map(SafePointNode* map); // set_map() and set_jvms() for the SafePointNode
  void              set_sp(uint sp) { _sp = sp; }
                    // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }
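
  // Illustrative sketch (hypothetical 'kit' of type GraphKit*): the reexecute
  // bit is typically toggled under the scoped PreserveReexecuteState guard
  // declared in graphKit.hpp, e.g.
  //   { PreserveReexecuteState preexecs(kit);
  //     kit->jvms()->set_should_reexecute(true);
  //     // ... emit nodes that must re-run the bytecode after deopt ...
  //   }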

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in the input array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void      print_method_with_lineno(outputStream* st, bool show_name) const;
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  friend JVMState;
  friend class GraphKit;
  friend class VMStructs;

  virtual bool           cmp( const Node &n ) const;
  virtual uint           size_of() const;       // Size is bigger

protected:
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
  const TypePtr*  _adr_type;  // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pair of nodes from calls to GraphKit::replace_in_map()
  bool            _has_ea_local_in_scope; // NoEscape or ArgEscape objects in JVM States

  void set_jvms(JVMState* s) {
    assert(s != nullptr, "assign NULL value to _jvms");
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _adr_type(adr_type),
      _has_ea_local_in_scope(false)
  {
    init_class_id(Class_SafePoint);
  }

  JVMState* jvms() const { return _jvms; }
  virtual bool needs_deep_clone_jvms(Compile* C) { return false; }
  void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      if (needs_deep_clone_jvms(C)) {
        set_jvms(jvms()->clone_deep(C));
        jvms()->set_map_deep(this);
      } else {
        jvms()->clone_shallow(C)->bind_map(this);
      }
    }
  }

 private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

 public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;
  // Peek Operand Stacks, JVMS 2.6.2
  Node* peek_operand(uint off = 0) const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }
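
  // Illustrative sketch: the per-map exception list can be walked via
  // next_exception(), e.g. (assuming 'map' is a SafePointNode* parsing map):
  //   for (SafePointNode* ex = map->next_exception(); ex != NULL;
  //        ex = ex->next_exception()) {
  //     // ... process each pending exception state ...
  //   }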

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }
  void set_has_ea_local_in_scope(bool b) {
    _has_ea_local_in_scope = b;
  }
  bool has_ea_local_in_scope() const {
    return _has_ea_local_in_scope;
  }

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type*    Value(PhaseGVN* phase) const;
  virtual const Type*    bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr* adr_type() const { return _adr_type; }
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node*          Identity(PhaseGVN* phase);
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(Node* _alloc;)

  virtual uint hash() const;  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            Node* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields()    const { return _n_fields; }

#ifdef ASSERT
  Node* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* exobj;
  uint nb_resproj;
  Node* resproj[1]; // at least one projection

  CallProjections(uint nbres) {
    fallthrough_proj      = NULL;
    fallthrough_catchproj = NULL;
    fallthrough_memproj   = NULL;
    fallthrough_ioproj    = NULL;
    catchall_catchproj    = NULL;
    catchall_memproj      = NULL;
    catchall_ioproj       = NULL;
    exobj                 = NULL;
    nb_resproj            = nbres;
    resproj[0]            = NULL;
    for (uint i = 1; i < nb_resproj; i++) {
      resproj[i]          = NULL;
    }
  }
};
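
// Illustrative sketch (assumes 'call' is a CallNode* and the usual C2
// resource-arena context): collect a call's projections before rewiring it:
//   CallProjections* projs = call->extract_projections(/*separate_io_proj=*/true);
//   if (projs->fallthrough_memproj != NULL) {
//     // ... redirect memory users of the call ...
//   }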

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseTransform* phase);

public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
  const char*     _name;        // Printable name, if _method is NULL

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
    : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL),
      _name(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()         const { return _tf; }
  const address  entry_point() const { return _entry_point; }
  const float    cnt()         const { return _cnt; }
  CallGenerator* generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type* bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual bool        cmp(const Node &n) const;
  virtual uint        size_of() const = 0;
  virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
  virtual Node*       match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
  virtual uint        ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool        guaranteed_safepoint()  { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be deep cloned. Default is to shallow clone.
  virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }

  // Returns true if the call may modify n
  virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseTransform* phase);
  // Does this node have a use of n other than in debug information?
  bool                has_non_debug_use(Node* n);
  bool                has_debug_use(Node* n);
  // Returns the unique CheckCastPP of a call, or the result projection if
  // there are several CheckCastPPs, or NULL if there is none.
  Node* result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple* r = tf()->range_sig();
    return (!tf()->returns_inline_type_as_fields() &&
            r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;

  virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}

#ifndef PRODUCT
  virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
  virtual void        dump_spec(outputStream* st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool    _optimized_virtual;
  bool    _method_handle_invoke;
  bool    _override_symbolic_info; // Override symbolic call site info from bytecode
  ciMethod* _method;               // Method being directly called
  bool    _arg_escape;             // ArgEscape in parameter list
public:
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _override_symbolic_info(false),
      _method(method),
      _arg_escape(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)   { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const    { return _method_handle_invoke; }
  void  set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
  bool  override_symbolic_info() const     { return _override_symbolic_info; }
  void  set_arg_escape(bool f)             { _arg_escape = f; }
  bool  arg_escape() const                 { return _arg_escape; }
  void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
  virtual void  dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool remove_useless_allocation(PhaseGVN *phase, Node* ctl, Node* mem, Node* unc_arg);

public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
    : CallJavaNode(tf, addr, method) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    const TypeTuple *r = tf->range_sig();
    if (InlineTypeReturnedAsFields &&
        method != NULL &&
        method->is_method_handle_intrinsic() &&
        r->cnt() > TypeFunc::Parms &&
        r->field_at(TypeFunc::Parms)->isa_oopptr() &&
        r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
      // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _name = name;
  }

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);
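
  // Illustrative sketch: a trap request packs a DeoptReason and a DeoptAction;
  // decode it with the helpers in Deoptimization (see deoptimization.hpp):
  //   int req = call->uncommon_trap_request();
  //   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
  //   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(req);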

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned (because it is a macro node).
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);
  }

  virtual int         Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
  virtual void        dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int vtable_index)
    : CallJavaNode(tf, addr, method), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned.
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return IncrementalInlineVirtual || CallNode::needs_deep_clone_jvms(C);
  }

  int _vtable_index;
  virtual int   Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type, JVMState* jvms = nullptr)
    : CallNode(tf, addr, adr_type, jvms)
  {
    init_class_id(Class_CallRuntime);
    _name = name;
  }

  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool        guaranteed_safepoint()  { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeafNoFP);
  }
  virtual int   Opcode() const;
  virtual uint match_edge(uint idx) const;
};
//------------------------------CallLeafVectorNode-------------------------------
// CallLeafNode, but using the vector calling convention instead.
class CallLeafVectorNode : public CallLeafNode {
private:
  uint _num_bits;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
                     const TypePtr* adr_type, uint num_bits)
    : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
  {
  }
  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                        // type (maybe dynamic) of the obj.
    InitialTest,                      // slow-path test (may be constant)
    ALength,                          // array length (or TOP if none)
    InlineTypeNode,                   // InlineTypeNode if this is an inline type allocation
    DefaultValue,                     // default value in case of non-flattened inline type array
    RawDefaultValue,                  // same as above but as raw machine word
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)
    fields[InlineTypeNode] = Type::BOTTOM;
    fields[DefaultValue] = TypeInstPtr::NOTNULL;
    fields[RawDefaultValue] = TypeX_X;

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }
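
  // Illustrative (a sketch; actual call sites live in graphKit.cpp): the
  // length type parameterizes the domain, e.g.
  //   AllocateNode::alloc_type(Type::TOP)     // instance allocation
  //   AllocateNode::alloc_type(TypeInt::INT)  // array allocation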

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;
  bool _larval;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test,
               InlineTypeBaseNode* inline_type_node = NULL);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool        guaranteed_safepoint()  { return false; }

  // allocations do not modify their arguments
  virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }
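
  // Illustrative sketch (assumes 'ptr' is a Node* that may come from an
  // allocation and 'phase' is the current PhaseTransform*):
  //   intptr_t offset = 0;
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);
  //   if (alloc != NULL) {
  //     Node* klass = alloc->in(KlassNode);  // same operand Ideal_klass() digs out
  //   }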

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape the thread, i.e. its escape
  // state is NoEscape or ArgEscape. InitializeNode._does_not_escape is true
  // when its allocation's escape state is NoEscape or ArgEscape. If the
  // allocation's InitializeNode is NULL, check AllocateNode._is_non_escaping
  // instead, which is true when the escape state is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = NULL;
    return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
  }

  // If the object doesn't escape in its <.init> method and there is a memory
  // barrier inserted at the exit of its <.init>, the memory barrier for the
  // new is not necessary. Invoke this method when the MemBar at the exit of
  // the initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val,
                    Node* default_value, Node* raw_default_value
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,        count_val);
    init_req(AllocateNode::DefaultValue,  default_value);
    init_req(AllocateNode::RawDefaultValue, raw_default_value);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for a non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;

  static const char* _kind_names[Nested+1];

#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *   obj_node() const       {return in(TypeFunc::Parms + 0); }
  Node *   box_node() const       {return in(TypeFunc::Parms + 1); }
  Node *   fastlock_node() const  {return in(TypeFunc::Parms + 2); }
  void     set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  const char * kind_as_string() const;
  void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = NULL) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested; set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0 -   object to lock
//    1 -   a BoxLockNode
//    2 -   a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain, range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool        guaranteed_safepoint()  { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }

  bool is_nested_lock_region(); // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms;      // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(NULL)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool        guaranteed_safepoint()  { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s;  // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return NULL; }
#endif
};
#endif // SHARE_OPTO_CALLNODE_HPP