/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_CALLNODE_HPP
#define SHARE_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class NamedCounter;
class MultiNode;
class  SafePointNode;
class   CallNode;
class     CallJavaNode;
class       CallStaticJavaNode;
class       CallDynamicJavaNode;
class     CallRuntimeNode;
class       CallLeafNode;
class         CallLeafNoFPNode;
class         CallLeafVectorNode;
class     CallNativeNode;
class     AllocateNode;
class       AllocateArrayNode;
class     AbstractLockNode;
class       LockNode;
class       UnlockNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
  virtual void  dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
  static  const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
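// For example, if method A inlines a call to B, the JVMState attached to a
// safepoint in B's inlined body has depth() == 2: it describes B's locals,
// stack and monitors at that point, and its caller() is a JVMState for A at
// the bci of the inlined call site.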
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false       -- do not reexecute
    Reexecute_True      =  1  // true        -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size);  // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
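  // For example, with sp() == 2 at a call, the two live expression stack
  // slots occupy input edges [stkoff(), stkoff() + 2), and the first
  // outgoing call argument sits just above them at argoff() == stkoff() + 2.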
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute==Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint        debug_depth()  const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
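  // For example, each monitor contributes 1 << logMonitorEdges == 2 input
  // edges, so monitor #1's BoxLock edge is at monoff() + 2 and its locked
  // object edge is at monoff() + 3.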
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode* map) { _map = map; }
  void              bind_map(SafePointNode* map); // set_map() and set_jvms() for the SafePointNode
  void              set_sp(uint sp) { _sp = sp; }
                    // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void      print_method_with_lineno(outputStream* st, bool show_name) const;
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  friend JVMState;
  friend class GraphKit;
  friend class VMStructs;

  virtual bool           cmp( const Node &n ) const;
  virtual uint           size_of() const;       // Size is bigger

protected:
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
  const TypePtr*  _adr_type;  // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pair of nodes from calls to GraphKit::replace_in_map()
  bool            _has_ea_local_in_scope; // NoEscape or ArgEscape objects in JVM States

  void set_jvms(JVMState* s) {
    assert(s != nullptr, "cannot assign a null value to _jvms");
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _adr_type(adr_type),
      _has_ea_local_in_scope(false)
  {
    init_class_id(Class_SafePoint);
  }

  JVMState* jvms() const { return _jvms; }
  virtual bool needs_deep_clone_jvms(Compile* C) { return false; }
  void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      if (needs_deep_clone_jvms(C)) {
        set_jvms(jvms()->clone_deep(C));
        jvms()->set_map_deep(this);
      } else {
        jvms()->clone_shallow(C)->bind_map(this);
      }
    }
  }

 private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }


 public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }
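
  // A sketch of the intent (cf. GraphKit::replace_in_map and
  // replacednodes.hpp): during parsing, when a node in the map is improved
  // (e.g. replaced by a stronger cast), the (initial, improved) pair is
  // recorded here so that late inlining can later apply the same
  // improvements to the graph via apply_replaced_nodes().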
  void set_has_ea_local_in_scope(bool b) {
    _has_ea_local_in_scope = b;
  }
  bool has_ea_local_in_scope() const {
    return _has_ea_local_in_scope;
  }

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type*    Value(PhaseGVN* phase) const;
  virtual const Type*    bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr* adr_type() const { return _adr_type; }
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node*          Identity(PhaseGVN* phase);
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
  virtual void           related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  bool _is_auto_box; // True if the scalarized object is an auto box.
  DEBUG_ONLY(Node* _alloc;)

  virtual uint hash() const; // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            Node* alloc,
#endif
                            uint first_index, uint n_fields, bool is_auto_box = false);
  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields()    const { return _n_fields; }

  bool is_auto_box() const { return _is_auto_box; }
#ifdef ASSERT
  Node* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};
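
// A typical use during macro expansion or late inlining (a sketch only; the
// exact rewiring depends on the transformation):
//
//   CallProjections projs;
//   call->extract_projections(&projs, /*separate_io_proj=*/true);
//   // ... redirect users of projs.fallthrough_memproj, projs.resproj,
//   //     projs.catchall_catchproj, etc. to the replacement subgraph ...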

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseTransform* phase);

public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
  const char*     _name;        // Printable name, if _method is NULL

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
    : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL),
      _name(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  address         entry_point() const { return _entry_point; }
  float           cnt()         const { return _cnt; }
  CallGenerator*  generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type* bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual bool        cmp(const Node &n) const;
  virtual uint        size_of() const = 0;
  virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
  virtual Node*       match(const ProjNode* proj, const Matcher* m);
  virtual uint        ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool        guaranteed_safepoint()  { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be deep cloned. Default is to shallow clone.
  virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }

  // Returns true if the call may modify n
  virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseTransform* phase);
  // Does this node have a use of n other than in debug information?
  bool                has_non_debug_use(Node* n);
  // Returns the unique CheckCastPP of a call, or the result projection
  // if there are several CheckCastPPs, or NULL if there is none.
  Node* result_cast();
  // Does this call return a pointer?
  bool returns_pointer() const {
    const TypeTuple* r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;

  virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}

#ifndef PRODUCT
  virtual void        dump_req(outputStream* st = tty) const;
  virtual void        dump_spec(outputStream* st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool    _optimized_virtual;
  bool    _method_handle_invoke;
  bool    _override_symbolic_info; // Override symbolic call site info from bytecode
  ciMethod* _method;               // Method being directly called
  bool    _arg_escape;             // ArgEscape in parameter list
public:
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _override_symbolic_info(false),
      _method(method),
      _arg_escape(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)   { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const    { return _method_handle_invoke; }
  void  set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
  bool  override_symbolic_info() const     { return _override_symbolic_info; }
  void  set_arg_escape(bool f)             { _arg_escape = f; }
  bool  arg_escape() const                 { return _arg_escape; }
  void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
  virtual void  dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
    : CallJavaNode(tf, addr, method) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _name = name;
  }

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);
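
  // A sketch of decoding the request code (Deoptimization's helpers,
  // declared in deoptimization.hpp, are assumed here):
  //
  //   int req = call->uncommon_trap_request();
  //   if (req != 0) {
  //     Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
  //     Deoptimization::DeoptAction action = Deoptimization::trap_request_action(req);
  //   }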

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned (because it is a macro node).
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);
  }

  virtual int         Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
  virtual void        dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int vtable_index)
    : CallJavaNode(tf, addr, method), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned.
  virtual bool needs_deep_clone_jvms(Compile* C) {
    return IncrementalInlineVirtual || CallNode::needs_deep_clone_jvms(C);
  }

  int _vtable_index;
  virtual int   Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type, JVMState* jvms = nullptr)
    : CallNode(tf, addr, adr_type, jvms)
  {
    init_class_id(Class_CallRuntime);
    _name = name;
  }

  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool        guaranteed_safepoint()  { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallNativeNode-----------------------------------
// Make a direct call into a foreign function with an arbitrary ABI.
// It acts as a safepoint only when a thread state transition is needed.
class CallNativeNode : public CallNode {
  friend class MachCallNativeNode;
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const;
  static void print_regs(const GrowableArray<VMReg>& regs, outputStream* st);
public:
  GrowableArray<VMReg> _arg_regs;
  GrowableArray<VMReg> _ret_regs;
  const int _shadow_space_bytes;
  const bool _need_transition;

  CallNativeNode(const TypeFunc* tf, address addr, const char* name,
                 const TypePtr* adr_type,
                 const GrowableArray<VMReg>& arg_regs,
                 const GrowableArray<VMReg>& ret_regs,
                 int shadow_space_bytes,
                 bool need_transition)
    : CallNode(tf, addr, adr_type), _arg_regs(arg_regs),
      _ret_regs(ret_regs), _shadow_space_bytes(shadow_space_bytes),
      _need_transition(need_transition)
  {
    init_class_id(Class_CallNative);
    _name = name;
  }
  virtual int   Opcode() const;
  virtual bool  guaranteed_safepoint()  { return _need_transition; }
  virtual Node* match(const ProjNode *proj, const Matcher *m);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// A CallLeafNode that does not use floating point, or uses it only in the
// same manner as the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeafNoFP);
  }
  virtual int   Opcode() const;
};

//------------------------------CallLeafVectorNode-------------------------------
// A CallLeafNode that uses the vector calling convention instead.
class CallLeafVectorNode : public CallLeafNode {
private:
  uint _num_bits;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
                     const TypePtr* adr_type, uint num_bits)
    : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
  {
  }
  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                        // type (maybe dynamic) of the obj.
    InitialTest,                      // slow-path test (may be constant)
    ALength,                          // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }
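
  // For illustration (typical call sites, cf. GraphKit::new_instance and
  // GraphKit::new_array): instance allocations have no length and build
  // their type with alloc_type(Type::TOP), while array allocations pass
  // TypeInt::INT so that ALength carries the (possibly bad) length.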

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool        guaranteed_safepoint()  { return false; }

  // allocations do not modify their arguments
  virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape the thread, i.e. its
  // escape state is NoEscape or ArgEscape. InitializeNode._does_not_escape
  // is true when the allocation's escape state is NoEscape or ArgEscape.
  // In case the allocation's InitializeNode is NULL, check the
  // AllocateNode._is_non_escaping flag, which is true when the escape
  // state is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = NULL;
    return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
  }

  // If the object doesn't escape in its <init> method and a memory barrier
  // is inserted at the exit of <init>, the memory barrier for the new
  // allocation is not necessary. Invoke this method when the MemBar at the
  // exit of the initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,        count_val);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for a non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;

  static const char* _kind_names[Nested+1];

#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *   obj_node() const       { return in(TypeFunc::Parms + 0); }
  Node *   box_node() const       { return in(TypeFunc::Parms + 1); }
  Node *   fastlock_node() const  { return in(TypeFunc::Parms + 2); }
  void     set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  const char * kind_as_string() const;
  void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = NULL) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested; set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }
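
  // A sketch of how a LockNode is wired up (cf. GraphKit::shared_lock, which
  // builds the real thing): the three Parms inputs receive the object being
  // locked, the BoxLockNode naming the stack slot, and the FastLockNode that
  // guards the slow path.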

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool        guaranteed_safepoint()  { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }

  bool is_nested_lock_region(); // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms;      // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(NULL)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool        guaranteed_safepoint()  { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s;  // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return NULL; }
#endif
};
#endif // SHARE_OPTO_CALLNODE_HPP