src/hotspot/share/opto/callnode.hpp

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match( const ProjNode *proj, const Matcher *m );
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93   static  const TypeTuple *osr_domain();
  94 };
  95 
  96 
  97 //------------------------------ParmNode---------------------------------------
  98 // Incoming parameters
  99 class ParmNode : public ProjNode {
 100   static const char * const names[TypeFunc::Parms+1];
 101 public:
 102   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 103     init_class_id(Class_Parm);
 104   }
 105   virtual int Opcode() const;
 106   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 107   virtual uint ideal_reg() const;
 108 #ifndef PRODUCT
 109   virtual void dump_spec(outputStream *st) const;
 110   virtual void dump_compact_spec(outputStream *st) const;
 111 #endif
 112 };
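The ParmNode/StartNode pairing above is easiest to read with the wiring in mind: each incoming parameter is a projection of the method's StartNode. A minimal sketch of that wiring, assuming a PhaseGVN is at hand; the helper name make_parm_sketch is illustrative and not part of this header:

  // Sketch: create the projection for the i-th real parameter and let GVN
  // canonicalize it (compare how Parse/GraphKit materialize parameters).
  #include "opto/callnode.hpp"
  #include "opto/phaseX.hpp"

  static Node* make_parm_sketch(PhaseGVN& gvn, StartNode* start, uint i) {
    // Slots below TypeFunc::Parms carry control, I/O, memory, frame pointer
    // and return address; real parameters start at TypeFunc::Parms.
    ParmNode* parm = new ParmNode(start, TypeFunc::Parms + i);
    return gvn.transform(parm);
  }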
 113 

 625     assert(jvms != nullptr, "JVMS reference is null.");
 626     return jvms->scloff() + _merge_pointer_idx + 1;
 627   }
 628 
 629   // Assumes that "this" is an argument to a safepoint node "s", and that
 630   // "new_call" is being created to correspond to "s".  But the difference
 631   // between the start index of the jvmstates of "new_call" and "s" is
 632   // "jvms_adj".  Produce and return a SafePointScalarMergeNode that
 633   // corresponds appropriately to "this" in "new_call".  Assumes that
 634   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 635   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 636   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 637 
 638 #ifndef PRODUCT
 639   virtual void              dump_spec(outputStream *st) const;
 640 #endif
 641 };
 642 
 643 // Simple container for the outgoing projections of a call.  Useful
 644 // for serious surgery on calls.
 645 class CallProjections : public StackObj {
 646 public:
 647   Node* fallthrough_proj;
 648   Node* fallthrough_catchproj;
 649   Node* fallthrough_memproj;
 650   Node* fallthrough_ioproj;
 651   Node* catchall_catchproj;
 652   Node* catchall_memproj;
 653   Node* catchall_ioproj;
 654   Node* resproj;
 655   Node* exobj;
 656 };
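For context on how this container is consumed: macro expansion and late inlining fill one of these stack objects via CallNode::extract_projections (declared further down) and then rewire each projection's users individually. A hedged sketch against the interface shown above; the replacement logic is elided:

  // Sketch: collect a call's projections before replacing the call.
  #include "opto/callnode.hpp"

  static void rewire_call_sketch(CallNode* call) {
    CallProjections projs;   // StackObj, lives on the caller's stack
    call->extract_projections(&projs, true /*separate_io_proj*/);
    if (projs.fallthrough_catchproj != nullptr) {
      // ... move control users of the normal path here ...
    }
    if (projs.resproj != nullptr) {
      // ... move uses of the call's result here ...
    }
  }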
 657 
 658 class CallGenerator;
 659 
 660 //------------------------------CallNode---------------------------------------
 661 // Call nodes now subsume the function of debug nodes at callsites, so they
 662 // contain the functionality of a full scope chain of debug nodes.
 663 class CallNode : public SafePointNode {
 664   friend class VMStructs;
 665 
 666 protected:
 667   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 668 
 669 public:
 670   const TypeFunc* _tf;          // Function type
 671   address         _entry_point; // Address of method being called
 672   float           _cnt;         // Estimate of number of times called
 673   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 674   const char*     _name;        // Printable name, if _method is null
 675 
 676   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 677     : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
 678       _tf(tf),
 679       _entry_point(addr),
 680       _cnt(COUNT_UNKNOWN),
 681       _generator(nullptr),
 682       _name(nullptr)
 683   {
 684     init_class_id(Class_Call);
 685   }
 686 
 687   const TypeFunc* tf()         const { return _tf; }
 688   address  entry_point()       const { return _entry_point; }
 689   float    cnt()               const { return _cnt; }
 690   CallGenerator* generator()   const { return _generator; }
 691 
 692   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 693   void set_entry_point(address p)       { _entry_point = p; }
 694   void set_cnt(float c)                 { _cnt = c; }
 695   void set_generator(CallGenerator* cg) { _generator = cg; }
 696 
 697   virtual const Type* bottom_type() const;
 698   virtual const Type* Value(PhaseGVN* phase) const;
 699   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 700   virtual Node* Identity(PhaseGVN* phase) { return this; }
 701   virtual bool        cmp(const Node &n) const;
 702   virtual uint        size_of() const = 0;
 703   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 704   virtual Node*       match(const ProjNode* proj, const Matcher* m);
 705   virtual uint        ideal_reg() const { return NotAMachineReg; }
 706   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 707   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 708   virtual bool        guaranteed_safepoint()  { return true; }
 709   // For macro nodes, the JVMState gets modified during expansion. If calls
 710   // use MachConstantBase, it gets modified during matching. So when cloning
 711   // the node the JVMState must be deep cloned. Default is to shallow clone.
 712   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 713 
 714   // Returns true if the call may modify n
 715   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 716   // Does this node have a use of n other than in debug information?
 717   bool                has_non_debug_use(Node* n);

 718   // Returns the unique CheckCastPP of a call,
 719   // or the result projection if there are several CheckCastPPs,
 720   // or null if there is none.
 721   Node* result_cast();
 722   // Does this node return a pointer?
 723   bool returns_pointer() const {
 724     const TypeTuple* r = tf()->range();
 725     return (r->cnt() > TypeFunc::Parms &&

 726             r->field_at(TypeFunc::Parms)->isa_ptr());
 727   }
 728 
 729   // Collect all the interesting edges from a call for use in
 730   // replacing the call by something else.  Used by macro expansion
 731   // and the late inlining support.
 732   void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);
 733 
 734   virtual uint match_edge(uint idx) const;
 735 
 736   bool is_call_to_arraycopystub() const;
 737 
 738   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 739 
 740 #ifndef PRODUCT
 741   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 742   virtual void        dump_spec(outputStream* st) const;
 743 #endif
 744 };
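To make the result-handling helpers above concrete: a pass that wants the value produced by a call typically guards on returns_pointer() and then asks result_cast() for the node representing the result. A minimal sketch with an illustrative function name:

  // Sketch: locate the node standing for a call's pointer result.
  #include "opto/callnode.hpp"

  static Node* call_result_sketch(CallNode* call) {
    if (!call->returns_pointer()) {
      return nullptr;   // void or primitive return, nothing to cast
    }
    // Unique CheckCastPP, the result projection if there are several
    // CheckCastPPs, or null if there is none (see the comment above).
    return call->result_cast();
  }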
 745 
 746 
 747 //------------------------------CallJavaNode-----------------------------------
 748 // Make a static or dynamic subroutine call node using Java calling
 749 // convention.  (The "Java" calling convention is the compiler's calling
 750 // convention, as opposed to the interpreter's or that of native C.)
 751 class CallJavaNode : public CallNode {
 752   friend class VMStructs;

 782   bool  override_symbolic_info() const     { return _override_symbolic_info; }
 783   void  set_arg_escape(bool f)             { _arg_escape = f; }
 784   bool  arg_escape() const                 { return _arg_escape; }
 785   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 786 
 787   DEBUG_ONLY( bool validate_symbolic_info() const; )
 788 
 789 #ifndef PRODUCT
 790   virtual void  dump_spec(outputStream *st) const;
 791   virtual void  dump_compact_spec(outputStream *st) const;
 792 #endif
 793 };
 794 
 795 //------------------------------CallStaticJavaNode-----------------------------
 796 // Make a direct subroutine call using Java calling convention (for static
 797 // calls and optimized virtual calls, plus calls to wrappers for run-time
 798 // routines); generates static stub.
 799 class CallStaticJavaNode : public CallJavaNode {
 800   virtual bool cmp( const Node &n ) const;
 801   virtual uint size_of() const; // Size is bigger



 802 public:
 803   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 804     : CallJavaNode(tf, addr, method) {
 805     init_class_id(Class_CallStaticJava);
 806     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 807       init_flags(Flag_is_macro);
 808       C->add_macro_node(this);
 809     }
 810   }
 811   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 812     : CallJavaNode(tf, addr, nullptr) {
 813     init_class_id(Class_CallStaticJava);
 814     // This node calls a runtime stub, which often has narrow memory effects.
 815     _adr_type = adr_type;
 816     _name = name;
 817   }
 818 
 819   // If this is an uncommon trap, return the request code, else zero.
 820   int uncommon_trap_request() const;
 821   bool is_uncommon_trap() const;
 822   static int extract_uncommon_trap_request(const Node* call);
 823 
 824   bool is_boxing_method() const {
 825     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 826   }
 827   // Late inlining modifies the JVMState, so we need to deep clone it
 828   // when the call node is cloned (because it is a macro node).
 829   virtual bool needs_deep_clone_jvms(Compile* C) {

 900   }
 901   virtual int   Opcode() const;
 902   virtual bool        guaranteed_safepoint()  { return false; }
 903 #ifndef PRODUCT
 904   virtual void  dump_spec(outputStream *st) const;
 905 #endif
 906 };
 907 
 908 //------------------------------CallLeafNoFPNode-------------------------------
 909 // CallLeafNode, not using floating point or using it in the same manner as
 910 // the generated code
 911 class CallLeafNoFPNode : public CallLeafNode {
 912 public:
 913   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 914                    const TypePtr* adr_type)
 915     : CallLeafNode(tf, addr, name, adr_type)
 916   {
 917     init_class_id(Class_CallLeafNoFP);
 918   }
 919   virtual int   Opcode() const;

 920 };
 921 
 922 //------------------------------CallLeafVectorNode-------------------------------
 923 // CallLeafNode but calling with vector calling convention instead.
 924 class CallLeafVectorNode : public CallLeafNode {
 925 private:
 926   uint _num_bits;
 927 protected:
 928   virtual bool cmp( const Node &n ) const;
 929   virtual uint size_of() const; // Size is bigger
 930 public:
 931   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 932                    const TypePtr* adr_type, uint num_bits)
 933     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 934   {
 935   }
 936   virtual int   Opcode() const;
 937   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 938 };
 939 

 942 // High-level memory allocation
 943 //
 944 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 945 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 946 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 947 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 948 //  order to differentiate the uses of the projection on the normal control path from
 949 //  those on the exception return path.
 950 //
 951 class AllocateNode : public CallNode {
 952 public:
 953   enum {
 954     // Output:
 955     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
 956     // Inputs:
 957     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
 958     KlassNode,                        // type (maybe dynamic) of the obj.
 959     InitialTest,                      // slow-path test (may be constant)
 960     ALength,                          // array length (or TOP if none)
 961     ValidLengthTest,



 962     ParmLimit
 963   };
 964 
 965   static const TypeFunc* alloc_type(const Type* t) {
 966     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
 967     fields[AllocSize]   = TypeInt::POS;
 968     fields[KlassNode]   = TypeInstPtr::NOTNULL;
 969     fields[InitialTest] = TypeInt::BOOL;
 970     fields[ALength]     = t;  // length (can be a bad length)
 971     fields[ValidLengthTest] = TypeInt::BOOL;



 972 
 973     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
 974 
 975     // create result type (range)
 976     fields = TypeTuple::fields(1);
 977     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
 978 
 979     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
 980 
 981     return TypeFunc::make(domain, range);
 982   }
 983 
 984   // Result of Escape Analysis
 985   bool _is_scalar_replaceable;
 986   bool _is_non_escaping;
 987   // True when MemBar for new is redundant with MemBar at initializer exit
 988   bool _is_allocation_MemBar_redundant;

 989 
 990   virtual uint size_of() const; // Size is bigger
 991   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
 992                Node *size, Node *klass_node, Node *initial_test);

 993   // Expansion modifies the JVMState, so we need to deep clone it
 994   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
 995   virtual int Opcode() const;
 996   virtual uint ideal_reg() const { return Op_RegP; }
 997   virtual bool        guaranteed_safepoint()  { return false; }
 998 
 999   // allocations do not modify their arguments
1000   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1001 
1002   // Pattern-match a possible usage of AllocateNode.
1003   // Return null if no allocation is recognized.
1004   // The operand is the pointer produced by the (possible) allocation.
1005   // It must be a projection of the Allocate or its subsequent CastPP.
1006   // (Note:  This function is defined in file graphKit.cpp, near
1007   // GraphKit::new_instance/new_array, whose output it recognizes.)
1008   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1009   static AllocateNode* Ideal_allocation(Node* ptr);
1010 
1011   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1012   // an offset, which is reported back to the caller.

1037 
1038   // Return true if the allocation doesn't escape the thread; its escape
1039   // state needs to be NoEscape or ArgEscape. InitializeNode._does_not_escape
1040   // is true when its allocation's escape state is NoEscape or ArgEscape.
1041   // In case the allocation's InitializeNode is null, check the
1042   // AllocateNode._is_non_escaping flag.
1043   // AllocateNode._is_non_escaping is true when its escape state is
1044   // NoEscape.
1045   bool does_not_escape_thread() {
1046     InitializeNode* init = nullptr;
1047     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1048   }
1049 
1050   // If the object doesn't escape in its <.init> method and there is a memory
1051   // barrier inserted at the exit of its <.init>, the memory barrier for new is
1052   // not necessary. Invoke this method when the MemBar at the exit of the
1053   // initializer post-dominates the allocation node.
1054   void compute_MemBar_redundancy(ciMethod* initializer);
1055   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1056 
1057   Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
1058 };
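As an example of the pattern-matching entry point above, an optimization that sees a pointer and wants to know whether it comes from a fresh, non-escaping allocation can combine Ideal_allocation with does_not_escape_thread(). A hedged sketch:

  // Sketch: recognize an allocation behind a pointer and query its
  // escape state (Ideal_allocation itself is defined in graphKit.cpp).
  #include "opto/callnode.hpp"

  static bool is_non_escaping_allocation_sketch(Node* ptr) {
    AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr);
    if (alloc == nullptr) {
      return false;                       // not a recognized allocation
    }
    // True when escape analysis proved NoEscape/ArgEscape for this object.
    return alloc->does_not_escape_thread();
  }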
1059 
1060 //------------------------------AllocateArray---------------------------------
1061 //
1062 // High-level array allocation
1063 //
1064 class AllocateArrayNode : public AllocateNode {



1065 public:
1066   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1067                     Node* initial_test, Node* count_val, Node* valid_length_test)

1068     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1069                    initial_test)
1070   {
1071     init_class_id(Class_AllocateArray);
1072     set_req(AllocateNode::ALength,        count_val);
1073     set_req(AllocateNode::ValidLengthTest, valid_length_test);



1074   }

1075   virtual int Opcode() const;
1076 
1077   // Dig the length operand out of an array allocation site.
1078   Node* Ideal_length() {
1079     return in(AllocateNode::ALength);
1080   }
1081 
1082   // Dig the length operand out of an array allocation site and narrow the
1083   // type with a CastII, if necessary
1084   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1085 
1086   // Pattern-match a possible usage of AllocateArrayNode.
1087   // Return null if no allocation is recognized.
1088   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1089     AllocateNode* allo = Ideal_allocation(ptr);
1090     return (allo == nullptr || !allo->is_AllocateArray())
1091            ? nullptr : allo->as_AllocateArray();
1092   }



1093 };
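Building on the allocation matcher, array-specific callers narrow the match with Ideal_array_allocation and can then recover the length operand, letting make_ideal_length add a CastII when a tighter type is known. A sketch, assuming a PhaseValues* is available:

  // Sketch: dig the length out of an array allocation site.
  #include "opto/callnode.hpp"

  static Node* array_length_sketch(Node* ary_ptr, const TypeOopPtr* ary_type,
                                   PhaseValues* phase) {
    AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ary_ptr);
    if (alloc == nullptr) {
      return nullptr;                     // not an array allocation
    }
    // Raw length input, or a CastII narrowed to ary_type's size bounds.
    return alloc->make_ideal_length(ary_type, phase, /*can_create=*/ false);
  }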
1094 
1095 //------------------------------AbstractLockNode-----------------------------------
1096 class AbstractLockNode: public CallNode {
1097 private:
1098   enum {
1099     Regular = 0,  // Normal lock
1100     NonEscObj,    // Lock is used for non escaping object
1101     Coarsened,    // Lock was coarsened
1102     Nested        // Nested lock
1103   } _kind;
1104 
1105   static const char* _kind_names[Nested+1];
1106 
1107 #ifndef PRODUCT
1108   NamedCounter* _counter;
1109 #endif
1110 
1111 protected:
1112   // helper functions for lock elimination

1173 //    0  -  object to lock
1174 //    1 -   a BoxLockNode
1175 //    2 -   a FastLockNode
1176 //
1177 class LockNode : public AbstractLockNode {
1178 public:
1179 
1180   static const TypeFunc *lock_type() {
1181     // create input type (domain)
1182     const Type **fields = TypeTuple::fields(3);
1183     fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
1184     fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
1185     fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
1186     const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
1187 
1188     // create result type (range)
1189     fields = TypeTuple::fields(0);
1190 
1191     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1192 
1193     return TypeFunc::make(domain,range);
1194   }
1195 
1196   virtual int Opcode() const;
1197   virtual uint size_of() const; // Size is bigger
1198   LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
1199     init_class_id(Class_Lock);
1200     init_flags(Flag_is_macro);
1201     C->add_macro_node(this);
1202   }
1203   virtual bool        guaranteed_safepoint()  { return false; }
1204 
1205   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1206   // Expansion modifies the JVMState, so we need to deep clone it
1207   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1208 
1209   bool is_nested_lock_region(); // Is this Lock nested?
1210   bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
1211 };
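lock_type() above follows the same signature-building recipe used throughout this file: fill a TypeTuple for the domain, another for the range, and combine them with TypeFunc::make. A self-contained, hedged variation for a made-up leaf call taking one object and returning nothing (the function is purely illustrative):

  // Sketch: the TypeTuple/TypeFunc recipe of lock_type()/alloc_type(),
  // applied to a hypothetical single-argument, void leaf call.
  #include "opto/type.hpp"

  static const TypeFunc* example_leaf_type_sketch() {
    // domain: one object argument after the fixed Parms slots
    const Type** fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
    const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+1, fields);

    // range: no return value
    fields = TypeTuple::fields(0);
    const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }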
1212 
1213 //------------------------------Unlock---------------------------------------

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;

  93 };
  94 
  95 
  96 //------------------------------ParmNode---------------------------------------
  97 // Incoming parameters
  98 class ParmNode : public ProjNode {
  99   static const char * const names[TypeFunc::Parms+1];
 100 public:
 101   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 102     init_class_id(Class_Parm);
 103   }
 104   virtual int Opcode() const;
 105   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 106   virtual uint ideal_reg() const;
 107 #ifndef PRODUCT
 108   virtual void dump_spec(outputStream *st) const;
 109   virtual void dump_compact_spec(outputStream *st) const;
 110 #endif
 111 };
 112 

 624     assert(jvms != nullptr, "JVMS reference is null.");
 625     return jvms->scloff() + _merge_pointer_idx + 1;
 626   }
 627 
 628   // Assumes that "this" is an argument to a safepoint node "s", and that
 629   // "new_call" is being created to correspond to "s".  But the difference
 630   // between the start index of the jvmstates of "new_call" and "s" is
 631   // "jvms_adj".  Produce and return a SafePointScalarMergeNode that
 632   // corresponds appropriately to "this" in "new_call".  Assumes that
 633   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 634   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 635   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 636 
 637 #ifndef PRODUCT
 638   virtual void              dump_spec(outputStream *st) const;
 639 #endif
 640 };
 641 
 642 // Simple container for the outgoing projections of a call.  Useful
 643 // for serious surgery on calls.
 644 class CallProjections {
 645 public:
 646   Node* fallthrough_proj;
 647   Node* fallthrough_catchproj;
 648   Node* fallthrough_memproj;
 649   Node* fallthrough_ioproj;
 650   Node* catchall_catchproj;
 651   Node* catchall_memproj;
 652   Node* catchall_ioproj;

 653   Node* exobj;
 654   uint nb_resproj;
 655   Node* resproj[1]; // at least one projection
 656 
 657   CallProjections(uint nbres) {
 658     fallthrough_proj      = nullptr;
 659     fallthrough_catchproj = nullptr;
 660     fallthrough_memproj   = nullptr;
 661     fallthrough_ioproj    = nullptr;
 662     catchall_catchproj    = nullptr;
 663     catchall_memproj      = nullptr;
 664     catchall_ioproj       = nullptr;
 665     exobj                 = nullptr;
 666     nb_resproj            = nbres;
 667     resproj[0]            = nullptr;
 668     for (uint i = 1; i < nb_resproj; i++) {
 669       resproj[i]          = nullptr;
 670     }
 671   }
 672 
 673 };
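The new layout replaces the single resproj field with a trailing array of nb_resproj entries (at least one), apparently so that a call whose inline-type result is returned as several fields can expose one projection per field. A hedged sketch of how a consumer would walk the results, assuming a fully constructed object:

  // Sketch: visit every result projection in the variable-length layout.
  #include "opto/callnode.hpp"

  static void visit_results_sketch(CallProjections* projs) {
    for (uint i = 0; i < projs->nb_resproj; i++) {
      Node* res = projs->resproj[i];
      if (res != nullptr) {
        // ... process one result projection ...
      }
    }
  }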
 674 
 675 class CallGenerator;
 676 
 677 //------------------------------CallNode---------------------------------------
 678 // Call nodes now subsume the function of debug nodes at callsites, so they
 679 // contain the functionality of a full scope chain of debug nodes.
 680 class CallNode : public SafePointNode {
 681   friend class VMStructs;
 682 
 683 protected:
 684   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 685 
 686 public:
 687   const TypeFunc* _tf;          // Function type
 688   address         _entry_point; // Address of method being called
 689   float           _cnt;         // Estimate of number of times called
 690   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 691   const char*     _name;        // Printable name, if _method is null
 692 
 693   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 694     : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
 695       _tf(tf),
 696       _entry_point(addr),
 697       _cnt(COUNT_UNKNOWN),
 698       _generator(nullptr),
 699       _name(nullptr)
 700   {
 701     init_class_id(Class_Call);
 702   }
 703 
 704   const TypeFunc* tf()         const { return _tf; }
 705   address  entry_point()       const { return _entry_point; }
 706   float    cnt()               const { return _cnt; }
 707   CallGenerator* generator()   const { return _generator; }
 708 
 709   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 710   void set_entry_point(address p)       { _entry_point = p; }
 711   void set_cnt(float c)                 { _cnt = c; }
 712   void set_generator(CallGenerator* cg) { _generator = cg; }
 713 
 714   virtual const Type* bottom_type() const;
 715   virtual const Type* Value(PhaseGVN* phase) const;
 716   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 717   virtual Node* Identity(PhaseGVN* phase) { return this; }
 718   virtual bool        cmp(const Node &n) const;
 719   virtual uint        size_of() const = 0;
 720   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 721   virtual Node*       match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
 722   virtual uint        ideal_reg() const { return NotAMachineReg; }
 723   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 724   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 725   virtual bool        guaranteed_safepoint()  { return true; }
 726   // For macro nodes, the JVMState gets modified during expansion. If calls
 727   // use MachConstantBase, it gets modified during matching. So when cloning
 728   // the node the JVMState must be deep cloned. Default is to shallow clone.
 729   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 730 
 731   // Returns true if the call may modify n
 732   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 733   // Does this node have a use of n other than in debug information?
 734   bool                has_non_debug_use(Node* n);
 735   bool                has_debug_use(Node* n);
 736   // Returns the unique CheckCastPP of a call,
 737   // or the result projection if there are several CheckCastPPs,
 738   // or null if there is none.
 739   Node* result_cast();
 740   // Does this node return a pointer?
 741   bool returns_pointer() const {
 742     const TypeTuple* r = tf()->range_sig();
 743     return (!tf()->returns_inline_type_as_fields() &&
 744             r->cnt() > TypeFunc::Parms &&
 745             r->field_at(TypeFunc::Parms)->isa_ptr());
 746   }
 747 
 748   // Collect all the interesting edges from a call for use in
 749   // replacing the call by something else.  Used by macro expansion
 750   // and the late inlining support.
 751   CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);
 752 
 753   virtual uint match_edge(uint idx) const;
 754 
 755   bool is_call_to_arraycopystub() const;
 756 
 757   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 758 
 759 #ifndef PRODUCT
 760   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 761   virtual void        dump_spec(outputStream* st) const;
 762 #endif
 763 };
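Note that extract_projections changes shape in this version: instead of filling a caller-provided StackObj it now returns a CallProjections* sized for the call's result count. A hedged sketch of the updated call site, mirroring the earlier example:

  // Sketch: call-site update for the new extract_projections signature.
  // Old: CallProjections projs; call->extract_projections(&projs, true);
  #include "opto/callnode.hpp"

  static void rewire_call_new_sketch(CallNode* call) {
    CallProjections* projs = call->extract_projections(true /*separate_io_proj*/);
    if (projs->fallthrough_memproj != nullptr) {
      // ... rewire memory users of the normal path ...
    }
  }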
 764 
 765 
 766 //------------------------------CallJavaNode-----------------------------------
 767 // Make a static or dynamic subroutine call node using Java calling
 768 // convention.  (The "Java" calling convention is the compiler's calling
 769 // convention, as opposed to the interpreter's or that of native C.)
 770 class CallJavaNode : public CallNode {
 771   friend class VMStructs;

 801   bool  override_symbolic_info() const     { return _override_symbolic_info; }
 802   void  set_arg_escape(bool f)             { _arg_escape = f; }
 803   bool  arg_escape() const                 { return _arg_escape; }
 804   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 805 
 806   DEBUG_ONLY( bool validate_symbolic_info() const; )
 807 
 808 #ifndef PRODUCT
 809   virtual void  dump_spec(outputStream *st) const;
 810   virtual void  dump_compact_spec(outputStream *st) const;
 811 #endif
 812 };
 813 
 814 //------------------------------CallStaticJavaNode-----------------------------
 815 // Make a direct subroutine call using Java calling convention (for static
 816 // calls and optimized virtual calls, plus calls to wrappers for run-time
 817 // routines); generates static stub.
 818 class CallStaticJavaNode : public CallJavaNode {
 819   virtual bool cmp( const Node &n ) const;
 820   virtual uint size_of() const; // Size is bigger
 821 
 822   bool remove_useless_allocation(PhaseGVN *phase, Node* ctl, Node* mem, Node* unc_arg);
 823 
 824 public:
 825   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 826     : CallJavaNode(tf, addr, method) {
 827     init_class_id(Class_CallStaticJava);
 828     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 829       init_flags(Flag_is_macro);
 830       C->add_macro_node(this);
 831     }
 832     const TypeTuple *r = tf->range_sig();
 833     if (InlineTypeReturnedAsFields &&
 834         method != nullptr &&
 835         method->is_method_handle_intrinsic() &&
 836         r->cnt() > TypeFunc::Parms &&
 837         r->field_at(TypeFunc::Parms)->isa_oopptr() &&
 838         r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
 839       // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
 840       init_flags(Flag_is_macro);
 841       C->add_macro_node(this);
 842     }
 843   }
 844   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 845     : CallJavaNode(tf, addr, nullptr) {
 846     init_class_id(Class_CallStaticJava);
 847     // This node calls a runtime stub, which often has narrow memory effects.
 848     _adr_type = adr_type;
 849     _name = name;
 850   }
 851 
 852   // If this is an uncommon trap, return the request code, else zero.
 853   int uncommon_trap_request() const;
 854   bool is_uncommon_trap() const;
 855   static int extract_uncommon_trap_request(const Node* call);
 856 
 857   bool is_boxing_method() const {
 858     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 859   }
 860   // Late inlining modifies the JVMState, so we need to deep clone it
 861   // when the call node is cloned (because it is a macro node).
 862   virtual bool needs_deep_clone_jvms(Compile* C) {

 933   }
 934   virtual int   Opcode() const;
 935   virtual bool        guaranteed_safepoint()  { return false; }
 936 #ifndef PRODUCT
 937   virtual void  dump_spec(outputStream *st) const;
 938 #endif
 939 };
 940 
 941 //------------------------------CallLeafNoFPNode-------------------------------
 942 // CallLeafNode, not using floating point or using it in the same manner as
 943 // the generated code
 944 class CallLeafNoFPNode : public CallLeafNode {
 945 public:
 946   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 947                    const TypePtr* adr_type)
 948     : CallLeafNode(tf, addr, name, adr_type)
 949   {
 950     init_class_id(Class_CallLeafNoFP);
 951   }
 952   virtual int   Opcode() const;
 953   virtual uint match_edge(uint idx) const;
 954 };
 955 
 956 //------------------------------CallLeafVectorNode-------------------------------
 957 // CallLeafNode but calling with vector calling convention instead.
 958 class CallLeafVectorNode : public CallLeafNode {
 959 private:
 960   uint _num_bits;
 961 protected:
 962   virtual bool cmp( const Node &n ) const;
 963   virtual uint size_of() const; // Size is bigger
 964 public:
 965   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 966                    const TypePtr* adr_type, uint num_bits)
 967     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 968   {
 969   }
 970   virtual int   Opcode() const;
 971   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 972 };
 973 

 976 // High-level memory allocation
 977 //
 978 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 979 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 980 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 981 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 982 //  order to differentiate the uses of the projection on the normal control path from
 983 //  those on the exception return path.
 984 //
 985 class AllocateNode : public CallNode {
 986 public:
 987   enum {
 988     // Output:
 989     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
 990     // Inputs:
 991     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
 992     KlassNode,                        // type (maybe dynamic) of the obj.
 993     InitialTest,                      // slow-path test (may be constant)
 994     ALength,                          // array length (or TOP if none)
 995     ValidLengthTest,
 996     InlineType,                       // InlineTypeNode if this is an inline type allocation
 997     DefaultValue,                     // default value in case of non-flat inline type array
 998     RawDefaultValue,                  // same as above but as raw machine word
 999     ParmLimit
1000   };
1001 
1002   static const TypeFunc* alloc_type(const Type* t) {
1003     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1004     fields[AllocSize]   = TypeInt::POS;
1005     fields[KlassNode]   = TypeInstPtr::NOTNULL;
1006     fields[InitialTest] = TypeInt::BOOL;
1007     fields[ALength]     = t;  // length (can be a bad length)
1008     fields[ValidLengthTest] = TypeInt::BOOL;
1009     fields[InlineType] = Type::BOTTOM;
1010     fields[DefaultValue] = TypeInstPtr::NOTNULL;
1011     fields[RawDefaultValue] = TypeX_X;
1012 
1013     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1014 
1015     // create result type (range)
1016     fields = TypeTuple::fields(1);
1017     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1018 
1019     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1020 
1021     return TypeFunc::make(domain, range);
1022   }
1023 
1024   // Result of Escape Analysis
1025   bool _is_scalar_replaceable;
1026   bool _is_non_escaping;
1027   // True when MemBar for new is redundant with MemBar at initializer exit
1028   bool _is_allocation_MemBar_redundant;
1029   bool _larval;
1030 
1031   virtual uint size_of() const; // Size is bigger
1032   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1033                Node *size, Node *klass_node, Node *initial_test,
1034                InlineTypeNode* inline_type_node = nullptr);
1035   // Expansion modifies the JVMState, so we need to deep clone it
1036   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1037   virtual int Opcode() const;
1038   virtual uint ideal_reg() const { return Op_RegP; }
1039   virtual bool        guaranteed_safepoint()  { return false; }
1040 
1041   // allocations do not modify their arguments
1042   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1043 
1044   // Pattern-match a possible usage of AllocateNode.
1045   // Return null if no allocation is recognized.
1046   // The operand is the pointer produced by the (possible) allocation.
1047   // It must be a projection of the Allocate or its subsequent CastPP.
1048   // (Note:  This function is defined in file graphKit.cpp, near
1049   // GraphKit::new_instance/new_array, whose output it recognizes.)
1050   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1051   static AllocateNode* Ideal_allocation(Node* ptr);
1052 
1053   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1054   // an offset, which is reported back to the caller.

1079 
1080   // Return true if the allocation doesn't escape the thread; its escape
1081   // state needs to be NoEscape or ArgEscape. InitializeNode._does_not_escape
1082   // is true when its allocation's escape state is NoEscape or ArgEscape.
1083   // In case the allocation's InitializeNode is null, check the
1084   // AllocateNode._is_non_escaping flag.
1085   // AllocateNode._is_non_escaping is true when its escape state is
1086   // NoEscape.
1087   bool does_not_escape_thread() {
1088     InitializeNode* init = nullptr;
1089     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1090   }
1091 
1092   // If the object doesn't escape in its <.init> method and there is a memory
1093   // barrier inserted at the exit of its <.init>, the memory barrier for new is
1094   // not necessary. Invoke this method when the MemBar at the exit of the
1095   // initializer post-dominates the allocation node.
1096   void compute_MemBar_redundancy(ciMethod* initializer);
1097   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1098 
1099   Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
1100 };
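The enlarged input list (InlineType, DefaultValue, RawDefaultValue) appears to be wired up only for inline-type-aware allocations, so readers of those slots should not assume they are always present. A hedged sketch of fetching one of the extra inputs defensively during expansion, assuming only the enum layout shown above:

  // Sketch: fetch the Valhalla-specific DefaultValue input if present.
  #include "opto/callnode.hpp"

  static Node* default_value_or_null_sketch(AllocateNode* alloc) {
    if (alloc->req() <= AllocateNode::DefaultValue) {
      return nullptr;             // slot was never reserved for this node
    }
    return alloc->in(AllocateNode::DefaultValue);
  }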
1101 
1102 //------------------------------AllocateArray---------------------------------
1103 //
1104 // High-level array allocation
1105 //
1106 class AllocateArrayNode : public AllocateNode {
1107 private:
1108   bool _null_free;
1109 
1110 public:
1111   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1112                     Node* initial_test, Node* count_val, Node* valid_length_test,
1113                     Node* default_value, Node* raw_default_value)
1114     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1115                    initial_test)
1116   {
1117     init_class_id(Class_AllocateArray);
1118     set_req(AllocateNode::ALength,        count_val);
1119     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1120     init_req(AllocateNode::DefaultValue,  default_value);
1121     init_req(AllocateNode::RawDefaultValue, raw_default_value);
1122     _null_free = false;
1123   }
1124   virtual uint size_of() const { return sizeof(*this); }
1125   virtual int Opcode() const;
1126 
1127   // Dig the length operand out of an array allocation site.
1128   Node* Ideal_length() {
1129     return in(AllocateNode::ALength);
1130   }
1131 
1132   // Dig the length operand out of an array allocation site and narrow the
1133   // type with a CastII, if necessary
1134   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1135 
1136   // Pattern-match a possible usage of AllocateArrayNode.
1137   // Return null if no allocation is recognized.
1138   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1139     AllocateNode* allo = Ideal_allocation(ptr);
1140     return (allo == nullptr || !allo->is_AllocateArray())
1141            ? nullptr : allo->as_AllocateArray();
1142   }
1143 
1144   void set_null_free() { _null_free = true; }
1145   bool is_null_free() const { return _null_free; }
1146 };
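The _null_free flag starts out false and is only flipped by set_null_free(), so later phases can branch on is_null_free() when deciding whether element accesses still need null handling. A minimal hedged sketch:

  // Sketch: marking and querying a null-free array allocation.
  #include "opto/callnode.hpp"

  static void handle_array_alloc_sketch(AllocateArrayNode* alloc,
                                        bool element_is_null_free) {
    if (element_is_null_free) {
      alloc->set_null_free();     // one-way flag: false -> true
    }
    if (alloc->is_null_free()) {
      // ... elements can never be null; null checks on loads can be skipped ...
    }
  }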
1147 
1148 //------------------------------AbstractLockNode-----------------------------------
1149 class AbstractLockNode: public CallNode {
1150 private:
1151   enum {
1152     Regular = 0,  // Normal lock
1153     NonEscObj,    // Lock is used for non escaping object
1154     Coarsened,    // Lock was coarsened
1155     Nested        // Nested lock
1156   } _kind;
1157 
1158   static const char* _kind_names[Nested+1];
1159 
1160 #ifndef PRODUCT
1161   NamedCounter* _counter;
1162 #endif
1163 
1164 protected:
1165   // helper functions for lock elimination

1226 //    0  -  object to lock
1227 //    1 -   a BoxLockNode
1228 //    2 -   a FastLockNode
1229 //
1230 class LockNode : public AbstractLockNode {
1231 public:
1232 
1233   static const TypeFunc *lock_type() {
1234     // create input type (domain)
1235     const Type **fields = TypeTuple::fields(3);
1236     fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
1237     fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
1238     fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
1239     const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
1240 
1241     // create result type (range)
1242     fields = TypeTuple::fields(0);
1243 
1244     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1245 
1246     return TypeFunc::make(domain, range);
1247   }
1248 
1249   virtual int Opcode() const;
1250   virtual uint size_of() const; // Size is bigger
1251   LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
1252     init_class_id(Class_Lock);
1253     init_flags(Flag_is_macro);
1254     C->add_macro_node(this);
1255   }
1256   virtual bool        guaranteed_safepoint()  { return false; }
1257 
1258   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1259   // Expansion modifies the JVMState, so we need to deep clone it
1260   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1261 
1262   bool is_nested_lock_region(); // Is this Lock nested?
1263   bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
1264 };
1265 
1266 //------------------------------Unlock---------------------------------------