src/hotspot/share/opto/callnode.hpp


  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match( const ProjNode *proj, const Matcher *m );
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93   static  const TypeTuple *osr_domain();
  94 };
  95 
  96 
  97 //------------------------------ParmNode---------------------------------------
  98 // Incoming parameters
  99 class ParmNode : public ProjNode {
 100   static const char * const names[TypeFunc::Parms+1];
 101 public:
 102   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 103     init_class_id(Class_Parm);
 104   }
 105   virtual int Opcode() const;
 106   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 107   virtual uint ideal_reg() const;
 108 #ifndef PRODUCT
 109   virtual void dump_spec(outputStream *st) const;
 110   virtual void dump_compact_spec(outputStream *st) const;
 111 #endif
 112 };
 113 

 707     assert(jvms != nullptr, "JVMS reference is null.");
 708     return jvms->scloff() + _merge_pointer_idx + 1;
 709   }
 710 
 711   // Assumes that "this" is an argument to a safepoint node "s", and that
 712   // "new_call" is being created to correspond to "s".  But the difference
 713   // between the start index of the jvmstates of "new_call" and "s" is
 714   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 715   // corresponds appropriately to "this" in "new_call".  Assumes that
 716   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 717   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 718   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 719 
 720 #ifndef PRODUCT
 721   virtual void              dump_spec(outputStream *st) const;
 722 #endif
 723 };
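
The clone contract above is easiest to see from the caller's side. Below is a minimal sketch of the assumed translation pattern; the Dict built from cmpkey/hashkey is the usual libadt idiom, and smerge/new_call are illustrative names, not from this file:

  // One sosn_map per (s -> new_call) translation, so each
  // SafePointScalarMergeNode is cloned at most once.
  Dict* sosn_map = new Dict(cmpkey, hashkey);
  bool new_node = false;
  SafePointScalarMergeNode* cloned = smerge->clone(sosn_map, new_node);
  if (new_node) {
    // first encounter of smerge in this translation; the clone is fresh
  }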
 724 
 725 // Simple container for the outgoing projections of a call.  Useful
 726 // for serious surgery on calls.
 727 class CallProjections : public StackObj {
 728 public:
 729   Node* fallthrough_proj;
 730   Node* fallthrough_catchproj;
 731   Node* fallthrough_memproj;
 732   Node* fallthrough_ioproj;
 733   Node* catchall_catchproj;
 734   Node* catchall_memproj;
 735   Node* catchall_ioproj;
 736   Node* resproj;
 737   Node* exobj;
 738 };
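
In this shape the container is a fixed-size StackObj: callers allocate it on the stack and hand it to CallNode::extract_projections (declared further down this page). A minimal usage sketch, with the surrounding call assumed:

  CallProjections projs;
  call->extract_projections(&projs, /*separate_io_proj=*/true);
  Node* result = projs.resproj;          // null if the call's result is unused
  Node* ctrl   = projs.fallthrough_proj; // control continuing after the call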
 739 
 740 class CallGenerator;
 741 
 742 //------------------------------CallNode---------------------------------------
 743 // Call nodes now subsume the function of debug nodes at callsites, so they
 744 // contain the functionality of a full scope chain of debug nodes.
 745 class CallNode : public SafePointNode {
 746 
 747 protected:
 748   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) const;
 749 
 750 public:
 751   const TypeFunc* _tf;          // Function type
 752   address         _entry_point; // Address of method being called
 753   float           _cnt;         // Estimate of number of times called
 754   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 755   const char*     _name;        // Printable name, if _method is null
 756 
 757   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 758     : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
 759       _tf(tf),
 760       _entry_point(addr),
 761       _cnt(COUNT_UNKNOWN),
 762       _generator(nullptr),
 763       _name(nullptr)
 764   {
 765     init_class_id(Class_Call);
 766   }
 767 
 768   const TypeFunc* tf()         const { return _tf; }
 769   address  entry_point()       const { return _entry_point; }
 770   float    cnt()               const { return _cnt; }
 771   CallGenerator* generator()   const { return _generator; }
 772 
 773   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 774   void set_entry_point(address p)       { _entry_point = p; }
 775   void set_cnt(float c)                 { _cnt = c; }
 776   void set_generator(CallGenerator* cg) { _generator = cg; }
 777 
 778   virtual const Type* bottom_type() const;
 779   virtual const Type* Value(PhaseGVN* phase) const;
 780   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 781   virtual Node* Identity(PhaseGVN* phase) { return this; }
 782   virtual bool        cmp(const Node &n) const;
 783   virtual uint        size_of() const = 0;
 784   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 785   virtual Node*       match(const ProjNode* proj, const Matcher* m);
 786   virtual uint        ideal_reg() const { return NotAMachineReg; }
 787   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 788   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 789   virtual bool        guaranteed_safepoint()  { return true; }
 790   // For macro nodes, the JVMState gets modified during expansion. If calls
 791   // use MachConstantBase, it gets modified during matching. If the call is
 792   // late inlined, it also needs the full JVMState. So when cloning the
 793   // node the JVMState must be deep cloned. Default is to shallow clone.
 794   virtual bool needs_deep_clone_jvms(Compile* C) { return _generator != nullptr || C->needs_deep_clone_jvms(); }
 795 
 796   // Returns true if the call may modify n
 797   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const;
 798   // Does this node have a use of n other than in debug information?
 799   bool                has_non_debug_use(const Node* n);
 800   // Returns the unique CheckCastPP of a call,
 801   // or the result projection if there are several CheckCastPPs,
 802   // or null if there is none.
 803   Node* result_cast();
 804   // Does this node return a pointer?
 805   bool returns_pointer() const {
 806     const TypeTuple* r = tf()->range();
 807     return (r->cnt() > TypeFunc::Parms &&
 808             r->field_at(TypeFunc::Parms)->isa_ptr());
 809   }
 810 
 811   // Collect all the interesting edges from a call for use in
 812   // replacing the call by something else.  Used by macro expansion
 813   // and the late inlining support.
 814   void extract_projections(CallProjections* projs,
 815                            bool separate_io_proj,
 816                            bool do_asserts = true,
 817                            bool allow_handlers = false) const;
 818 
 819   virtual uint match_edge(uint idx) const;
 820 
 821   bool is_call_to_arraycopystub() const;
 822   bool is_call_to_multianewarray_stub() const;
 823 
 824   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 825 
 826 #ifndef PRODUCT
 827   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 828   virtual void        dump_spec(outputStream* st) const;
 829 #endif
 830 };
 831 
 832 
 833 //------------------------------CallJavaNode-----------------------------------
 834 // Make a static or dynamic subroutine call node using Java calling
 835 // convention.  (The "Java" calling convention is the compiler's calling
 836 // convention, as opposed to the interpreter's or that of native C.)
 837 class CallJavaNode : public CallNode {

 864   void  set_arg_escape(bool f)             { _arg_escape = f; }
 865   bool  arg_escape() const                 { return _arg_escape; }
 866   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 867   void register_for_late_inline();
 868 
 869   DEBUG_ONLY( bool validate_symbolic_info() const; )
 870 
 871 #ifndef PRODUCT
 872   virtual void  dump_spec(outputStream *st) const;
 873   virtual void  dump_compact_spec(outputStream *st) const;
 874 #endif
 875 };
 876 
 877 //------------------------------CallStaticJavaNode-----------------------------
 878 // Make a direct subroutine call using Java calling convention (for static
 879 // calls and optimized virtual calls, plus calls to wrappers for run-time
 880 // routines); generates static stub.
 881 class CallStaticJavaNode : public CallJavaNode {
 882   virtual bool cmp( const Node &n ) const;
 883   virtual uint size_of() const; // Size is bigger
 884 public:
 885   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 886     : CallJavaNode(tf, addr, method) {
 887     init_class_id(Class_CallStaticJava);
 888     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 889       init_flags(Flag_is_macro);
 890       C->add_macro_node(this);
 891     }
 892   }
 893   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 894     : CallJavaNode(tf, addr, nullptr) {
 895     init_class_id(Class_CallStaticJava);
 896     // This node calls a runtime stub, which often has narrow memory effects.
 897     _adr_type = adr_type;
 898     _name = name;
 899   }
 900 
 901   // If this is an uncommon trap, return the request code, else zero.
 902   int uncommon_trap_request() const;
 903   bool is_uncommon_trap() const;
 904   static int extract_uncommon_trap_request(const Node* call);
 905 
 906   bool is_boxing_method() const {
 907     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 908   }
 909   // Late inlining modifies the JVMState, so we need to deep clone it
 910   // when the call node is cloned (because it is macro node).
 911   virtual bool needs_deep_clone_jvms(Compile* C) {

1010     init_class_id(Class_CallLeafPure);
1011   }
1012   int Opcode() const override;
1013   Node* Ideal(PhaseGVN* phase, bool can_reshape) override;
1014 
1015   CallLeafPureNode* inline_call_leaf_pure_node(Node* control = nullptr) const;
1016 };
1017 
1018 //------------------------------CallLeafNoFPNode-------------------------------
1019 // A CallLeafNode that does not use floating point, or uses it in the same
1020 // manner as the generated code.
1021 class CallLeafNoFPNode : public CallLeafNode {
1022 public:
1023   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
1024                    const TypePtr* adr_type)
1025     : CallLeafNode(tf, addr, name, adr_type)
1026   {
1027     init_class_id(Class_CallLeafNoFP);
1028   }
1029   virtual int   Opcode() const;
1030 };
1031 
1032 //------------------------------CallLeafVectorNode-------------------------------
1033 // A CallLeafNode, but using the vector calling convention instead.
1034 class CallLeafVectorNode : public CallLeafNode {
1035 private:
1036   uint _num_bits;
1037 protected:
1038   virtual bool cmp( const Node &n ) const;
1039   virtual uint size_of() const; // Size is bigger
1040 public:
1041   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
1042                    const TypePtr* adr_type, uint num_bits)
1043     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
1044   {
1045   }
1046   virtual int   Opcode() const;
1047   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
1048 };
1049 

1052 // High-level memory allocation
1053 //
1054 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
1055 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
1056 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
1057 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
1058 //  order to differentiate the uses of the projection on the normal control path from
1059 //  those on the exception return path.
1060 //
1061 class AllocateNode : public CallNode {
1062 public:
1063   enum {
1064     // Output:
1065     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
1066     // Inputs:
1067     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
1068     KlassNode,                        // type (maybe dynamic) of the obj.
1069     InitialTest,                      // slow-path test (may be constant)
1070     ALength,                          // array length (or TOP if none)
1071     ValidLengthTest,
1072     ParmLimit
1073   };
1074 
1075   static const TypeFunc* alloc_type(const Type* t) {
1076     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1077     fields[AllocSize]   = TypeInt::POS;
1078     fields[KlassNode]   = TypeInstPtr::NOTNULL;
1079     fields[InitialTest] = TypeInt::BOOL;
1080     fields[ALength]     = t;  // length (can be a bad length)
1081     fields[ValidLengthTest] = TypeInt::BOOL;
1082 
1083     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1084 
1085     // create result type (range)
1086     fields = TypeTuple::fields(1);
1087     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1088 
1089     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1090 
1091     return TypeFunc::make(domain, range);
1092   }
1093 
1094   // Result of Escape Analysis
1095   bool _is_scalar_replaceable;
1096   bool _is_non_escaping;
1097   // True when MemBar for new is redundant with MemBar at initializer exit
1098   bool _is_allocation_MemBar_redundant;
1099 
1100   virtual uint size_of() const; // Size is bigger
1101   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1102                Node *size, Node *klass_node, Node *initial_test);
1103   // Expansion modifies the JVMState, so we need to deep clone it
1104   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1105   virtual int Opcode() const;
1106   virtual uint ideal_reg() const { return Op_RegP; }
1107   virtual bool        guaranteed_safepoint()  { return false; }
1108 
1109   // allocations do not modify their arguments
1110   virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const { return false; }
1111 
1112   // Pattern-match a possible usage of AllocateNode.
1113   // Return null if no allocation is recognized.
1114   // The operand is the pointer produced by the (possible) allocation.
1115   // It must be a projection of the Allocate or its subsequent CastPP.
1116   // (Note:  This function is defined in file graphKit.cpp, near
1117   // GraphKit::new_instance/new_array, whose output it recognizes.)
1118   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1119   static AllocateNode* Ideal_allocation(Node* ptr);
1120 
1121   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1122   // an offset, which is reported back to the caller.

1159 
1160   // If the object doesn't escape in its <init> method and a memory barrier is
1161   // inserted at the exit of its <init>, the memory barrier for the new is not necessary.
1162   // Invoke this method when the MemBar at the exit of the initializer
1163   // post-dominates the allocation node.
1164   void compute_MemBar_redundancy(ciMethod* initializer);
1165   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1166 
1167   Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
1168 
1169   NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1170 };
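
Ideal_allocation is how other phases recover the AllocateNode behind a pointer value. A minimal sketch of the assumed pattern, where ptr is some pointer-typed node from the surrounding transform:

  // ptr must be a projection of the Allocate or of its subsequent CastPP.
  AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr);
  if (alloc != nullptr && alloc->_is_non_escaping) {
    // escape analysis proved the allocation non-escaping
  }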
1171 
1172 //------------------------------AllocateArray---------------------------------
1173 //
1174 // High-level array allocation
1175 //
1176 class AllocateArrayNode : public AllocateNode {
1177 public:
1178   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1179                     Node* initial_test, Node* count_val, Node* valid_length_test)
1180     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1181                    initial_test)
1182   {
1183     init_class_id(Class_AllocateArray);
1184     set_req(AllocateNode::ALength,        count_val);
1185     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1186   }
1187   virtual int Opcode() const;
1188 
1189   // Dig the length operand out of an array allocation site.
1190   Node* Ideal_length() {
1191     return in(AllocateNode::ALength);
1192   }
1193 
1194   // Dig the length operand out of an array allocation site and narrow the
1195   // type with a CastII, if necessary
1196   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1197 
1198   // Pattern-match a possible usage of AllocateArrayNode.
1199   // Return null if no allocation is recognized.
1200   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1201     AllocateNode* allo = Ideal_allocation(ptr);
1202     return (allo == nullptr || !allo->is_AllocateArray())
1203            ? nullptr : allo->as_AllocateArray();
1204   }
1205 };
1206 

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93 };
  94 
  95 
  96 //------------------------------ParmNode---------------------------------------
  97 // Incoming parameters
  98 class ParmNode : public ProjNode {
  99   static const char * const names[TypeFunc::Parms+1];
 100 public:
 101   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 102     init_class_id(Class_Parm);
 103   }
 104   virtual int Opcode() const;
 105   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 106   virtual uint ideal_reg() const;
 107 #ifndef PRODUCT
 108   virtual void dump_spec(outputStream *st) const;
 109   virtual void dump_compact_spec(outputStream *st) const;
 110 #endif
 111 };
 112 

 706     assert(jvms != nullptr, "JVMS reference is null.");
 707     return jvms->scloff() + _merge_pointer_idx + 1;
 708   }
 709 
 710   // Assumes that "this" is an argument to a safepoint node "s", and that
 711   // "new_call" is being created to correspond to "s".  But the difference
 712   // between the start index of the jvmstates of "new_call" and "s" is
 713   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 714   // corresponds appropriately to "this" in "new_call".  Assumes that
 715   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 716   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 717   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 718 
 719 #ifndef PRODUCT
 720   virtual void              dump_spec(outputStream *st) const;
 721 #endif
 722 };
 723 
 724 // Simple container for the outgoing projections of a call.  Useful
 725 // for serious surgery on calls.
 726 class CallProjections {
 727 public:
 728   Node* fallthrough_proj;
 729   Node* fallthrough_catchproj;
 730   Node* fallthrough_memproj;
 731   Node* fallthrough_ioproj;
 732   Node* catchall_catchproj;
 733   Node* catchall_memproj;
 734   Node* catchall_ioproj;
 735   Node* exobj;
 736   uint nb_resproj;
 737   Node* resproj[1]; // at least one projection
 738 
 739   CallProjections(uint nbres) {
 740     fallthrough_proj      = nullptr;
 741     fallthrough_catchproj = nullptr;
 742     fallthrough_memproj   = nullptr;
 743     fallthrough_ioproj    = nullptr;
 744     catchall_catchproj    = nullptr;
 745     catchall_memproj      = nullptr;
 746     catchall_ioproj       = nullptr;
 747     exobj                 = nullptr;
 748     nb_resproj            = nbres;
 749     resproj[0]            = nullptr;
 750     for (uint i = 1; i < nb_resproj; i++) {
 751       resproj[i]          = nullptr;
 752     }
 753   }
 754 
 755 };
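
The container is now variable-length: resproj[1] is a trailing array sized by nb_resproj, so a single call can expose several result projections (for instance when an inline-type return is split into fields, cf. InlineTypeReturnedAsFields below), and extract_projections returns a pointer instead of filling a StackObj. A sketch of the assumed caller-side pattern:

  CallProjections* projs = call->extract_projections(/*separate_io_proj=*/true);
  for (uint i = 0; i < projs->nb_resproj; i++) {
    Node* res = projs->resproj[i];  // null if that result slot is unused
  }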
 756 
 757 class CallGenerator;
 758 
 759 //------------------------------CallNode---------------------------------------
 760 // Call nodes now subsume the function of debug nodes at callsites, so they
 761 // contain the functionality of a full scope chain of debug nodes.
 762 class CallNode : public SafePointNode {
 763 
 764 protected:
 765   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase) const;
 766 
 767 public:
 768   const TypeFunc* _tf;          // Function type
 769   address         _entry_point; // Address of method being called
 770   float           _cnt;         // Estimate of number of times called
 771   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 772   const char*     _name;        // Printable name, if _method is null
 773 
 774   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 775     : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
 776       _tf(tf),
 777       _entry_point(addr),
 778       _cnt(COUNT_UNKNOWN),
 779       _generator(nullptr),
 780       _name(nullptr)
 781   {
 782     init_class_id(Class_Call);
 783   }
 784 
 785   const TypeFunc* tf()         const { return _tf; }
 786   address  entry_point()       const { return _entry_point; }
 787   float    cnt()               const { return _cnt; }
 788   CallGenerator* generator()   const { return _generator; }
 789 
 790   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 791   void set_entry_point(address p)       { _entry_point = p; }
 792   void set_cnt(float c)                 { _cnt = c; }
 793   void set_generator(CallGenerator* cg) { _generator = cg; }
 794 
 795   virtual const Type* bottom_type() const;
 796   virtual const Type* Value(PhaseGVN* phase) const;
 797   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 798   virtual Node* Identity(PhaseGVN* phase) { return this; }
 799   virtual bool        cmp(const Node &n) const;
 800   virtual uint        size_of() const = 0;
 801   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 802   virtual Node*       match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
 803   virtual uint        ideal_reg() const { return NotAMachineReg; }
 804   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 805   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 806   virtual bool        guaranteed_safepoint()  { return true; }
 807   // For macro nodes, the JVMState gets modified during expansion. If calls
 808   // use MachConstantBase, it gets modified during matching. If the call is
 809   // late inlined, it also needs the full JVMState. So when cloning the
 810   // node the JVMState must be deep cloned. Default is to shallow clone.
 811   virtual bool needs_deep_clone_jvms(Compile* C) { return _generator != nullptr || C->needs_deep_clone_jvms(); }
 812 
 813   // Returns true if the call may modify n
 814   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const;
 815   // Does this node have a use of n other than in debug information?
 816   bool                has_non_debug_use(const Node* n);
 817   bool                has_debug_use(const Node* n) const;
 818   // Returns the unique CheckCastPP of a call,
 819   // or the result projection if there are several CheckCastPPs,
 820   // or null if there is none.
 821   Node* result_cast();
 822   // Does this node return a pointer?
 823   bool returns_pointer() const {
 824     const TypeTuple* r = tf()->range_sig();
 825     return (!tf()->returns_inline_type_as_fields() &&
 826             r->cnt() > TypeFunc::Parms &&
 827             r->field_at(TypeFunc::Parms)->isa_ptr());
 828   }
 829 
 830   // Collect all the interesting edges from a call for use in
 831   // replacing the call by something else.  Used by macro expansion
 832   // and the late inlining support.
 833   CallProjections* extract_projections(bool separate_io_proj,
 834                                        bool do_asserts = true,
 835                                        bool allow_handlers = false) const;
 836 
 837   virtual uint match_edge(uint idx) const;
 838 
 839   bool is_call_to_arraycopystub() const;
 840   bool is_call_to_multianewarray_stub() const;
 841 
 842   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 843 
 844 #ifndef PRODUCT
 845   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 846   virtual void        dump_spec(outputStream* st) const;
 847 #endif
 848 };
 849 
 850 
 851 //------------------------------CallJavaNode-----------------------------------
 852 // Make a static or dynamic subroutine call node using Java calling
 853 // convention.  (The "Java" calling convention is the compiler's calling
 854 // convention, as opposed to the interpreter's or that of native C.)
 855 class CallJavaNode : public CallNode {

 882   void  set_arg_escape(bool f)             { _arg_escape = f; }
 883   bool  arg_escape() const                 { return _arg_escape; }
 884   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 885   void register_for_late_inline();
 886 
 887   DEBUG_ONLY( bool validate_symbolic_info() const; )
 888 
 889 #ifndef PRODUCT
 890   virtual void  dump_spec(outputStream *st) const;
 891   virtual void  dump_compact_spec(outputStream *st) const;
 892 #endif
 893 };
 894 
 895 //------------------------------CallStaticJavaNode-----------------------------
 896 // Make a direct subroutine call using Java calling convention (for static
 897 // calls and optimized virtual calls, plus calls to wrappers for run-time
 898 // routines); generates static stub.
 899 class CallStaticJavaNode : public CallJavaNode {
 900   virtual bool cmp( const Node &n ) const;
 901   virtual uint size_of() const; // Size is bigger
 902 
 903   bool remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg);
 904 
 905 public:
 906   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 907     : CallJavaNode(tf, addr, method) {
 908     init_class_id(Class_CallStaticJava);
 909     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 910       init_flags(Flag_is_macro);
 911       C->add_macro_node(this);
 912     }
 913     const TypeTuple *r = tf->range_sig();
 914     if (InlineTypeReturnedAsFields &&
 915         method != nullptr &&
 916         method->is_method_handle_intrinsic() &&
 917         r->cnt() > TypeFunc::Parms &&
 918         r->field_at(TypeFunc::Parms)->isa_oopptr() &&
 919         r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
 920       // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
 921       init_flags(Flag_is_macro);
 922       C->add_macro_node(this);
 923     }
 924   }
 925   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 926     : CallJavaNode(tf, addr, nullptr) {
 927     init_class_id(Class_CallStaticJava);
 928     // This node calls a runtime stub, which often has narrow memory effects.
 929     _adr_type = adr_type;
 930     _name = name;
 931   }
 932 
 933   // If this is an uncommon trap, return the request code, else zero.
 934   int uncommon_trap_request() const;
 935   bool is_uncommon_trap() const;
 936   static int extract_uncommon_trap_request(const Node* call);
 937 
 938   bool is_boxing_method() const {
 939     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 940   }
 941   // Late inlining modifies the JVMState, so we need to deep clone it
 942   // when the call node is cloned (because it is macro node).
 943   virtual bool needs_deep_clone_jvms(Compile* C) {

1042     init_class_id(Class_CallLeafPure);
1043   }
1044   int Opcode() const override;
1045   Node* Ideal(PhaseGVN* phase, bool can_reshape) override;
1046 
1047   CallLeafPureNode* inline_call_leaf_pure_node(Node* control = nullptr) const;
1048 };
1049 
1050 //------------------------------CallLeafNoFPNode-------------------------------
1051 // A CallLeafNode that does not use floating point, or uses it in the same
1052 // manner as the generated code.
1053 class CallLeafNoFPNode : public CallLeafNode {
1054 public:
1055   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
1056                    const TypePtr* adr_type)
1057     : CallLeafNode(tf, addr, name, adr_type)
1058   {
1059     init_class_id(Class_CallLeafNoFP);
1060   }
1061   virtual int   Opcode() const;
1062   virtual uint match_edge(uint idx) const;
1063 };
1064 
1065 //------------------------------CallLeafVectorNode-------------------------------
1066 // A CallLeafNode, but using the vector calling convention instead.
1067 class CallLeafVectorNode : public CallLeafNode {
1068 private:
1069   uint _num_bits;
1070 protected:
1071   virtual bool cmp( const Node &n ) const;
1072   virtual uint size_of() const; // Size is bigger
1073 public:
1074   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
1075                    const TypePtr* adr_type, uint num_bits)
1076     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
1077   {
1078   }
1079   virtual int   Opcode() const;
1080   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
1081 };
1082 

1085 // High-level memory allocation
1086 //
1087 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
1088 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
1089 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
1090 //  the _is_io_use flag in the projection).  This is needed when expanding the node in
1091 //  order to differentiate the uses of the projection on the normal control path from
1092 //  those on the exception return path.
1093 //
1094 class AllocateNode : public CallNode {
1095 public:
1096   enum {
1097     // Output:
1098     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
1099     // Inputs:
1100     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
1101     KlassNode,                        // type (maybe dynamic) of the obj.
1102     InitialTest,                      // slow-path test (may be constant)
1103     ALength,                          // array length (or TOP if none)
1104     ValidLengthTest,
1105     InlineType,                       // InlineTypeNode if this is an inline type allocation
1106     InitValue,                        // Init value for null-free inline type arrays
1107     RawInitValue,                     // Same as above but as raw machine word
1108     ParmLimit
1109   };
1110 
1111   static const TypeFunc* alloc_type(const Type* t) {
1112     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1113     fields[AllocSize]   = TypeInt::POS;
1114     fields[KlassNode]   = TypeInstPtr::NOTNULL;
1115     fields[InitialTest] = TypeInt::BOOL;
1116     fields[ALength]     = t;  // length (can be a bad length)
1117     fields[ValidLengthTest] = TypeInt::BOOL;
1118     fields[InlineType] = Type::BOTTOM;
1119     fields[InitValue] = TypeInstPtr::NOTNULL;
1120     fields[RawInitValue] = TypeX_X;
1121 
1122     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1123 
1124     // create result type (range)
1125     fields = TypeTuple::fields(1);
1126     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1127 
1128     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1129 
1130     return TypeFunc::make(domain, range);
1131   }
1132 
1133   // Result of Escape Analysis
1134   bool _is_scalar_replaceable;
1135   bool _is_non_escaping;
1136   // True when MemBar for new is redundant with MemBar at initializer exit
1137   bool _is_allocation_MemBar_redundant;
1138 
1139   virtual uint size_of() const; // Size is bigger
1140   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1141                Node *size, Node *klass_node, Node *initial_test,
1142                InlineTypeNode* inline_type_node = nullptr);
1143   // Expansion modifies the JVMState, so we need to deep clone it
1144   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1145   virtual int Opcode() const;
1146   virtual uint ideal_reg() const { return Op_RegP; }
1147   virtual bool        guaranteed_safepoint()  { return false; }
1148 
1149   // allocations do not modify their arguments
1150   virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const { return false; }
1151 
1152   // Pattern-match a possible usage of AllocateNode.
1153   // Return null if no allocation is recognized.
1154   // The operand is the pointer produced by the (possible) allocation.
1155   // It must be a projection of the Allocate or its subsequent CastPP.
1156   // (Note:  This function is defined in file graphKit.cpp, near
1157   // GraphKit::new_instance/new_array, whose output it recognizes.)
1158   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1159   static AllocateNode* Ideal_allocation(Node* ptr);
1160 
1161   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1162   // an offset, which is reported back to the caller.

1199 
1200   // If the object doesn't escape in its <init> method and a memory barrier is
1201   // inserted at the exit of its <init>, the memory barrier for the new is not necessary.
1202   // Invoke this method when the MemBar at the exit of the initializer
1203   // post-dominates the allocation node.
1204   void compute_MemBar_redundancy(ciMethod* initializer);
1205   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1206 
1207   Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
1208 
1209   NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1210 };
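
The class comment above notes that an Allocate carries two memory and two i_o projections, told apart by _is_io_use. A minimal sketch of the assumed walk over the node's out-edges, using the standard DUIterator_Fast idiom:

  // Separate the normal-path from the exception-path memory state.
  for (DUIterator_Fast imax, i = alloc->fast_outs(imax); i < imax; i++) {
    Node* use = alloc->fast_out(i);
    if (use->is_Proj() && use->as_Proj()->_con == TypeFunc::Memory) {
      if (use->as_Proj()->_is_io_use) {
        // memory state reaching the exception return path
      } else {
        // memory state on the normal control path
      }
    }
  }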
1211 
1212 //------------------------------AllocateArray---------------------------------
1213 //
1214 // High-level array allocation
1215 //
1216 class AllocateArrayNode : public AllocateNode {
1217 public:
1218   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1219                     Node* initial_test, Node* count_val, Node* valid_length_test,
1220                     Node* init_value, Node* raw_init_value)
1221     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1222                    initial_test)
1223   {
1224     init_class_id(Class_AllocateArray);
1225     set_req(AllocateNode::ALength, count_val);
1226     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1227     init_req(AllocateNode::InitValue, init_value);
1228     init_req(AllocateNode::RawInitValue, raw_init_value);
1229   }
1230   virtual uint size_of() const { return sizeof(*this); }
1231   virtual int Opcode() const;
1232 
1233   // Dig the length operand out of an array allocation site.
1234   Node* Ideal_length() {
1235     return in(AllocateNode::ALength);
1236   }
1237 
1238   // Dig the length operand out of an array allocation site and narrow the
1239   // type with a CastII, if necessary
1240   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1241 
1242   // Pattern-match a possible usage of AllocateArrayNode.
1243   // Return null if no allocation is recognized.
1244   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1245     AllocateNode* allo = Ideal_allocation(ptr);
1246     return (allo == nullptr || !allo->is_AllocateArray())
1247            ? nullptr : allo->as_AllocateArray();
1248   }
1249 };
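
Ideal_array_allocation composes with make_ideal_length: once the allocation is recognized, its length can be recovered with the type narrowed to the array's actual bounds. A sketch, assuming ary_type and phase come from the surrounding transform:

  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr);
  if (alloc != nullptr) {
    // a CastII is inserted only if can_create is true (the default)
    Node* len = alloc->make_ideal_length(ary_type, phase);
  }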
1250 