src/hotspot/share/opto/callnode.hpp

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match( const ProjNode *proj, const Matcher *m );
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93   static  const TypeTuple *osr_domain();
  94 };
  95 
  96 
  97 //------------------------------ParmNode---------------------------------------
  98 // Incoming parameters
  99 class ParmNode : public ProjNode {
 100   static const char * const names[TypeFunc::Parms+1];
 101 public:
 102   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 103     init_class_id(Class_Parm);
 104   }
 105   virtual int Opcode() const;
 106   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 107   virtual uint ideal_reg() const;
 108 #ifndef PRODUCT
 109   virtual void dump_spec(outputStream *st) const;
 110   virtual void dump_compact_spec(outputStream *st) const;
 111 #endif
 112 };
 113 

 649     assert(jvms != nullptr, "JVMS reference is null.");
 650     return jvms->scloff() + _merge_pointer_idx + 1;
 651   }
 652 
 653   // Assumes that "this" is an argument to a safepoint node "s", and that
 654   // "new_call" is being created to correspond to "s".  But the difference
 655   // between the start index of the jvmstates of "new_call" and "s" is
 656   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 657   // corresponds appropriately to "this" in "new_call".  Assumes that
 658   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 659   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 660   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 661 
 662 #ifndef PRODUCT
 663   virtual void              dump_spec(outputStream *st) const;
 664 #endif
 665 };
 666 
 667 // Simple container for the outgoing projections of a call.  Useful
 668 // for serious surgery on calls.
 669 class CallProjections : public StackObj {
 670 public:
 671   Node* fallthrough_proj;
 672   Node* fallthrough_catchproj;
 673   Node* fallthrough_memproj;
 674   Node* fallthrough_ioproj;
 675   Node* catchall_catchproj;
 676   Node* catchall_memproj;
 677   Node* catchall_ioproj;
 678   Node* resproj;
 679   Node* exobj;
 680 };
 681 
 682 class CallGenerator;
 683 
 684 //------------------------------CallNode---------------------------------------
 685 // Call nodes now subsume the function of debug nodes at callsites, so they
 686 // contain the functionality of a full scope chain of debug nodes.
 687 class CallNode : public SafePointNode {
 688 
 689 protected:
 690   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 691 
 692 public:
 693   const TypeFunc* _tf;          // Function type
 694   address         _entry_point; // Address of method being called
 695   float           _cnt;         // Estimate of number of times called
 696   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 697   const char*     _name;        // Printable name, if _method is null
 698 
 699   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 700     : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
 701       _tf(tf),
 702       _entry_point(addr),
 703       _cnt(COUNT_UNKNOWN),
 704       _generator(nullptr),
 705       _name(nullptr)
 706   {
 707     init_class_id(Class_Call);
 708   }
 709 
 710   const TypeFunc* tf()         const { return _tf; }
 711   address  entry_point()       const { return _entry_point; }
 712   float    cnt()               const { return _cnt; }
 713   CallGenerator* generator()   const { return _generator; }
 714 
 715   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 716   void set_entry_point(address p)       { _entry_point = p; }
 717   void set_cnt(float c)                 { _cnt = c; }
 718   void set_generator(CallGenerator* cg) { _generator = cg; }
 719 
 720   virtual const Type* bottom_type() const;
 721   virtual const Type* Value(PhaseGVN* phase) const;
 722   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 723   virtual Node* Identity(PhaseGVN* phase) { return this; }
 724   virtual bool        cmp(const Node &n) const;
 725   virtual uint        size_of() const = 0;
 726   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 727   virtual Node*       match(const ProjNode* proj, const Matcher* m);
 728   virtual uint        ideal_reg() const { return NotAMachineReg; }
 729   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 730   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 731   virtual bool        guaranteed_safepoint()  { return true; }
 732   // For macro nodes, the JVMState gets modified during expansion. If calls
 733   // use MachConstantBase, it gets modified during matching. If the call is
 734   // late inlined, it also needs the full JVMState. So when cloning the
 735   // node the JVMState must be deep cloned. Default is to shallow clone.
 736   virtual bool needs_deep_clone_jvms(Compile* C) { return _generator != nullptr || C->needs_deep_clone_jvms(); }
 737 
 738   // Returns true if the call may modify n
 739   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 740   // Does this node have a use of n other than in debug information?
 741   bool                has_non_debug_use(Node* n);

 742   // Returns the unique CheckCastPP of a call
 743   // or result projection if there are several CheckCastPPs
 744   // or returns null if there is none.
 745   Node* result_cast();
 746   // Does this node return a pointer?
 747   bool returns_pointer() const {
 748     const TypeTuple* r = tf()->range();
 749     return (r->cnt() > TypeFunc::Parms &&

 750             r->field_at(TypeFunc::Parms)->isa_ptr());
 751   }
 752 
 753   // Collect all the interesting edges from a call for use in
 754   // replacing the call by something else.  Used by macro expansion
 755   // and the late inlining support.
 756   void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true) const;
 757 
 758   virtual uint match_edge(uint idx) const;
 759 
 760   bool is_call_to_arraycopystub() const;
 761   bool is_call_to_multianewarray_stub() const;
 762 
 763   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 764 
 765 #ifndef PRODUCT
 766   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 767   virtual void        dump_spec(outputStream* st) const;
 768 #endif
 769 };
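
A typical consumer of this interface (macro expansion or late inlining) fills a stack-allocated CallProjections and then rewires the edges it finds. A minimal sketch of that pattern, assuming the usual C2 compilation context; `call` is an existing CallNode* and the helper name is hypothetical:

    // Sketch only: collect the interesting projections of a call before
    // replacing the call with something else.
    static void inspect_call_outputs(CallNode* call) {
      CallProjections projs;                          // StackObj container declared above
      call->extract_projections(&projs, true /*separate_io_proj*/);
      if (projs.fallthrough_catchproj != nullptr) {
        // normal control continues here
      }
      if (projs.resproj != nullptr) {
        // uses of the call's result hang off this projection
      }
      if (projs.catchall_catchproj != nullptr) {
        // exceptional control path
      }
    }
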
 770 
 771 
 772 //------------------------------CallJavaNode-----------------------------------
 773 // Make a static or dynamic subroutine call node using Java calling
 774 // convention.  (The "Java" calling convention is the compiler's calling
 775 // convention, as opposed to the interpreter's or that of native C.)
 776 class CallJavaNode : public CallNode {

 803   void  set_arg_escape(bool f)             { _arg_escape = f; }
 804   bool  arg_escape() const                 { return _arg_escape; }
 805   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 806   void register_for_late_inline();
 807 
 808   DEBUG_ONLY( bool validate_symbolic_info() const; )
 809 
 810 #ifndef PRODUCT
 811   virtual void  dump_spec(outputStream *st) const;
 812   virtual void  dump_compact_spec(outputStream *st) const;
 813 #endif
 814 };
 815 
 816 //------------------------------CallStaticJavaNode-----------------------------
 817 // Make a direct subroutine call using Java calling convention (for static
 818 // calls and optimized virtual calls, plus calls to wrappers for run-time
 819 // routines); generates static stub.
 820 class CallStaticJavaNode : public CallJavaNode {
 821   virtual bool cmp( const Node &n ) const;
 822   virtual uint size_of() const; // Size is bigger
 823 public:
 824   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 825     : CallJavaNode(tf, addr, method) {
 826     init_class_id(Class_CallStaticJava);
 827     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 828       init_flags(Flag_is_macro);
 829       C->add_macro_node(this);
 830     }
 831   }
 832   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 833     : CallJavaNode(tf, addr, nullptr) {
 834     init_class_id(Class_CallStaticJava);
 835     // This node calls a runtime stub, which often has narrow memory effects.
 836     _adr_type = adr_type;
 837     _name = name;
 838   }
 839 
 840   // If this is an uncommon trap, return the request code, else zero.
 841   int uncommon_trap_request() const;
 842   bool is_uncommon_trap() const;
 843   static int extract_uncommon_trap_request(const Node* call);
 844 
 845   bool is_boxing_method() const {
 846     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 847   }
 848   // Late inlining modifies the JVMState, so we need to deep clone it
 849   // when the call node is cloned (because it is a macro node).
 850   virtual bool needs_deep_clone_jvms(Compile* C) {

 947   CallLeafPureNode(const TypeFunc* tf, address addr, const char* name)
 948       : CallLeafNode(tf, addr, name, nullptr) {
 949     init_class_id(Class_CallLeafPure);
 950   }
 951   int Opcode() const override;
 952   Node* Ideal(PhaseGVN* phase, bool can_reshape) override;
 953 };
 954 
 955 //------------------------------CallLeafNoFPNode-------------------------------
 956 // CallLeafNode, not using floating point or using it in the same manner as
 957 // the generated code
 958 class CallLeafNoFPNode : public CallLeafNode {
 959 public:
 960   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 961                    const TypePtr* adr_type)
 962     : CallLeafNode(tf, addr, name, adr_type)
 963   {
 964     init_class_id(Class_CallLeafNoFP);
 965   }
 966   virtual int   Opcode() const;

 967 };
 968 
 969 //------------------------------CallLeafVectorNode-------------------------------
 970 // CallLeafNode but calling with vector calling convention instead.
 971 class CallLeafVectorNode : public CallLeafNode {
 972 private:
 973   uint _num_bits;
 974 protected:
 975   virtual bool cmp( const Node &n ) const;
 976   virtual uint size_of() const; // Size is bigger
 977 public:
 978   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 979                    const TypePtr* adr_type, uint num_bits)
 980     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 981   {
 982   }
 983   virtual int   Opcode() const;
 984   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 985 };
 986 

 989 // High-level memory allocation
 990 //
 991 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 992 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 993 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 994 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 995 //  order to differentiate the uses of the projection on the normal control path from
 996 //  those on the exception return path.
 997 //
 998 class AllocateNode : public CallNode {
 999 public:
1000   enum {
1001     // Output:
1002     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
1003     // Inputs:
1004     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
1005     KlassNode,                        // type (maybe dynamic) of the obj.
1006     InitialTest,                      // slow-path test (may be constant)
1007     ALength,                          // array length (or TOP if none)
 1008     ValidLengthTest,
 1009     ParmLimit
1010   };
1011 
1012   static const TypeFunc* alloc_type(const Type* t) {
1013     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1014     fields[AllocSize]   = TypeInt::POS;
1015     fields[KlassNode]   = TypeInstPtr::NOTNULL;
1016     fields[InitialTest] = TypeInt::BOOL;
1017     fields[ALength]     = t;  // length (can be a bad length)
 1018     fields[ValidLengthTest] = TypeInt::BOOL;
 1019 
1020     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1021 
1022     // create result type (range)
1023     fields = TypeTuple::fields(1);
1024     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1025 
1026     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1027 
1028     return TypeFunc::make(domain, range);
1029   }
1030 
1031   // Result of Escape Analysis
1032   bool _is_scalar_replaceable;
1033   bool _is_non_escaping;
 1034   // True when MemBar for new is redundant with MemBar at initializer exit
1035   bool _is_allocation_MemBar_redundant;

1036 
1037   virtual uint size_of() const; // Size is bigger
1038   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1039                Node *size, Node *klass_node, Node *initial_test);

1040   // Expansion modifies the JVMState, so we need to deep clone it
1041   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1042   virtual int Opcode() const;
1043   virtual uint ideal_reg() const { return Op_RegP; }
1044   virtual bool        guaranteed_safepoint()  { return false; }
1045 
1046   // allocations do not modify their arguments
1047   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1048 
1049   // Pattern-match a possible usage of AllocateNode.
1050   // Return null if no allocation is recognized.
1051   // The operand is the pointer produced by the (possible) allocation.
1052   // It must be a projection of the Allocate or its subsequent CastPP.
1053   // (Note:  This function is defined in file graphKit.cpp, near
1054   // GraphKit::new_instance/new_array, whose output it recognizes.)
1055   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1056   static AllocateNode* Ideal_allocation(Node* ptr);
1057 
1058   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1059   // an offset, which is reported back to the caller.

1084 
 1085   // Return true if the allocation doesn't escape the thread, i.e. its escape
 1086   // state needs to be noEscape or ArgEscape. InitializeNode._does_not_escape
 1087   // is true when its allocation's escape state is noEscape or
 1088   // ArgEscape. In case the allocation's InitializeNode is null, check the
 1089   // AllocateNode._is_non_escaping flag.
 1090   // AllocateNode._is_non_escaping is true when its escape state is
 1091   // noEscape.
1092   bool does_not_escape_thread() {
1093     InitializeNode* init = nullptr;
1094     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1095   }
1096 
 1097   // If the object doesn't escape in its <init> method and there is a memory
 1098   // barrier inserted at the exit of the <init>, the memory barrier for the new
 1099   // is not necessary. Invoke this method when the MemBar at the exit of the
 1100   // initializer post-dominates the allocation node.
1101   void compute_MemBar_redundancy(ciMethod* initializer);
1102   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1103 
1104   Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
1105 
1106   NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1107 };
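
The Ideal_allocation pattern matcher above is how later passes find the AllocateNode behind an oop-producing pointer. A hedged sketch of such a lookup combined with does_not_escape_thread (the free function is illustrative only, not part of this header):

    // Sketch only: walk from a pointer back to the allocation that produced it,
    // if the pointer is a projection of an Allocate or its subsequent CastPP.
    static bool is_non_escaping_allocation(Node* ptr) {
      AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr);
      if (alloc == nullptr) {
        return false;                 // no allocation recognized behind this pointer
      }
      return alloc->does_not_escape_thread();
    }
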
1108 
1109 //------------------------------AllocateArray---------------------------------
1110 //
1111 // High-level array allocation
1112 //
1113 class AllocateArrayNode : public AllocateNode {
1114 public:
1115   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1116                     Node* initial_test, Node* count_val, Node* valid_length_test)

1117     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1118                    initial_test)
1119   {
1120     init_class_id(Class_AllocateArray);
1121     set_req(AllocateNode::ALength,        count_val);
1122     set_req(AllocateNode::ValidLengthTest, valid_length_test);


1123   }

1124   virtual int Opcode() const;
1125 
 1126   // Dig the length operand out of an array allocation site.
1127   Node* Ideal_length() {
1128     return in(AllocateNode::ALength);
1129   }
1130 
 1131   // Dig the length operand out of an array allocation site and narrow the
 1132   // type with a CastII, if necessary
1133   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1134 
1135   // Pattern-match a possible usage of AllocateArrayNode.
1136   // Return null if no allocation is recognized.
1137   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1138     AllocateNode* allo = Ideal_allocation(ptr);
1139     return (allo == nullptr || !allo->is_AllocateArray())
1140            ? nullptr : allo->as_AllocateArray();
1141   }
1142 };
1143 

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;

  93 };
  94 
  95 
  96 //------------------------------ParmNode---------------------------------------
  97 // Incoming parameters
  98 class ParmNode : public ProjNode {
  99   static const char * const names[TypeFunc::Parms+1];
 100 public:
 101   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 102     init_class_id(Class_Parm);
 103   }
 104   virtual int Opcode() const;
 105   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 106   virtual uint ideal_reg() const;
 107 #ifndef PRODUCT
 108   virtual void dump_spec(outputStream *st) const;
 109   virtual void dump_compact_spec(outputStream *st) const;
 110 #endif
 111 };
 112 

 648     assert(jvms != nullptr, "JVMS reference is null.");
 649     return jvms->scloff() + _merge_pointer_idx + 1;
 650   }
 651 
 652   // Assumes that "this" is an argument to a safepoint node "s", and that
 653   // "new_call" is being created to correspond to "s".  But the difference
 654   // between the start index of the jvmstates of "new_call" and "s" is
 655   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 656   // corresponds appropriately to "this" in "new_call".  Assumes that
 657   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 658   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 659   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 660 
 661 #ifndef PRODUCT
 662   virtual void              dump_spec(outputStream *st) const;
 663 #endif
 664 };
 665 
 666 // Simple container for the outgoing projections of a call.  Useful
 667 // for serious surgery on calls.
 668 class CallProjections {
 669 public:
 670   Node* fallthrough_proj;
 671   Node* fallthrough_catchproj;
 672   Node* fallthrough_memproj;
 673   Node* fallthrough_ioproj;
 674   Node* catchall_catchproj;
 675   Node* catchall_memproj;
 676   Node* catchall_ioproj;

 677   Node* exobj;
 678   uint nb_resproj;
 679   Node* resproj[1]; // at least one projection
 680 
 681   CallProjections(uint nbres) {
 682     fallthrough_proj      = nullptr;
 683     fallthrough_catchproj = nullptr;
 684     fallthrough_memproj   = nullptr;
 685     fallthrough_ioproj    = nullptr;
 686     catchall_catchproj    = nullptr;
 687     catchall_memproj      = nullptr;
 688     catchall_ioproj       = nullptr;
 689     exobj                 = nullptr;
 690     nb_resproj            = nbres;
 691     resproj[0]            = nullptr;
 692     for (uint i = 1; i < nb_resproj; i++) {
 693       resproj[i]          = nullptr;
 694     }
 695   }
 696 
 697 };
 698 
 699 class CallGenerator;
 700 
 701 //------------------------------CallNode---------------------------------------
 702 // Call nodes now subsume the function of debug nodes at callsites, so they
 703 // contain the functionality of a full scope chain of debug nodes.
 704 class CallNode : public SafePointNode {
 705 
 706 protected:
 707   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 708 
 709 public:
 710   const TypeFunc* _tf;          // Function type
 711   address         _entry_point; // Address of method being called
 712   float           _cnt;         // Estimate of number of times called
 713   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 714   const char*     _name;        // Printable name, if _method is null
 715 
 716   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 717     : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
 718       _tf(tf),
 719       _entry_point(addr),
 720       _cnt(COUNT_UNKNOWN),
 721       _generator(nullptr),
 722       _name(nullptr)
 723   {
 724     init_class_id(Class_Call);
 725   }
 726 
 727   const TypeFunc* tf()         const { return _tf; }
 728   address  entry_point()       const { return _entry_point; }
 729   float    cnt()               const { return _cnt; }
 730   CallGenerator* generator()   const { return _generator; }
 731 
 732   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 733   void set_entry_point(address p)       { _entry_point = p; }
 734   void set_cnt(float c)                 { _cnt = c; }
 735   void set_generator(CallGenerator* cg) { _generator = cg; }
 736 
 737   virtual const Type* bottom_type() const;
 738   virtual const Type* Value(PhaseGVN* phase) const;
 739   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 740   virtual Node* Identity(PhaseGVN* phase) { return this; }
 741   virtual bool        cmp(const Node &n) const;
 742   virtual uint        size_of() const = 0;
 743   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 744   virtual Node*       match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
 745   virtual uint        ideal_reg() const { return NotAMachineReg; }
 746   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 747   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 748   virtual bool        guaranteed_safepoint()  { return true; }
 749   // For macro nodes, the JVMState gets modified during expansion. If calls
 750   // use MachConstantBase, it gets modified during matching. If the call is
 751   // late inlined, it also needs the full JVMState. So when cloning the
 752   // node the JVMState must be deep cloned. Default is to shallow clone.
 753   virtual bool needs_deep_clone_jvms(Compile* C) { return _generator != nullptr || C->needs_deep_clone_jvms(); }
 754 
 755   // Returns true if the call may modify n
 756   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 757   // Does this node have a use of n other than in debug information?
 758   bool                has_non_debug_use(Node* n);
 759   bool                has_debug_use(Node* n);
 760   // Returns the unique CheckCastPP of a call
 761   // or result projection if there are several CheckCastPPs
 762   // or returns null if there is none.
 763   Node* result_cast();
 764   // Does this node return a pointer?
 765   bool returns_pointer() const {
 766     const TypeTuple* r = tf()->range_sig();
 767     return (!tf()->returns_inline_type_as_fields() &&
 768             r->cnt() > TypeFunc::Parms &&
 769             r->field_at(TypeFunc::Parms)->isa_ptr());
 770   }
 771 
 772   // Collect all the interesting edges from a call for use in
 773   // replacing the call by something else.  Used by macro expansion
 774   // and the late inlining support.
 775   CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true) const;
 776 
 777   virtual uint match_edge(uint idx) const;
 778 
 779   bool is_call_to_arraycopystub() const;
 780   bool is_call_to_multianewarray_stub() const;
 781 
 782   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 783 
 784 #ifndef PRODUCT
 785   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 786   virtual void        dump_spec(outputStream* st) const;
 787 #endif
 788 };
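
In the updated interface the container is allocated and returned by extract_projections() itself and can carry more than one result projection (nb_resproj above), which matters when an inline type is returned as fields. A hedged sketch of iterating the results, again assuming an existing CallNode* call in a C2 compilation context:

    // Sketch only: the new extract_projections() returns the container;
    // result projections are indexed up to nb_resproj.
    CallProjections* projs = call->extract_projections(true /*separate_io_proj*/);
    for (uint i = 0; i < projs->nb_resproj; i++) {
      Node* res = projs->resproj[i];
      if (res != nullptr) {
        // presumably one slot per returned field when an inline type is
        // returned as fields, a single result projection otherwise
      }
    }
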
 789 
 790 
 791 //------------------------------CallJavaNode-----------------------------------
 792 // Make a static or dynamic subroutine call node using Java calling
 793 // convention.  (The "Java" calling convention is the compiler's calling
 794 // convention, as opposed to the interpreter's or that of native C.)
 795 class CallJavaNode : public CallNode {

 822   void  set_arg_escape(bool f)             { _arg_escape = f; }
 823   bool  arg_escape() const                 { return _arg_escape; }
 824   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 825   void register_for_late_inline();
 826 
 827   DEBUG_ONLY( bool validate_symbolic_info() const; )
 828 
 829 #ifndef PRODUCT
 830   virtual void  dump_spec(outputStream *st) const;
 831   virtual void  dump_compact_spec(outputStream *st) const;
 832 #endif
 833 };
 834 
 835 //------------------------------CallStaticJavaNode-----------------------------
 836 // Make a direct subroutine call using Java calling convention (for static
 837 // calls and optimized virtual calls, plus calls to wrappers for run-time
 838 // routines); generates static stub.
 839 class CallStaticJavaNode : public CallJavaNode {
 840   virtual bool cmp( const Node &n ) const;
 841   virtual uint size_of() const; // Size is bigger
 842 
 843   bool remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg);
 844 
 845 public:
 846   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 847     : CallJavaNode(tf, addr, method) {
 848     init_class_id(Class_CallStaticJava);
 849     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 850       init_flags(Flag_is_macro);
 851       C->add_macro_node(this);
 852     }
 853     const TypeTuple *r = tf->range_sig();
 854     if (InlineTypeReturnedAsFields &&
 855         method != nullptr &&
 856         method->is_method_handle_intrinsic() &&
 857         r->cnt() > TypeFunc::Parms &&
 858         r->field_at(TypeFunc::Parms)->isa_oopptr() &&
 859         r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
 860       // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
 861       init_flags(Flag_is_macro);
 862       C->add_macro_node(this);
 863     }
 864   }
 865   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 866     : CallJavaNode(tf, addr, nullptr) {
 867     init_class_id(Class_CallStaticJava);
 868     // This node calls a runtime stub, which often has narrow memory effects.
 869     _adr_type = adr_type;
 870     _name = name;
 871   }
 872 
 873   // If this is an uncommon trap, return the request code, else zero.
 874   int uncommon_trap_request() const;
 875   bool is_uncommon_trap() const;
 876   static int extract_uncommon_trap_request(const Node* call);
 877 
 878   bool is_boxing_method() const {
 879     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 880   }
 881   // Late inlining modifies the JVMState, so we need to deep clone it
 882   // when the call node is cloned (because it is a macro node).
 883   virtual bool needs_deep_clone_jvms(Compile* C) {

 980   CallLeafPureNode(const TypeFunc* tf, address addr, const char* name)
 981       : CallLeafNode(tf, addr, name, nullptr) {
 982     init_class_id(Class_CallLeafPure);
 983   }
 984   int Opcode() const override;
 985   Node* Ideal(PhaseGVN* phase, bool can_reshape) override;
 986 };
 987 
 988 //------------------------------CallLeafNoFPNode-------------------------------
 989 // CallLeafNode, not using floating point or using it in the same manner as
 990 // the generated code
 991 class CallLeafNoFPNode : public CallLeafNode {
 992 public:
 993   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 994                    const TypePtr* adr_type)
 995     : CallLeafNode(tf, addr, name, adr_type)
 996   {
 997     init_class_id(Class_CallLeafNoFP);
 998   }
 999   virtual int   Opcode() const;
1000   virtual uint match_edge(uint idx) const;
1001 };
1002 
1003 //------------------------------CallLeafVectorNode-------------------------------
1004 // CallLeafNode but calling with vector calling convention instead.
1005 class CallLeafVectorNode : public CallLeafNode {
1006 private:
1007   uint _num_bits;
1008 protected:
1009   virtual bool cmp( const Node &n ) const;
1010   virtual uint size_of() const; // Size is bigger
1011 public:
1012   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
1013                    const TypePtr* adr_type, uint num_bits)
1014     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
1015   {
1016   }
1017   virtual int   Opcode() const;
1018   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
1019 };
1020 

1023 // High-level memory allocation
1024 //
1025 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
1026 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
1027 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
1028 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
1029 //  order to differentiate the uses of the projection on the normal control path from
1030 //  those on the exception return path.
1031 //
1032 class AllocateNode : public CallNode {
1033 public:
1034   enum {
1035     // Output:
1036     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
1037     // Inputs:
1038     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
1039     KlassNode,                        // type (maybe dynamic) of the obj.
1040     InitialTest,                      // slow-path test (may be constant)
1041     ALength,                          // array length (or TOP if none)
1042     ValidLengthTest,
1043     InlineType,                       // InlineTypeNode if this is an inline type allocation
1044     InitValue,                        // Init value for null-free inline type arrays
1045     RawInitValue,                     // Same as above but as raw machine word
1046     ParmLimit
1047   };
1048 
1049   static const TypeFunc* alloc_type(const Type* t) {
1050     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1051     fields[AllocSize]   = TypeInt::POS;
1052     fields[KlassNode]   = TypeInstPtr::NOTNULL;
1053     fields[InitialTest] = TypeInt::BOOL;
1054     fields[ALength]     = t;  // length (can be a bad length)
1055     fields[ValidLengthTest] = TypeInt::BOOL;
1056     fields[InlineType] = Type::BOTTOM;
1057     fields[InitValue] = TypeInstPtr::NOTNULL;
1058     fields[RawInitValue] = TypeX_X;
1059 
1060     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1061 
1062     // create result type (range)
1063     fields = TypeTuple::fields(1);
1064     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1065 
1066     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1067 
1068     return TypeFunc::make(domain, range);
1069   }
1070 
1071   // Result of Escape Analysis
1072   bool _is_scalar_replaceable;
1073   bool _is_non_escaping;
 1074   // True when MemBar for new is redundant with MemBar at initializer exit
1075   bool _is_allocation_MemBar_redundant;
1076   bool _larval;
1077 
1078   virtual uint size_of() const; // Size is bigger
1079   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1080                Node *size, Node *klass_node, Node *initial_test,
1081                InlineTypeNode* inline_type_node = nullptr);
1082   // Expansion modifies the JVMState, so we need to deep clone it
1083   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1084   virtual int Opcode() const;
1085   virtual uint ideal_reg() const { return Op_RegP; }
1086   virtual bool        guaranteed_safepoint()  { return false; }
1087 
1088   // allocations do not modify their arguments
1089   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1090 
1091   // Pattern-match a possible usage of AllocateNode.
1092   // Return null if no allocation is recognized.
1093   // The operand is the pointer produced by the (possible) allocation.
1094   // It must be a projection of the Allocate or its subsequent CastPP.
1095   // (Note:  This function is defined in file graphKit.cpp, near
1096   // GraphKit::new_instance/new_array, whose output it recognizes.)
1097   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1098   static AllocateNode* Ideal_allocation(Node* ptr);
1099 
1100   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1101   // an offset, which is reported back to the caller.

1126 
 1127   // Return true if the allocation doesn't escape the thread, i.e. its escape
 1128   // state needs to be noEscape or ArgEscape. InitializeNode._does_not_escape
 1129   // is true when its allocation's escape state is noEscape or
 1130   // ArgEscape. In case the allocation's InitializeNode is null, check the
 1131   // AllocateNode._is_non_escaping flag.
 1132   // AllocateNode._is_non_escaping is true when its escape state is
 1133   // noEscape.
1134   bool does_not_escape_thread() {
1135     InitializeNode* init = nullptr;
1136     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1137   }
1138 
 1139   // If the object doesn't escape in its <init> method and there is a memory
 1140   // barrier inserted at the exit of the <init>, the memory barrier for the new
 1141   // is not necessary. Invoke this method when the MemBar at the exit of the
 1142   // initializer post-dominates the allocation node.
1143   void compute_MemBar_redundancy(ciMethod* initializer);
1144   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1145 
1146   Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
1147 
1148   NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1149 };
1150 
1151 //------------------------------AllocateArray---------------------------------
1152 //
1153 // High-level array allocation
1154 //
1155 class AllocateArrayNode : public AllocateNode {
1156 public:
1157   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1158                     Node* initial_test, Node* count_val, Node* valid_length_test,
1159                     Node* init_value, Node* raw_init_value)
1160     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1161                    initial_test)
1162   {
1163     init_class_id(Class_AllocateArray);
1164     set_req(AllocateNode::ALength, count_val);
1165     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1166     init_req(AllocateNode::InitValue, init_value);
1167     init_req(AllocateNode::RawInitValue, raw_init_value);
1168   }
1169   virtual uint size_of() const { return sizeof(*this); }
1170   virtual int Opcode() const;
1171 
 1172   // Dig the length operand out of an array allocation site.
1173   Node* Ideal_length() {
1174     return in(AllocateNode::ALength);
1175   }
1176 
 1177   // Dig the length operand out of an array allocation site and narrow the
 1178   // type with a CastII, if necessary
1179   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1180 
1181   // Pattern-match a possible usage of AllocateArrayNode.
1182   // Return null if no allocation is recognized.
1183   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1184     AllocateNode* allo = Ideal_allocation(ptr);
1185     return (allo == nullptr || !allo->is_AllocateArray())
1186            ? nullptr : allo->as_AllocateArray();
1187   }
1188 };
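
AllocateArrayNode keeps the same length-recovery helpers as before. A short sketch of the usual lookup, assuming `ptr` is the oop produced by a possible array allocation and `ary_type`/`phase` come from the calling optimization:

    // Sketch only: recover the array allocation and its length operand.
    AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr);
    if (alloc != nullptr) {
      Node* len = alloc->Ideal_length();            // raw ALength input
      // make_ideal_length(ary_type, phase) would additionally narrow the
      // type with a CastII if necessary
    }
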
1189 