
src/hotspot/share/opto/callnode.hpp


  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match( const ProjNode *proj, const Matcher *m );
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93   static  const TypeTuple *osr_domain();
  94 };
  95 
  96 
  97 //------------------------------ParmNode---------------------------------------
  98 // Incoming parameters
  99 class ParmNode : public ProjNode {
 100   static const char * const names[TypeFunc::Parms+1];
 101 public:
 102   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 103     init_class_id(Class_Parm);
 104   }
 105   virtual int Opcode() const;
 106   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 107   virtual uint ideal_reg() const;
 108 #ifndef PRODUCT
 109   virtual void dump_spec(outputStream *st) const;
 110   virtual void dump_compact_spec(outputStream *st) const;
 111 #endif
 112 };
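
A ParmNode is just a projection of the StartNode selected by a constant index. A minimal sketch of how an incoming argument and the method's control are materialized this way (the PhaseGVN gvn, the StartNode start, and the index i are assumed to be in scope; this illustrates the API above, it is not an excerpt from the change):

    // i-th incoming Java argument, as a projection off Start
    Node* parm = gvn.transform(new ParmNode(start, TypeFunc::Parms + i));
    // the control projection is a ParmNode as well (see is_CFG() above)
    Node* ctl  = gvn.transform(new ParmNode(start, TypeFunc::Control));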
 113 

 649     assert(jvms != nullptr, "JVMS reference is null.");
 650     return jvms->scloff() + _merge_pointer_idx + 1;
 651   }
 652 
 653   // Assumes that "this" is an argument to a safepoint node "s", and that
 654   // "new_call" is being created to correspond to "s".  But the difference
 655   // between the start index of the jvmstates of "new_call" and "s" is
 656   // "jvms_adj".  Produce and return a SafePointScalarMergeNode that
 657   // corresponds appropriately to "this" in "new_call".  Assumes that
 658   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 659   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 660   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 661 
 662 #ifndef PRODUCT
 663   virtual void              dump_spec(outputStream *st) const;
 664 #endif
 665 };
 666 
 667 // Simple container for the outgoing projections of a call.  Useful
 668 // for serious surgery on calls.
 669 class CallProjections : public StackObj {
 670 public:
 671   Node* fallthrough_proj;
 672   Node* fallthrough_catchproj;
 673   Node* fallthrough_memproj;
 674   Node* fallthrough_ioproj;
 675   Node* catchall_catchproj;
 676   Node* catchall_memproj;
 677   Node* catchall_ioproj;
 678   Node* resproj;
 679   Node* exobj;
 680 };
 681 
 682 class CallGenerator;
 683 
 684 //------------------------------CallNode---------------------------------------
 685 // Call nodes now subsume the function of debug nodes at callsites, so they
 686 // contain the functionality of a full scope chain of debug nodes.
 687 class CallNode : public SafePointNode {
 688 
 689 protected:
 690   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 691 
 692 public:
 693   const TypeFunc* _tf;          // Function type
 694   address         _entry_point; // Address of method being called
 695   float           _cnt;         // Estimate of number of times called
 696   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 697   const char*     _name;        // Printable name, if _method is null
 698 
 699   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 700     : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
 701       _tf(tf),
 702       _entry_point(addr),
 703       _cnt(COUNT_UNKNOWN),
 704       _generator(nullptr),
 705       _name(nullptr)
 706   {
 707     init_class_id(Class_Call);
 708   }
 709 
 710   const TypeFunc* tf()         const { return _tf; }
 711   address  entry_point()       const { return _entry_point; }
 712   float    cnt()               const { return _cnt; }
 713   CallGenerator* generator()   const { return _generator; }
 714 
 715   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 716   void set_entry_point(address p)       { _entry_point = p; }
 717   void set_cnt(float c)                 { _cnt = c; }
 718   void set_generator(CallGenerator* cg) { _generator = cg; }
 719 
 720   virtual const Type* bottom_type() const;
 721   virtual const Type* Value(PhaseGVN* phase) const;
 722   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 723   virtual Node* Identity(PhaseGVN* phase) { return this; }
 724   virtual bool        cmp(const Node &n) const;
 725   virtual uint        size_of() const = 0;
 726   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 727   virtual Node*       match(const ProjNode* proj, const Matcher* m);
 728   virtual uint        ideal_reg() const { return NotAMachineReg; }
 729   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 730   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 731   virtual bool        guaranteed_safepoint()  { return true; }
 732   // For macro nodes, the JVMState gets modified during expansion. If calls
 733   // use MachConstantBase, it gets modified during matching. So when cloning
 734   // the node the JVMState must be deep cloned. Default is to shallow clone.
 735   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 736 
 737   // Returns true if the call may modify n
 738   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 739   // Does this node have a use of n other than in debug information?
 740   bool                has_non_debug_use(Node* n);
 741   // Returns the unique CheckCastPP of a call
 742   // or the result projection if there are several CheckCastPPs
 743   // or null if there is none.
 744   Node* result_cast();
 745   // Does this node return a pointer?
 746   bool returns_pointer() const {
 747     const TypeTuple* r = tf()->range();
 748     return (r->cnt() > TypeFunc::Parms &&
 749             r->field_at(TypeFunc::Parms)->isa_ptr());
 750   }
 751 
 752   // Collect all the interesting edges from a call for use in
 753   // replacing the call by something else.  Used by macro expansion
 754   // and the late inlining support.
 755   void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true) const;
 756 
 757   virtual uint match_edge(uint idx) const;
 758 
 759   bool is_call_to_arraycopystub() const;
 760 
 761   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 762 
 763 #ifndef PRODUCT
 764   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 765   virtual void        dump_spec(outputStream* st) const;
 766 #endif
 767 };
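
For context, this is how the pre-change API is used: the caller owns a CallProjections on the stack and extract_projections() fills it in. A minimal usage sketch (the CallNode* call and the surrounding macro-expansion context are assumed):

    CallProjections projs;
    call->extract_projections(&projs, true /* separate_io_proj */);
    if (projs.fallthrough_memproj != nullptr) {
      // rewire memory users of the call to the replacement subgraph
    }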
 768 
 769 
 770 //------------------------------CallJavaNode-----------------------------------
 771 // Make a static or dynamic subroutine call node using Java calling
 772 // convention.  (The "Java" calling convention is the compiler's calling
 773 // convention, as opposed to the interpreter's or that of native C.)
 774 class CallJavaNode : public CallNode {
 775 protected:

 801   void  set_arg_escape(bool f)             { _arg_escape = f; }
 802   bool  arg_escape() const                 { return _arg_escape; }
 803   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 804   void register_for_late_inline();
 805 
 806   DEBUG_ONLY( bool validate_symbolic_info() const; )
 807 
 808 #ifndef PRODUCT
 809   virtual void  dump_spec(outputStream *st) const;
 810   virtual void  dump_compact_spec(outputStream *st) const;
 811 #endif
 812 };
 813 
 814 //------------------------------CallStaticJavaNode-----------------------------
 815 // Make a direct subroutine call using Java calling convention (for static
 816 // calls and optimized virtual calls, plus calls to wrappers for run-time
 817 // routines); generates static stub.
 818 class CallStaticJavaNode : public CallJavaNode {
 819   virtual bool cmp( const Node &n ) const;
 820   virtual uint size_of() const; // Size is bigger
 821 public:
 822   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 823     : CallJavaNode(tf, addr, method) {
 824     init_class_id(Class_CallStaticJava);
 825     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 826       init_flags(Flag_is_macro);
 827       C->add_macro_node(this);
 828     }
 829   }
 830   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 831     : CallJavaNode(tf, addr, nullptr) {
 832     init_class_id(Class_CallStaticJava);
 833     // This node calls a runtime stub, which often has narrow memory effects.
 834     _adr_type = adr_type;
 835     _name = name;
 836   }
 837 
 838   // If this is an uncommon trap, return the request code, else zero.
 839   int uncommon_trap_request() const;
 840   bool is_uncommon_trap() const;
 841   static int extract_uncommon_trap_request(const Node* call);
 842 
 843   bool is_boxing_method() const {
 844     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 845   }
 846   // Late inlining modifies the JVMState, so we need to deep clone it
 847   // when the call node is cloned (because it is a macro node).
 848   virtual bool needs_deep_clone_jvms(Compile* C) {

 946                    const TypePtr* adr_type)
 947       : CallLeafNode(tf, addr, name, adr_type) {
 948     init_class_id(Class_CallLeafPure);
 949   }
 950   int Opcode() const override;
 951   Node* Ideal(PhaseGVN* phase, bool can_reshape) override;
 952 };
 953 
 954 //------------------------------CallLeafNoFPNode-------------------------------
 955 // A CallLeafNode that does not use floating point, or uses it in the
 956 // same manner as the generated code
 957 class CallLeafNoFPNode : public CallLeafNode {
 958 public:
 959   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 960                    const TypePtr* adr_type)
 961     : CallLeafNode(tf, addr, name, adr_type)
 962   {
 963     init_class_id(Class_CallLeafNoFP);
 964   }
 965   virtual int   Opcode() const;
 966 };
 967 
 968 //------------------------------CallLeafVectorNode-------------------------------
 969 // A CallLeafNode, but called with the vector calling convention instead.
 970 class CallLeafVectorNode : public CallLeafNode {
 971 private:
 972   uint _num_bits;
 973 protected:
 974   virtual bool cmp( const Node &n ) const;
 975   virtual uint size_of() const; // Size is bigger
 976 public:
 977   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 978                    const TypePtr* adr_type, uint num_bits)
 979     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 980   {
 981   }
 982   virtual int   Opcode() const;
 983   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 984 };
 985 

 988 // High-level memory allocation
 989 //
 990 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 991 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 992 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 993 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 994 //  order to differentiate the uses of the projection on the normal control path from
 995 //  those on the exception return path.
 996 //
 997 class AllocateNode : public CallNode {
 998 public:
 999   enum {
1000     // Output:
1001     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
1002     // Inputs:
1003     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
1004     KlassNode,                        // type (maybe dynamic) of the obj.
1005     InitialTest,                      // slow-path test (may be constant)
1006     ALength,                          // array length (or TOP if none)
1007     ValidLengthTest,
1008     ParmLimit
1009   };
1010 
1011   static const TypeFunc* alloc_type(const Type* t) {
1012     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1013     fields[AllocSize]   = TypeInt::POS;
1014     fields[KlassNode]   = TypeInstPtr::NOTNULL;
1015     fields[InitialTest] = TypeInt::BOOL;
1016     fields[ALength]     = t;  // length (can be a bad length)
1017     fields[ValidLengthTest] = TypeInt::BOOL;
1018 
1019     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1020 
1021     // create result type (range)
1022     fields = TypeTuple::fields(1);
1023     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1024 
1025     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1026 
1027     return TypeFunc::make(domain, range);
1028   }
1029 
1030   // Result of Escape Analysis
1031   bool _is_scalar_replaceable;
1032   bool _is_non_escaping;
1033   // True when the MemBar for new is redundant with the MemBar at initializer exit
1034   bool _is_allocation_MemBar_redundant;
1035 
1036   virtual uint size_of() const; // Size is bigger
1037   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1038                Node *size, Node *klass_node, Node *initial_test);
1039   // Expansion modifies the JVMState, so we need to deep clone it
1040   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1041   virtual int Opcode() const;
1042   virtual uint ideal_reg() const { return Op_RegP; }
1043   virtual bool        guaranteed_safepoint()  { return false; }
1044 
1045   // allocations do not modify their arguments
1046   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1047 
1048   // Pattern-match a possible usage of AllocateNode.
1049   // Return null if no allocation is recognized.
1050   // The operand is the pointer produced by the (possible) allocation.
1051   // It must be a projection of the Allocate or its subsequent CastPP.
1052   // (Note:  This function is defined in file graphKit.cpp, near
1053   // GraphKit::new_instance/new_array, whose output it recognizes.)
1054   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1055   static AllocateNode* Ideal_allocation(Node* ptr);
1056 
1057   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1058   // an offset, which is reported back to the caller.

1083 
1084   // Return true if the allocation doesn't escape the thread, i.e. its escape
1085   // state needs to be noEscape or ArgEscape. InitializeNode._does_not_escape
1086   // is true when its allocation's escape state is noEscape or
1087   // ArgEscape. In case the allocation's InitializeNode is null, check the
1088   // AllocateNode._is_non_escaping flag.
1089   // AllocateNode._is_non_escaping is true when its escape state is
1090   // noEscape.
1091   bool does_not_escape_thread() {
1092     InitializeNode* init = nullptr;
1093     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1094   }
1095 
1096   // If the object doesn't escape in its <init> method and there is a memory
1097   // barrier inserted at the exit of its <init>, the memory barrier for the new
1098   // is not necessary. Invoke this method when the MemBar at the exit of the
1099   // initializer post-dominates the allocation node.
1100   void compute_MemBar_redundancy(ciMethod* initializer);
1101   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1102 
1103   Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
1104 
1105   NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1106 };
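
A sketch of the pattern-matching entry point declared above: given the pointer an allocation (possibly) produced, recover the AllocateNode and consult the escape-analysis flags (ptr is assumed to be in scope; null means no allocation was recognized):

    AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr);
    if (alloc != nullptr && alloc->_is_scalar_replaceable) {
      // this allocation is a candidate for scalar replacement
    }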
1107 
1108 //------------------------------AllocateArray---------------------------------
1109 //
1110 // High-level array allocation
1111 //
1112 class AllocateArrayNode : public AllocateNode {
1113 public:
1114   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1115                     Node* initial_test, Node* count_val, Node* valid_length_test)
1116     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1117                    initial_test)
1118   {
1119     init_class_id(Class_AllocateArray);
1120     set_req(AllocateNode::ALength,        count_val);
1121     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1122   }
1123   virtual int Opcode() const;
1124 
1125   // Dig the length operand out of an array allocation site.
1126   Node* Ideal_length() {
1127     return in(AllocateNode::ALength);
1128   }
1129 
1130   // Dig the length operand out of an array allocation site and narrow the
1131   // type with a CastII, if necessary
1132   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1133 
1134   // Pattern-match a possible usage of AllocateArrayNode.
1135   // Return null if no allocation is recognized.
1136   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1137     AllocateNode* allo = Ideal_allocation(ptr);
1138     return (allo == nullptr || !allo->is_AllocateArray())
1139            ? nullptr : allo->as_AllocateArray();
1140   }
1141 };
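
Putting the two helpers above together, a hedged sketch of digging out and narrowing an array length (ptr, ary_type, and the PhaseValues* phase are assumed to be in scope):

    AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr);
    if (alloc != nullptr) {
      Node* raw_len = alloc->Ideal_length();                     // the ALength edge, as-is
      Node* len     = alloc->make_ideal_length(ary_type, phase); // CastII-narrowed if needed
    }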
1142 

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93 };
  94 
  95 
  96 //------------------------------ParmNode---------------------------------------
  97 // Incoming parameters
  98 class ParmNode : public ProjNode {
  99   static const char * const names[TypeFunc::Parms+1];
 100 public:
 101   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 102     init_class_id(Class_Parm);
 103   }
 104   virtual int Opcode() const;
 105   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 106   virtual uint ideal_reg() const;
 107 #ifndef PRODUCT
 108   virtual void dump_spec(outputStream *st) const;
 109   virtual void dump_compact_spec(outputStream *st) const;
 110 #endif
 111 };
 112 

 648     assert(jvms != nullptr, "JVMS reference is null.");
 649     return jvms->scloff() + _merge_pointer_idx + 1;
 650   }
 651 
 652   // Assumes that "this" is an argument to a safepoint node "s", and that
 653   // "new_call" is being created to correspond to "s".  But the difference
 654   // between the start index of the jvmstates of "new_call" and "s" is
 655   // "jvms_adj".  Produce and return a SafePointScalarMergeNode that
 656   // corresponds appropriately to "this" in "new_call".  Assumes that
 657   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 658   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 659   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 660 
 661 #ifndef PRODUCT
 662   virtual void              dump_spec(outputStream *st) const;
 663 #endif
 664 };
 665 
 666 // Simple container for the outgoing projections of a call.  Useful
 667 // for serious surgery on calls.
 668 class CallProjections {
 669 public:
 670   Node* fallthrough_proj;
 671   Node* fallthrough_catchproj;
 672   Node* fallthrough_memproj;
 673   Node* fallthrough_ioproj;
 674   Node* catchall_catchproj;
 675   Node* catchall_memproj;
 676   Node* catchall_ioproj;
 677   Node* exobj;
 678   uint nb_resproj;
 679   Node* resproj[1]; // at least one projection
 680 
 681   CallProjections(uint nbres) {
 682     fallthrough_proj      = nullptr;
 683     fallthrough_catchproj = nullptr;
 684     fallthrough_memproj   = nullptr;
 685     fallthrough_ioproj    = nullptr;
 686     catchall_catchproj    = nullptr;
 687     catchall_memproj      = nullptr;
 688     catchall_ioproj       = nullptr;
 689     exobj                 = nullptr;
 690     nb_resproj            = nbres;
 691     resproj[0]            = nullptr;
 692     for (uint i = 1; i < nb_resproj; i++) {
 693       resproj[i]          = nullptr;
 694     }
 695   }
 696 
 697 };
 698 
 699 class CallGenerator;
 700 
 701 //------------------------------CallNode---------------------------------------
 702 // Call nodes now subsume the function of debug nodes at callsites, so they
 703 // contain the functionality of a full scope chain of debug nodes.
 704 class CallNode : public SafePointNode {
 705 
 706 protected:
 707   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 708 
 709 public:
 710   const TypeFunc* _tf;          // Function type
 711   address         _entry_point; // Address of method being called
 712   float           _cnt;         // Estimate of number of times called
 713   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 714   const char*     _name;        // Printable name, if _method is null
 715 
 716   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 717     : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
 718       _tf(tf),
 719       _entry_point(addr),
 720       _cnt(COUNT_UNKNOWN),
 721       _generator(nullptr),
 722       _name(nullptr)
 723   {
 724     init_class_id(Class_Call);
 725   }
 726 
 727   const TypeFunc* tf()         const { return _tf; }
 728   address  entry_point()       const { return _entry_point; }
 729   float    cnt()               const { return _cnt; }
 730   CallGenerator* generator()   const { return _generator; }
 731 
 732   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 733   void set_entry_point(address p)       { _entry_point = p; }
 734   void set_cnt(float c)                 { _cnt = c; }
 735   void set_generator(CallGenerator* cg) { _generator = cg; }
 736 
 737   virtual const Type* bottom_type() const;
 738   virtual const Type* Value(PhaseGVN* phase) const;
 739   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 740   virtual Node* Identity(PhaseGVN* phase) { return this; }
 741   virtual bool        cmp(const Node &n) const;
 742   virtual uint        size_of() const = 0;
 743   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 744   virtual Node*       match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
 745   virtual uint        ideal_reg() const { return NotAMachineReg; }
 746   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 747   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 748   virtual bool        guaranteed_safepoint()  { return true; }
 749   // For macro nodes, the JVMState gets modified during expansion. If calls
 750   // use MachConstantBase, it gets modified during matching. So when cloning
 751   // the node the JVMState must be deep cloned. Default is to shallow clone.
 752   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 753 
 754   // Returns true if the call may modify n
 755   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 756   // Does this node have a use of n other than in debug information?
 757   bool                has_non_debug_use(Node* n);
 758   bool                has_debug_use(Node* n);
 759   // Returns the unique CheckCastPP of a call
 760   // or the result projection if there are several CheckCastPPs
 761   // or null if there is none.
 762   Node* result_cast();
 763   // Does this node return a pointer?
 764   bool returns_pointer() const {
 765     const TypeTuple* r = tf()->range_sig();
 766     return (!tf()->returns_inline_type_as_fields() &&
 767             r->cnt() > TypeFunc::Parms &&
 768             r->field_at(TypeFunc::Parms)->isa_ptr());
 769   }
 770 
 771   // Collect all the interesting edges from a call for use in
 772   // replacing the call by something else.  Used by macro expansion
 773   // and the late inlining support.
 774   CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true) const;
 775 
 776   virtual uint match_edge(uint idx) const;
 777 
 778   bool is_call_to_arraycopystub() const;
 779 
 780   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 781 
 782 #ifndef PRODUCT
 783   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 784   virtual void        dump_spec(outputStream* st) const;
 785 #endif
 786 };
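
The reworked API above is the notable change on this page: extract_projections() now allocates and returns the variable-sized container itself, so a call that returns an inline type as multiple fields can expose nb_resproj result projections instead of the single resproj field of the old struct. A minimal usage sketch (the CallNode* call is assumed):

    CallProjections* projs = call->extract_projections(true /* separate_io_proj */);
    for (uint i = 0; i < projs->nb_resproj; i++) {
      Node* res = projs->resproj[i];
      if (res != nullptr) {
        // one result projection of the call; several when an inline
        // type is returned as fields
      }
    }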
 787 
 788 
 789 //------------------------------CallJavaNode-----------------------------------
 790 // Make a static or dynamic subroutine call node using Java calling
 791 // convention.  (The "Java" calling convention is the compiler's calling
 792 // convention, as opposed to the interpreter's or that of native C.)
 793 class CallJavaNode : public CallNode {
 794 protected:

 820   void  set_arg_escape(bool f)             { _arg_escape = f; }
 821   bool  arg_escape() const                 { return _arg_escape; }
 822   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 823   void register_for_late_inline();
 824 
 825   DEBUG_ONLY( bool validate_symbolic_info() const; )
 826 
 827 #ifndef PRODUCT
 828   virtual void  dump_spec(outputStream *st) const;
 829   virtual void  dump_compact_spec(outputStream *st) const;
 830 #endif
 831 };
 832 
 833 //------------------------------CallStaticJavaNode-----------------------------
 834 // Make a direct subroutine call using Java calling convention (for static
 835 // calls and optimized virtual calls, plus calls to wrappers for run-time
 836 // routines); generates static stub.
 837 class CallStaticJavaNode : public CallJavaNode {
 838   virtual bool cmp( const Node &n ) const;
 839   virtual uint size_of() const; // Size is bigger
 840 
 841   bool remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg);
 842 
 843 public:
 844   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 845     : CallJavaNode(tf, addr, method) {
 846     init_class_id(Class_CallStaticJava);
 847     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 848       init_flags(Flag_is_macro);
 849       C->add_macro_node(this);
 850     }
 851     const TypeTuple *r = tf->range_sig();
 852     if (InlineTypeReturnedAsFields &&
 853         method != nullptr &&
 854         method->is_method_handle_intrinsic() &&
 855         r->cnt() > TypeFunc::Parms &&
 856         r->field_at(TypeFunc::Parms)->isa_oopptr() &&
 857         r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
 858       // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
 859       init_flags(Flag_is_macro);
 860       C->add_macro_node(this);
 861     }
 862   }
 863   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 864     : CallJavaNode(tf, addr, nullptr) {
 865     init_class_id(Class_CallStaticJava);
 866     // This node calls a runtime stub, which often has narrow memory effects.
 867     _adr_type = adr_type;
 868     _name = name;
 869   }
 870 
 871   // If this is an uncommon trap, return the request code, else zero.
 872   int uncommon_trap_request() const;
 873   bool is_uncommon_trap() const;
 874   static int extract_uncommon_trap_request(const Node* call);
 875 
 876   bool is_boxing_method() const {
 877     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 878   }
 879   // Late inlining modifies the JVMState, so we need to deep clone it
 880   // when the call node is cloned (because it is a macro node).
 881   virtual bool needs_deep_clone_jvms(Compile* C) {

 979                    const TypePtr* adr_type)
 980       : CallLeafNode(tf, addr, name, adr_type) {
 981     init_class_id(Class_CallLeafPure);
 982   }
 983   int Opcode() const override;
 984   Node* Ideal(PhaseGVN* phase, bool can_reshape) override;
 985 };
 986 
 987 //------------------------------CallLeafNoFPNode-------------------------------
 988 // A CallLeafNode that does not use floating point, or uses it in the
 989 // same manner as the generated code
 990 class CallLeafNoFPNode : public CallLeafNode {
 991 public:
 992   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 993                    const TypePtr* adr_type)
 994     : CallLeafNode(tf, addr, name, adr_type)
 995   {
 996     init_class_id(Class_CallLeafNoFP);
 997   }
 998   virtual int   Opcode() const;
 999   virtual uint match_edge(uint idx) const;
1000 };
1001 
1002 //------------------------------CallLeafVectorNode-------------------------------
1003 // A CallLeafNode, but called with the vector calling convention instead.
1004 class CallLeafVectorNode : public CallLeafNode {
1005 private:
1006   uint _num_bits;
1007 protected:
1008   virtual bool cmp( const Node &n ) const;
1009   virtual uint size_of() const; // Size is bigger
1010 public:
1011   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
1012                    const TypePtr* adr_type, uint num_bits)
1013     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
1014   {
1015   }
1016   virtual int   Opcode() const;
1017   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
1018 };
1019 

1022 // High-level memory allocation
1023 //
1024 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
1025 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
1026 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
1027 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
1028 //  order to differentiate the uses of the projection on the normal control path from
1029 //  those on the exception return path.
1030 //
1031 class AllocateNode : public CallNode {
1032 public:
1033   enum {
1034     // Output:
1035     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
1036     // Inputs:
1037     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
1038     KlassNode,                        // type (maybe dynamic) of the obj.
1039     InitialTest,                      // slow-path test (may be constant)
1040     ALength,                          // array length (or TOP if none)
1041     ValidLengthTest,
1042     InlineType,                       // InlineTypeNode if this is an inline type allocation
1043     InitValue,                        // Init value for null-free inline type arrays
1044     RawInitValue,                     // Same as above but as raw machine word
1045     ParmLimit
1046   };
1047 
1048   static const TypeFunc* alloc_type(const Type* t) {
1049     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1050     fields[AllocSize]   = TypeInt::POS;
1051     fields[KlassNode]   = TypeInstPtr::NOTNULL;
1052     fields[InitialTest] = TypeInt::BOOL;
1053     fields[ALength]     = t;  // length (can be a bad length)
1054     fields[ValidLengthTest] = TypeInt::BOOL;
1055     fields[InlineType] = Type::BOTTOM;
1056     fields[InitValue] = TypeInstPtr::NOTNULL;
1057     fields[RawInitValue] = TypeX_X;
1058 
1059     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1060 
1061     // create result type (range)
1062     fields = TypeTuple::fields(1);
1063     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1064 
1065     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1066 
1067     return TypeFunc::make(domain, range);
1068   }
1069 
1070   // Result of Escape Analysis
1071   bool _is_scalar_replaceable;
1072   bool _is_non_escaping;
1073   // True when the MemBar for new is redundant with the MemBar at initializer exit
1074   bool _is_allocation_MemBar_redundant;
1075   bool _larval;
1076 
1077   virtual uint size_of() const; // Size is bigger
1078   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1079                Node *size, Node *klass_node, Node *initial_test,
1080                InlineTypeNode* inline_type_node = nullptr);
1081   // Expansion modifies the JVMState, so we need to deep clone it
1082   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1083   virtual int Opcode() const;
1084   virtual uint ideal_reg() const { return Op_RegP; }
1085   virtual bool        guaranteed_safepoint()  { return false; }
1086 
1087   // allocations do not modify their arguments
1088   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1089 
1090   // Pattern-match a possible usage of AllocateNode.
1091   // Return null if no allocation is recognized.
1092   // The operand is the pointer produced by the (possible) allocation.
1093   // It must be a projection of the Allocate or its subsequent CastPP.
1094   // (Note:  This function is defined in file graphKit.cpp, near
1095   // GraphKit::new_instance/new_array, whose output it recognizes.)
1096   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1097   static AllocateNode* Ideal_allocation(Node* ptr);
1098 
1099   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1100   // an offset, which is reported back to the caller.

1125 
1126   // Return true if the allocation doesn't escape the thread, i.e. its escape
1127   // state needs to be noEscape or ArgEscape. InitializeNode._does_not_escape
1128   // is true when its allocation's escape state is noEscape or
1129   // ArgEscape. In case the allocation's InitializeNode is null, check the
1130   // AllocateNode._is_non_escaping flag.
1131   // AllocateNode._is_non_escaping is true when its escape state is
1132   // noEscape.
1133   bool does_not_escape_thread() {
1134     InitializeNode* init = nullptr;
1135     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1136   }
1137 
1138   // If the object doesn't escape in its <init> method and there is a memory
1139   // barrier inserted at the exit of its <init>, the memory barrier for the new
1140   // is not necessary. Invoke this method when the MemBar at the exit of the
1141   // initializer post-dominates the allocation node.
1142   void compute_MemBar_redundancy(ciMethod* initializer);
1143   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1144 
1145   Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
1146 
1147   NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1148 };
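
As the class comment above notes, an Allocate has two i_o (and two memory) projections, told apart by the projection's _is_io_use flag. A sketch of separating them with the standard use-iteration idiom (alloc is assumed to be in scope):

    for (DUIterator_Fast imax, i = alloc->fast_outs(imax); i < imax; i++) {
      Node* use = alloc->fast_out(i);
      if (use->is_Proj() && use->as_Proj()->_con == TypeFunc::I_O) {
        bool on_exception_path = use->as_Proj()->_is_io_use;
        // route the normal-path and exception-path i_o uses separately
      }
    }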
1149 
1150 //------------------------------AllocateArray---------------------------------
1151 //
1152 // High-level array allocation
1153 //
1154 class AllocateArrayNode : public AllocateNode {
1155 public:
1156   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1157                     Node* initial_test, Node* count_val, Node* valid_length_test,
1158                     Node* init_value, Node* raw_init_value)
1159     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1160                    initial_test)
1161   {
1162     init_class_id(Class_AllocateArray);
1163     set_req(AllocateNode::ALength, count_val);
1164     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1165     init_req(AllocateNode::InitValue, init_value);
1166     init_req(AllocateNode::RawInitValue, raw_init_value);
1167   }
1168   virtual uint size_of() const { return sizeof(*this); }
1169   virtual int Opcode() const;
1170 
1171   // Dig the length operand out of an array allocation site.
1172   Node* Ideal_length() {
1173     return in(AllocateNode::ALength);
1174   }
1175 
1176   // Dig the length operand out of an array allocation site and narrow the
1177   // type with a CastII, if necessary
1178   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1179 
1180   // Pattern-match a possible usage of AllocateArrayNode.
1181   // Return null if no allocation is recognized.
1182   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1183     AllocateNode* allo = Ideal_allocation(ptr);
1184     return (allo == nullptr || !allo->is_AllocateArray())
1185            ? nullptr : allo->as_AllocateArray();
1186   }
1187 };
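
A short sketch of reading the Valhalla-specific edges this change adds, per the enum in AllocateNode (alloc is assumed to be an AllocateArrayNode for a null-free inline-type array):

    Node* init_value     = alloc->in(AllocateNode::InitValue);    // init value as an oop
    Node* raw_init_value = alloc->in(AllocateNode::RawInitValue); // same value as a raw machine word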
1188 