src/share/vm/opto/callnode.hpp

Old version:

 605   }
 606 
 607   // Returns true if the call may modify memory of type t_oop
 608   virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
 609   // Does this node have a use of n other than in debug information?
 610   bool                has_non_debug_use(Node *n);
 611   // Returns the unique CheckCastPP of a call,
 612   // or the result projection if there are several CheckCastPPs,
 613   // or NULL if there is none.
 614   Node *result_cast();
 615   // Does this node return a pointer?
 616   bool returns_pointer() const {
 617     const TypeTuple *r = tf()->range();
 618     return (r->cnt() > TypeFunc::Parms &&
 619             r->field_at(TypeFunc::Parms)->isa_ptr());
 620   }
 621 
 622   // Collect all the interesting edges from a call for use in
 623   // replacing the call by something else.  Used by macro expansion
 624   // and the late inlining support.
 625   void extract_projections(CallProjections* projs, bool separate_io_proj);
 626 
 627   virtual uint match_edge(uint idx) const;
 628 
 629 #ifndef PRODUCT
 630   virtual void        dump_req(outputStream *st = tty) const;
 631   virtual void        dump_spec(outputStream *st) const;
 632 #endif
 633 };
 634 
 635 
 636 //------------------------------CallJavaNode-----------------------------------
 637 // Make a static or dynamic subroutine call node using Java calling
 638 // convention.  (The "Java" calling convention is the compiler's calling
 639 // convention, as opposed to the interpreter's or that of native C.)
 640 class CallJavaNode : public CallNode {
 641   friend class VMStructs;
 642 protected:
 643   virtual uint cmp( const Node &n ) const;
 644   virtual uint size_of() const; // Size is bigger
 645 


 743 };
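
For orientation, a hedged sketch (names invented) of distinguishing the calling
conventions via the class-id queries these classes register with init_class_id():

    // Sketch only: classify a call node by its calling convention.
    static bool uses_java_convention_sketch(Node* n) {
      return n->is_CallJava();     // compiled Java convention
    }
    static bool uses_c_convention_sketch(Node* n) {
      return n->is_CallRuntime();  // direct call into C++ runtime code
    }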
 744 
 745 //------------------------------CallRuntimeNode--------------------------------
 746 // Make a direct subroutine call node into compiled C++ code.
 747 class CallRuntimeNode : public CallNode {
 748   virtual uint cmp( const Node &n ) const;
 749   virtual uint size_of() const; // Size is bigger
 750 public:
 751   CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
 752                   const TypePtr* adr_type)
 753     : CallNode(tf, addr, adr_type),
 754       _name(name)
 755   {
 756     init_class_id(Class_CallRuntime);
 757   }
 758 
 759   const char *_name;            // Printable name (runtime calls have no _method)
 760   virtual int   Opcode() const;
 761   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 762 


 763 #ifndef PRODUCT
 764   virtual void  dump_spec(outputStream *st) const;
 765 #endif
 766 };
 767 
 768 //------------------------------CallLeafNode-----------------------------------
 769 // Make a direct subroutine call node into compiled C++ code, without
 770 // safepoints
 771 class CallLeafNode : public CallRuntimeNode {
 772 public:
 773   CallLeafNode(const TypeFunc* tf, address addr, const char* name,
 774                const TypePtr* adr_type)
 775     : CallRuntimeNode(tf, addr, name, adr_type)
 776   {
 777     init_class_id(Class_CallLeaf);
 778   }
 779   virtual int   Opcode() const;
 780   virtual bool        guaranteed_safepoint()  { return false; }





 781 #ifndef PRODUCT
 782   virtual void  dump_spec(outputStream *st) const;
 783 #endif
 784 };
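
The guaranteed_safepoint() override is what makes a leaf call "leaf": no JVM
state needs to survive across it. A minimal sketch (helper name invented):

    // Sketch only: returns false for CallLeafNode and its subclasses, so
    // no safepoint is emitted at the call site.
    static bool keeps_jvm_state_sketch(CallNode* call) {
      return call->guaranteed_safepoint();
    }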
 785 
 786 //------------------------------CallLeafNoFPNode-------------------------------
 787   // A CallLeafNode that does not use floating point, or uses it in the
 788   // same manner as the generated code
 789 class CallLeafNoFPNode : public CallLeafNode {
 790 public:
 791   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 792                    const TypePtr* adr_type)
 793     : CallLeafNode(tf, addr, name, adr_type)
 794   {
 795   }
 796   virtual int   Opcode() const;
 797 };
 798 
 799 
 800 //------------------------------Allocate---------------------------------------


 877   // Dig the klass operand out of a (possible) allocation site.
 878   static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
 879     AllocateNode* allo = Ideal_allocation(ptr, phase);
 880     return (allo == NULL) ? NULL : allo->in(KlassNode);
 881   }
 882 
 883   // Conservatively small estimate of offset of first non-header byte.
 884   int minimum_header_size() {
 885     return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
 886                                 instanceOopDesc::base_offset_in_bytes();
 887   }
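       // For example, with an 8-byte mark word and a compressed class
       // pointer an instance header is 12 bytes and T_BYTE array elements
       // start at offset 16 (platform-dependent; illustrative only).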
 888 
 889   // Return the corresponding initialization barrier (or null if none).
 890   // Walks out edges to find it...
 891   // (Note: Both InitializeNode::allocation and AllocateNode::initialization
 892   // are defined in graphKit.cpp, which sets up the bidirectional relation.)
 893   InitializeNode* initialization();
 894 
 895   // Convenience for initialization->maybe_set_complete(phase)
 896   bool maybe_set_complete(PhaseGVN* phase);














 897 };
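
A minimal sketch (helper invented) of following the bidirectional
allocation/initialization link described above:

    // Sketch only: true if the init barrier exists and has captured all
    // stores to the freshly allocated object.
    static bool init_complete_sketch(AllocateNode* alloc) {
      InitializeNode* init = alloc->initialization();
      return init != NULL && init->is_complete();
    }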
 898 
 899 //------------------------------AllocateArray---------------------------------
 900 //
 901 // High-level array allocation
 902 //
 903 class AllocateArrayNode : public AllocateNode {
 904 public:
 905   AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
 906                     Node* size, Node* klass_node, Node* initial_test,
 907                     Node* count_val
 908                     )
 909     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
 910                    initial_test)
 911   {
 912     init_class_id(Class_AllocateArray);
 913     set_req(AllocateNode::ALength,        count_val);
 914   }
 915   virtual int Opcode() const;
 916   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

New version:

 605   }
 606 
 607   // Returns true if the call may modify memory of type t_oop
 608   virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
 609   // Does this node have a use of n other than in debug information?
 610   bool                has_non_debug_use(Node *n);
 611   // Returns the unique CheckCastPP of a call,
 612   // or the result projection if there are several CheckCastPPs,
 613   // or NULL if there is none.
 614   Node *result_cast();
 615   // Does this node return a pointer?
 616   bool returns_pointer() const {
 617     const TypeTuple *r = tf()->range();
 618     return (r->cnt() > TypeFunc::Parms &&
 619             r->field_at(TypeFunc::Parms)->isa_ptr());
 620   }
 621 
 622   // Collect all the interesting edges from a call for use in
 623   // replacing the call by something else.  Used by macro expansion
 624   // and the late inlining support.
 625   void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);
 626 
 627   virtual uint match_edge(uint idx) const;
 628 
 629 #ifndef PRODUCT
 630   virtual void        dump_req(outputStream *st = tty) const;
 631   virtual void        dump_spec(outputStream *st) const;
 632 #endif
 633 };
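
The new do_asserts parameter defaults to true, so existing callers are
unaffected; passing false tolerates calls whose projection set is still
incomplete mid-transformation. A hedged sketch (names invented):

    // Sketch only: gather the projections without asserting completeness,
    // then retarget the result projection.
    static void retarget_result_sketch(CallNode* call, PhaseIterGVN* igvn,
                                       Node* new_result) {
      CallProjections projs;
      call->extract_projections(&projs, true  /* separate_io_proj */,
                                        false /* do_asserts */);
      if (projs.resproj != NULL) {
        igvn->replace_node(projs.resproj, new_result);
      }
    }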
 634 
 635 
 636 //------------------------------CallJavaNode-----------------------------------
 637 // Make a static or dynamic subroutine call node using Java calling
 638 // convention.  (The "Java" calling convention is the compiler's calling
 639 // convention, as opposed to the interpreter's or that of native C.)
 640 class CallJavaNode : public CallNode {
 641   friend class VMStructs;
 642 protected:
 643   virtual uint cmp( const Node &n ) const;
 644   virtual uint size_of() const; // Size is bigger
 645 


 743 };
 744 
 745 //------------------------------CallRuntimeNode--------------------------------
 746 // Make a direct subroutine call node into compiled C++ code.
 747 class CallRuntimeNode : public CallNode {
 748   virtual uint cmp( const Node &n ) const;
 749   virtual uint size_of() const; // Size is bigger
 750 public:
 751   CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
 752                   const TypePtr* adr_type)
 753     : CallNode(tf, addr, adr_type),
 754       _name(name)
 755   {
 756     init_class_id(Class_CallRuntime);
 757   }
 758 
 759   const char *_name;            // Printable name (runtime calls have no _method)
 760   virtual int   Opcode() const;
 761   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 762 
 763   bool is_call_to_arraycopystub() const;
 764 
 765 #ifndef PRODUCT
 766   virtual void  dump_spec(outputStream *st) const;
 767 #endif
 768 };
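
is_call_to_arraycopystub() is new in this change. A minimal sketch (helper
invented) of the filtering it enables, e.g. when a call's memory effects must
be derived from its arguments rather than its declared adr_type:

    // Sketch only: arraycopy stubs name their destination memory via
    // arguments, not via a fixed adr_type.
    static bool needs_argument_based_effects_sketch(Node* n) {
      return n->is_CallRuntime() &&
             n->as_CallRuntime()->is_call_to_arraycopystub();
    }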
 769 
 770 //------------------------------CallLeafNode-----------------------------------
 771 // Make a direct subroutine call node into compiled C++ code, without
 772 // safepoints
 773 class CallLeafNode : public CallRuntimeNode {
 774 public:
 775   CallLeafNode(const TypeFunc* tf, address addr, const char* name,
 776                const TypePtr* adr_type)
 777     : CallRuntimeNode(tf, addr, name, adr_type)
 778   {
 779     init_class_id(Class_CallLeaf);
 780   }
 781   virtual int   Opcode() const;
 782   virtual bool        guaranteed_safepoint()  { return false; }
 783   virtual bool is_g1_wb_pre_call() const { return entry_point() == CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre); }
 784   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 785 
 786   static bool has_only_g1_wb_pre_uses(Node* n);
 787 
 788 #ifndef PRODUCT
 789   virtual void  dump_spec(outputStream *st) const;
 790 #endif
 791 };
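
is_g1_wb_pre_call() and has_only_g1_wb_pre_uses() are also new; they identify
G1 SATB pre-barrier runtime calls. A hedged sketch (helper invented) of
treating such calls as ignorable uses of a load:

    // Sketch only: a load whose every use is a g1_wb_pre call feeds only
    // the SATB pre-barrier and has no real consumers.
    static bool feeds_only_pre_barrier_sketch(Node* load) {
      return CallLeafNode::has_only_g1_wb_pre_uses(load);
    }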
 792 
 793 //------------------------------CallLeafNoFPNode-------------------------------
 794   // A CallLeafNode that does not use floating point, or uses it in the
 795   // same manner as the generated code
 796 class CallLeafNoFPNode : public CallLeafNode {
 797 public:
 798   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 799                    const TypePtr* adr_type)
 800     : CallLeafNode(tf, addr, name, adr_type)
 801   {
 802   }
 803   virtual int   Opcode() const;
 804 };
 805 
 806 
 807 //------------------------------Allocate---------------------------------------


 884   // Dig the klass operand out of a (possible) allocation site.
 885   static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
 886     AllocateNode* allo = Ideal_allocation(ptr, phase);
 887     return (allo == NULL) ? NULL : allo->in(KlassNode);
 888   }
 889 
 890   // Conservatively small estimate of offset of first non-header byte.
 891   int minimum_header_size() {
 892     return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
 893                                 instanceOopDesc::base_offset_in_bytes();
 894   }
 895 
 896   // Return the corresponding initialization barrier (or null if none).
 897   // Walks out edges to find it...
 898   // (Note: Both InitializeNode::allocation and AllocateNode::initialization
 899   // are defined in graphKit.cpp, which sets up the bidirectional relation.)
 900   InitializeNode* initialization();
 901 
 902   // Convenience for initialization->maybe_set_complete(phase)
 903   bool maybe_set_complete(PhaseGVN* phase);
 904 
 905 #ifdef AARCH64
 906   // Return true if the allocation does not escape the thread, i.e. its
 907   // escape state is NoEscape or ArgEscape. InitializeNode._does_not_escape
 908   // is true when the allocation's escape state is NoEscape or
 909   // ArgEscape. If the allocation's InitializeNode is NULL, check the
 910   // AllocateNode._is_non_escaping flag instead.
 911   // AllocateNode._is_non_escaping is true when the escape state is
 912   // NoEscape.
 913   bool does_not_escape_thread() {
 914     InitializeNode* init = NULL;
 915     return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
 916   }
 917 #endif
 918 };
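
A minimal sketch (guarded like the declaration above; helper invented) of the
query this AArch64-specific addition enables, e.g. relaxing the barriers that
publish a new object:

    #ifdef AARCH64
    // Sketch only: publication barriers can be relaxed when the allocation
    // never becomes visible to another thread.
    static bool may_relax_publication_sketch(AllocateNode* alloc) {
      return alloc != NULL && alloc->does_not_escape_thread();
    }
    #endif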
 919 
 920 //------------------------------AllocateArray---------------------------------
 921 //
 922 // High-level array allocation
 923 //
 924 class AllocateArrayNode : public AllocateNode {
 925 public:
 926   AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
 927                     Node* size, Node* klass_node, Node* initial_test,
 928                     Node* count_val
 929                     )
 930     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
 931                    initial_test)
 932   {
 933     init_class_id(Class_AllocateArray);
 934     set_req(AllocateNode::ALength,        count_val);
 935   }
 936   virtual int Opcode() const;
 937   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

