src/hotspot/share/opto/callnode.hpp

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match( const ProjNode *proj, const Matcher *m );
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93   static  const TypeTuple *osr_domain();
  94 };
  95 
  96 
  97 //------------------------------ParmNode---------------------------------------
  98 // Incoming parameters
  99 class ParmNode : public ProjNode {
 100   static const char * const names[TypeFunc::Parms+1];
 101 public:
 102   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 103     init_class_id(Class_Parm);
 104   }
 105   virtual int Opcode() const;
 106   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 107   virtual uint ideal_reg() const;
 108 #ifndef PRODUCT
 109   virtual void dump_spec(outputStream *st) const;
 110   virtual void dump_compact_spec(outputStream *st) const;
 111 #endif
 112 };
 113 

 634     assert(jvms != nullptr, "JVMS reference is null.");
 635     return jvms->scloff() + _merge_pointer_idx + 1;
 636   }
 637 
 638   // Assumes that "this" is an argument to a safepoint node "s", and that
 639   // "new_call" is being created to correspond to "s".  But the difference
 640   // between the start index of the jvmstates of "new_call" and "s" is
 641   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 642   // corresponds appropriately to "this" in "new_call".  Assumes that
 643   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 644   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 645   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 646 
 647 #ifndef PRODUCT
 648   virtual void              dump_spec(outputStream *st) const;
 649 #endif
 650 };
 651 
 652 // Simple container for the outgoing projections of a call.  Useful
 653 // for serious surgery on calls.
 654 class CallProjections : public StackObj {
 655 public:
 656   Node* fallthrough_proj;
 657   Node* fallthrough_catchproj;
 658   Node* fallthrough_memproj;
 659   Node* fallthrough_ioproj;
 660   Node* catchall_catchproj;
 661   Node* catchall_memproj;
 662   Node* catchall_ioproj;
 663   Node* resproj;
 664   Node* exobj;
 665 };
 666 
 667 class CallGenerator;
 668 
 669 //------------------------------CallNode---------------------------------------
 670 // Call nodes now subsume the function of debug nodes at callsites, so they
 671 // contain the functionality of a full scope chain of debug nodes.
 672 class CallNode : public SafePointNode {
 673 
 674 protected:
 675   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 676 
 677 public:
 678   const TypeFunc* _tf;          // Function type
 679   address         _entry_point; // Address of method being called
 680   float           _cnt;         // Estimate of number of times called
 681   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 682   const char*     _name;        // Printable name, if _method is null
 683 
 684   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 685     : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
 686       _tf(tf),
 687       _entry_point(addr),
 688       _cnt(COUNT_UNKNOWN),
 689       _generator(nullptr),
 690       _name(nullptr)
 691   {
 692     init_class_id(Class_Call);
 693   }
 694 
 695   const TypeFunc* tf()         const { return _tf; }
 696   address  entry_point()       const { return _entry_point; }
 697   float    cnt()               const { return _cnt; }
 698   CallGenerator* generator()   const { return _generator; }
 699 
 700   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 701   void set_entry_point(address p)       { _entry_point = p; }
 702   void set_cnt(float c)                 { _cnt = c; }
 703   void set_generator(CallGenerator* cg) { _generator = cg; }
 704 
 705   virtual const Type* bottom_type() const;
 706   virtual const Type* Value(PhaseGVN* phase) const;
 707   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 708   virtual Node* Identity(PhaseGVN* phase) { return this; }
 709   virtual bool        cmp(const Node &n) const;
 710   virtual uint        size_of() const = 0;
 711   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 712   virtual Node*       match(const ProjNode* proj, const Matcher* m);
 713   virtual uint        ideal_reg() const { return NotAMachineReg; }
 714   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 715   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 716   virtual bool        guaranteed_safepoint()  { return true; }
 717   // For macro nodes, the JVMState gets modified during expansion. If calls
 718   // use MachConstantBase, it gets modified during matching. So when cloning
 719   // the node the JVMState must be deep cloned. Default is to shallow clone.
 720   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 721 
 722   // Returns true if the call may modify n
 723   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 724   // Does this node have a use of n other than in debug information?
 725   bool                has_non_debug_use(Node* n);
 726   // Returns the unique CheckCastPP of a call
 727   // or the result projection if there are several CheckCastPPs
 728   // or null if there is none.
 729   Node* result_cast();
 730   // Does this node return a pointer?
 731   bool returns_pointer() const {
 732     const TypeTuple* r = tf()->range();
 733     return (r->cnt() > TypeFunc::Parms &&
 734             r->field_at(TypeFunc::Parms)->isa_ptr());
 735   }
 736 
 737   // Collect all the interesting edges from a call for use in
 738   // replacing the call by something else.  Used by macro expansion
 739   // and the late inlining support.
 740   void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);
 741 
 742   virtual uint match_edge(uint idx) const;
 743 
 744   bool is_call_to_arraycopystub() const;
 745 
 746   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 747 
 748 #ifndef PRODUCT
 749   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 750   virtual void        dump_spec(outputStream* st) const;
 751 #endif
 752 };
 753 
 754 
 755 //------------------------------CallJavaNode-----------------------------------
 756 // Make a static or dynamic subroutine call node using Java calling
 757 // convention.  (The "Java" calling convention is the compiler's calling
 758 // convention, as opposed to the interpreter's or that of native C.)
 759 class CallJavaNode : public CallNode {
 760 protected:

 790   void  set_arg_escape(bool f)             { _arg_escape = f; }
 791   bool  arg_escape() const                 { return _arg_escape; }
 792   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 793   void register_for_late_inline();
 794 
 795   DEBUG_ONLY( bool validate_symbolic_info() const; )
 796 
 797 #ifndef PRODUCT
 798   virtual void  dump_spec(outputStream *st) const;
 799   virtual void  dump_compact_spec(outputStream *st) const;
 800 #endif
 801 };
 802 
 803 //------------------------------CallStaticJavaNode-----------------------------
 804 // Make a direct subroutine call using Java calling convention (for static
 805 // calls and optimized virtual calls, plus calls to wrappers for run-time
 806 // routines); generates static stub.
 807 class CallStaticJavaNode : public CallJavaNode {
 808   virtual bool cmp( const Node &n ) const;
 809   virtual uint size_of() const; // Size is bigger
 810 public:
 811   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 812     : CallJavaNode(tf, addr, method) {
 813     init_class_id(Class_CallStaticJava);
 814     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 815       init_flags(Flag_is_macro);
 816       C->add_macro_node(this);
 817     }
 818   }
 819   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 820     : CallJavaNode(tf, addr, nullptr) {
 821     init_class_id(Class_CallStaticJava);
 822     // This node calls a runtime stub, which often has narrow memory effects.
 823     _adr_type = adr_type;
 824     _name = name;
 825   }
 826 
 827   // If this is an uncommon trap, return the request code, else zero.
 828   int uncommon_trap_request() const;
 829   bool is_uncommon_trap() const;
 830   static int extract_uncommon_trap_request(const Node* call);
 831 
 832   bool is_boxing_method() const {
 833     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 834   }
 835   // Late inlining modifies the JVMState, so we need to deep clone it
 836   // when the call node is cloned (because it is a macro node).
 837   virtual bool needs_deep_clone_jvms(Compile* C) {

 908   }
 909   virtual int   Opcode() const;
 910   virtual bool        guaranteed_safepoint()  { return false; }
 911 #ifndef PRODUCT
 912   virtual void  dump_spec(outputStream *st) const;
 913 #endif
 914 };
 915 
 916 //------------------------------CallLeafNoFPNode-------------------------------
 917 // CallLeafNode, not using floating point or using it in the same manner as
 918 // the generated code
 919 class CallLeafNoFPNode : public CallLeafNode {
 920 public:
 921   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 922                    const TypePtr* adr_type)
 923     : CallLeafNode(tf, addr, name, adr_type)
 924   {
 925     init_class_id(Class_CallLeafNoFP);
 926   }
 927   virtual int   Opcode() const;
 928 };
 929 
 930 //------------------------------CallLeafVectorNode-------------------------------
 931 // CallLeafNode but calling with vector calling convention instead.
 932 class CallLeafVectorNode : public CallLeafNode {
 933 private:
 934   uint _num_bits;
 935 protected:
 936   virtual bool cmp( const Node &n ) const;
 937   virtual uint size_of() const; // Size is bigger
 938 public:
 939   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 940                    const TypePtr* adr_type, uint num_bits)
 941     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 942   {
 943   }
 944   virtual int   Opcode() const;
 945   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 946 };
 947 

 950 // High-level memory allocation
 951 //
 952 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 953 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 954 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 955 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 956 //  order to differentiate the uses of the projection on the normal control path from
 957 //  those on the exception return path.
 958 //
 959 class AllocateNode : public CallNode {
 960 public:
 961   enum {
 962     // Output:
 963     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
 964     // Inputs:
 965     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
 966     KlassNode,                        // type (maybe dynamic) of the obj.
 967     InitialTest,                      // slow-path test (may be constant)
 968     ALength,                          // array length (or TOP if none)
 969     ValidLengthTest,
 970     ParmLimit
 971   };
 972 
 973   static const TypeFunc* alloc_type(const Type* t) {
 974     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
 975     fields[AllocSize]   = TypeInt::POS;
 976     fields[KlassNode]   = TypeInstPtr::NOTNULL;
 977     fields[InitialTest] = TypeInt::BOOL;
 978     fields[ALength]     = t;  // length (can be a bad length)
 979     fields[ValidLengthTest] = TypeInt::BOOL;
 980 
 981     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
 982 
 983     // create result type (range)
 984     fields = TypeTuple::fields(1);
 985     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
 986 
 987     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
 988 
 989     return TypeFunc::make(domain, range);
 990   }
 991 
 992   // Result of Escape Analysis
 993   bool _is_scalar_replaceable;
 994   bool _is_non_escaping;
 995   // True when MemBar for new is redundant with MemBar at initializer exit
 996   bool _is_allocation_MemBar_redundant;
 997 
 998   virtual uint size_of() const; // Size is bigger
 999   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1000                Node *size, Node *klass_node, Node *initial_test);
1001   // Expansion modifies the JVMState, so we need to deep clone it
1002   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1003   virtual int Opcode() const;
1004   virtual uint ideal_reg() const { return Op_RegP; }
1005   virtual bool        guaranteed_safepoint()  { return false; }
1006 
1007   // allocations do not modify their arguments
1008   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1009 
1010   // Pattern-match a possible usage of AllocateNode.
1011   // Return null if no allocation is recognized.
1012   // The operand is the pointer produced by the (possible) allocation.
1013   // It must be a projection of the Allocate or its subsequent CastPP.
1014   // (Note:  This function is defined in file graphKit.cpp, near
1015   // GraphKit::new_instance/new_array, whose output it recognizes.)
1016   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1017   static AllocateNode* Ideal_allocation(Node* ptr);
1018 
1019   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1020   // an offset, which is reported back to the caller.

1045 
1046   // Return true if the allocation doesn't escape the thread; its escape
1047   // state needs to be noEscape or ArgEscape. InitializeNode._does_not_escape
1048   // is true when its allocation's escape state is noEscape or
1049   // ArgEscape. In case the allocation's InitializeNode is null, check
1050   // the AllocateNode._is_non_escaping flag.
1051   // AllocateNode._is_non_escaping is true when its escape state is
1052   // noEscape.
1053   bool does_not_escape_thread() {
1054     InitializeNode* init = nullptr;
1055     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1056   }
1057 
1058   // If the object doesn't escape in its <init> method and there is a memory
1059   // barrier inserted at the exit of its <init>, the memory barrier for new is
1060   // not necessary. Invoke this method when the MemBar at the exit of the
1061   // initializer post-dominates the allocation node.
1062   void compute_MemBar_redundancy(ciMethod* initializer);
1063   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1064 
1065   Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
1066 
1067   NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1068 };
1069 
1070 //------------------------------AllocateArray---------------------------------
1071 //
1072 // High-level array allocation
1073 //
1074 class AllocateArrayNode : public AllocateNode {
1075 public:
1076   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1077                     Node* initial_test, Node* count_val, Node* valid_length_test)
1078     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1079                    initial_test)
1080   {
1081     init_class_id(Class_AllocateArray);
1082     set_req(AllocateNode::ALength,        count_val);
1083     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1084   }
1085   virtual int Opcode() const;
1086 
1087   // Dig the length operand out of an array allocation site.
1088   Node* Ideal_length() {
1089     return in(AllocateNode::ALength);
1090   }
1091 
1092   // Dig the length operand out of an array allocation site and narrow the
1093   // type with a CastII, if necessary
1094   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1095 
1096   // Pattern-match a possible usage of AllocateArrayNode.
1097   // Return null if no allocation is recognized.
1098   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1099     AllocateNode* allo = Ideal_allocation(ptr);
1100     return (allo == nullptr || !allo->is_AllocateArray())
1101            ? nullptr : allo->as_AllocateArray();
1102   }
1103 };
1104 

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93 };
  94 
  95 
  96 //------------------------------ParmNode---------------------------------------
  97 // Incoming parameters
  98 class ParmNode : public ProjNode {
  99   static const char * const names[TypeFunc::Parms+1];
 100 public:
 101   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 102     init_class_id(Class_Parm);
 103   }
 104   virtual int Opcode() const;
 105   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 106   virtual uint ideal_reg() const;
 107 #ifndef PRODUCT
 108   virtual void dump_spec(outputStream *st) const;
 109   virtual void dump_compact_spec(outputStream *st) const;
 110 #endif
 111 };
 112 

 633     assert(jvms != nullptr, "JVMS reference is null.");
 634     return jvms->scloff() + _merge_pointer_idx + 1;
 635   }
 636 
 637   // Assumes that "this" is an argument to a safepoint node "s", and that
 638   // "new_call" is being created to correspond to "s".  But the difference
 639   // between the start index of the jvmstates of "new_call" and "s" is
 640   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 641   // corresponds appropriately to "this" in "new_call".  Assumes that
 642   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 643   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 644   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 645 
 646 #ifndef PRODUCT
 647   virtual void              dump_spec(outputStream *st) const;
 648 #endif
 649 };
 650 
 651 // Simple container for the outgoing projections of a call.  Useful
 652 // for serious surgery on calls.
 653 class CallProjections {
 654 public:
 655   Node* fallthrough_proj;
 656   Node* fallthrough_catchproj;
 657   Node* fallthrough_memproj;
 658   Node* fallthrough_ioproj;
 659   Node* catchall_catchproj;
 660   Node* catchall_memproj;
 661   Node* catchall_ioproj;
 662   Node* exobj;
 663   uint nb_resproj;
 664   Node* resproj[1]; // at least one projection
 665 
 666   CallProjections(uint nbres) {
 667     fallthrough_proj      = nullptr;
 668     fallthrough_catchproj = nullptr;
 669     fallthrough_memproj   = nullptr;
 670     fallthrough_ioproj    = nullptr;
 671     catchall_catchproj    = nullptr;
 672     catchall_memproj      = nullptr;
 673     catchall_ioproj       = nullptr;
 674     exobj                 = nullptr;
 675     nb_resproj            = nbres;
 676     resproj[0]            = nullptr;
 677     for (uint i = 1; i < nb_resproj; i++) {
 678       resproj[i]          = nullptr;
 679     }
 680   }
 681 
 682 };
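
[Editor's note] The trailing resproj[1] member above is the classic C-style
flexible-array idiom: the object is over-allocated so that resproj can hold
nb_resproj entries. A minimal standalone sketch of the idiom, outside HotSpot
(the Projections type and make_projections helper are hypothetical, for
illustration only; n is assumed to be at least 1, matching "at least one
projection" above):

  #include <cstddef>
  #include <new>

  struct Projections {
    unsigned nb_res;
    void*    res[1];  // at least one slot; the allocator may provide more

    explicit Projections(unsigned n) : nb_res(n) {
      res[0] = nullptr;
      for (unsigned i = 1; i < n; i++) {
        res[i] = nullptr;   // slots beyond the first live in the over-allocation
      }
    }
  };

  // Over-allocate so the trailing array holds 'n' slots, then placement-new.
  Projections* make_projections(unsigned n) {
    size_t extra = (n > 1 ? (n - 1) * sizeof(void*) : 0);
    void* raw = ::operator new(sizeof(Projections) + extra);
    return new (raw) Projections(n);
  }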
 683 
 684 class CallGenerator;
 685 
 686 //------------------------------CallNode---------------------------------------
 687 // Call nodes now subsume the function of debug nodes at callsites, so they
 688 // contain the functionality of a full scope chain of debug nodes.
 689 class CallNode : public SafePointNode {
 690 
 691 protected:
 692   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 693 
 694 public:
 695   const TypeFunc* _tf;          // Function type
 696   address         _entry_point; // Address of method being called
 697   float           _cnt;         // Estimate of number of times called
 698   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 699   const char*     _name;        // Printable name, if _method is null
 700 
 701   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 702     : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
 703       _tf(tf),
 704       _entry_point(addr),
 705       _cnt(COUNT_UNKNOWN),
 706       _generator(nullptr),
 707       _name(nullptr)
 708   {
 709     init_class_id(Class_Call);
 710   }
 711 
 712   const TypeFunc* tf()         const { return _tf; }
 713   address  entry_point()       const { return _entry_point; }
 714   float    cnt()               const { return _cnt; }
 715   CallGenerator* generator()   const { return _generator; }
 716 
 717   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 718   void set_entry_point(address p)       { _entry_point = p; }
 719   void set_cnt(float c)                 { _cnt = c; }
 720   void set_generator(CallGenerator* cg) { _generator = cg; }
 721 
 722   virtual const Type* bottom_type() const;
 723   virtual const Type* Value(PhaseGVN* phase) const;
 724   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 725   virtual Node* Identity(PhaseGVN* phase) { return this; }
 726   virtual bool        cmp(const Node &n) const;
 727   virtual uint        size_of() const = 0;
 728   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 729   virtual Node*       match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
 730   virtual uint        ideal_reg() const { return NotAMachineReg; }
 731   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 732   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 733   virtual bool        guaranteed_safepoint()  { return true; }
 734   // For macro nodes, the JVMState gets modified during expansion. If calls
 735   // use MachConstantBase, it gets modified during matching. So when cloning
 736   // the node the JVMState must be deep cloned. Default is to shallow clone.
 737   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 738 
 739   // Returns true if the call may modify n
 740   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 741   // Does this node have a use of n other than in debug information?
 742   bool                has_non_debug_use(Node* n);
 743   bool                has_debug_use(Node* n);
 744   // Returns the unique CheckCastPP of a call
 745   // or the result projection if there are several CheckCastPPs
 746   // or null if there is none.
 747   Node* result_cast();
 748   // Does this node return a pointer?
 749   bool returns_pointer() const {
 750     const TypeTuple* r = tf()->range_sig();
 751     return (!tf()->returns_inline_type_as_fields() &&
 752             r->cnt() > TypeFunc::Parms &&
 753             r->field_at(TypeFunc::Parms)->isa_ptr());
 754   }
 755 
 756   // Collect all the interesting edges from a call for use in
 757   // replacing the call by something else.  Used by macro expansion
 758   // and the late inlining support.
 759   CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);
 760 
 761   virtual uint match_edge(uint idx) const;
 762 
 763   bool is_call_to_arraycopystub() const;
 764 
 765   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 766 
 767 #ifndef PRODUCT
 768   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 769   virtual void        dump_spec(outputStream* st) const;
 770 #endif
 771 };
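
[Editor's note] For illustration, a hedged sketch of how call-surgery code might
consume extract_projections (the local variable names are hypothetical; the
fields are those of CallProjections above, and a HotSpot compilation context is
assumed):

  // Sketch: inspect a call's outgoing projections.
  CallProjections* projs = call->extract_projections(/*separate_io_proj=*/true);
  Node* ctl = projs->fallthrough_catchproj;   // control on the normal path
  Node* mem = projs->fallthrough_memproj;     // memory on the normal path
  for (uint i = 0; i < projs->nb_resproj; i++) {
    Node* res = projs->resproj[i];            // may be null if a result is unused
    if (res != nullptr) {
      // rewire or replace the result projection here
    }
  }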
 772 
 773 
 774 //------------------------------CallJavaNode-----------------------------------
 775 // Make a static or dynamic subroutine call node using Java calling
 776 // convention.  (The "Java" calling convention is the compiler's calling
 777 // convention, as opposed to the interpreter's or that of native C.)
 778 class CallJavaNode : public CallNode {
 779 protected:

 809   void  set_arg_escape(bool f)             { _arg_escape = f; }
 810   bool  arg_escape() const                 { return _arg_escape; }
 811   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 812   void register_for_late_inline();
 813 
 814   DEBUG_ONLY( bool validate_symbolic_info() const; )
 815 
 816 #ifndef PRODUCT
 817   virtual void  dump_spec(outputStream *st) const;
 818   virtual void  dump_compact_spec(outputStream *st) const;
 819 #endif
 820 };
 821 
 822 //------------------------------CallStaticJavaNode-----------------------------
 823 // Make a direct subroutine call using Java calling convention (for static
 824 // calls and optimized virtual calls, plus calls to wrappers for run-time
 825 // routines); generates static stub.
 826 class CallStaticJavaNode : public CallJavaNode {
 827   virtual bool cmp( const Node &n ) const;
 828   virtual uint size_of() const; // Size is bigger
 829 
 830   bool remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg);
 831 
 832 public:
 833   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 834     : CallJavaNode(tf, addr, method) {
 835     init_class_id(Class_CallStaticJava);
 836     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 837       init_flags(Flag_is_macro);
 838       C->add_macro_node(this);
 839     }
 840     const TypeTuple *r = tf->range_sig();
 841     if (InlineTypeReturnedAsFields &&
 842         method != nullptr &&
 843         method->is_method_handle_intrinsic() &&
 844         r->cnt() > TypeFunc::Parms &&
 845         r->field_at(TypeFunc::Parms)->isa_oopptr() &&
 846         r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
 847       // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
 848       init_flags(Flag_is_macro);
 849       C->add_macro_node(this);
 850     }
 851   }
 852   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 853     : CallJavaNode(tf, addr, nullptr) {
 854     init_class_id(Class_CallStaticJava);
 855     // This node calls a runtime stub, which often has narrow memory effects.
 856     _adr_type = adr_type;
 857     _name = name;
 858   }
 859 
 860   // If this is an uncommon trap, return the request code, else zero.
 861   int uncommon_trap_request() const;
 862   bool is_uncommon_trap() const;
 863   static int extract_uncommon_trap_request(const Node* call);
 864 
 865   bool is_boxing_method() const {
 866     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 867   }
 868   // Late inlining modifies the JVMState, so we need to deep clone it
 869   // when the call node is cloned (because it is a macro node).
 870   virtual bool needs_deep_clone_jvms(Compile* C) {

 941   }
 942   virtual int   Opcode() const;
 943   virtual bool        guaranteed_safepoint()  { return false; }
 944 #ifndef PRODUCT
 945   virtual void  dump_spec(outputStream *st) const;
 946 #endif
 947 };
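
[Editor's note] Stepping back to CallStaticJavaNode's uncommon-trap accessors
above: a hedged sketch of decoding a trap request (assumes HotSpot's
Deoptimization interface from deoptimization.hpp; not code from this change):

  if (call->is_uncommon_trap()) {
    int request = call->uncommon_trap_request();
    Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(request);
    Deoptimization::DeoptAction action = Deoptimization::trap_request_action(request);
    // reason/action encode how the runtime reacts when this trap is hit
  }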
 948 
 949 //------------------------------CallLeafNoFPNode-------------------------------
 950 // CallLeafNode, not using floating point or using it in the same manner as
 951 // the generated code
 952 class CallLeafNoFPNode : public CallLeafNode {
 953 public:
 954   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 955                    const TypePtr* adr_type)
 956     : CallLeafNode(tf, addr, name, adr_type)
 957   {
 958     init_class_id(Class_CallLeafNoFP);
 959   }
 960   virtual int   Opcode() const;
 961   virtual uint match_edge(uint idx) const;
 962 };
 963 
 964 //------------------------------CallLeafVectorNode-------------------------------
 965 // CallLeafNode but calling with vector calling convention instead.
 966 class CallLeafVectorNode : public CallLeafNode {
 967 private:
 968   uint _num_bits;
 969 protected:
 970   virtual bool cmp( const Node &n ) const;
 971   virtual uint size_of() const; // Size is bigger
 972 public:
 973   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 974                    const TypePtr* adr_type, uint num_bits)
 975     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 976   {
 977   }
 978   virtual int   Opcode() const;
 979   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 980 };
 981 

 984 // High-level memory allocation
 985 //
 986 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 987 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 988 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 989 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 990 //  order to differentiate the uses of the projection on the normal control path from
 991 //  those on the exception return path.
 992 //
 993 class AllocateNode : public CallNode {
 994 public:
 995   enum {
 996     // Output:
 997     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
 998     // Inputs:
 999     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
1000     KlassNode,                        // type (maybe dynamic) of the obj.
1001     InitialTest,                      // slow-path test (may be constant)
1002     ALength,                          // array length (or TOP if none)
1003     ValidLengthTest,
1004     InlineType,                       // InlineTypeNode if this is an inline type allocation
1005     InitValue,                        // Init value for null-free inline type arrays
1006     RawInitValue,                     // Same as above but as raw machine word
1007     ParmLimit
1008   };
1009 
1010   static const TypeFunc* alloc_type(const Type* t) {
1011     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1012     fields[AllocSize]   = TypeInt::POS;
1013     fields[KlassNode]   = TypeInstPtr::NOTNULL;
1014     fields[InitialTest] = TypeInt::BOOL;
1015     fields[ALength]     = t;  // length (can be a bad length)
1016     fields[ValidLengthTest] = TypeInt::BOOL;
1017     fields[InlineType] = Type::BOTTOM;
1018     fields[InitValue] = TypeInstPtr::NOTNULL;
1019     fields[RawInitValue] = TypeX_X;
1020 
1021     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1022 
1023     // create result type (range)
1024     fields = TypeTuple::fields(1);
1025     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1026 
1027     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1028 
1029     return TypeFunc::make(domain, range);
1030   }
1031 
1032   // Result of Escape Analysis
1033   bool _is_scalar_replaceable;
1034   bool _is_non_escaping;
1035   // True when MemBar for new is redundant with MemBar at initializer exit
1036   bool _is_allocation_MemBar_redundant;
1037   bool _larval;
1038 
1039   virtual uint size_of() const; // Size is bigger
1040   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1041                Node *size, Node *klass_node, Node *initial_test,
1042                InlineTypeNode* inline_type_node = nullptr);
1043   // Expansion modifies the JVMState, so we need to deep clone it
1044   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1045   virtual int Opcode() const;
1046   virtual uint ideal_reg() const { return Op_RegP; }
1047   virtual bool        guaranteed_safepoint()  { return false; }
1048 
1049   // allocations do not modify their arguments
1050   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1051 
1052   // Pattern-match a possible usage of AllocateNode.
1053   // Return null if no allocation is recognized.
1054   // The operand is the pointer produced by the (possible) allocation.
1055   // It must be a projection of the Allocate or its subsequent CastPP.
1056   // (Note:  This function is defined in file graphKit.cpp, near
1057   // GraphKit::new_instance/new_array, whose output it recognizes.)
1058   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1059   static AllocateNode* Ideal_allocation(Node* ptr);
1060 
1061   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1062   // an offset, which is reported back to the caller.

1087 
1088   // Return true if the allocation doesn't escape the thread; its escape
1089   // state needs to be noEscape or ArgEscape. InitializeNode._does_not_escape
1090   // is true when its allocation's escape state is noEscape or
1091   // ArgEscape. In case the allocation's InitializeNode is null, check
1092   // the AllocateNode._is_non_escaping flag.
1093   // AllocateNode._is_non_escaping is true when its escape state is
1094   // noEscape.
1095   bool does_not_escape_thread() {
1096     InitializeNode* init = nullptr;
1097     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1098   }
1099 
1100   // If the object doesn't escape in its <init> method and there is a memory
1101   // barrier inserted at the exit of its <init>, the memory barrier for new is
1102   // not necessary. Invoke this method when the MemBar at the exit of the
1103   // initializer post-dominates the allocation node.
1104   void compute_MemBar_redundancy(ciMethod* initializer);
1105   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1106 
1107   Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
1108 
1109   NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1110 };
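
[Editor's note] A hedged usage sketch of the pattern-match and escape-state
queries declared above (ptr stands for any pointer-valued node; only
declarations from this header are used):

  // Sketch: recognize the allocation feeding 'ptr' and query its escape state.
  AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr);
  if (alloc != nullptr && alloc->does_not_escape_thread()) {
    // The object stays thread-local; the MemBar for new may already be redundant.
    bool redundant = alloc->is_allocation_MemBar_redundant();
  }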
1111 
1112 //------------------------------AllocateArray---------------------------------
1113 //
1114 // High-level array allocation
1115 //
1116 class AllocateArrayNode : public AllocateNode {
1117 public:
1118   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1119                     Node* initial_test, Node* count_val, Node* valid_length_test,
1120                     Node* init_value, Node* raw_init_value)
1121     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1122                    initial_test)
1123   {
1124     init_class_id(Class_AllocateArray);
1125     set_req(AllocateNode::ALength, count_val);
1126     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1127     init_req(AllocateNode::InitValue, init_value);
1128     init_req(AllocateNode::RawInitValue, raw_init_value);
1129   }
1130   virtual uint size_of() const { return sizeof(*this); }
1131   virtual int Opcode() const;
1132 
1133   // Dig the length operand out of an array allocation site.
1134   Node* Ideal_length() {
1135     return in(AllocateNode::ALength);
1136   }
1137 
1138   // Dig the length operand out of an array allocation site and narrow the
1139   // type with a CastII, if necessary
1140   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1141 
1142   // Pattern-match a possible usage of AllocateArrayNode.
1143   // Return null if no allocation is recognized.
1144   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1145     AllocateNode* allo = Ideal_allocation(ptr);
1146     return (allo == nullptr || !allo->is_AllocateArray())
1147            ? nullptr : allo->as_AllocateArray();
1148   }
1149 };
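
[Editor's note] And the array counterpart (a sketch; ary_type is assumed to be
the allocation's TypeOopPtr and phase the current PhaseValues):

  // Sketch: recover and narrow the length of an array allocation.
  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr);
  if (alloc != nullptr) {
    Node* raw_len = alloc->Ideal_length();                      // raw ALength input
    Node* len     = alloc->make_ideal_length(ary_type, phase);  // CastII-narrowed
  }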
1150 