src/hotspot/share/opto/callnode.hpp (old version)

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; }
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match( const ProjNode *proj, const Matcher *m );
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93   static  const TypeTuple *osr_domain();
  94 };
  95 
  96 
  97 //------------------------------ParmNode---------------------------------------
  98 // Incoming parameters
  99 class ParmNode : public ProjNode {
 100   static const char * const names[TypeFunc::Parms+1];
 101 public:
 102   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 103     init_class_id(Class_Parm);
 104   }
 105   virtual int Opcode() const;
 106   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 107   virtual uint ideal_reg() const;
 108 #ifndef PRODUCT
 109   virtual void dump_spec(outputStream *st) const;
 110   virtual void dump_compact_spec(outputStream *st) const;
 111 #endif
 112 };
 113 
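
For orientation: incoming values reach the graph as ParmNode projections hanging off the StartNode, one per slot of the domain tuple. A minimal sketch of the pattern (illustrative only; the real logic lives in the parser, and `gvn`/`map`/`start` here stand for the usual PhaseGVN, SafePointNode map, and StartNode):

    // Sketch: materialize each incoming parameter as a ParmNode projection
    // of the StartNode. Slots below TypeFunc::Parms hold the fixed
    // control/I_O/memory/frameptr/retaddr edges; real parameters follow.
    const TypeTuple* domain = start->_domain;
    for (uint i = TypeFunc::Parms; i < domain->cnt(); i++) {
      if (domain->field_at(i) == Type::HALF) continue; // high half of long/double
      Node* parm = gvn.transform(new ParmNode(start, i));
      map->init_req(i, parm);
    }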

 635     assert(jvms != nullptr, "JVMS reference is null.");
 636     return jvms->scloff() + _merge_pointer_idx + 1;
 637   }
 638 
 639   // Assumes that "this" is an argument to a safepoint node "s", and that
 640   // "new_call" is being created to correspond to "s".  But the difference
 641   // between the start index of the jvmstates of "new_call" and "s" is
 642   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 643   // corresponds appropriately to "this" in "new_call".  Assumes that
 644   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 645   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 646   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 647 
 648 #ifndef PRODUCT
 649   virtual void              dump_spec(outputStream *st) const;
 650 #endif
 651 };
 652 
 653 // Simple container for the outgoing projections of a call.  Useful
 654 // for serious surgery on calls.
 655 class CallProjections : public StackObj {
 656 public:
 657   Node* fallthrough_proj;
 658   Node* fallthrough_catchproj;
 659   Node* fallthrough_memproj;
 660   Node* fallthrough_ioproj;
 661   Node* catchall_catchproj;
 662   Node* catchall_memproj;
 663   Node* catchall_ioproj;
 664   Node* resproj;
 665   Node* exobj;
 666 };
 667 
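
Usage note: in this (old) shape the container is a StackObj with exactly one result projection, so callers allocate it on the stack and let extract_projections fill it in. A hedged sketch of the typical surgery done during macro expansion (`igvn`, `new_mem`, and `new_result` are placeholders for whatever replaces the call):

    // Sketch (old API): gather the call's projections, then reroute them
    // to the replacement nodes.
    CallProjections projs;
    call->extract_projections(&projs, true /* separate_io_proj */);
    if (projs.fallthrough_memproj != nullptr) {
      igvn.replace_node(projs.fallthrough_memproj, new_mem);
    }
    if (projs.resproj != nullptr) {
      igvn.replace_node(projs.resproj, new_result);
    }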
 668 class CallGenerator;
 669 
 670 //------------------------------CallNode---------------------------------------
 671 // Call nodes now subsume the function of debug nodes at callsites, so they
 672 // contain the functionality of a full scope chain of debug nodes.
 673 class CallNode : public SafePointNode {
 674 
 675 protected:
 676   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 677 
 678 public:
 679   const TypeFunc* _tf;          // Function type
 680   address         _entry_point; // Address of method being called
 681   float           _cnt;         // Estimate of number of times called
 682   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 683   const char*     _name;        // Printable name, if _method is null
 684 
 685   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 686     : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
 687       _tf(tf),
 688       _entry_point(addr),
 689       _cnt(COUNT_UNKNOWN),
 690       _generator(nullptr),
 691       _name(nullptr)
 692   {
 693     init_class_id(Class_Call);
 694   }
 695 
 696   const TypeFunc* tf()         const { return _tf; }
 697   address  entry_point()       const { return _entry_point; }
 698   float    cnt()               const { return _cnt; }
 699   CallGenerator* generator()   const { return _generator; }
 700 
 701   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 702   void set_entry_point(address p)       { _entry_point = p; }
 703   void set_cnt(float c)                 { _cnt = c; }
 704   void set_generator(CallGenerator* cg) { _generator = cg; }
 705 
 706   virtual const Type* bottom_type() const;
 707   virtual const Type* Value(PhaseGVN* phase) const;
 708   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 709   virtual Node* Identity(PhaseGVN* phase) { return this; }
 710   virtual bool        cmp(const Node &n) const;
 711   virtual uint        size_of() const = 0;
 712   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 713   virtual Node*       match(const ProjNode* proj, const Matcher* m);
 714   virtual uint        ideal_reg() const { return NotAMachineReg; }
 715   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 716   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 717   virtual bool        guaranteed_safepoint()  { return true; }
 718   // For macro nodes, the JVMState gets modified during expansion. If calls
 719   // use MachConstantBase, it gets modified during matching. So when cloning
 720   // the node the JVMState must be deep cloned. Default is to shallow clone.
 721   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 722 
 723   // Returns true if the call may modify n
 724   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 725   // Does this node have a use of n other than in debug information?
 726   bool                has_non_debug_use(Node* n);
 727   // Returns the unique CheckCastPP of a call,
 728   // or the result projection if there are several CheckCastPPs,
 729   // or null if there is none.
 730   Node* result_cast();
 731   // Does this node return a pointer?
 732   bool returns_pointer() const {
 733     const TypeTuple* r = tf()->range();
 734     return (r->cnt() > TypeFunc::Parms &&
 735             r->field_at(TypeFunc::Parms)->isa_ptr());
 736   }
 737 
 738   // Collect all the interesting edges from a call for use in
 739   // replacing the call by something else.  Used by macro expansion
 740   // and the late inlining support.
 741   void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true) const;
 742 
 743   virtual uint match_edge(uint idx) const;
 744 
 745   bool is_call_to_arraycopystub() const;
 746 
 747   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 748 
 749 #ifndef PRODUCT
 750   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 751   virtual void        dump_spec(outputStream* st) const;
 752 #endif
 753 };
 754 
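
Reviewer note: `result_cast()` and `returns_pointer()` are the usual way to locate the single oop a call produces before rewiring its users. A minimal sketch, assuming `call` is any CallNode:

    // Sketch: find the node through which all users see the call's oop result.
    if (call->returns_pointer()) {
      Node* res = call->result_cast(); // unique CheckCastPP, the raw result
                                       // projection, or null
      if (res != nullptr) {
        // all users of the result can now be retyped/replaced through `res`
      }
    }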
 755 
 756 //------------------------------CallJavaNode-----------------------------------
 757 // Make a static or dynamic subroutine call node using Java calling
 758 // convention.  (The "Java" calling convention is the compiler's calling
 759 // convention, as opposed to the interpreter's or that of native C.)
 760 class CallJavaNode : public CallNode {
 761 protected:

 791   void  set_arg_escape(bool f)             { _arg_escape = f; }
 792   bool  arg_escape() const                 { return _arg_escape; }
 793   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 794   void register_for_late_inline();
 795 
 796   DEBUG_ONLY( bool validate_symbolic_info() const; )
 797 
 798 #ifndef PRODUCT
 799   virtual void  dump_spec(outputStream *st) const;
 800   virtual void  dump_compact_spec(outputStream *st) const;
 801 #endif
 802 };
 803 
 804 //------------------------------CallStaticJavaNode-----------------------------
 805 // Make a direct subroutine call using Java calling convention (for static
 806 // calls and optimized virtual calls, plus calls to wrappers for run-time
 807 // routines); generates static stub.
 808 class CallStaticJavaNode : public CallJavaNode {
 809   virtual bool cmp( const Node &n ) const;
 810   virtual uint size_of() const; // Size is bigger
 811 public:
 812   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 813     : CallJavaNode(tf, addr, method) {
 814     init_class_id(Class_CallStaticJava);
 815     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 816       init_flags(Flag_is_macro);
 817       C->add_macro_node(this);
 818     }
 819   }
 820   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 821     : CallJavaNode(tf, addr, nullptr) {
 822     init_class_id(Class_CallStaticJava);
 823     // This node calls a runtime stub, which often has narrow memory effects.
 824     _adr_type = adr_type;
 825     _name = name;
 826   }
 827 
 828   // If this is an uncommon trap, return the request code, else zero.
 829   int uncommon_trap_request() const;
 830   bool is_uncommon_trap() const;
 831   static int extract_uncommon_trap_request(const Node* call);
 832 
 833   bool is_boxing_method() const {
 834     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 835   }
 836   // Late inlining modifies the JVMState, so we need to deep clone it
 837   // when the call node is cloned (because it is a macro node).
 838   virtual bool needs_deep_clone_jvms(Compile* C) {

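Aside, since the uncommon-trap accessors sit just above the elided part of this class: the request word packs a Deoptimization reason and action, and the standard decoders apply. A small sketch:

    // Sketch: recognize an uncommon trap and decode its packed request word.
    if (call->is_uncommon_trap()) {
      int req = call->uncommon_trap_request();
      Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
      Deoptimization::DeoptAction action = Deoptimization::trap_request_action(req);
      // reason/action drive how execution resumes after the deopt
    }
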
 936                    const TypePtr* adr_type)
 937       : CallLeafNode(tf, addr, name, adr_type) {
 938     init_class_id(Class_CallLeafPure);
 939   }
 940   int Opcode() const override;
 941   Node* Ideal(PhaseGVN* phase, bool can_reshape) override;
 942 };
 943 
 944 //------------------------------CallLeafNoFPNode-------------------------------
 945 // A CallLeafNode that does not use floating point, or uses it in the same
 946 // manner as the generated code
 947 class CallLeafNoFPNode : public CallLeafNode {
 948 public:
 949   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 950                    const TypePtr* adr_type)
 951     : CallLeafNode(tf, addr, name, adr_type)
 952   {
 953     init_class_id(Class_CallLeafNoFP);
 954   }
 955   virtual int   Opcode() const;
 956 };
 957 
 958 //------------------------------CallLeafVectorNode-------------------------------
 959 // A CallLeafNode that uses the vector calling convention instead.
 960 class CallLeafVectorNode : public CallLeafNode {
 961 private:
 962   uint _num_bits;
 963 protected:
 964   virtual bool cmp( const Node &n ) const;
 965   virtual uint size_of() const; // Size is bigger
 966 public:
 967   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 968                    const TypePtr* adr_type, uint num_bits)
 969     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 970   {
 971   }
 972   virtual int   Opcode() const;
 973   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 974 };
 975 

 978 // High-level memory allocation
 979 //
 980 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 981 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 982 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 983 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 984 //  order to differentiate the uses of the projection on the normal control path from
 985 //  those on the exception return path.
 986 //
 987 class AllocateNode : public CallNode {
 988 public:
 989   enum {
 990     // Output:
 991     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
 992     // Inputs:
 993     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
 994     KlassNode,                        // type (maybe dynamic) of the obj.
 995     InitialTest,                      // slow-path test (may be constant)
 996     ALength,                          // array length (or TOP if none)
 997     ValidLengthTest,
 998     ParmLimit
 999   };
1000 
1001   static const TypeFunc* alloc_type(const Type* t) {
1002     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1003     fields[AllocSize]   = TypeInt::POS;
1004     fields[KlassNode]   = TypeInstPtr::NOTNULL;
1005     fields[InitialTest] = TypeInt::BOOL;
1006     fields[ALength]     = t;  // length (can be a bad length)
1007     fields[ValidLengthTest] = TypeInt::BOOL;
1008 
1009     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1010 
1011     // create result type (range)
1012     fields = TypeTuple::fields(1);
1013     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1014 
1015     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1016 
1017     return TypeFunc::make(domain, range);
1018   }
1019 
1020   // Result of Escape Analysis
1021   bool _is_scalar_replaceable;
1022   bool _is_non_escaping;
1023   // True when MemBar for new is redundant with MemBar at initializer exit
1024   bool _is_allocation_MemBar_redundant;
1025 
1026   virtual uint size_of() const; // Size is bigger
1027   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1028                Node *size, Node *klass_node, Node *initial_test);
1029   // Expansion modifies the JVMState, so we need to deep clone it
1030   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1031   virtual int Opcode() const;
1032   virtual uint ideal_reg() const { return Op_RegP; }
1033   virtual bool        guaranteed_safepoint()  { return false; }
1034 
1035   // allocations do not modify their arguments
1036   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1037 
1038   // Pattern-match a possible usage of AllocateNode.
1039   // Return null if no allocation is recognized.
1040   // The operand is the pointer produced by the (possible) allocation.
1041   // It must be a projection of the Allocate or its subsequent CastPP.
1042   // (Note:  This function is defined in file graphKit.cpp, near
1043   // GraphKit::new_instance/new_array, whose output it recognizes.)
1044   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1045   static AllocateNode* Ideal_allocation(Node* ptr);
1046 
1047   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1048   // an offset, which is reported back to the caller.

1073 
1074   // Return true if the allocation doesn't escape the thread, i.e. its
1075   // escape state is NoEscape or ArgEscape. InitializeNode._does_not_escape
1076   // is true when its allocation's escape state is NoEscape or
1077   // ArgEscape. If the allocation's InitializeNode is null, check the
1078   // AllocateNode._is_non_escaping flag instead.
1079   // AllocateNode._is_non_escaping is true when the escape state is
1080   // NoEscape.
1081   bool does_not_escape_thread() {
1082     InitializeNode* init = nullptr;
1083     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1084   }
1085 
1086   // If the object doesn't escape in its <init> method and a memory barrier
1087   // is inserted at the exit of its <init>, the memory barrier for the new
1088   // allocation is not necessary. Invoke this method when the MemBar at the
1089   // exit of the initializer post-dominates the allocation node.
1090   void compute_MemBar_redundancy(ciMethod* initializer);
1091   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1092 
1093   Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
1094 
1095   NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1096 };
1097 
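
For context, the two common entry points into AllocateNode, sketched under the assumption that `ptr` is some oop-typed node (Ideal_allocation itself is defined in graphKit.cpp, as the comment above notes):

    // Sketch: (1) build the slow-call signature for an instance allocation
    // (array allocations pass a real length type instead of Type::TOP) ...
    const TypeFunc* slow_call_type = AllocateNode::alloc_type(Type::TOP);
    // ... and (2) walk back from an oop to the allocation that produced it.
    AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr);
    if (alloc != nullptr && alloc->_is_scalar_replaceable) {
      // candidate for scalar replacement found by escape analysis
    }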
1098 //------------------------------AllocateArray---------------------------------
1099 //
1100 // High-level array allocation
1101 //
1102 class AllocateArrayNode : public AllocateNode {
1103 public:
1104   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1105                     Node* initial_test, Node* count_val, Node* valid_length_test)
1106     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1107                    initial_test)
1108   {
1109     init_class_id(Class_AllocateArray);
1110     set_req(AllocateNode::ALength,        count_val);
1111     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1112   }
1113   virtual int Opcode() const;
1114 
1115   // Dig the length operand out of an array allocation site.
1116   Node* Ideal_length() {
1117     return in(AllocateNode::ALength);
1118   }
1119 
1120   // Dig the length operand out of an array allocation site and narrow the
1121   // type with a CastII, if necessary.
1122   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1123 
1124   // Pattern-match a possible usage of AllocateArrayNode.
1125   // Return null if no allocation is recognized.
1126   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1127     AllocateNode* allo = Ideal_allocation(ptr);
1128     return (allo == nullptr || !allo->is_AllocateArray())
1129            ? nullptr : allo->as_AllocateArray();
1130   }
1131 };
1132 
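
Similarly for arrays, a hedged sketch of recovering an allocation and its (possibly narrowed) length; `phase` stands for the current PhaseValues:

    // Sketch: pattern-match an array allocation and dig out its length.
    AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr);
    if (alloc != nullptr) {
      Node* len = alloc->Ideal_length();              // raw ALength input
      const TypeOopPtr* ary_t = phase->type(ptr)->isa_oopptr();
      if (ary_t != nullptr) {
        len = alloc->make_ideal_length(ary_t, phase); // adds a CastII if needed
      }
    }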

src/hotspot/share/opto/callnode.hpp (new version)

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; }
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93 };
  94 
  95 
  96 //------------------------------ParmNode---------------------------------------
  97 // Incoming parameters
  98 class ParmNode : public ProjNode {
  99   static const char * const names[TypeFunc::Parms+1];
 100 public:
 101   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 102     init_class_id(Class_Parm);
 103   }
 104   virtual int Opcode() const;
 105   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 106   virtual uint ideal_reg() const;
 107 #ifndef PRODUCT
 108   virtual void dump_spec(outputStream *st) const;
 109   virtual void dump_compact_spec(outputStream *st) const;
 110 #endif
 111 };
 112 

 634     assert(jvms != nullptr, "JVMS reference is null.");
 635     return jvms->scloff() + _merge_pointer_idx + 1;
 636   }
 637 
 638   // Assumes that "this" is an argument to a safepoint node "s", and that
 639   // "new_call" is being created to correspond to "s".  But the difference
 640   // between the start index of the jvmstates of "new_call" and "s" is
 641   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 642   // corresponds appropriately to "this" in "new_call".  Assumes that
 643   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 644   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 645   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 646 
 647 #ifndef PRODUCT
 648   virtual void              dump_spec(outputStream *st) const;
 649 #endif
 650 };
 651 
 652 // Simple container for the outgoing projections of a call.  Useful
 653 // for serious surgery on calls.
 654 class CallProjections {
 655 public:
 656   Node* fallthrough_proj;
 657   Node* fallthrough_catchproj;
 658   Node* fallthrough_memproj;
 659   Node* fallthrough_ioproj;
 660   Node* catchall_catchproj;
 661   Node* catchall_memproj;
 662   Node* catchall_ioproj;
 663   Node* exobj;
 664   uint nb_resproj;
 665   Node* resproj[1]; // at least one projection
 666 
 667   CallProjections(uint nbres) {
 668     fallthrough_proj      = nullptr;
 669     fallthrough_catchproj = nullptr;
 670     fallthrough_memproj   = nullptr;
 671     fallthrough_ioproj    = nullptr;
 672     catchall_catchproj    = nullptr;
 673     catchall_memproj      = nullptr;
 674     catchall_ioproj       = nullptr;
 675     exobj                 = nullptr;
 676     nb_resproj            = nbres;
 677     resproj[0]            = nullptr;
 678     for (uint i = 1; i < nb_resproj; i++) {
 679       resproj[i]          = nullptr;
 680     }
 681   }
 682 
 683 };
 684 
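
Usage note on the new shape: the container now ends in a flexible `resproj` array sized by `nb_resproj`, so a call that returns an inline type as multiple fields gets one projection per returned field, and `extract_projections` (below) heap-allocates and returns the container instead of filling a caller's StackObj. A minimal sketch:

    // Sketch (new API): iterate over a variable number of result projections.
    CallProjections* projs = call->extract_projections(true /* separate_io_proj */);
    for (uint i = 0; i < projs->nb_resproj; i++) {
      if (projs->resproj[i] != nullptr) {
        // one projection per returned field of the inline type
      }
    }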
 685 class CallGenerator;
 686 
 687 //------------------------------CallNode---------------------------------------
 688 // Call nodes now subsume the function of debug nodes at callsites, so they
 689 // contain the functionality of a full scope chain of debug nodes.
 690 class CallNode : public SafePointNode {
 691 
 692 protected:
 693   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 694 
 695 public:
 696   const TypeFunc* _tf;          // Function type
 697   address         _entry_point; // Address of method being called
 698   float           _cnt;         // Estimate of number of times called
 699   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 700   const char*     _name;        // Printable name, if _method is null
 701 
 702   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 703     : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
 704       _tf(tf),
 705       _entry_point(addr),
 706       _cnt(COUNT_UNKNOWN),
 707       _generator(nullptr),
 708       _name(nullptr)
 709   {
 710     init_class_id(Class_Call);
 711   }
 712 
 713   const TypeFunc* tf()         const { return _tf; }
 714   address  entry_point()       const { return _entry_point; }
 715   float    cnt()               const { return _cnt; }
 716   CallGenerator* generator()   const { return _generator; }
 717 
 718   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 719   void set_entry_point(address p)       { _entry_point = p; }
 720   void set_cnt(float c)                 { _cnt = c; }
 721   void set_generator(CallGenerator* cg) { _generator = cg; }
 722 
 723   virtual const Type* bottom_type() const;
 724   virtual const Type* Value(PhaseGVN* phase) const;
 725   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 726   virtual Node* Identity(PhaseGVN* phase) { return this; }
 727   virtual bool        cmp(const Node &n) const;
 728   virtual uint        size_of() const = 0;
 729   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 730   virtual Node*       match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
 731   virtual uint        ideal_reg() const { return NotAMachineReg; }
 732   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 733   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 734   virtual bool        guaranteed_safepoint()  { return true; }
 735   // For macro nodes, the JVMState gets modified during expansion. If calls
 736   // use MachConstantBase, it gets modified during matching. So when cloning
 737   // the node the JVMState must be deep cloned. Default is to shallow clone.
 738   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 739 
 740   // Returns true if the call may modify n
 741   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 742   // Does this node have a use of n other than in debug information?
 743   bool                has_non_debug_use(Node* n);
 744   bool                has_debug_use(Node* n);
 745   // Returns the unique CheckCastPP of a call,
 746   // or the result projection if there are several CheckCastPPs,
 747   // or null if there is none.
 748   Node* result_cast();
 749   // Does this node return a pointer?
 750   bool returns_pointer() const {
 751     const TypeTuple* r = tf()->range_sig();
 752     return (!tf()->returns_inline_type_as_fields() &&
 753             r->cnt() > TypeFunc::Parms &&
 754             r->field_at(TypeFunc::Parms)->isa_ptr());
 755   }
 756 
 757   // Collect all the interesting edges from a call for use in
 758   // replacing the call by something else.  Used by macro expansion
 759   // and the late inlining support.
 760   CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true) const;
 761 
 762   virtual uint match_edge(uint idx) const;
 763 
 764   bool is_call_to_arraycopystub() const;
 765 
 766   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 767 
 768 #ifndef PRODUCT
 769   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 770   virtual void        dump_spec(outputStream* st) const;
 771 #endif
 772 };
 773 
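
Reviewer note on the added `has_debug_use`: together with `has_non_debug_use` it classifies how a call references a node, which is exactly what allocation elimination needs. A hedged sketch:

    // Sketch: decide whether `n` (e.g. an allocation being eliminated) is a
    // real input to the call or only pinned by its debug info.
    if (call->has_non_debug_use(n)) {
      // n is a true argument; the call may observe or modify it
    } else if (call->has_debug_use(n)) {
      // n appears only in the JVMState; it can be replaced by a
      // SafePointScalarObjectNode describing its fields
    }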
 774 
 775 //------------------------------CallJavaNode-----------------------------------
 776 // Make a static or dynamic subroutine call node using Java calling
 777 // convention.  (The "Java" calling convention is the compiler's calling
 778 // convention, as opposed to the interpreter's or that of native C.)
 779 class CallJavaNode : public CallNode {
 780 protected:

 810   void  set_arg_escape(bool f)             { _arg_escape = f; }
 811   bool  arg_escape() const                 { return _arg_escape; }
 812   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 813   void register_for_late_inline();
 814 
 815   DEBUG_ONLY( bool validate_symbolic_info() const; )
 816 
 817 #ifndef PRODUCT
 818   virtual void  dump_spec(outputStream *st) const;
 819   virtual void  dump_compact_spec(outputStream *st) const;
 820 #endif
 821 };
 822 
 823 //------------------------------CallStaticJavaNode-----------------------------
 824 // Make a direct subroutine call using Java calling convention (for static
 825 // calls and optimized virtual calls, plus calls to wrappers for run-time
 826 // routines); generates static stub.
 827 class CallStaticJavaNode : public CallJavaNode {
 828   virtual bool cmp( const Node &n ) const;
 829   virtual uint size_of() const; // Size is bigger
 830 
 831   bool remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg);
 832 
 833 public:
 834   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 835     : CallJavaNode(tf, addr, method) {
 836     init_class_id(Class_CallStaticJava);
 837     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 838       init_flags(Flag_is_macro);
 839       C->add_macro_node(this);
 840     }
 841     const TypeTuple *r = tf->range_sig();
 842     if (InlineTypeReturnedAsFields &&
 843         method != nullptr &&
 844         method->is_method_handle_intrinsic() &&
 845         r->cnt() > TypeFunc::Parms &&
 846         r->field_at(TypeFunc::Parms)->isa_oopptr() &&
 847         r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
 848       // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
 849       init_flags(Flag_is_macro);
 850       C->add_macro_node(this);
 851     }
 852   }
 853   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 854     : CallJavaNode(tf, addr, nullptr) {
 855     init_class_id(Class_CallStaticJava);
 856     // This node calls a runtime stub, which often has narrow memory effects.
 857     _adr_type = adr_type;
 858     _name = name;
 859   }
 860 
 861   // If this is an uncommon trap, return the request code, else zero.
 862   int uncommon_trap_request() const;
 863   bool is_uncommon_trap() const;
 864   static int extract_uncommon_trap_request(const Node* call);
 865 
 866   bool is_boxing_method() const {
 867     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 868   }
 869   // Late inlining modifies the JVMState, so we need to deep clone it
 870   // when the call node is cloned (because it is a macro node).
 871   virtual bool needs_deep_clone_jvms(Compile* C) {

 969                    const TypePtr* adr_type)
 970       : CallLeafNode(tf, addr, name, adr_type) {
 971     init_class_id(Class_CallLeafPure);
 972   }
 973   int Opcode() const override;
 974   Node* Ideal(PhaseGVN* phase, bool can_reshape) override;
 975 };
 976 
 977 //------------------------------CallLeafNoFPNode-------------------------------
 978 // A CallLeafNode that does not use floating point, or uses it in the same
 979 // manner as the generated code
 980 class CallLeafNoFPNode : public CallLeafNode {
 981 public:
 982   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 983                    const TypePtr* adr_type)
 984     : CallLeafNode(tf, addr, name, adr_type)
 985   {
 986     init_class_id(Class_CallLeafNoFP);
 987   }
 988   virtual int   Opcode() const;
 989   virtual uint match_edge(uint idx) const;
 990 };
 991 
 992 //------------------------------CallLeafVectorNode-------------------------------
 993 // A CallLeafNode that uses the vector calling convention instead.
 994 class CallLeafVectorNode : public CallLeafNode {
 995 private:
 996   uint _num_bits;
 997 protected:
 998   virtual bool cmp( const Node &n ) const;
 999   virtual uint size_of() const; // Size is bigger
1000 public:
1001   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
1002                    const TypePtr* adr_type, uint num_bits)
1003     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
1004   {
1005   }
1006   virtual int   Opcode() const;
1007   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
1008 };
1009 

1012 // High-level memory allocation
1013 //
1014 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
1015 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
1016 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
1017 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
1018 //  order to differentiate the uses of the projection on the normal control path from
1019 //  those on the exception return path.
1020 //
1021 class AllocateNode : public CallNode {
1022 public:
1023   enum {
1024     // Output:
1025     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
1026     // Inputs:
1027     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
1028     KlassNode,                        // type (maybe dynamic) of the obj.
1029     InitialTest,                      // slow-path test (may be constant)
1030     ALength,                          // array length (or TOP if none)
1031     ValidLengthTest,
1032     InlineType,                       // InlineTypeNode if this is an inline type allocation
1033     InitValue,                        // Init value for null-free inline type arrays
1034     RawInitValue,                     // Same as above but as raw machine word
1035     ParmLimit
1036   };
1037 
1038   static const TypeFunc* alloc_type(const Type* t) {
1039     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1040     fields[AllocSize]   = TypeInt::POS;
1041     fields[KlassNode]   = TypeInstPtr::NOTNULL;
1042     fields[InitialTest] = TypeInt::BOOL;
1043     fields[ALength]     = t;  // length (can be a bad length)
1044     fields[ValidLengthTest] = TypeInt::BOOL;
1045     fields[InlineType] = Type::BOTTOM;
1046     fields[InitValue] = TypeInstPtr::NOTNULL;
1047     fields[RawInitValue] = TypeX_X;
1048 
1049     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1050 
1051     // create result type (range)
1052     fields = TypeTuple::fields(1);
1053     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1054 
1055     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1056 
1057     return TypeFunc::make(domain, range);
1058   }
1059 
1060   // Result of Escape Analysis
1061   bool _is_scalar_replaceable;
1062   bool _is_non_escaping;
1063   // True when MemBar for new is redundant with MemBar at initializer exit
1064   bool _is_allocation_MemBar_redundant;
1065   bool _larval;
1066 
1067   virtual uint size_of() const; // Size is bigger
1068   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1069                Node *size, Node *klass_node, Node *initial_test,
1070                InlineTypeNode* inline_type_node = nullptr);
1071   // Expansion modifies the JVMState, so we need to deep clone it
1072   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1073   virtual int Opcode() const;
1074   virtual uint ideal_reg() const { return Op_RegP; }
1075   virtual bool        guaranteed_safepoint()  { return false; }
1076 
1077   // allocations do not modify their arguments
1078   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1079 
1080   // Pattern-match a possible usage of AllocateNode.
1081   // Return null if no allocation is recognized.
1082   // The operand is the pointer produced by the (possible) allocation.
1083   // It must be a projection of the Allocate or its subsequent CastPP.
1084   // (Note:  This function is defined in file graphKit.cpp, near
1085   // GraphKit::new_instance/new_array, whose output it recognizes.)
1086   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1087   static AllocateNode* Ideal_allocation(Node* ptr);
1088 
1089   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1090   // an offset, which is reported back to the caller.

1115 
1116   // Return true if the allocation doesn't escape the thread, i.e. its
1117   // escape state is NoEscape or ArgEscape. InitializeNode._does_not_escape
1118   // is true when its allocation's escape state is NoEscape or
1119   // ArgEscape. If the allocation's InitializeNode is null, check the
1120   // AllocateNode._is_non_escaping flag instead.
1121   // AllocateNode._is_non_escaping is true when the escape state is
1122   // NoEscape.
1123   bool does_not_escape_thread() {
1124     InitializeNode* init = nullptr;
1125     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1126   }
1127 
1128   // If the object doesn't escape in its <init> method and a memory barrier
1129   // is inserted at the exit of its <init>, the memory barrier for the new
1130   // allocation is not necessary. Invoke this method when the MemBar at the
1131   // exit of the initializer post-dominates the allocation node.
1132   void compute_MemBar_redundancy(ciMethod* initializer);
1133   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1134 
1135   Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
1136 
1137   NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1138 };
1139 
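
For context on `_is_allocation_MemBar_redundant`, a hedged sketch of how the two queries combine; `init_method` is assumed to be the ciMethod of the <init> whose exit MemBar post-dominates this allocation:

    // Sketch: decide whether the publication barrier for this allocation
    // can be elided.
    alloc->compute_MemBar_redundancy(init_method);
    if (alloc->does_not_escape_thread() ||
        alloc->is_allocation_MemBar_redundant()) {
      // no extra MemBar is needed before publishing the new object
    }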
1140 //------------------------------AllocateArray---------------------------------
1141 //
1142 // High-level array allocation
1143 //
1144 class AllocateArrayNode : public AllocateNode {
1145 public:
1146   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1147                     Node* initial_test, Node* count_val, Node* valid_length_test,
1148                     Node* init_value, Node* raw_init_value)
1149     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1150                    initial_test)
1151   {
1152     init_class_id(Class_AllocateArray);
1153     set_req(AllocateNode::ALength, count_val);
1154     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1155     init_req(AllocateNode::InitValue, init_value);
1156     init_req(AllocateNode::RawInitValue, raw_init_value);
1157   }
1158   virtual uint size_of() const { return sizeof(*this); }
1159   virtual int Opcode() const;
1160 
1161   // Dig the length operand out of an array allocation site.
1162   Node* Ideal_length() {
1163     return in(AllocateNode::ALength);
1164   }
1165 
1166   // Dig the length operand out of an array allocation site and narrow the
1167   // type with a CastII, if necessary.
1168   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1169 
1170   // Pattern-match a possible usage of AllocateArrayNode.
1171   // Return null if no allocation is recognized.
1172   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1173     AllocateNode* allo = Ideal_allocation(ptr);
1174     return (allo == nullptr || !allo->is_AllocateArray())
1175            ? nullptr : allo->as_AllocateArray();
1176   }
1177 };
1178 
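
Finally, a hedged sketch of the Valhalla-specific inputs added above: for a null-free inline-type array, expansion fills elements with the init value instead of zeroing them. `alloc` is assumed to be an AllocateNode* found earlier:

    // Sketch: read the init-value inputs off an array allocation.
    if (alloc->is_AllocateArray()) {
      Node* init_value     = alloc->in(AllocateNode::InitValue);
      Node* raw_init_value = alloc->in(AllocateNode::RawInitValue);
      if (init_value != nullptr) {
        // initialize elements with init_value / raw_init_value, not zero
      }
    }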