src/hotspot/share/opto/callnode.hpp

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match( const ProjNode *proj, const Matcher *m );
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93   static  const TypeTuple *osr_domain();
  94 };
  95 
  96 
  97 //------------------------------ParmNode---------------------------------------
  98 // Incoming parameters
  99 class ParmNode : public ProjNode {
 100   static const char * const names[TypeFunc::Parms+1];
 101 public:
 102   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 103     init_class_id(Class_Parm);
 104   }
 105   virtual int Opcode() const;
 106   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 107   virtual uint ideal_reg() const;
 108 #ifndef PRODUCT
 109   virtual void dump_spec(outputStream *st) const;
 110   virtual void dump_compact_spec(outputStream *st) const;
 111 #endif
 112 };
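
// Illustrative sketch (not part of this file): incoming parameters are created
// as ParmNode projections of the method's StartNode. The names 'start' and
// 'gvn' (a StartNode* and a PhaseGVN&) are assumed to be available during
// graph construction; TypeFunc::Control/Memory/Parms come from type.hpp.
//
//   Node* ctrl = gvn.transform(new ParmNode(start, TypeFunc::Control));
//   Node* mem  = gvn.transform(new ParmNode(start, TypeFunc::Memory));
//   Node* arg0 = gvn.transform(new ParmNode(start, TypeFunc::Parms + 0));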
 113 

 636     assert(jvms != nullptr, "JVMS reference is null.");
 637     return jvms->scloff() + _merge_pointer_idx + 1;
 638   }
 639 
 640   // Assumes that "this" is an argument to a safepoint node "s", and that
 641   // "new_call" is being created to correspond to "s".  But the difference
 642   // between the start index of the jvmstates of "new_call" and "s" is
 643   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 644   // corresponds appropriately to "this" in "new_call".  Assumes that
 645   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 646   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 647   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 648 
 649 #ifndef PRODUCT
 650   virtual void              dump_spec(outputStream *st) const;
 651 #endif
 652 };
 653 
 654 // Simple container for the outgoing projections of a call.  Useful
 655 // for serious surgery on calls.
 656 class CallProjections : public StackObj {
 657 public:
 658   Node* fallthrough_proj;
 659   Node* fallthrough_catchproj;
 660   Node* fallthrough_memproj;
 661   Node* fallthrough_ioproj;
 662   Node* catchall_catchproj;
 663   Node* catchall_memproj;
 664   Node* catchall_ioproj;
 665   Node* resproj;
 666   Node* exobj;
 667 };
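
// Illustrative sketch (not part of this file): typical use of this container
// together with CallNode::extract_projections() declared further below. Here
// 'call' is an assumed CallNode*; the container lives on the caller's stack
// and is filled in place.
//
//   CallProjections projs;
//   call->extract_projections(&projs, /*separate_io_proj=*/true);
//   if (projs.fallthrough_memproj != nullptr) {
//     // rewire memory users of the call here
//   }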
 668 
 669 class CallGenerator;
 670 
 671 //------------------------------CallNode---------------------------------------
 672 // Call nodes now subsume the function of debug nodes at callsites, so they
 673 // contain the functionality of a full scope chain of debug nodes.
 674 class CallNode : public SafePointNode {
 675   friend class VMStructs;
 676 
 677 protected:
 678   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 679 
 680 public:
 681   const TypeFunc* _tf;          // Function type
 682   address         _entry_point; // Address of method being called
 683   float           _cnt;         // Estimate of number of times called
 684   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 685   const char*     _name;        // Printable name, if _method is null
 686 
 687   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 688     : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
 689       _tf(tf),
 690       _entry_point(addr),
 691       _cnt(COUNT_UNKNOWN),
 692       _generator(nullptr),
 693       _name(nullptr)
 694   {
 695     init_class_id(Class_Call);
 696   }
 697 
 698   const TypeFunc* tf()         const { return _tf; }
 699   address  entry_point()       const { return _entry_point; }
 700   float    cnt()               const { return _cnt; }
 701   CallGenerator* generator()   const { return _generator; }
 702 
 703   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 704   void set_entry_point(address p)       { _entry_point = p; }
 705   void set_cnt(float c)                 { _cnt = c; }
 706   void set_generator(CallGenerator* cg) { _generator = cg; }
 707 
 708   virtual const Type* bottom_type() const;
 709   virtual const Type* Value(PhaseGVN* phase) const;
 710   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 711   virtual Node* Identity(PhaseGVN* phase) { return this; }
 712   virtual bool        cmp(const Node &n) const;
 713   virtual uint        size_of() const = 0;
 714   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 715   virtual Node*       match(const ProjNode* proj, const Matcher* m);
 716   virtual uint        ideal_reg() const { return NotAMachineReg; }
 717   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 718   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 719   virtual bool        guaranteed_safepoint()  { return true; }
 720   // For macro nodes, the JVMState gets modified during expansion. If calls
 721   // use MachConstantBase, it gets modified during matching. So when cloning
 722   // the node the JVMState must be deep cloned. Default is to shallow clone.
 723   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 724 
 725   // Returns true if the call may modify n
 726   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 727   // Does this node have a use of n other than in debug information?
 728   bool                has_non_debug_use(Node* n);

 729   // Returns the unique CheckCastPP of a call,
 730   // or the result projection if there are several CheckCastPPs,
 731   // or null if there is none.
 732   Node* result_cast();
 733   // Does this node return a pointer?
 734   bool returns_pointer() const {
 735     const TypeTuple* r = tf()->range();
 736     return (r->cnt() > TypeFunc::Parms &&
 737             r->field_at(TypeFunc::Parms)->isa_ptr());
 738   }
 739 
 740   // Collect all the interesting edges from a call for use in
 741   // replacing the call by something else.  Used by macro expansion
 742   // and the late inlining support.
 743   void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);
 744 
 745   virtual uint match_edge(uint idx) const;
 746 
 747   bool is_call_to_arraycopystub() const;
 748 
 749   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 750 
 751 #ifndef PRODUCT
 752   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 753   virtual void        dump_spec(outputStream* st) const;
 754 #endif
 755 };
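
// Illustrative sketch (not part of this file): querying the result shape of a
// call with the accessors declared above; 'call' is an assumed CallNode*.
//
//   if (call->returns_pointer()) {
//     Node* oop = call->result_cast();  // unique CheckCastPP (or result proj); may be null
//     if (oop != nullptr) { /* use the single oop result here */ }
//   }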
 756 
 757 
 758 //------------------------------CallJavaNode-----------------------------------
 759 // Make a static or dynamic subroutine call node using Java calling
 760 // convention.  (The "Java" calling convention is the compiler's calling
 761 // convention, as opposed to the interpreter's or that of native C.)
 762 class CallJavaNode : public CallNode {
 763   friend class VMStructs;

 793   bool  override_symbolic_info() const     { return _override_symbolic_info; }
 794   void  set_arg_escape(bool f)             { _arg_escape = f; }
 795   bool  arg_escape() const                 { return _arg_escape; }
 796   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 797 
 798   DEBUG_ONLY( bool validate_symbolic_info() const; )
 799 
 800 #ifndef PRODUCT
 801   virtual void  dump_spec(outputStream *st) const;
 802   virtual void  dump_compact_spec(outputStream *st) const;
 803 #endif
 804 };
 805 
 806 //------------------------------CallStaticJavaNode-----------------------------
 807 // Make a direct subroutine call using Java calling convention (for static
 808 // calls and optimized virtual calls, plus calls to wrappers for run-time
 809 // routines); generates static stub.
 810 class CallStaticJavaNode : public CallJavaNode {
 811   virtual bool cmp( const Node &n ) const;
 812   virtual uint size_of() const; // Size is bigger
 813 public:
 814   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 815     : CallJavaNode(tf, addr, method) {
 816     init_class_id(Class_CallStaticJava);
 817     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 818       init_flags(Flag_is_macro);
 819       C->add_macro_node(this);
 820     }
 821   }
 822   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 823     : CallJavaNode(tf, addr, nullptr) {
 824     init_class_id(Class_CallStaticJava);
 825     // This node calls a runtime stub, which often has narrow memory effects.
 826     _adr_type = adr_type;
 827     _name = name;
 828   }
 829 
 830   // If this is an uncommon trap, return the request code, else zero.
 831   int uncommon_trap_request() const;
 832   bool is_uncommon_trap() const;
 833   static int extract_uncommon_trap_request(const Node* call);
 834 
 835   bool is_boxing_method() const {
 836     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 837   }
 838   // Late inlining modifies the JVMState, so we need to deep clone it
 839   // when the call node is cloned (because it is a macro node).
 840   virtual bool needs_deep_clone_jvms(Compile* C) {

 911   }
 912   virtual int   Opcode() const;
 913   virtual bool        guaranteed_safepoint()  { return false; }
 914 #ifndef PRODUCT
 915   virtual void  dump_spec(outputStream *st) const;
 916 #endif
 917 };
 918 
 919 //------------------------------CallLeafNoFPNode-------------------------------
 920 // A CallLeafNode that does not use floating point, or uses it in the same
 921 // manner as the generated code.
 922 class CallLeafNoFPNode : public CallLeafNode {
 923 public:
 924   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 925                    const TypePtr* adr_type)
 926     : CallLeafNode(tf, addr, name, adr_type)
 927   {
 928     init_class_id(Class_CallLeafNoFP);
 929   }
 930   virtual int   Opcode() const;

 931 };
 932 
 933 //------------------------------CallLeafVectorNode-------------------------------
 934 // A CallLeafNode that uses the vector calling convention instead.
 935 class CallLeafVectorNode : public CallLeafNode {
 936 private:
 937   uint _num_bits;
 938 protected:
 939   virtual bool cmp( const Node &n ) const;
 940   virtual uint size_of() const; // Size is bigger
 941 public:
 942   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 943                    const TypePtr* adr_type, uint num_bits)
 944     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 945   {
 946   }
 947   virtual int   Opcode() const;
 948   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 949 };
 950 

 953 // High-level memory allocation
 954 //
 955 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 956 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 957 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 958 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 959 //  order to differentiate the uses of the projection on the normal control path from
 960 //  those on the exception return path.
 961 //
 962 class AllocateNode : public CallNode {
 963 public:
 964   enum {
 965     // Output:
 966     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
 967     // Inputs:
 968     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
 969     KlassNode,                        // type (maybe dynamic) of the obj.
 970     InitialTest,                      // slow-path test (may be constant)
 971     ALength,                          // array length (or TOP if none)
 972     ValidLengthTest,
 973     ParmLimit
 974   };
 975 
 976   static const TypeFunc* alloc_type(const Type* t) {
 977     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
 978     fields[AllocSize]   = TypeInt::POS;
 979     fields[KlassNode]   = TypeInstPtr::NOTNULL;
 980     fields[InitialTest] = TypeInt::BOOL;
 981     fields[ALength]     = t;  // length (can be a bad length)
 982     fields[ValidLengthTest] = TypeInt::BOOL;
 983 
 984     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
 985 
 986     // create result type (range)
 987     fields = TypeTuple::fields(1);
 988     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
 989 
 990     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
 991 
 992     return TypeFunc::make(domain, range);
 993   }
 994 
 995   // Result of Escape Analysis
 996   bool _is_scalar_replaceable;
 997   bool _is_non_escaping;
 998   // True when MemBar for new is redundant with MemBar at initializer exit
 999   bool _is_allocation_MemBar_redundant;

1000 
1001   virtual uint size_of() const; // Size is bigger
1002   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1003                Node *size, Node *klass_node, Node *initial_test);

1004   // Expansion modifies the JVMState, so we need to deep clone it
1005   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1006   virtual int Opcode() const;
1007   virtual uint ideal_reg() const { return Op_RegP; }
1008   virtual bool        guaranteed_safepoint()  { return false; }
1009 
1010   // allocations do not modify their arguments
1011   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1012 
1013   // Pattern-match a possible usage of AllocateNode.
1014   // Return null if no allocation is recognized.
1015   // The operand is the pointer produced by the (possible) allocation.
1016   // It must be a projection of the Allocate or its subsequent CastPP.
1017   // (Note:  This function is defined in file graphKit.cpp, near
1018   // GraphKit::new_instance/new_array, whose output it recognizes.)
1019   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1020   static AllocateNode* Ideal_allocation(Node* ptr);
1021 
1022   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1023   // an offset, which is reported back to the caller.

1048 
1049   // Return true if the allocation does not escape the thread, i.e. its
1050   // escape state is noEscape or ArgEscape. InitializeNode._does_not_escape
1051   // is true when its allocation's escape state is noEscape or
1052   // ArgEscape. If the allocation's InitializeNode is null, check the
1053   // AllocateNode._is_non_escaping flag instead.
1054   // AllocateNode._is_non_escaping is true when the escape state is
1055   // noEscape.
1056   bool does_not_escape_thread() {
1057     InitializeNode* init = nullptr;
1058     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1059   }
1060 
1061   // If the object does not escape in its <init> method and a memory barrier
1062   // is inserted at the exit of its <init>, the memory barrier for the new is
1063   // not necessary. Invoke this method when the MemBar at the initializer exit
1064   // post-dominates the allocation node.
1065   void compute_MemBar_redundancy(ciMethod* initializer);
1066   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1067 
1068   Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
1069 };
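
// Illustrative sketch (not part of this file): pattern-matching an allocation
// from an oop-producing node, as described above; 'ptr' is an assumed Node*
// that may be (a CastPP of) the RawAddress projection of an Allocate.
//
//   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr);
//   if (alloc != nullptr && alloc->does_not_escape_thread()) {
//     // the allocation is known not to escape its allocating thread
//   }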
1070 
1071 //------------------------------AllocateArray---------------------------------
1072 //
1073 // High-level array allocation
1074 //
1075 class AllocateArrayNode : public AllocateNode {
1076 public:
1077   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1078                     Node* initial_test, Node* count_val, Node* valid_length_test)

1079     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1080                    initial_test)
1081   {
1082     init_class_id(Class_AllocateArray);
1083     set_req(AllocateNode::ALength,        count_val);
1084     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1085   }

1086   virtual int Opcode() const;
1087 
1088   // Dig the length operand out of an array allocation site.
1089   Node* Ideal_length() {
1090     return in(AllocateNode::ALength);
1091   }
1092 
1093   // Dig the length operand out of an array allocation site and narrow the
1094   // type with a CastII, if necessary
1095   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1096 
1097   // Pattern-match a possible usage of AllocateArrayNode.
1098   // Return null if no allocation is recognized.
1099   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1100     AllocateNode* allo = Ideal_allocation(ptr);
1101     return (allo == nullptr || !allo->is_AllocateArray())
1102            ? nullptr : allo->as_AllocateArray();
1103   }
1104 };
1105 

1188 //    0  -  object to lock
1189 //    1 -   a BoxLockNode
1190 //    2 -   a FastLockNode
1191 //
1192 class LockNode : public AbstractLockNode {
1193 public:
1194 
1195   static const TypeFunc *lock_type() {
1196     // create input type (domain)
1197     const Type **fields = TypeTuple::fields(3);
1198     fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
1199     fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
1200     fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
1201     const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
1202 
1203     // create result type (range)
1204     fields = TypeTuple::fields(0);
1205 
1206     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1207 
1208     return TypeFunc::make(domain,range);
1209   }
1210 
1211   virtual int Opcode() const;
1212   virtual uint size_of() const; // Size is bigger
1213   LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
1214     init_class_id(Class_Lock);
1215     init_flags(Flag_is_macro);
1216     C->add_macro_node(this);
1217   }
1218   virtual bool        guaranteed_safepoint()  { return false; }
1219 
1220   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1221   // Expansion modifies the JVMState, so we need to deep clone it
1222   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1223 
1224   bool is_nested_lock_region(); // Is this Lock nested?
1225   bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
1226 };
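
// Illustrative sketch (not part of this file): constructing a LockNode. 'C' is
// an assumed Compile*; the constructor above registers the node as a macro
// node, and lock_type() supplies the (object, box, fastlock) signature.
//
//   LockNode* lock = new LockNode(C, LockNode::lock_type());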
1227 
1228 //------------------------------Unlock---------------------------------------

src/hotspot/share/opto/callnode.hpp (patched version)

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;

  93 };
  94 
  95 
  96 //------------------------------ParmNode---------------------------------------
  97 // Incoming parameters
  98 class ParmNode : public ProjNode {
  99   static const char * const names[TypeFunc::Parms+1];
 100 public:
 101   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 102     init_class_id(Class_Parm);
 103   }
 104   virtual int Opcode() const;
 105   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 106   virtual uint ideal_reg() const;
 107 #ifndef PRODUCT
 108   virtual void dump_spec(outputStream *st) const;
 109   virtual void dump_compact_spec(outputStream *st) const;
 110 #endif
 111 };
 112 

 635     assert(jvms != nullptr, "JVMS reference is null.");
 636     return jvms->scloff() + _merge_pointer_idx + 1;
 637   }
 638 
 639   // Assumes that "this" is an argument to a safepoint node "s", and that
 640   // "new_call" is being created to correspond to "s".  But the difference
 641   // between the start index of the jvmstates of "new_call" and "s" is
 642   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 643   // corresponds appropriately to "this" in "new_call".  Assumes that
 644   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 645   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 646   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 647 
 648 #ifndef PRODUCT
 649   virtual void              dump_spec(outputStream *st) const;
 650 #endif
 651 };
 652 
 653 // Simple container for the outgoing projections of a call.  Useful
 654 // for serious surgery on calls.
 655 class CallProjections {
 656 public:
 657   Node* fallthrough_proj;
 658   Node* fallthrough_catchproj;
 659   Node* fallthrough_memproj;
 660   Node* fallthrough_ioproj;
 661   Node* catchall_catchproj;
 662   Node* catchall_memproj;
 663   Node* catchall_ioproj;

 664   Node* exobj;
 665   uint nb_resproj;
 666   Node* resproj[1]; // at least one projection
 667 
 668   CallProjections(uint nbres) {
 669     fallthrough_proj      = nullptr;
 670     fallthrough_catchproj = nullptr;
 671     fallthrough_memproj   = nullptr;
 672     fallthrough_ioproj    = nullptr;
 673     catchall_catchproj    = nullptr;
 674     catchall_memproj      = nullptr;
 675     catchall_ioproj       = nullptr;
 676     exobj                 = nullptr;
 677     nb_resproj            = nbres;
 678     resproj[0]            = nullptr;
 679     for (uint i = 1; i < nb_resproj; i++) {
 680       resproj[i]          = nullptr;
 681     }
 682   }
 683 
 684 };
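
// Illustrative sketch (not part of this file): with the patched API the
// container is variable-sized and is returned by CallNode::extract_projections()
// (declared further below) rather than filled into a caller-provided object.
// 'call' is an assumed CallNode*.
//
//   CallProjections* projs = call->extract_projections(/*separate_io_proj=*/true);
//   for (uint i = 0; i < projs->nb_resproj; i++) {
//     if (projs->resproj[i] != nullptr) {
//       // rewire this result projection
//     }
//   }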
 685 
 686 class CallGenerator;
 687 
 688 //------------------------------CallNode---------------------------------------
 689 // Call nodes now subsume the function of debug nodes at callsites, so they
 690 // contain the functionality of a full scope chain of debug nodes.
 691 class CallNode : public SafePointNode {
 692   friend class VMStructs;
 693 
 694 protected:
 695   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 696 
 697 public:
 698   const TypeFunc* _tf;          // Function type
 699   address         _entry_point; // Address of method being called
 700   float           _cnt;         // Estimate of number of times called
 701   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 702   const char*     _name;        // Printable name, if _method is null
 703 
 704   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 705     : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
 706       _tf(tf),
 707       _entry_point(addr),
 708       _cnt(COUNT_UNKNOWN),
 709       _generator(nullptr),
 710       _name(nullptr)
 711   {
 712     init_class_id(Class_Call);
 713   }
 714 
 715   const TypeFunc* tf()         const { return _tf; }
 716   address  entry_point()       const { return _entry_point; }
 717   float    cnt()               const { return _cnt; }
 718   CallGenerator* generator()   const { return _generator; }
 719 
 720   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 721   void set_entry_point(address p)       { _entry_point = p; }
 722   void set_cnt(float c)                 { _cnt = c; }
 723   void set_generator(CallGenerator* cg) { _generator = cg; }
 724 
 725   virtual const Type* bottom_type() const;
 726   virtual const Type* Value(PhaseGVN* phase) const;
 727   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 728   virtual Node* Identity(PhaseGVN* phase) { return this; }
 729   virtual bool        cmp(const Node &n) const;
 730   virtual uint        size_of() const = 0;
 731   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 732   virtual Node*       match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
 733   virtual uint        ideal_reg() const { return NotAMachineReg; }
 734   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 735   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 736   virtual bool        guaranteed_safepoint()  { return true; }
 737   // For macro nodes, the JVMState gets modified during expansion. If calls
 738   // use MachConstantBase, it gets modified during matching. So when cloning
 739   // the node the JVMState must be deep cloned. Default is to shallow clone.
 740   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 741 
 742   // Returns true if the call may modify n
 743   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 744   // Does this node have a use of n other than in debug information?
 745   bool                has_non_debug_use(Node* n);
 746   bool                has_debug_use(Node* n);
 747   // Returns the unique CheckCastPP of a call,
 748   // or the result projection if there are several CheckCastPPs,
 749   // or null if there is none.
 750   Node* result_cast();
 751   // Does this node return a pointer?
 752   bool returns_pointer() const {
 753     const TypeTuple* r = tf()->range_sig();
 754     return (!tf()->returns_inline_type_as_fields() &&
 755             r->cnt() > TypeFunc::Parms &&
 756             r->field_at(TypeFunc::Parms)->isa_ptr());
 757   }
 758 
 759   // Collect all the interesting edges from a call for use in
 760   // replacing the call by something else.  Used by macro expansion
 761   // and the late inlining support.
 762   CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);
 763 
 764   virtual uint match_edge(uint idx) const;
 765 
 766   bool is_call_to_arraycopystub() const;
 767 
 768   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 769 
 770 #ifndef PRODUCT
 771   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 772   virtual void        dump_spec(outputStream* st) const;
 773 #endif
 774 };
 775 
 776 
 777 //------------------------------CallJavaNode-----------------------------------
 778 // Make a static or dynamic subroutine call node using Java calling
 779 // convention.  (The "Java" calling convention is the compiler's calling
 780 // convention, as opposed to the interpreter's or that of native C.)
 781 class CallJavaNode : public CallNode {
 782   friend class VMStructs;

 812   bool  override_symbolic_info() const     { return _override_symbolic_info; }
 813   void  set_arg_escape(bool f)             { _arg_escape = f; }
 814   bool  arg_escape() const                 { return _arg_escape; }
 815   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 816 
 817   DEBUG_ONLY( bool validate_symbolic_info() const; )
 818 
 819 #ifndef PRODUCT
 820   virtual void  dump_spec(outputStream *st) const;
 821   virtual void  dump_compact_spec(outputStream *st) const;
 822 #endif
 823 };
 824 
 825 //------------------------------CallStaticJavaNode-----------------------------
 826 // Make a direct subroutine call using Java calling convention (for static
 827 // calls and optimized virtual calls, plus calls to wrappers for run-time
 828 // routines); generates static stub.
 829 class CallStaticJavaNode : public CallJavaNode {
 830   virtual bool cmp( const Node &n ) const;
 831   virtual uint size_of() const; // Size is bigger
 832 
 833   bool remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg);
 834 
 835 public:
 836   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 837     : CallJavaNode(tf, addr, method) {
 838     init_class_id(Class_CallStaticJava);
 839     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 840       init_flags(Flag_is_macro);
 841       C->add_macro_node(this);
 842     }
 843     const TypeTuple *r = tf->range_sig();
 844     if (InlineTypeReturnedAsFields &&
 845         method != nullptr &&
 846         method->is_method_handle_intrinsic() &&
 847         r->cnt() > TypeFunc::Parms &&
 848         r->field_at(TypeFunc::Parms)->isa_oopptr() &&
 849         r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
 850       // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
 851       init_flags(Flag_is_macro);
 852       C->add_macro_node(this);
 853     }
 854   }
 855   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 856     : CallJavaNode(tf, addr, nullptr) {
 857     init_class_id(Class_CallStaticJava);
 858     // This node calls a runtime stub, which often has narrow memory effects.
 859     _adr_type = adr_type;
 860     _name = name;
 861   }
 862 
 863   // If this is an uncommon trap, return the request code, else zero.
 864   int uncommon_trap_request() const;
 865   bool is_uncommon_trap() const;
 866   static int extract_uncommon_trap_request(const Node* call);
 867 
 868   bool is_boxing_method() const {
 869     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 870   }
 871   // Late inlining modifies the JVMState, so we need to deep clone it
 872   // when the call node is cloned (because it is a macro node).
 873   virtual bool needs_deep_clone_jvms(Compile* C) {

 944   }
 945   virtual int   Opcode() const;
 946   virtual bool        guaranteed_safepoint()  { return false; }
 947 #ifndef PRODUCT
 948   virtual void  dump_spec(outputStream *st) const;
 949 #endif
 950 };
 951 
 952 //------------------------------CallLeafNoFPNode-------------------------------
 953 // A CallLeafNode that does not use floating point, or uses it in the same
 954 // manner as the generated code.
 955 class CallLeafNoFPNode : public CallLeafNode {
 956 public:
 957   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 958                    const TypePtr* adr_type)
 959     : CallLeafNode(tf, addr, name, adr_type)
 960   {
 961     init_class_id(Class_CallLeafNoFP);
 962   }
 963   virtual int   Opcode() const;
 964   virtual uint match_edge(uint idx) const;
 965 };
 966 
 967 //------------------------------CallLeafVectorNode-------------------------------
 968 // A CallLeafNode that uses the vector calling convention instead.
 969 class CallLeafVectorNode : public CallLeafNode {
 970 private:
 971   uint _num_bits;
 972 protected:
 973   virtual bool cmp( const Node &n ) const;
 974   virtual uint size_of() const; // Size is bigger
 975 public:
 976   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 977                    const TypePtr* adr_type, uint num_bits)
 978     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 979   {
 980   }
 981   virtual int   Opcode() const;
 982   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 983 };
 984 

 987 // High-level memory allocation
 988 //
 989 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 990 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 991 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 992 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 993 //  order to differentiate the uses of the projection on the normal control path from
 994 //  those on the exception return path.
 995 //
 996 class AllocateNode : public CallNode {
 997 public:
 998   enum {
 999     // Output:
1000     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
1001     // Inputs:
1002     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
1003     KlassNode,                        // type (maybe dynamic) of the obj.
1004     InitialTest,                      // slow-path test (may be constant)
1005     ALength,                          // array length (or TOP if none)
1006     ValidLengthTest,
1007     InlineType,                       // InlineTypeNode if this is an inline type allocation
1008     DefaultValue,                     // default value in case of non-flat inline type array
1009     RawDefaultValue,                  // same as above but as raw machine word
1010     ParmLimit
1011   };
1012 
1013   static const TypeFunc* alloc_type(const Type* t) {
1014     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1015     fields[AllocSize]   = TypeInt::POS;
1016     fields[KlassNode]   = TypeInstPtr::NOTNULL;
1017     fields[InitialTest] = TypeInt::BOOL;
1018     fields[ALength]     = t;  // length (can be a bad length)
1019     fields[ValidLengthTest] = TypeInt::BOOL;
1020     fields[InlineType] = Type::BOTTOM;
1021     fields[DefaultValue] = TypeInstPtr::NOTNULL;
1022     fields[RawDefaultValue] = TypeX_X;
1023 
1024     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1025 
1026     // create result type (range)
1027     fields = TypeTuple::fields(1);
1028     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1029 
1030     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1031 
1032     return TypeFunc::make(domain, range);
1033   }
1034 
1035   // Result of Escape Analysis
1036   bool _is_scalar_replaceable;
1037   bool _is_non_escaping;
1038   // True when MemBar for new is redundant with MemBar at initializer exit
1039   bool _is_allocation_MemBar_redundant;
1040   bool _larval;
1041 
1042   virtual uint size_of() const; // Size is bigger
1043   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1044                Node *size, Node *klass_node, Node *initial_test,
1045                InlineTypeNode* inline_type_node = nullptr);
1046   // Expansion modifies the JVMState, so we need to deep clone it
1047   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1048   virtual int Opcode() const;
1049   virtual uint ideal_reg() const { return Op_RegP; }
1050   virtual bool        guaranteed_safepoint()  { return false; }
1051 
1052   // allocations do not modify their arguments
1053   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1054 
1055   // Pattern-match a possible usage of AllocateNode.
1056   // Return null if no allocation is recognized.
1057   // The operand is the pointer produced by the (possible) allocation.
1058   // It must be a projection of the Allocate or its subsequent CastPP.
1059   // (Note:  This function is defined in file graphKit.cpp, near
1060   // GraphKit::new_instance/new_array, whose output it recognizes.)
1061   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1062   static AllocateNode* Ideal_allocation(Node* ptr);
1063 
1064   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1065   // an offset, which is reported back to the caller.

1090 
1091   // Return true if the allocation does not escape the thread, i.e. its
1092   // escape state is noEscape or ArgEscape. InitializeNode._does_not_escape
1093   // is true when its allocation's escape state is noEscape or
1094   // ArgEscape. If the allocation's InitializeNode is null, check the
1095   // AllocateNode._is_non_escaping flag instead.
1096   // AllocateNode._is_non_escaping is true when the escape state is
1097   // noEscape.
1098   bool does_not_escape_thread() {
1099     InitializeNode* init = nullptr;
1100     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1101   }
1102 
1103   // If the object does not escape in its <init> method and a memory barrier
1104   // is inserted at the exit of its <init>, the memory barrier for the new is
1105   // not necessary. Invoke this method when the MemBar at the initializer exit
1106   // post-dominates the allocation node.
1107   void compute_MemBar_redundancy(ciMethod* initializer);
1108   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1109 
1110   Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
1111 };
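
// Illustrative sketch (not part of this file): building the call type for an
// allocation via alloc_type() above. Passing the array-length type here is an
// assumption about the caller; note that in the patched version the domain
// also carries the InlineType, DefaultValue and RawDefaultValue inputs.
//
//   const TypeFunc* atype = AllocateNode::alloc_type(TypeInt::INT);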
1112 
1113 //------------------------------AllocateArray---------------------------------
1114 //
1115 // High-level array allocation
1116 //
1117 class AllocateArrayNode : public AllocateNode {
1118 public:
1119   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1120                     Node* initial_test, Node* count_val, Node* valid_length_test,
1121                     Node* default_value, Node* raw_default_value)
1122     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1123                    initial_test)
1124   {
1125     init_class_id(Class_AllocateArray);
1126     set_req(AllocateNode::ALength,        count_val);
1127     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1128     init_req(AllocateNode::DefaultValue,  default_value);
1129     init_req(AllocateNode::RawDefaultValue, raw_default_value);
1130   }
1131   virtual uint size_of() const { return sizeof(*this); }
1132   virtual int Opcode() const;
1133 
1134   // Dig the length operand out of an array allocation site.
1135   Node* Ideal_length() {
1136     return in(AllocateNode::ALength);
1137   }
1138 
1139   // Dig the length operand out of an array allocation site and narrow the
1140   // type with a CastII, if necessary
1141   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1142 
1143   // Pattern-match a possible usage of AllocateArrayNode.
1144   // Return null if no allocation is recognized.
1145   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1146     AllocateNode* allo = Ideal_allocation(ptr);
1147     return (allo == nullptr || !allo->is_AllocateArray())
1148            ? nullptr : allo->as_AllocateArray();
1149   }
1150 };
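
// Illustrative sketch (not part of this file): recovering the length input of
// an array allocation from a pointer that may be produced by it; 'ptr' is an
// assumed Node*, and both helpers are declared above.
//
//   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr);
//   if (alloc != nullptr) {
//     Node* len = alloc->Ideal_length();  // raw ALength input; may need a CastII
//   }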
1151 

1234 //    0  -  object to lock
1235 //    1 -   a BoxLockNode
1236 //    2 -   a FastLockNode
1237 //
1238 class LockNode : public AbstractLockNode {
1239 public:
1240 
1241   static const TypeFunc *lock_type() {
1242     // create input type (domain)
1243     const Type **fields = TypeTuple::fields(3);
1244     fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
1245     fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
1246     fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
1247     const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
1248 
1249     // create result type (range)
1250     fields = TypeTuple::fields(0);
1251 
1252     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1253 
1254     return TypeFunc::make(domain, range);
1255   }
1256 
1257   virtual int Opcode() const;
1258   virtual uint size_of() const; // Size is bigger
1259   LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
1260     init_class_id(Class_Lock);
1261     init_flags(Flag_is_macro);
1262     C->add_macro_node(this);
1263   }
1264   virtual bool        guaranteed_safepoint()  { return false; }
1265 
1266   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1267   // Expansion modifies the JVMState, so we need to deep clone it
1268   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1269 
1270   bool is_nested_lock_region(); // Is this Lock nested?
1271   bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
1272 };
1273 
1274 //------------------------------Unlock---------------------------------------