
src/hotspot/share/opto/callnode.hpp


  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match( const ProjNode *proj, const Matcher *m );
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93   static  const TypeTuple *osr_domain();
  94 };
  95 
  96 
  97 //------------------------------ParmNode---------------------------------------
  98 // Incoming parameters
  99 class ParmNode : public ProjNode {
 100   static const char * const names[TypeFunc::Parms+1];
 101 public:
 102   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 103     init_class_id(Class_Parm);
 104   }
 105   virtual int Opcode() const;
 106   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 107   virtual uint ideal_reg() const;
 108 #ifndef PRODUCT
 109   virtual void dump_spec(outputStream *st) const;
 110   virtual void dump_compact_spec(outputStream *st) const;
 111 #endif
 112 };
 113 

 542 
 543   virtual uint size_of() const { return sizeof(*this); }
 544 
 545   // Assumes that "this" is an argument to a safepoint node "s", and that
 546   // "new_call" is being created to correspond to "s".  But the difference
 547   // between the start index of the jvmstates of "new_call" and "s" is
 548   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 549   // corresponds appropriately to "this" in "new_call".  Assumes that
 550   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 551   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 552   SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;
 553 
 554 #ifndef PRODUCT
 555   virtual void              dump_spec(outputStream *st) const;
 556 #endif
 557 };
 558 
 559 
 560 // Simple container for the outgoing projections of a call.  Useful
 561 // for serious surgery on calls.
 562 class CallProjections : public StackObj {
 563 public:
 564   Node* fallthrough_proj;
 565   Node* fallthrough_catchproj;
 566   Node* fallthrough_memproj;
 567   Node* fallthrough_ioproj;
 568   Node* catchall_catchproj;
 569   Node* catchall_memproj;
 570   Node* catchall_ioproj;
 571   Node* resproj;
 572   Node* exobj;
 573 };
 574 
 575 class CallGenerator;
 576 
 577 //------------------------------CallNode---------------------------------------
 578 // Call nodes now subsume the function of debug nodes at callsites, so they
 579 // contain the functionality of a full scope chain of debug nodes.
 580 class CallNode : public SafePointNode {
 581   friend class VMStructs;
 582 
 583 protected:
 584   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseTransform* phase);
 585 
 586 public:
 587   const TypeFunc* _tf;          // Function type
 588   address         _entry_point; // Address of method being called
 589   float           _cnt;         // Estimate of number of times called
 590   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 591   const char*     _name;        // Printable name, if _method is NULL
 592 
 593   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 594     : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
 595       _tf(tf),
 596       _entry_point(addr),
 597       _cnt(COUNT_UNKNOWN),
 598       _generator(NULL),
 599       _name(NULL)
 600   {
 601     init_class_id(Class_Call);
 602   }
 603 
 604   const TypeFunc* tf()         const { return _tf; }
 605   const address  entry_point() const { return _entry_point; }
 606   const float    cnt()         const { return _cnt; }
 607   CallGenerator* generator()   const { return _generator; }
 608 
 609   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 610   void set_entry_point(address p)       { _entry_point = p; }
 611   void set_cnt(float c)                 { _cnt = c; }
 612   void set_generator(CallGenerator* cg) { _generator = cg; }
 613 
 614   virtual const Type* bottom_type() const;
 615   virtual const Type* Value(PhaseGVN* phase) const;
 616   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 617   virtual Node* Identity(PhaseGVN* phase) { return this; }
 618   virtual bool        cmp(const Node &n) const;
 619   virtual uint        size_of() const = 0;
 620   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 621   virtual Node*       match(const ProjNode* proj, const Matcher* m);
 622   virtual uint        ideal_reg() const { return NotAMachineReg; }
 623   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 624   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 625   virtual bool        guaranteed_safepoint()  { return true; }
 626   // For macro nodes, the JVMState gets modified during expansion. If calls
 627   // use MachConstantBase, it gets modified during matching. So when cloning
 628   // the node the JVMState must be deep cloned. Default is to shallow clone.
 629   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 630 
 631   // Returns true if the call may modify n
 632   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseTransform* phase);
 633   // Does this node have a use of n other than in debug information?
 634   bool                has_non_debug_use(Node* n);
 635   // Returns the unique CheckCastPP of a call
 636   // or result projection if there are several CheckCastPPs,
 637   // or returns NULL if there is none.
 638   Node* result_cast();
 639   // Does this node return a pointer?
 640   bool returns_pointer() const {
 641     const TypeTuple* r = tf()->range();
 642     return (r->cnt() > TypeFunc::Parms &&
 643             r->field_at(TypeFunc::Parms)->isa_ptr());
 644   }
 645 
 646   // Collect all the interesting edges from a call for use in
 647   // replacing the call by something else.  Used by macro expansion
 648   // and the late inlining support.
 649   void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);
 650 
 651   virtual uint match_edge(uint idx) const;
 652 
 653   bool is_call_to_arraycopystub() const;
 654 
 655   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 656 
 657 #ifndef PRODUCT
 658   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 659   virtual void        dump_spec(outputStream* st) const;
 660 #endif
 661 };
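
extract_projections() above fills a caller-provided CallProjections, so the usual pattern in macro expansion or late inlining is to keep the container on the stack and then rewire the recorded edges. A minimal, hedged sketch of that pattern (the CallNode* `call` and the rewiring itself are assumed surrounding context, not part of this header):

    CallProjections projs;
    call->extract_projections(&projs, /*separate_io_proj=*/true);
    if (projs.fallthrough_catchproj != NULL) {
      // reconnect the normal control path to the replacement subgraph
    }
    if (projs.resproj != NULL) {
      // hook the call's result users up to the replacement value
    }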
 662 
 663 
 664 //------------------------------CallJavaNode-----------------------------------
 665 // Make a static or dynamic subroutine call node using Java calling
 666 // convention.  (The "Java" calling convention is the compiler's calling
 667 // convention, as opposed to the interpreter's or that of native C.)
 668 class CallJavaNode : public CallNode {
 669   friend class VMStructs;

 699   bool  override_symbolic_info() const     { return _override_symbolic_info; }
 700   void  set_arg_escape(bool f)             { _arg_escape = f; }
 701   bool  arg_escape() const                 { return _arg_escape; }
 702   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 703 
 704   DEBUG_ONLY( bool validate_symbolic_info() const; )
 705 
 706 #ifndef PRODUCT
 707   virtual void  dump_spec(outputStream *st) const;
 708   virtual void  dump_compact_spec(outputStream *st) const;
 709 #endif
 710 };
 711 
 712 //------------------------------CallStaticJavaNode-----------------------------
 713 // Make a direct subroutine call using Java calling convention (for static
 714 // calls and optimized virtual calls, plus calls to wrappers for run-time
 715 // routines); generates static stub.
 716 class CallStaticJavaNode : public CallJavaNode {
 717   virtual bool cmp( const Node &n ) const;
 718   virtual uint size_of() const; // Size is bigger
 719 public:
 720   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 721     : CallJavaNode(tf, addr, method) {
 722     init_class_id(Class_CallStaticJava);
 723     if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
 724       init_flags(Flag_is_macro);
 725       C->add_macro_node(this);
 726     }
 727   }
 728   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 729     : CallJavaNode(tf, addr, NULL) {
 730     init_class_id(Class_CallStaticJava);
 731     // This node calls a runtime stub, which often has narrow memory effects.
 732     _adr_type = adr_type;
 733     _name = name;
 734   }
 735 
 736   // If this is an uncommon trap, return the request code, else zero.
 737   int uncommon_trap_request() const;
 738   static int extract_uncommon_trap_request(const Node* call);
 739 
 740   bool is_boxing_method() const {
 741     return is_macro() && (method() != NULL) && method()->is_boxing_method();
 742   }
 743   // Late inlining modifies the JVMState, so we need to deep clone it
 744   // when the call node is cloned (because it is a macro node).
 745   virtual bool needs_deep_clone_jvms(Compile* C) {
 746     return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);

 816   }
 817   virtual int   Opcode() const;
 818   virtual bool        guaranteed_safepoint()  { return false; }
 819 #ifndef PRODUCT
 820   virtual void  dump_spec(outputStream *st) const;
 821 #endif
 822 };
 823 
 824 //------------------------------CallLeafNoFPNode-------------------------------
 825 // A CallLeafNode that does not use floating point, or uses it in the same
 826 // manner as the generated code
 827 class CallLeafNoFPNode : public CallLeafNode {
 828 public:
 829   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 830                    const TypePtr* adr_type)
 831     : CallLeafNode(tf, addr, name, adr_type)
 832   {
 833     init_class_id(Class_CallLeafNoFP);
 834   }
 835   virtual int   Opcode() const;
 836 };
 837 
 838 //------------------------------CallLeafVectorNode-------------------------------
 839 // CallLeafNode but calling with vector calling convention instead.
 840 class CallLeafVectorNode : public CallLeafNode {
 841 private:
 842   uint _num_bits;
 843 protected:
 844   virtual bool cmp( const Node &n ) const;
 845   virtual uint size_of() const; // Size is bigger
 846 public:
 847   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 848                    const TypePtr* adr_type, uint num_bits)
 849     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 850   {
 851   }
 852   virtual int   Opcode() const;
 853   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 854 };
 855 

 858 // High-level memory allocation
 859 //
 860 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 861 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 862 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 863 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 864 //  order to differentiate the uses of the projection on the normal control path from
 865 //  those on the exception return path.
 866 //
 867 class AllocateNode : public CallNode {
 868 public:
 869   enum {
 870     // Output:
 871     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
 872     // Inputs:
 873     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
 874     KlassNode,                        // type (maybe dynamic) of the obj.
 875     InitialTest,                      // slow-path test (may be constant)
 876     ALength,                          // array length (or TOP if none)
 877     ValidLengthTest,
 878     ParmLimit
 879   };
 880 
 881   static const TypeFunc* alloc_type(const Type* t) {
 882     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
 883     fields[AllocSize]   = TypeInt::POS;
 884     fields[KlassNode]   = TypeInstPtr::NOTNULL;
 885     fields[InitialTest] = TypeInt::BOOL;
 886     fields[ALength]     = t;  // length (can be a bad length)
 887     fields[ValidLengthTest] = TypeInt::BOOL;
 888 
 889     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
 890 
 891     // create result type (range)
 892     fields = TypeTuple::fields(1);
 893     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
 894 
 895     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
 896 
 897     return TypeFunc::make(domain, range);
 898   }
 899 
 900   // Result of Escape Analysis
 901   bool _is_scalar_replaceable;
 902   bool _is_non_escaping;
 903   // True when MemBar for new is redundant with MemBar at initializer exit
 904   bool _is_allocation_MemBar_redundant;
 905 
 906   virtual uint size_of() const; // Size is bigger
 907   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
 908                Node *size, Node *klass_node, Node *initial_test);
 909   // Expansion modifies the JVMState, so we need to deep clone it
 910   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
 911   virtual int Opcode() const;
 912   virtual uint ideal_reg() const { return Op_RegP; }
 913   virtual bool        guaranteed_safepoint()  { return false; }
 914 
 915   // allocations do not modify their arguments
 916   virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}
 917 
 918   // Pattern-match a possible usage of AllocateNode.
 919   // Return null if no allocation is recognized.
 920   // The operand is the pointer produced by the (possible) allocation.
 921   // It must be a projection of the Allocate or its subsequent CastPP.
 922   // (Note:  This function is defined in file graphKit.cpp, near
 923   // GraphKit::new_instance/new_array, whose output it recognizes.)
 924   // The 'ptr' may not have an offset unless the 'offset' argument is given.
 925   static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);
 926 
 927   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
 928   // an offset, which is reported back to the caller.

 953 
 954   // Return true if the allocation doesn't escape the thread, i.e. its escape
 955   // state is NoEscape or ArgEscape. InitializeNode._does_not_escape
 956   // is true when its allocation's escape state is NoEscape or
 957   // ArgEscape. In case the allocation's InitializeNode is NULL, check the
 958   // AllocateNode._is_non_escaping flag.
 959   // AllocateNode._is_non_escaping is true when its escape state is
 960   // NoEscape.
 961   bool does_not_escape_thread() {
 962     InitializeNode* init = NULL;
 963     return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
 964   }
 965 
 966   // If the object doesn't escape in its <.init> method and there is a memory
 967   // barrier inserted at the exit of <.init>, the memory barrier for the allocation
 968   // is not necessary. Invoke this method when the MemBar at the exit of the
 969   // initializer post-dominates the allocation node.
 970   void compute_MemBar_redundancy(ciMethod* initializer);
 971   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
 972 
 973   Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
 974 };
 975 
 976 //------------------------------AllocateArray---------------------------------
 977 //
 978 // High-level array allocation
 979 //
 980 class AllocateArrayNode : public AllocateNode {
 981 public:
 982   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
 983                     Node* initial_test, Node* count_val, Node* valid_length_test)
 984     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
 985                    initial_test)
 986   {
 987     init_class_id(Class_AllocateArray);
 988     set_req(AllocateNode::ALength,        count_val);
 989     set_req(AllocateNode::ValidLengthTest, valid_length_test);
 990   }
 991   virtual int Opcode() const;
 992 
 993   // Dig the length operand out of an array allocation site.
 994   Node* Ideal_length() {
 995     return in(AllocateNode::ALength);
 996   }
 997 
 998   // Dig the length operand out of an array allocation site and narrow the
 999   // type with a CastII, if necessary
1000   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);
1001 
1002   // Pattern-match a possible usage of AllocateArrayNode.
1003   // Return null if no allocation is recognized.
1004   static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
1005     AllocateNode* allo = Ideal_allocation(ptr, phase);
1006     return (allo == NULL || !allo->is_AllocateArray())
1007            ? NULL : allo->as_AllocateArray();
1008   }
1009 };

1089 //    0  -  object to lock
1090 //    1 -   a BoxLockNode
1091 //    2 -   a FastLockNode
1092 //
1093 class LockNode : public AbstractLockNode {
1094 public:
1095 
1096   static const TypeFunc *lock_type() {
1097     // create input type (domain)
1098     const Type **fields = TypeTuple::fields(3);
1099     fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
1100     fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
1101     fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
1102     const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
1103 
1104     // create result type (range)
1105     fields = TypeTuple::fields(0);
1106 
1107     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1108 
1109     return TypeFunc::make(domain,range);
1110   }
1111 
1112   virtual int Opcode() const;
1113   virtual uint size_of() const; // Size is bigger
1114   LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
1115     init_class_id(Class_Lock);
1116     init_flags(Flag_is_macro);
1117     C->add_macro_node(this);
1118   }
1119   virtual bool        guaranteed_safepoint()  { return false; }
1120 
1121   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1122   // Expansion modifies the JVMState, so we need to deep clone it
1123   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1124 
1125   bool is_nested_lock_region(); // Is this Lock nested?
1126   bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
1127 };
1128 
1129 //------------------------------Unlock---------------------------------------

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93 };
  94 
  95 
  96 //------------------------------ParmNode---------------------------------------
  97 // Incoming parameters
  98 class ParmNode : public ProjNode {
  99   static const char * const names[TypeFunc::Parms+1];
 100 public:
 101   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 102     init_class_id(Class_Parm);
 103   }
 104   virtual int Opcode() const;
 105   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 106   virtual uint ideal_reg() const;
 107 #ifndef PRODUCT
 108   virtual void dump_spec(outputStream *st) const;
 109   virtual void dump_compact_spec(outputStream *st) const;
 110 #endif
 111 };
 112 

 541 
 542   virtual uint size_of() const { return sizeof(*this); }
 543 
 544   // Assumes that "this" is an argument to a safepoint node "s", and that
 545   // "new_call" is being created to correspond to "s".  But the difference
 546   // between the start index of the jvmstates of "new_call" and "s" is
 547   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 548   // corresponds appropriately to "this" in "new_call".  Assumes that
 549   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 550   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 551   SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;
 552 
 553 #ifndef PRODUCT
 554   virtual void              dump_spec(outputStream *st) const;
 555 #endif
 556 };
 557 
 558 
 559 // Simple container for the outgoing projections of a call.  Useful
 560 // for serious surgery on calls.
 561 class CallProjections {
 562 public:
 563   Node* fallthrough_proj;
 564   Node* fallthrough_catchproj;
 565   Node* fallthrough_memproj;
 566   Node* fallthrough_ioproj;
 567   Node* catchall_catchproj;
 568   Node* catchall_memproj;
 569   Node* catchall_ioproj;
 570   Node* exobj;
 571   uint nb_resproj;
 572   Node* resproj[1]; // at least one projection
 573 
 574   CallProjections(uint nbres) {
 575     fallthrough_proj      = NULL;
 576     fallthrough_catchproj = NULL;
 577     fallthrough_memproj   = NULL;
 578     fallthrough_ioproj    = NULL;
 579     catchall_catchproj    = NULL;
 580     catchall_memproj      = NULL;
 581     catchall_ioproj       = NULL;
 582     exobj                 = NULL;
 583     nb_resproj            = nbres;
 584     resproj[0]            = NULL;
 585     for (uint i = 1; i < nb_resproj; i++) {
 586       resproj[i]          = NULL;
 587     }
 588   }
 589 
 590 };
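
The trailing resproj[1] array plus the nb_resproj count replace the old single resproj field: this is the usual variable-length-tail idiom, so the object has to be allocated with room for all result projections before the constructor runs (note the constructor writes resproj[1..nbres-1]). A hedged sizing sketch using plain placement new; HotSpot would typically carve the memory out of a compiler arena instead:

    #include <new>                             // placement new
    uint nbres = 2;                            // e.g. an inline type returned as two fields
    size_t bytes = sizeof(CallProjections) + (nbres - 1) * sizeof(Node*);
    void* mem = ::operator new(bytes);         // arena allocation in practice
    CallProjections* projs = new (mem) CallProjections(nbres);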
 591 
 592 class CallGenerator;
 593 
 594 //------------------------------CallNode---------------------------------------
 595 // Call nodes now subsume the function of debug nodes at callsites, so they
 596 // contain the functionality of a full scope chain of debug nodes.
 597 class CallNode : public SafePointNode {
 598   friend class VMStructs;
 599 
 600 protected:
 601   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseTransform* phase);
 602 
 603 public:
 604   const TypeFunc* _tf;          // Function type
 605   address         _entry_point; // Address of method being called
 606   float           _cnt;         // Estimate of number of times called
 607   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 608   const char*     _name;        // Printable name, if _method is NULL
 609 
 610   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 611     : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
 612       _tf(tf),
 613       _entry_point(addr),
 614       _cnt(COUNT_UNKNOWN),
 615       _generator(NULL),
 616       _name(NULL)
 617   {
 618     init_class_id(Class_Call);
 619   }
 620 
 621   const TypeFunc* tf()         const { return _tf; }
 622   const address  entry_point() const { return _entry_point; }
 623   const float    cnt()         const { return _cnt; }
 624   CallGenerator* generator()   const { return _generator; }
 625 
 626   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 627   void set_entry_point(address p)       { _entry_point = p; }
 628   void set_cnt(float c)                 { _cnt = c; }
 629   void set_generator(CallGenerator* cg) { _generator = cg; }
 630 
 631   virtual const Type* bottom_type() const;
 632   virtual const Type* Value(PhaseGVN* phase) const;
 633   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 634   virtual Node* Identity(PhaseGVN* phase) { return this; }
 635   virtual bool        cmp(const Node &n) const;
 636   virtual uint        size_of() const = 0;
 637   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 638   virtual Node*       match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
 639   virtual uint        ideal_reg() const { return NotAMachineReg; }
 640   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 641   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 642   virtual bool        guaranteed_safepoint()  { return true; }
 643   // For macro nodes, the JVMState gets modified during expansion. If calls
 644   // use MachConstantBase, it gets modified during matching. So when cloning
 645   // the node the JVMState must be deep cloned. Default is to shallow clone.
 646   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 647 
 648   // Returns true if the call may modify n
 649   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseTransform* phase);
 650   // Does this node have a use of n other than in debug information?
 651   bool                has_non_debug_use(Node* n);
 652   bool                has_debug_use(Node* n);
 653   // Returns the unique CheckCastPP of a call
 654   // or result projection if there are several CheckCastPPs,
 655   // or returns NULL if there is none.
 656   Node* result_cast();
 657   // Does this node return a pointer?
 658   bool returns_pointer() const {
 659     const TypeTuple* r = tf()->range_sig();
 660     return (!tf()->returns_inline_type_as_fields() &&
 661             r->cnt() > TypeFunc::Parms &&
 662             r->field_at(TypeFunc::Parms)->isa_ptr());
 663   }
 664 
 665   // Collect all the interesting edges from a call for use in
 666   // replacing the call by something else.  Used by macro expansion
 667   // and the late inlining support.
 668   CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);
 669 
 670   virtual uint match_edge(uint idx) const;
 671 
 672   bool is_call_to_arraycopystub() const;
 673 
 674   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 675 
 676 #ifndef PRODUCT
 677   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 678   virtual void        dump_spec(outputStream* st) const;
 679 #endif
 680 };
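
With the container now variable-sized, extract_projections() allocates and returns it rather than filling an out-parameter, and consumers iterate nb_resproj instead of testing a single resproj field. A hedged usage sketch (`call` and the replacement logic are assumed context):

    CallProjections* projs = call->extract_projections(/*separate_io_proj=*/true);
    for (uint i = 0; i < projs->nb_resproj; i++) {
      Node* res = projs->resproj[i];   // may be NULL if that result is unused
      if (res != NULL) {
        // replace the i-th returned value here
      }
    }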
 681 
 682 
 683 //------------------------------CallJavaNode-----------------------------------
 684 // Make a static or dynamic subroutine call node using Java calling
 685 // convention.  (The "Java" calling convention is the compiler's calling
 686 // convention, as opposed to the interpreter's or that of native C.)
 687 class CallJavaNode : public CallNode {
 688   friend class VMStructs;

 718   bool  override_symbolic_info() const     { return _override_symbolic_info; }
 719   void  set_arg_escape(bool f)             { _arg_escape = f; }
 720   bool  arg_escape() const                 { return _arg_escape; }
 721   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 722 
 723   DEBUG_ONLY( bool validate_symbolic_info() const; )
 724 
 725 #ifndef PRODUCT
 726   virtual void  dump_spec(outputStream *st) const;
 727   virtual void  dump_compact_spec(outputStream *st) const;
 728 #endif
 729 };
 730 
 731 //------------------------------CallStaticJavaNode-----------------------------
 732 // Make a direct subroutine call using Java calling convention (for static
 733 // calls and optimized virtual calls, plus calls to wrappers for run-time
 734 // routines); generates static stub.
 735 class CallStaticJavaNode : public CallJavaNode {
 736   virtual bool cmp( const Node &n ) const;
 737   virtual uint size_of() const; // Size is bigger
 738 
 739   bool remove_useless_allocation(PhaseGVN *phase, Node* ctl, Node* mem, Node* unc_arg);
 740 
 741 public:
 742   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 743     : CallJavaNode(tf, addr, method) {
 744     init_class_id(Class_CallStaticJava);
 745     if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
 746       init_flags(Flag_is_macro);
 747       C->add_macro_node(this);
 748     }
 749     const TypeTuple *r = tf->range_sig();
 750     if (InlineTypeReturnedAsFields &&
 751         method != NULL &&
 752         method->is_method_handle_intrinsic() &&
 753         r->cnt() > TypeFunc::Parms &&
 754         r->field_at(TypeFunc::Parms)->isa_oopptr() &&
 755         r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
 756       // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
 757       init_flags(Flag_is_macro);
 758       C->add_macro_node(this);
 759     }
 760   }
 761   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 762     : CallJavaNode(tf, addr, NULL) {
 763     init_class_id(Class_CallStaticJava);
 764     // This node calls a runtime stub, which often has narrow memory effects.
 765     _adr_type = adr_type;
 766     _name = name;
 767   }
 768 
 769   // If this is an uncommon trap, return the request code, else zero.
 770   int uncommon_trap_request() const;
 771   static int extract_uncommon_trap_request(const Node* call);
 772 
 773   bool is_boxing_method() const {
 774     return is_macro() && (method() != NULL) && method()->is_boxing_method();
 775   }
 776   // Late inlining modifies the JVMState, so we need to deep clone it
 777   // when the call node is cloned (because it is a macro node).
 778   virtual bool needs_deep_clone_jvms(Compile* C) {
 779     return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);

 849   }
 850   virtual int   Opcode() const;
 851   virtual bool        guaranteed_safepoint()  { return false; }
 852 #ifndef PRODUCT
 853   virtual void  dump_spec(outputStream *st) const;
 854 #endif
 855 };
 856 
 857 //------------------------------CallLeafNoFPNode-------------------------------
 858 // A CallLeafNode that does not use floating point, or uses it in the same
 859 // manner as the generated code
 860 class CallLeafNoFPNode : public CallLeafNode {
 861 public:
 862   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 863                    const TypePtr* adr_type)
 864     : CallLeafNode(tf, addr, name, adr_type)
 865   {
 866     init_class_id(Class_CallLeafNoFP);
 867   }
 868   virtual int   Opcode() const;
 869   virtual uint match_edge(uint idx) const;
 870 };
 871 
 872 //------------------------------CallLeafVectorNode-------------------------------
 873 // CallLeafNode but calling with vector calling convention instead.
 874 class CallLeafVectorNode : public CallLeafNode {
 875 private:
 876   uint _num_bits;
 877 protected:
 878   virtual bool cmp( const Node &n ) const;
 879   virtual uint size_of() const; // Size is bigger
 880 public:
 881   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 882                    const TypePtr* adr_type, uint num_bits)
 883     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 884   {
 885   }
 886   virtual int   Opcode() const;
 887   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 888 };
 889 

 892 // High-level memory allocation
 893 //
 894 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 895 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 896 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 897 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 898 //  order to differentiate the uses of the projection on the normal control path from
 899 //  those on the exception return path.
 900 //
 901 class AllocateNode : public CallNode {
 902 public:
 903   enum {
 904     // Output:
 905     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
 906     // Inputs:
 907     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
 908     KlassNode,                        // type (maybe dynamic) of the obj.
 909     InitialTest,                      // slow-path test (may be constant)
 910     ALength,                          // array length (or TOP if none)
 911     ValidLengthTest,
 912     InlineType,                       // InlineTypeNode if this is an inline type allocation
 913     DefaultValue,                     // default value in case of non-flattened inline type array
 914     RawDefaultValue,                  // same as above but as raw machine word
 915     ParmLimit
 916   };
 917 
 918   static const TypeFunc* alloc_type(const Type* t) {
 919     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
 920     fields[AllocSize]   = TypeInt::POS;
 921     fields[KlassNode]   = TypeInstPtr::NOTNULL;
 922     fields[InitialTest] = TypeInt::BOOL;
 923     fields[ALength]     = t;  // length (can be a bad length)
 924     fields[ValidLengthTest] = TypeInt::BOOL;
 925     fields[InlineType] = Type::BOTTOM;
 926     fields[DefaultValue] = TypeInstPtr::NOTNULL;
 927     fields[RawDefaultValue] = TypeX_X;
 928 
 929     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
 930 
 931     // create result type (range)
 932     fields = TypeTuple::fields(1);
 933     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
 934 
 935     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
 936 
 937     return TypeFunc::make(domain, range);
 938   }
 939 
 940   // Result of Escape Analysis
 941   bool _is_scalar_replaceable;
 942   bool _is_non_escaping;
 943   // True when MemBar for new is redundant with MemBar at initializer exit
 944   bool _is_allocation_MemBar_redundant;
 945   bool _larval;
 946 
 947   virtual uint size_of() const; // Size is bigger
 948   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
 949                Node *size, Node *klass_node, Node *initial_test,
 950                InlineTypeNode* inline_type_node = NULL);
 951   // Expansion modifies the JVMState, so we need to deep clone it
 952   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
 953   virtual int Opcode() const;
 954   virtual uint ideal_reg() const { return Op_RegP; }
 955   virtual bool        guaranteed_safepoint()  { return false; }
 956 
 957   // allocations do not modify their arguments
 958   virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}
 959 
 960   // Pattern-match a possible usage of AllocateNode.
 961   // Return null if no allocation is recognized.
 962   // The operand is the pointer produced by the (possible) allocation.
 963   // It must be a projection of the Allocate or its subsequent CastPP.
 964   // (Note:  This function is defined in file graphKit.cpp, near
 965   // GraphKit::new_instance/new_array, whose output it recognizes.)
 966   // The 'ptr' may not have an offset unless the 'offset' argument is given.
 967   static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);
 968 
 969   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
 970   // an offset, which is reported back to the caller.

 995 
 996   // Return true if the allocation doesn't escape the thread, i.e. its escape
 997   // state is NoEscape or ArgEscape. InitializeNode._does_not_escape
 998   // is true when its allocation's escape state is NoEscape or
 999   // ArgEscape. In case the allocation's InitializeNode is NULL, check the
1000   // AllocateNode._is_non_escaping flag.
1001   // AllocateNode._is_non_escaping is true when its escape state is
1002   // NoEscape.
1003   bool does_not_escape_thread() {
1004     InitializeNode* init = NULL;
1005     return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
1006   }
1007 
1008   // If the object doesn't escape in its <.init> method and there is a memory
1009   // barrier inserted at the exit of <.init>, the memory barrier for the allocation
1010   // is not necessary. Invoke this method when the MemBar at the exit of the
1011   // initializer post-dominates the allocation node.
1012   void compute_MemBar_redundancy(ciMethod* initializer);
1013   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1014 
1015   Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
1016 };
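
Ideal_allocation() is the usual way the optimizer walks back from an oop to the AllocateNode that produced it, after which the escape-analysis flags above can be queried. A hedged sketch (`ptr` and `phase` are assumed to be a pointer-typed node and the current PhaseGVN):

    AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase);
    if (alloc != NULL && alloc->does_not_escape_thread()) {
      // thread-local object: barriers/synchronization on it may be weakened
    }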
1017 
1018 //------------------------------AllocateArray---------------------------------
1019 //
1020 // High-level array allocation
1021 //
1022 class AllocateArrayNode : public AllocateNode {
1023 public:
1024   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1025                     Node* initial_test, Node* count_val, Node* valid_length_test,
1026                     Node* default_value, Node* raw_default_value)
1027     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1028                    initial_test)
1029   {
1030     init_class_id(Class_AllocateArray);
1031     set_req(AllocateNode::ALength,        count_val);
1032     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1033     init_req(AllocateNode::DefaultValue,  default_value);
1034     init_req(AllocateNode::RawDefaultValue, raw_default_value);
1035   }
1036   virtual int Opcode() const;
1037 
1038   // Dig the length operand out of an array allocation site.
1039   Node* Ideal_length() {
1040     return in(AllocateNode::ALength);
1041   }
1042 
1043   // Dig the length operand out of an array allocation site and narrow the
1044   // type with a CastII, if necessary
1045   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);
1046 
1047   // Pattern-match a possible usage of AllocateArrayNode.
1048   // Return null if no allocation is recognized.
1049   static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
1050     AllocateNode* allo = Ideal_allocation(ptr, phase);
1051     return (allo == NULL || !allo->is_AllocateArray())
1052            ? NULL : allo->as_AllocateArray();
1053   }
1054 };

1134 //    0  -  object to lock
1135 //    1 -   a BoxLockNode
1136 //    2 -   a FastLockNode
1137 //
1138 class LockNode : public AbstractLockNode {
1139 public:
1140 
1141   static const TypeFunc *lock_type() {
1142     // create input type (domain)
1143     const Type **fields = TypeTuple::fields(3);
1144     fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
1145     fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
1146     fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
1147     const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
1148 
1149     // create result type (range)
1150     fields = TypeTuple::fields(0);
1151 
1152     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1153 
1154     return TypeFunc::make(domain, range);
1155   }
1156 
1157   virtual int Opcode() const;
1158   virtual uint size_of() const; // Size is bigger
1159   LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
1160     init_class_id(Class_Lock);
1161     init_flags(Flag_is_macro);
1162     C->add_macro_node(this);
1163   }
1164   virtual bool        guaranteed_safepoint()  { return false; }
1165 
1166   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1167   // Expansion modifies the JVMState, so we need to deep clone it
1168   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1169 
1170   bool is_nested_lock_region(); // Is this Lock nested?
1171   bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
1172 };
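
The three domain slots built by lock_type() line up with the inputs listed in the comment above the class, at TypeFunc::Parms + i. A hedged sketch of how a lock site could be wired (C, ctrl, obj, box and flock are assumed surrounding context):

    LockNode* lock = new LockNode(C, LockNode::lock_type());
    lock->init_req(TypeFunc::Control,   ctrl);
    lock->init_req(TypeFunc::Parms + 0, obj);    // object to lock
    lock->init_req(TypeFunc::Parms + 1, box);    // BoxLockNode
    lock->init_req(TypeFunc::Parms + 2, flock);  // FastLockNode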
1173 
1174 //------------------------------Unlock---------------------------------------