src/hotspot/share/opto/callnode.hpp

  60 //------------------------------StartNode--------------------------------------
  61 // The method start node
  62 class StartNode : public MultiNode {
  63   virtual bool cmp( const Node &n ) const;
  64   virtual uint size_of() const; // Size is bigger
  65 public:
  66   const TypeTuple *_domain;
  67   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  68     init_class_id(Class_Start);
  69     init_req(0,this);
  70     init_req(1,root);
  71   }
  72   virtual int Opcode() const;
  73   virtual bool pinned() const { return true; };
  74   virtual const Type *bottom_type() const;
  75   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  76   virtual const Type* Value(PhaseGVN* phase) const;
  77   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  78   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  79   virtual const RegMask &in_RegMask(uint) const;
  80   virtual Node *match( const ProjNode *proj, const Matcher *m );
  81   virtual uint ideal_reg() const { return 0; }
  82 #ifndef PRODUCT
  83   virtual void  dump_spec(outputStream *st) const;
  84   virtual void  dump_compact_spec(outputStream *st) const;
  85 #endif
  86 };
  87 
  88 //------------------------------StartOSRNode-----------------------------------
  89 // The method start node for on stack replacement code
  90 class StartOSRNode : public StartNode {
  91 public:
  92   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  93   virtual int   Opcode() const;
  94   static  const TypeTuple *osr_domain();
  95 };
  96 
  97 
  98 //------------------------------ParmNode---------------------------------------
  99 // Incoming parameters
 100 class ParmNode : public ProjNode {
 101   static const char * const names[TypeFunc::Parms+1];
 102 public:
 103   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 104     init_class_id(Class_Parm);
 105   }
 106   virtual int Opcode() const;
 107   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 108   virtual uint ideal_reg() const;
 109 #ifndef PRODUCT
 110   virtual void dump_spec(outputStream *st) const;
 111   virtual void dump_compact_spec(outputStream *st) const;
 112   virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
 113 #endif
 114 };

 545 
 546   virtual uint size_of() const { return sizeof(*this); }
 547 
 548   // Assumes that "this" is an argument to a safepoint node "s", and that
 549   // "new_call" is being created to correspond to "s".  But the difference
 550   // between the start index of the jvmstates of "new_call" and "s" is
 551   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 552   // corresponds appropriately to "this" in "new_call".  Assumes that
 553   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 554   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 555   SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;
 556 
 557 #ifndef PRODUCT
 558   virtual void              dump_spec(outputStream *st) const;
 559 #endif
 560 };
 561 
 562 
 563 // Simple container for the outgoing projections of a call.  Useful
 564 // for serious surgery on calls.
 565 class CallProjections : public StackObj {
 566 public:
 567   Node* fallthrough_proj;
 568   Node* fallthrough_catchproj;
 569   Node* fallthrough_memproj;
 570   Node* fallthrough_ioproj;
 571   Node* catchall_catchproj;
 572   Node* catchall_memproj;
 573   Node* catchall_ioproj;
 574   Node* resproj;
 575   Node* exobj;
 576 };
 577 
 578 class CallGenerator;
 579 
 580 //------------------------------CallNode---------------------------------------
 581 // Call nodes now subsume the function of debug nodes at callsites, so they
 582 // contain the functionality of a full scope chain of debug nodes.
 583 class CallNode : public SafePointNode {
 584   friend class VMStructs;
 585 
 586 protected:
 587   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseTransform* phase);
 588 
 589 public:
 590   const TypeFunc* _tf;          // Function type
 591   address         _entry_point; // Address of method being called
 592   float           _cnt;         // Estimate of number of times called
 593   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 594   const char*     _name;        // Printable name, if _method is NULL
 595 
 596   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 597     : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
 598       _tf(tf),
 599       _entry_point(addr),
 600       _cnt(COUNT_UNKNOWN),
 601       _generator(NULL),
 602       _name(NULL)
 603   {
 604     init_class_id(Class_Call);
 605   }
 606 
 607   const TypeFunc* tf()         const { return _tf; }
 608   const address  entry_point() const { return _entry_point; }
 609   const float    cnt()         const { return _cnt; }
 610   CallGenerator* generator()   const { return _generator; }
 611 
 612   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 613   void set_entry_point(address p)       { _entry_point = p; }
 614   void set_cnt(float c)                 { _cnt = c; }
 615   void set_generator(CallGenerator* cg) { _generator = cg; }
 616 
 617   virtual const Type* bottom_type() const;
 618   virtual const Type* Value(PhaseGVN* phase) const;
 619   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 620   virtual Node* Identity(PhaseGVN* phase) { return this; }
 621   virtual bool        cmp(const Node &n) const;
 622   virtual uint        size_of() const = 0;
 623   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 624   virtual Node*       match(const ProjNode* proj, const Matcher* m);
 625   virtual uint        ideal_reg() const { return NotAMachineReg; }
 626   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 627   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 628   virtual bool        guaranteed_safepoint()  { return true; }
 629   // For macro nodes, the JVMState gets modified during expansion. If calls
 630   // use MachConstantBase, it gets modified during matching. So when cloning
 631   // the node the JVMState must be deep cloned. Default is to shallow clone.
 632   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 633 
 634   // Returns true if the call may modify n
 635   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseTransform* phase);
 636   // Does this node have a use of n other than in debug information?
 637   bool                has_non_debug_use(Node* n);
 638   // Returns the unique CheckCastPP of a call
 639   // or the result projection if there are several CheckCastPPs,
 640   // or NULL if there is none.
 641   Node* result_cast();
 642   // Does this node return a pointer?
 643   bool returns_pointer() const {
 644     const TypeTuple* r = tf()->range();
 645     return (r->cnt() > TypeFunc::Parms &&
 646             r->field_at(TypeFunc::Parms)->isa_ptr());
 647   }
 648 
 649   // Collect all the interesting edges from a call for use in
 650   // replacing the call by something else.  Used by macro expansion
 651   // and the late inlining support.
 652   void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);
 653 
 654   virtual uint match_edge(uint idx) const;
 655 
 656   bool is_call_to_arraycopystub() const;
 657 
 658   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 659 
 660 #ifndef PRODUCT
 661   virtual void        dump_req(outputStream* st = tty) const;
 662   virtual void        dump_spec(outputStream* st) const;
 663 #endif
 664 };
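
A minimal usage sketch of the extract_projections() API declared above, which fills a caller-provided container (CallProjections is a StackObj, so it lives on the native stack); the CallNode* named call is assumed surrounding context, not part of this header:

    CallProjections projs;
    call->extract_projections(&projs, /*separate_io_proj=*/true);
    if (projs.fallthrough_catchproj != NULL) {
      // Uses on the normal control path hang off this projection.
    }
    if (projs.resproj != NULL) {
      // The single result projection, present only when the call returns a value.
    }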
 665 
 666 
 667 //------------------------------CallJavaNode-----------------------------------
 668 // Make a static or dynamic subroutine call node using Java calling
 669 // convention.  (The "Java" calling convention is the compiler's calling
 670 // convention, as opposed to the interpreter's or that of native C.)
 671 class CallJavaNode : public CallNode {
 672   friend class VMStructs;

 702   bool  override_symbolic_info() const     { return _override_symbolic_info; }
 703   void  set_arg_escape(bool f)             { _arg_escape = f; }
 704   bool  arg_escape() const                 { return _arg_escape; }
 705   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 706 
 707   DEBUG_ONLY( bool validate_symbolic_info() const; )
 708 
 709 #ifndef PRODUCT
 710   virtual void  dump_spec(outputStream *st) const;
 711   virtual void  dump_compact_spec(outputStream *st) const;
 712 #endif
 713 };
 714 
 715 //------------------------------CallStaticJavaNode-----------------------------
 716 // Make a direct subroutine call using Java calling convention (for static
 717 // calls and optimized virtual calls, plus calls to wrappers for run-time
 718 // routines); generates static stub.
 719 class CallStaticJavaNode : public CallJavaNode {
 720   virtual bool cmp( const Node &n ) const;
 721   virtual uint size_of() const; // Size is bigger
 722 public:
 723   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 724     : CallJavaNode(tf, addr, method) {
 725     init_class_id(Class_CallStaticJava);
 726     if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
 727       init_flags(Flag_is_macro);
 728       C->add_macro_node(this);
 729     }
 730   }
 731   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 732     : CallJavaNode(tf, addr, NULL) {
 733     init_class_id(Class_CallStaticJava);
 734     // This node calls a runtime stub, which often has narrow memory effects.
 735     _adr_type = adr_type;
 736     _name = name;
 737   }
 738 
 739   // If this is an uncommon trap, return the request code, else zero.
 740   int uncommon_trap_request() const;
 741   static int extract_uncommon_trap_request(const Node* call);
 742 
 743   bool is_boxing_method() const {
 744     return is_macro() && (method() != NULL) && method()->is_boxing_method();
 745   }
 746   // Late inlining modifies the JVMState, so we need to deep clone it
 747   // when the call node is cloned (because it is a macro node).
 748   virtual bool needs_deep_clone_jvms(Compile* C) {
 749     return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);

 836   GrowableArray<VMReg> _arg_regs;
 837   GrowableArray<VMReg> _ret_regs;
 838   const int _shadow_space_bytes;
 839   const bool _need_transition;
 840 
 841   CallNativeNode(const TypeFunc* tf, address addr, const char* name,
 842                  const TypePtr* adr_type,
 843                  const GrowableArray<VMReg>& arg_regs,
 844                  const GrowableArray<VMReg>& ret_regs,
 845                  int shadow_space_bytes,
 846                  bool need_transition)
 847     : CallNode(tf, addr, adr_type), _arg_regs(arg_regs),
 848       _ret_regs(ret_regs), _shadow_space_bytes(shadow_space_bytes),
 849       _need_transition(need_transition)
 850   {
 851     init_class_id(Class_CallNative);
 852     _name = name;
 853   }
 854   virtual int   Opcode() const;
 855   virtual bool  guaranteed_safepoint()  { return _need_transition; }
 856   virtual Node* match(const ProjNode *proj, const Matcher *m);
 857   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 858 #ifndef PRODUCT
 859   virtual void  dump_spec(outputStream *st) const;
 860 #endif
 861 };
 862 
 863 //------------------------------CallLeafNoFPNode-------------------------------
 864 // A CallLeafNode that does not use floating point, or that uses it in the
 865 // same manner as the generated code
 866 class CallLeafNoFPNode : public CallLeafNode {
 867 public:
 868   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 869                    const TypePtr* adr_type)
 870     : CallLeafNode(tf, addr, name, adr_type)
 871   {
 872     init_class_id(Class_CallLeafNoFP);
 873   }
 874   virtual int   Opcode() const;
 875 };
 876 
 877 //------------------------------CallLeafVectorNode-------------------------------
 878 // CallLeafNode, but using the vector calling convention instead.
 879 class CallLeafVectorNode : public CallLeafNode {
 880 private:
 881   uint _num_bits;
 882 protected:
 883   virtual bool cmp( const Node &n ) const;
 884   virtual uint size_of() const; // Size is bigger
 885 public:
 886   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 887                    const TypePtr* adr_type, uint num_bits)
 888     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 889   {
 890   }
 891   virtual int   Opcode() const;
 892   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 893 };
 894 

 896 //------------------------------Allocate---------------------------------------
 897 // High-level memory allocation
 898 //
 899 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 900 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 901 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 902 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 903 //  order to differentiate the uses of the projection on the normal control path from
 904 //  those on the exception return path.
 905 //
 906 class AllocateNode : public CallNode {
 907 public:
 908   enum {
 909     // Output:
 910     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
 911     // Inputs:
 912     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
 913     KlassNode,                        // type (maybe dynamic) of the obj.
 914     InitialTest,                      // slow-path test (may be constant)
 915     ALength,                          // array length (or TOP if none)
 916     ParmLimit
 917   };
 918 
 919   static const TypeFunc* alloc_type(const Type* t) {
 920     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
 921     fields[AllocSize]   = TypeInt::POS;
 922     fields[KlassNode]   = TypeInstPtr::NOTNULL;
 923     fields[InitialTest] = TypeInt::BOOL;
 924     fields[ALength]     = t;  // length (can be a bad length)
 925 
 926     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
 927 
 928     // create result type (range)
 929     fields = TypeTuple::fields(1);
 930     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
 931 
 932     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
 933 
 934     return TypeFunc::make(domain, range);
 935   }
 936 
 937   // Result of Escape Analysis
 938   bool _is_scalar_replaceable;
 939   bool _is_non_escaping;
 940   // True when the MemBar for new is redundant with the MemBar at the initializer exit
 941   bool _is_allocation_MemBar_redundant;
 942 
 943   virtual uint size_of() const; // Size is bigger
 944   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
 945                Node *size, Node *klass_node, Node *initial_test);
 946   // Expansion modifies the JVMState, so we need to deep clone it
 947   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
 948   virtual int Opcode() const;
 949   virtual uint ideal_reg() const { return Op_RegP; }
 950   virtual bool        guaranteed_safepoint()  { return false; }
 951 
 952   // allocations do not modify their arguments
 953   virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}
 954 
 955   // Pattern-match a possible usage of AllocateNode.
 956   // Return null if no allocation is recognized.
 957   // The operand is the pointer produced by the (possible) allocation.
 958   // It must be a projection of the Allocate or its subsequent CastPP.
 959   // (Note:  This function is defined in file graphKit.cpp, near
 960   // GraphKit::new_instance/new_array, whose output it recognizes.)
 961   // The 'ptr' may not have an offset unless the 'offset' argument is given.
 962   static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);
 963 
 964   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
 965   // an offset, which is reported back to the caller.

 990 
 991   // Return true if the allocation doesn't escape the thread; its escape
 992   // state needs to be noEscape or ArgEscape. InitializeNode._does_not_escape
 993   // is true when its allocation's escape state is noEscape or
 994   // ArgEscape. In case the allocation's InitializeNode is NULL, check
 995   // the AllocateNode._is_non_escaping flag.
 996   // AllocateNode._is_non_escaping is true when its escape state is
 997   // noEscape.
 998   bool does_not_escape_thread() {
 999     InitializeNode* init = NULL;
1000     return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
1001   }
1002 
1003   // If the object doesn't escape in its <.init> method and there is a memory
1004   // barrier inserted at the exit of <.init>, the memory barrier for the new is
1005   // not necessary. Invoke this method when the MemBar at the exit of the
1006   // initializer post-dominates the allocation node.
1007   void compute_MemBar_redundancy(ciMethod* initializer);
1008   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1009 
1010   Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
1011 };
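
A hedged sketch of the Ideal_allocation() pattern-matcher declared above; ptr and phase are assumed to come from the surrounding transform, and the escape check simply reuses does_not_escape_thread() from this class:

    // Recognize the allocation feeding 'ptr' (a projection of the Allocate
    // or of its subsequent CastPP); returns NULL when nothing matches.
    AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase);
    if (alloc != NULL && alloc->does_not_escape_thread()) {
      // Escape analysis proved the object stays thread-local.
    }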
1012 
1013 //------------------------------AllocateArray---------------------------------
1014 //
1015 // High-level array allocation
1016 //
1017 class AllocateArrayNode : public AllocateNode {
1018 public:
1019   AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1020                     Node* size, Node* klass_node, Node* initial_test,
1021                     Node* count_val
1022                     )
1023     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1024                    initial_test)
1025   {
1026     init_class_id(Class_AllocateArray);
1027     set_req(AllocateNode::ALength,        count_val);
1028   }
1029   virtual int Opcode() const;
1030   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1031 
1032   // Dig the length operand out of an array allocation site.
1033   Node* Ideal_length() {
1034     return in(AllocateNode::ALength);
1035   }
1036 
1037   // Dig the length operand out of an array allocation site and narrow the
1038   // type with a CastII, if necessary
1039   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);
1040 
1041   // Pattern-match a possible usage of AllocateArrayNode.
1042   // Return null if no allocation is recognized.
1043   static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
1044     AllocateNode* allo = Ideal_allocation(ptr, phase);
1045     return (allo == NULL || !allo->is_AllocateArray())
1046            ? NULL : allo->as_AllocateArray();
1047   }

1129 //    0 - object to lock
1130 //    1 - a BoxLockNode
1131 //    2 - a FastLockNode
1132 //
1133 class LockNode : public AbstractLockNode {
1134 public:
1135 
1136   static const TypeFunc *lock_type() {
1137     // create input type (domain)
1138     const Type **fields = TypeTuple::fields(3);
1139     fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
1140     fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
1141     fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
1142     const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
1143 
1144     // create result type (range)
1145     fields = TypeTuple::fields(0);
1146 
1147     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1148 
1149     return TypeFunc::make(domain,range);
1150   }
1151 
1152   virtual int Opcode() const;
1153   virtual uint size_of() const; // Size is bigger
1154   LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
1155     init_class_id(Class_Lock);
1156     init_flags(Flag_is_macro);
1157     C->add_macro_node(this);
1158   }
1159   virtual bool        guaranteed_safepoint()  { return false; }
1160 
1161   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1162   // Expansion modifies the JVMState, so we need to deep clone it
1163   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1164 
1165   bool is_nested_lock_region(); // Is this Lock nested?
1166   bool is_nested_lock_region(Compile* c); // Why isn't this Lock nested?
1167 };
1168 
1169 //------------------------------Unlock---------------------------------------

  60 //------------------------------StartNode--------------------------------------
  61 // The method start node
  62 class StartNode : public MultiNode {
  63   virtual bool cmp( const Node &n ) const;
  64   virtual uint size_of() const; // Size is bigger
  65 public:
  66   const TypeTuple *_domain;
  67   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  68     init_class_id(Class_Start);
  69     init_req(0,this);
  70     init_req(1,root);
  71   }
  72   virtual int Opcode() const;
  73   virtual bool pinned() const { return true; };
  74   virtual const Type *bottom_type() const;
  75   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  76   virtual const Type* Value(PhaseGVN* phase) const;
  77   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  78   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  79   virtual const RegMask &in_RegMask(uint) const;
  80   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  81   virtual uint ideal_reg() const { return 0; }
  82 #ifndef PRODUCT
  83   virtual void  dump_spec(outputStream *st) const;
  84   virtual void  dump_compact_spec(outputStream *st) const;
  85 #endif
  86 };
  87 
  88 //------------------------------StartOSRNode-----------------------------------
  89 // The method start node for on stack replacement code
  90 class StartOSRNode : public StartNode {
  91 public:
  92   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  93   virtual int   Opcode() const;
  94 };
  95 
  96 
  97 //------------------------------ParmNode---------------------------------------
  98 // Incoming parameters
  99 class ParmNode : public ProjNode {
 100   static const char * const names[TypeFunc::Parms+1];
 101 public:
 102   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 103     init_class_id(Class_Parm);
 104   }
 105   virtual int Opcode() const;
 106   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 107   virtual uint ideal_reg() const;
 108 #ifndef PRODUCT
 109   virtual void dump_spec(outputStream *st) const;
 110   virtual void dump_compact_spec(outputStream *st) const;
 111   virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
 112 #endif
 113 };
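
For reference, ParmNodes are hung off the StartNode one per incoming slot when the initial graph is built; a minimal sketch, where the gvn and start locals are assumed context rather than part of this header:

    Node* ctrl = gvn.transform(new ParmNode(start, TypeFunc::Control));
    Node* mem  = gvn.transform(new ParmNode(start, TypeFunc::Memory));
    Node* arg0 = gvn.transform(new ParmNode(start, TypeFunc::Parms + 0)); // first Java argument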

 544 
 545   virtual uint size_of() const { return sizeof(*this); }
 546 
 547   // Assumes that "this" is an argument to a safepoint node "s", and that
 548   // "new_call" is being created to correspond to "s".  But the difference
 549   // between the start index of the jvmstates of "new_call" and "s" is
 550   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 551   // corresponds appropriately to "this" in "new_call".  Assumes that
 552   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 553   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 554   SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;
 555 
 556 #ifndef PRODUCT
 557   virtual void              dump_spec(outputStream *st) const;
 558 #endif
 559 };
 560 
 561 
 562 // Simple container for the outgoing projections of a call.  Useful
 563 // for serious surgery on calls.
 564 class CallProjections {
 565 public:
 566   Node* fallthrough_proj;
 567   Node* fallthrough_catchproj;
 568   Node* fallthrough_memproj;
 569   Node* fallthrough_ioproj;
 570   Node* catchall_catchproj;
 571   Node* catchall_memproj;
 572   Node* catchall_ioproj;
 573   Node* exobj;
 574   uint nb_resproj;
 575   Node* resproj[1]; // at least one projection
 576 
 577   CallProjections(uint nbres) {
 578     fallthrough_proj      = NULL;
 579     fallthrough_catchproj = NULL;
 580     fallthrough_memproj   = NULL;
 581     fallthrough_ioproj    = NULL;
 582     catchall_catchproj    = NULL;
 583     catchall_memproj      = NULL;
 584     catchall_ioproj       = NULL;
 585     exobj                 = NULL;
 586     nb_resproj            = nbres;
 587     resproj[0]            = NULL;
 588     for (uint i = 1; i < nb_resproj; i++) {
 589       resproj[i]          = NULL;
 590     }
 591   }
 592 
 593 };
 594 
 595 class CallGenerator;
 596 
 597 //------------------------------CallNode---------------------------------------
 598 // Call nodes now subsume the function of debug nodes at callsites, so they
 599 // contain the functionality of a full scope chain of debug nodes.
 600 class CallNode : public SafePointNode {
 601   friend class VMStructs;
 602 
 603 protected:
 604   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseTransform* phase);
 605 
 606 public:
 607   const TypeFunc* _tf;          // Function type
 608   address         _entry_point; // Address of method being called
 609   float           _cnt;         // Estimate of number of times called
 610   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 611   const char*     _name;        // Printable name, if _method is NULL
 612 
 613   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 614     : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
 615       _tf(tf),
 616       _entry_point(addr),
 617       _cnt(COUNT_UNKNOWN),
 618       _generator(NULL),
 619       _name(NULL)
 620   {
 621     init_class_id(Class_Call);
 622   }
 623 
 624   const TypeFunc* tf()         const { return _tf; }
 625   const address  entry_point() const { return _entry_point; }
 626   const float    cnt()         const { return _cnt; }
 627   CallGenerator* generator()   const { return _generator; }
 628 
 629   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 630   void set_entry_point(address p)       { _entry_point = p; }
 631   void set_cnt(float c)                 { _cnt = c; }
 632   void set_generator(CallGenerator* cg) { _generator = cg; }
 633 
 634   virtual const Type* bottom_type() const;
 635   virtual const Type* Value(PhaseGVN* phase) const;
 636   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 637   virtual Node* Identity(PhaseGVN* phase) { return this; }
 638   virtual bool        cmp(const Node &n) const;
 639   virtual uint        size_of() const = 0;
 640   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 641   virtual Node*       match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
 642   virtual uint        ideal_reg() const { return NotAMachineReg; }
 643   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 644   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 645   virtual bool        guaranteed_safepoint()  { return true; }
 646   // For macro nodes, the JVMState gets modified during expansion. If calls
 647   // use MachConstantBase, it gets modified during matching. So when cloning
 648   // the node the JVMState must be deep cloned. Default is to shallow clone.
 649   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 650 
 651   // Returns true if the call may modify n
 652   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseTransform* phase);
 653   // Does this node have a use of n other than in debug information?
 654   bool                has_non_debug_use(Node* n);
 655   bool                has_debug_use(Node* n);
 656   // Returns the unique CheckCastPP of a call
 657   // or the result projection if there are several CheckCastPPs,
 658   // or NULL if there is none.
 659   Node* result_cast();
 660   // Does this node return a pointer?
 661   bool returns_pointer() const {
 662     const TypeTuple* r = tf()->range_sig();
 663     return (!tf()->returns_inline_type_as_fields() &&
 664             r->cnt() > TypeFunc::Parms &&
 665             r->field_at(TypeFunc::Parms)->isa_ptr());
 666   }
 667 
 668   // Collect all the interesting edges from a call for use in
 669   // replacing the call by something else.  Used by macro expansion
 670   // and the late inlining support.
 671   CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);
 672 
 673   virtual uint match_edge(uint idx) const;
 674 
 675   bool is_call_to_arraycopystub() const;
 676 
 677   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 678 
 679 #ifndef PRODUCT
 680   virtual void        dump_req(outputStream* st = tty) const;
 681   virtual void        dump_spec(outputStream* st) const;
 682 #endif
 683 };
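
On this side of the change extract_projections() allocates and returns the container instead of filling a caller-provided one, and the result projections become a counted array (resproj/nb_resproj above); a minimal sketch, again assuming a CallNode* named call:

    CallProjections* projs = call->extract_projections(/*separate_io_proj=*/true);
    for (uint i = 0; i < projs->nb_resproj; i++) {
      if (projs->resproj[i] != NULL) {
        // Rewire each result projection to its replacement here.
      }
    }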
 684 
 685 
 686 //------------------------------CallJavaNode-----------------------------------
 687 // Make a static or dynamic subroutine call node using Java calling
 688 // convention.  (The "Java" calling convention is the compiler's calling
 689 // convention, as opposed to the interpreter's or that of native C.)
 690 class CallJavaNode : public CallNode {
 691   friend class VMStructs;

 721   bool  override_symbolic_info() const     { return _override_symbolic_info; }
 722   void  set_arg_escape(bool f)             { _arg_escape = f; }
 723   bool  arg_escape() const                 { return _arg_escape; }
 724   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 725 
 726   DEBUG_ONLY( bool validate_symbolic_info() const; )
 727 
 728 #ifndef PRODUCT
 729   virtual void  dump_spec(outputStream *st) const;
 730   virtual void  dump_compact_spec(outputStream *st) const;
 731 #endif
 732 };
 733 
 734 //------------------------------CallStaticJavaNode-----------------------------
 735 // Make a direct subroutine call using Java calling convention (for static
 736 // calls and optimized virtual calls, plus calls to wrappers for run-time
 737 // routines); generates static stub.
 738 class CallStaticJavaNode : public CallJavaNode {
 739   virtual bool cmp( const Node &n ) const;
 740   virtual uint size_of() const; // Size is bigger
 741 
 742   bool remove_useless_allocation(PhaseGVN *phase, Node* ctl, Node* mem, Node* unc_arg);
 743 
 744 public:
 745   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 746     : CallJavaNode(tf, addr, method) {
 747     init_class_id(Class_CallStaticJava);
 748     if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
 749       init_flags(Flag_is_macro);
 750       C->add_macro_node(this);
 751     }
 752     const TypeTuple *r = tf->range_sig();
 753     if (InlineTypeReturnedAsFields &&
 754         method != NULL &&
 755         method->is_method_handle_intrinsic() &&
 756         r->cnt() > TypeFunc::Parms &&
 757         r->field_at(TypeFunc::Parms)->isa_oopptr() &&
 758         r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
 759       // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
 760       init_flags(Flag_is_macro);
 761       C->add_macro_node(this);
 762     }
 763   }
 764   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 765     : CallJavaNode(tf, addr, NULL) {
 766     init_class_id(Class_CallStaticJava);
 767     // This node calls a runtime stub, which often has narrow memory effects.
 768     _adr_type = adr_type;
 769     _name = name;
 770   }
 771 
 772   // If this is an uncommon trap, return the request code, else zero.
 773   int uncommon_trap_request() const;
 774   static int extract_uncommon_trap_request(const Node* call);
 775 
 776   bool is_boxing_method() const {
 777     return is_macro() && (method() != NULL) && method()->is_boxing_method();
 778   }
 779   // Late inlining modifies the JVMState, so we need to deep clone it
 780   // when the call node is cloned (because it is a macro node).
 781   virtual bool needs_deep_clone_jvms(Compile* C) {
 782     return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);

 869   GrowableArray<VMReg> _arg_regs;
 870   GrowableArray<VMReg> _ret_regs;
 871   const int _shadow_space_bytes;
 872   const bool _need_transition;
 873 
 874   CallNativeNode(const TypeFunc* tf, address addr, const char* name,
 875                  const TypePtr* adr_type,
 876                  const GrowableArray<VMReg>& arg_regs,
 877                  const GrowableArray<VMReg>& ret_regs,
 878                  int shadow_space_bytes,
 879                  bool need_transition)
 880     : CallNode(tf, addr, adr_type), _arg_regs(arg_regs),
 881       _ret_regs(ret_regs), _shadow_space_bytes(shadow_space_bytes),
 882       _need_transition(need_transition)
 883   {
 884     init_class_id(Class_CallNative);
 885     _name = name;
 886   }
 887   virtual int   Opcode() const;
 888   virtual bool  guaranteed_safepoint()  { return _need_transition; }
 889   virtual Node* match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
 890   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 891 #ifndef PRODUCT
 892   virtual void  dump_spec(outputStream *st) const;
 893 #endif
 894 };
 895 
 896 //------------------------------CallLeafNoFPNode-------------------------------
 897 // A CallLeafNode that does not use floating point, or that uses it in the
 898 // same manner as the generated code
 899 class CallLeafNoFPNode : public CallLeafNode {
 900 public:
 901   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 902                    const TypePtr* adr_type)
 903     : CallLeafNode(tf, addr, name, adr_type)
 904   {
 905     init_class_id(Class_CallLeafNoFP);
 906   }
 907   virtual int   Opcode() const;
 908   virtual uint match_edge(uint idx) const;
 909 };
 910 
 911 //------------------------------CallLeafVectorNode-------------------------------
 912 // CallLeafNode, but using the vector calling convention instead.
 913 class CallLeafVectorNode : public CallLeafNode {
 914 private:
 915   uint _num_bits;
 916 protected:
 917   virtual bool cmp( const Node &n ) const;
 918   virtual uint size_of() const; // Size is bigger
 919 public:
 920   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 921                    const TypePtr* adr_type, uint num_bits)
 922     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 923   {
 924   }
 925   virtual int   Opcode() const;
 926   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 927 };
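
A hedged construction sketch for the vector leaf call; the TypeFunc, stub address, and name are placeholders standing in for whatever the surrounding runtime-call machinery supplies, and num_bits names the vector width the stub expects:

    CallLeafVectorNode* call =
        new CallLeafVectorNode(call_type, stub_addr, "vector_stub",
                               TypePtr::BOTTOM, /*num_bits=*/256);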
 928 

 930 //------------------------------Allocate---------------------------------------
 931 // High-level memory allocation
 932 //
 933 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 934 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 935 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 936 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 937 //  order to differentiate the uses of the projection on the normal control path from
 938 //  those on the exception return path.
 939 //
 940 class AllocateNode : public CallNode {
 941 public:
 942   enum {
 943     // Output:
 944     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
 945     // Inputs:
 946     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
 947     KlassNode,                        // type (maybe dynamic) of the obj.
 948     InitialTest,                      // slow-path test (may be constant)
 949     ALength,                          // array length (or TOP if none)
 950     InlineTypeNode,                   // InlineTypeNode if this is an inline type allocation
 951     DefaultValue,                     // default value in case of non-flattened inline type array
 952     RawDefaultValue,                  // same as above but as raw machine word
 953     ParmLimit
 954   };
 955 
 956   static const TypeFunc* alloc_type(const Type* t) {
 957     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
 958     fields[AllocSize]   = TypeInt::POS;
 959     fields[KlassNode]   = TypeInstPtr::NOTNULL;
 960     fields[InitialTest] = TypeInt::BOOL;
 961     fields[ALength]     = t;  // length (can be a bad length)
 962     fields[InlineTypeNode] = Type::BOTTOM;
 963     fields[DefaultValue] = TypeInstPtr::NOTNULL;
 964     fields[RawDefaultValue] = TypeX_X;
 965 
 966     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
 967 
 968     // create result type (range)
 969     fields = TypeTuple::fields(1);
 970     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
 971 
 972     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
 973 
 974     return TypeFunc::make(domain, range);
 975   }
 976 
 977   // Result of Escape Analysis
 978   bool _is_scalar_replaceable;
 979   bool _is_non_escaping;
 980   // True when the MemBar for new is redundant with the MemBar at the initializer exit
 981   bool _is_allocation_MemBar_redundant;
 982   bool _larval;
 983 
 984   virtual uint size_of() const; // Size is bigger
 985   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
 986                Node *size, Node *klass_node, Node *initial_test,
 987                InlineTypeBaseNode* inline_type_node = NULL);
 988   // Expansion modifies the JVMState, so we need to deep clone it
 989   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
 990   virtual int Opcode() const;
 991   virtual uint ideal_reg() const { return Op_RegP; }
 992   virtual bool        guaranteed_safepoint()  { return false; }
 993 
 994   // allocations do not modify their arguments
 995   virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}
 996 
 997   // Pattern-match a possible usage of AllocateNode.
 998   // Return null if no allocation is recognized.
 999   // The operand is the pointer produced by the (possible) allocation.
1000   // It must be a projection of the Allocate or its subsequent CastPP.
1001   // (Note:  This function is defined in file graphKit.cpp, near
1002   // GraphKit::new_instance/new_array, whose output it recognizes.)
1003   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1004   static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);
1005 
1006   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1007   // an offset, which is reported back to the caller.

1032 
1033   // Return true if the allocation doesn't escape the thread; its escape
1034   // state needs to be noEscape or ArgEscape. InitializeNode._does_not_escape
1035   // is true when its allocation's escape state is noEscape or
1036   // ArgEscape. In case the allocation's InitializeNode is NULL, check
1037   // the AllocateNode._is_non_escaping flag.
1038   // AllocateNode._is_non_escaping is true when its escape state is
1039   // noEscape.
1040   bool does_not_escape_thread() {
1041     InitializeNode* init = NULL;
1042     return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
1043   }
1044 
1045   // If the object doesn't escape in its <.init> method and there is a memory
1046   // barrier inserted at the exit of <.init>, the memory barrier for the new is
1047   // not necessary. Invoke this method when the MemBar at the exit of the
1048   // initializer post-dominates the allocation node.
1049   void compute_MemBar_redundancy(ciMethod* initializer);
1050   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1051 
1052   Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
1053 };
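
A hedged sketch of pairing alloc_type() with the constructor above for an instance allocation (no array length, so the ALength slot type is just TOP, and the optional inline-type argument is left at its NULL default); the GraphKit-style locals are assumed context:

    const TypeFunc* atype = AllocateNode::alloc_type(Type::TOP);
    AllocateNode* alloc = new AllocateNode(C, atype, control(), mem, i_o(),
                                           size, klass_node, initial_slow_test);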
1054 
1055 //------------------------------AllocateArray---------------------------------
1056 //
1057 // High-level array allocation
1058 //
1059 class AllocateArrayNode : public AllocateNode {
1060 public:
1061   AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1062                     Node* size, Node* klass_node, Node* initial_test,
1063                     Node* count_val, Node* default_value, Node* raw_default_value)
1064     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node, initial_test)
1065   {
1066     init_class_id(Class_AllocateArray);
1067     set_req(AllocateNode::ALength,        count_val);
1068     init_req(AllocateNode::DefaultValue,  default_value);
1069     init_req(AllocateNode::RawDefaultValue, raw_default_value);
1070   }
1071   virtual int Opcode() const;
1072   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1073 
1074   // Dig the length operand out of an array allocation site.
1075   Node* Ideal_length() {
1076     return in(AllocateNode::ALength);
1077   }
1078 
1079   // Dig the length operand out of an array allocation site and narrow the
1080   // type with a CastII, if necessary
1081   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);
1082 
1083   // Pattern-match a possible usage of AllocateArrayNode.
1084   // Return null if no allocation is recognized.
1085   static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
1086     AllocateNode* allo = Ideal_allocation(ptr, phase);
1087     return (allo == NULL || !allo->is_AllocateArray())
1088            ? NULL : allo->as_AllocateArray();
1089   }

1171 //    0 - object to lock
1172 //    1 - a BoxLockNode
1173 //    2 - a FastLockNode
1174 //
1175 class LockNode : public AbstractLockNode {
1176 public:
1177 
1178   static const TypeFunc *lock_type() {
1179     // create input type (domain)
1180     const Type **fields = TypeTuple::fields(3);
1181     fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
1182     fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
1183     fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
1184     const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
1185 
1186     // create result type (range)
1187     fields = TypeTuple::fields(0);
1188 
1189     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1190 
1191     return TypeFunc::make(domain, range);
1192   }
1193 
1194   virtual int Opcode() const;
1195   virtual uint size_of() const; // Size is bigger
1196   LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
1197     init_class_id(Class_Lock);
1198     init_flags(Flag_is_macro);
1199     C->add_macro_node(this);
1200   }
1201   virtual bool        guaranteed_safepoint()  { return false; }
1202 
1203   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1204   // Expansion modifies the JVMState, so we need to deep clone it
1205   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1206 
1207   bool is_nested_lock_region(); // Is this Lock nested?
1208   bool is_nested_lock_region(Compile* c); // Why isn't this Lock nested?
1209 };
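
A short sketch of how the fixed signature built by lock_type() pairs with the constructor above (which also registers the node as a macro node); C, obj, box, and flock are assumed context:

    LockNode* lock = new LockNode(C, LockNode::lock_type());
    lock->init_req(TypeFunc::Parms + 0, obj);   // object to lock
    lock->init_req(TypeFunc::Parms + 1, box);   // BoxLockNode (stack slot)
    lock->init_req(TypeFunc::Parms + 2, flock); // FastLockNode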
1210 
1211 //------------------------------Unlock---------------------------------------