60 //------------------------------StartNode--------------------------------------
61 // The method start node
62 class StartNode : public MultiNode {
63 virtual bool cmp( const Node &n ) const;
64 virtual uint size_of() const; // Size is bigger
65 public:
66 const TypeTuple *_domain;
67 StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
68 init_class_id(Class_Start);
69 init_req(0,this);
70 init_req(1,root);
71 }
72 virtual int Opcode() const;
73 virtual bool pinned() const { return true; };
74 virtual const Type *bottom_type() const;
75 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
76 virtual const Type* Value(PhaseGVN* phase) const;
77 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
78 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
79 virtual const RegMask &in_RegMask(uint) const;
80 virtual Node *match( const ProjNode *proj, const Matcher *m );
81 virtual uint ideal_reg() const { return 0; }
82 #ifndef PRODUCT
83 virtual void dump_spec(outputStream *st) const;
84 virtual void dump_compact_spec(outputStream *st) const;
85 #endif
86 };
87
88 //------------------------------StartOSRNode-----------------------------------
89 // The method start node for on stack replacement code
90 class StartOSRNode : public StartNode {
91 public:
92 StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
93 virtual int Opcode() const;
94 static const TypeTuple *osr_domain();
95 };
96
97
98 //------------------------------ParmNode---------------------------------------
99 // Incoming parameters
100 class ParmNode : public ProjNode {
101 static const char * const names[TypeFunc::Parms+1];
102 public:
103 ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
104 init_class_id(Class_Parm);
105 }
106 virtual int Opcode() const;
107 virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
108 virtual uint ideal_reg() const;
109 #ifndef PRODUCT
110 virtual void dump_spec(outputStream *st) const;
111 virtual void dump_compact_spec(outputStream *st) const;
112 virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
113 #endif
114 };
545
546 virtual uint size_of() const { return sizeof(*this); }
547
548 // Assumes that "this" is an argument to a safepoint node "s", and that
549 // "new_call" is being created to correspond to "s". But the difference
550 // between the start index of the jvmstates of "new_call" and "s" is
551 // "jvms_adj". Produce and return a SafePointScalarObjectNode that
552 // corresponds appropriately to "this" in "new_call". Assumes that
553 // "sosn_map" is a map, specific to the translation of "s" to "new_call",
554 // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
555 SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;
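  // A minimal usage sketch (local names are illustrative, not part of this
  // class): a caller rewriting safepoint "s" into "new_call" reuses the
  // per-call "sosn_map" so each scalar-replaced object is cloned only once:
  //   bool new_node = false;
  //   SafePointScalarObjectNode* copy = scalar->clone(sosn_map, new_node);
  //   if (new_node) {
  //     // first time this object was seen for "new_call": record "copy" there
  //   }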
556
557 #ifndef PRODUCT
558 virtual void dump_spec(outputStream *st) const;
559 #endif
560 };
561
562
563 // Simple container for the outgoing projections of a call. Useful
564 // for serious surgery on calls.
565 class CallProjections : public StackObj {
566 public:
567 Node* fallthrough_proj;
568 Node* fallthrough_catchproj;
569 Node* fallthrough_memproj;
570 Node* fallthrough_ioproj;
571 Node* catchall_catchproj;
572 Node* catchall_memproj;
573 Node* catchall_ioproj;
574 Node* resproj;
575 Node* exobj;
576 };
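// A minimal sketch of the intended use (local names are illustrative): macro
// expansion fills a stack-allocated CallProjections via
// CallNode::extract_projections() and then rewires the collected edges.
//   CallProjections projs;
//   call->extract_projections(&projs, /*separate_io_proj=*/true);
//   if (projs.fallthrough_catchproj != NULL) {
//     // redirect control users of the original call here
//   }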
577
578 class CallGenerator;
579
580 //------------------------------CallNode---------------------------------------
581 // Call nodes now subsume the function of debug nodes at callsites, so they
582 // contain the functionality of a full scope chain of debug nodes.
583 class CallNode : public SafePointNode {
584 friend class VMStructs;
585
586 protected:
587 bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseTransform* phase);
588
589 public:
590 const TypeFunc* _tf; // Function type
591 address _entry_point; // Address of method being called
592 float _cnt; // Estimate of number of times called
593 CallGenerator* _generator; // corresponding CallGenerator for some late inline calls
594 const char* _name; // Printable name, if _method is NULL
595
596 CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
597 : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
598 _tf(tf),
599 _entry_point(addr),
600 _cnt(COUNT_UNKNOWN),
601 _generator(NULL),
602 _name(NULL)
603 {
604 init_class_id(Class_Call);
605 }
606
607 const TypeFunc* tf() const { return _tf; }
608 const address entry_point() const { return _entry_point; }
609 const float cnt() const { return _cnt; }
610 CallGenerator* generator() const { return _generator; }
611
612 void set_tf(const TypeFunc* tf) { _tf = tf; }
613 void set_entry_point(address p) { _entry_point = p; }
614 void set_cnt(float c) { _cnt = c; }
615 void set_generator(CallGenerator* cg) { _generator = cg; }
616
617 virtual const Type* bottom_type() const;
618 virtual const Type* Value(PhaseGVN* phase) const;
619 virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
620 virtual Node* Identity(PhaseGVN* phase) { return this; }
621 virtual bool cmp(const Node &n) const;
622 virtual uint size_of() const = 0;
623 virtual void calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
624 virtual Node* match(const ProjNode* proj, const Matcher* m);
625 virtual uint ideal_reg() const { return NotAMachineReg; }
626 // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
627 // for some macro nodes whose expansion does not have a safepoint on the fast path.
628 virtual bool guaranteed_safepoint() { return true; }
629 // For macro nodes, the JVMState gets modified during expansion. If calls
630 // use MachConstantBase, it gets modified during matching. So when cloning
631 // the node the JVMState must be deep cloned. Default is to shallow clone.
632 virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
633
634 // Returns true if the call may modify n
635 virtual bool may_modify(const TypeOopPtr* t_oop, PhaseTransform* phase);
636 // Does this node have a use of n other than in debug information?
637 bool has_non_debug_use(Node* n);
638   // Returns the unique CheckCastPP of a call,
639   // or the result projection if there are several CheckCastPPs,
640   // or NULL if there is none.
641 Node* result_cast();
643   // Does this node return a pointer?
643 bool returns_pointer() const {
644 const TypeTuple* r = tf()->range();
645 return (r->cnt() > TypeFunc::Parms &&
646 r->field_at(TypeFunc::Parms)->isa_ptr());
647 }
648
649 // Collect all the interesting edges from a call for use in
650 // replacing the call by something else. Used by macro expansion
651 // and the late inlining support.
652 void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);
653
654 virtual uint match_edge(uint idx) const;
655
656 bool is_call_to_arraycopystub() const;
657
658 virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
659
660 #ifndef PRODUCT
661 virtual void dump_req(outputStream* st = tty) const;
662 virtual void dump_spec(outputStream* st) const;
663 #endif
664 };
665
666
667 //------------------------------CallJavaNode-----------------------------------
668 // Make a static or dynamic subroutine call node using Java calling
669 // convention. (The "Java" calling convention is the compiler's calling
670 // convention, as opposed to the interpreter's or that of native C.)
671 class CallJavaNode : public CallNode {
672 friend class VMStructs;
702 bool override_symbolic_info() const { return _override_symbolic_info; }
703 void set_arg_escape(bool f) { _arg_escape = f; }
704 bool arg_escape() const { return _arg_escape; }
705 void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
706
707 DEBUG_ONLY( bool validate_symbolic_info() const; )
708
709 #ifndef PRODUCT
710 virtual void dump_spec(outputStream *st) const;
711 virtual void dump_compact_spec(outputStream *st) const;
712 #endif
713 };
714
715 //------------------------------CallStaticJavaNode-----------------------------
716 // Make a direct subroutine call using Java calling convention (for static
717 // calls and optimized virtual calls, plus calls to wrappers for run-time
718 // routines); generates static stub.
719 class CallStaticJavaNode : public CallJavaNode {
720 virtual bool cmp( const Node &n ) const;
721 virtual uint size_of() const; // Size is bigger
722 public:
723 CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
724 : CallJavaNode(tf, addr, method) {
725 init_class_id(Class_CallStaticJava);
726 if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
727 init_flags(Flag_is_macro);
728 C->add_macro_node(this);
729 }
730 }
731 CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
732 : CallJavaNode(tf, addr, NULL) {
733 init_class_id(Class_CallStaticJava);
734 // This node calls a runtime stub, which often has narrow memory effects.
735 _adr_type = adr_type;
736 _name = name;
737 }
738
739 // If this is an uncommon trap, return the request code, else zero.
740 int uncommon_trap_request() const;
741 static int extract_uncommon_trap_request(const Node* call);
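  // Usage sketch (assumes the usual Deoptimization helpers; not part of this
  // class): the request code packs the deopt reason and action.
  //   int req = call->uncommon_trap_request();
  //   if (req != 0) {
  //     Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
  //     Deoptimization::DeoptAction action = Deoptimization::trap_request_action(req);
  //   }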
742
743 bool is_boxing_method() const {
744 return is_macro() && (method() != NULL) && method()->is_boxing_method();
745 }
746 // Late inlining modifies the JVMState, so we need to deep clone it
747   // when the call node is cloned (because it is a macro node).
748 virtual bool needs_deep_clone_jvms(Compile* C) {
749 return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);
836 GrowableArray<VMReg> _arg_regs;
837 GrowableArray<VMReg> _ret_regs;
838 const int _shadow_space_bytes;
839 const bool _need_transition;
840
841 CallNativeNode(const TypeFunc* tf, address addr, const char* name,
842 const TypePtr* adr_type,
843 const GrowableArray<VMReg>& arg_regs,
844 const GrowableArray<VMReg>& ret_regs,
845 int shadow_space_bytes,
846 bool need_transition)
847 : CallNode(tf, addr, adr_type), _arg_regs(arg_regs),
848 _ret_regs(ret_regs), _shadow_space_bytes(shadow_space_bytes),
849 _need_transition(need_transition)
850 {
851 init_class_id(Class_CallNative);
852 _name = name;
853 }
854 virtual int Opcode() const;
855 virtual bool guaranteed_safepoint() { return _need_transition; }
856 virtual Node* match(const ProjNode *proj, const Matcher *m);
857 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
858 #ifndef PRODUCT
859 virtual void dump_spec(outputStream *st) const;
860 #endif
861 };
862
863 //------------------------------CallLeafNoFPNode-------------------------------
864 // A CallLeafNode that does not use floating point, or uses it in the same
865 // manner as the generated code.
866 class CallLeafNoFPNode : public CallLeafNode {
867 public:
868 CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
869 const TypePtr* adr_type)
870 : CallLeafNode(tf, addr, name, adr_type)
871 {
872 init_class_id(Class_CallLeafNoFP);
873 }
874 virtual int Opcode() const;
875 };
876
877 //------------------------------CallLeafVectorNode-------------------------------
878 // CallLeafNode but calling with vector calling convention instead.
879 class CallLeafVectorNode : public CallLeafNode {
880 private:
881 uint _num_bits;
882 protected:
883 virtual bool cmp( const Node &n ) const;
884 virtual uint size_of() const; // Size is bigger
885 public:
886 CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
887 const TypePtr* adr_type, uint num_bits)
888 : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
889 {
890 }
891 virtual int Opcode() const;
892 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
893 };
894
896 //------------------------------Allocate---------------------------------------
897 // High-level memory allocation
898 //
899 // AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
900 // get expanded into a code sequence containing a call. Unlike other CallNodes,
901 // they have 2 memory projections and 2 i_o projections (which are distinguished by
902 // the _is_io_use flag in the projection.) This is needed when expanding the node in
903 // order to differentiate the uses of the projection on the normal control path from
904 // those on the exception return path.
905 //
906 class AllocateNode : public CallNode {
907 public:
908 enum {
909 // Output:
910 RawAddress = TypeFunc::Parms, // the newly-allocated raw address
911 // Inputs:
912 AllocSize = TypeFunc::Parms, // size (in bytes) of the new object
913 KlassNode, // type (maybe dynamic) of the obj.
914 InitialTest, // slow-path test (may be constant)
915 ALength, // array length (or TOP if none)
916 ValidLengthTest,
917 ParmLimit
918 };
919
920 static const TypeFunc* alloc_type(const Type* t) {
921 const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
922 fields[AllocSize] = TypeInt::POS;
923 fields[KlassNode] = TypeInstPtr::NOTNULL;
924 fields[InitialTest] = TypeInt::BOOL;
925 fields[ALength] = t; // length (can be a bad length)
926 fields[ValidLengthTest] = TypeInt::BOOL;
927
928 const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
929
930 // create result type (range)
931 fields = TypeTuple::fields(1);
932 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
933
934 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
935
936 return TypeFunc::make(domain, range);
937 }
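  // Usage sketch (assumed typical caller pattern, e.g. in GraphKit): instance
  // allocations pass Type::TOP for the unused length slot, array allocations
  // pass TypeInt::INT.
  //   const TypeFunc* atype = AllocateNode::alloc_type(Type::TOP);
  //   AllocateNode* alloc = new AllocateNode(C, atype, ctrl, mem, io,
  //                                          size, klass_node, initial_test);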
938
939 // Result of Escape Analysis
940 bool _is_scalar_replaceable;
941 bool _is_non_escaping;
942   // True when the MemBar for new is redundant with the MemBar at initializer exit
943 bool _is_allocation_MemBar_redundant;
944
945 virtual uint size_of() const; // Size is bigger
946 AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
947 Node *size, Node *klass_node, Node *initial_test);
948 // Expansion modifies the JVMState, so we need to deep clone it
949 virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
950 virtual int Opcode() const;
951 virtual uint ideal_reg() const { return Op_RegP; }
952 virtual bool guaranteed_safepoint() { return false; }
953
954 // allocations do not modify their arguments
955 virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}
956
957 // Pattern-match a possible usage of AllocateNode.
958 // Return null if no allocation is recognized.
959 // The operand is the pointer produced by the (possible) allocation.
960 // It must be a projection of the Allocate or its subsequent CastPP.
961 // (Note: This function is defined in file graphKit.cpp, near
962 // GraphKit::new_instance/new_array, whose output it recognizes.)
963 // The 'ptr' may not have an offset unless the 'offset' argument is given.
964 static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);
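  // Usage sketch: given a pointer that may have been produced by an allocation,
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase);
  //   if (alloc != NULL) {
  //     // "ptr" is (a CastPP of) the oop produced by "alloc"
  //   }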
965
966 // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
967 // an offset, which is reported back to the caller.
992
993   // Return true if the allocation doesn't escape the thread, i.e. its escape
994   // state is NoEscape or ArgEscape. InitializeNode._does_not_escape
995   // is true when its allocation's escape state is NoEscape or
996   // ArgEscape. In case the allocation's InitializeNode is NULL, check the
997   // AllocateNode._is_non_escaping flag.
998   // AllocateNode._is_non_escaping is true when its escape state is
999   // NoEscape.
1000 bool does_not_escape_thread() {
1001 InitializeNode* init = NULL;
1002 return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
1003 }
1004
1005   // If the object doesn't escape in its <init> method and a memory barrier is
1006   // inserted at the exit of its <init>, the memory barrier for new is not necessary.
1007   // Invoke this method when the MemBar at the exit of the initializer
1008   // post-dominates the allocation node.
1009 void compute_MemBar_redundancy(ciMethod* initializer);
1010 bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1011
1012 Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
1013 };
1014
1015 //------------------------------AllocateArray---------------------------------
1016 //
1017 // High-level array allocation
1018 //
1019 class AllocateArrayNode : public AllocateNode {
1020 public:
1021 AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1022 Node* initial_test, Node* count_val, Node* valid_length_test)
1023 : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1024 initial_test)
1025 {
1026 init_class_id(Class_AllocateArray);
1027 set_req(AllocateNode::ALength, count_val);
1028 set_req(AllocateNode::ValidLengthTest, valid_length_test);
1029 }
1030 virtual int Opcode() const;
1031
1032   // Dig the length operand out of an array allocation site.
1033 Node* Ideal_length() {
1034 return in(AllocateNode::ALength);
1035 }
1036
1037   // Dig the length operand out of an array allocation site and narrow the
1038   // type with a CastII, if necessary.
1039 Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);
1040
1041 // Pattern-match a possible usage of AllocateArrayNode.
1042 // Return null if no allocation is recognized.
1043 static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
1044 AllocateNode* allo = Ideal_allocation(ptr, phase);
1045 return (allo == NULL || !allo->is_AllocateArray())
1046 ? NULL : allo->as_AllocateArray();
1047 }
1048 };
1129 // 0 - object to lock
1130 // 1 - a BoxLockNode
1131 // 2 - a FastLockNode
1132 //
1133 class LockNode : public AbstractLockNode {
1134 public:
1135
1136 static const TypeFunc *lock_type() {
1137 // create input type (domain)
1138 const Type **fields = TypeTuple::fields(3);
1139 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
1140 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
1141 fields[TypeFunc::Parms+2] = TypeInt::BOOL; // FastLock
1142 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
1143
1144 // create result type (range)
1145 fields = TypeTuple::fields(0);
1146
1147 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1148
1149 return TypeFunc::make(domain,range);
1150 }
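  // A sketch of how a Lock macro node is typically wired up (see e.g.
  // GraphKit::shared_lock; local names are illustrative):
  //   const TypeFunc* tf = LockNode::lock_type();
  //   LockNode* lock = new LockNode(C, tf);
  //   lock->init_req(TypeFunc::Control, ctrl);
  //   lock->init_req(TypeFunc::Parms + 0, obj);   // object to lock
  //   lock->init_req(TypeFunc::Parms + 1, box);   // BoxLockNode
  //   lock->init_req(TypeFunc::Parms + 2, flock); // FastLockNode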
1151
1152 virtual int Opcode() const;
1153 virtual uint size_of() const; // Size is bigger
1154 LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
1155 init_class_id(Class_Lock);
1156 init_flags(Flag_is_macro);
1157 C->add_macro_node(this);
1158 }
1159 virtual bool guaranteed_safepoint() { return false; }
1160
1161 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1162 // Expansion modifies the JVMState, so we need to deep clone it
1163 virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1164
1165 bool is_nested_lock_region(); // Is this Lock nested?
1166 bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
1167 };
1168
1169 //------------------------------Unlock---------------------------------------
60 //------------------------------StartNode--------------------------------------
61 // The method start node
62 class StartNode : public MultiNode {
63 virtual bool cmp( const Node &n ) const;
64 virtual uint size_of() const; // Size is bigger
65 public:
66 const TypeTuple *_domain;
67 StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
68 init_class_id(Class_Start);
69 init_req(0,this);
70 init_req(1,root);
71 }
72 virtual int Opcode() const;
73 virtual bool pinned() const { return true; };
74 virtual const Type *bottom_type() const;
75 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
76 virtual const Type* Value(PhaseGVN* phase) const;
77 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
78 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
79 virtual const RegMask &in_RegMask(uint) const;
80 virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
81 virtual uint ideal_reg() const { return 0; }
82 #ifndef PRODUCT
83 virtual void dump_spec(outputStream *st) const;
84 virtual void dump_compact_spec(outputStream *st) const;
85 #endif
86 };
87
88 //------------------------------StartOSRNode-----------------------------------
89 // The method start node for on stack replacement code
90 class StartOSRNode : public StartNode {
91 public:
92 StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
93 virtual int Opcode() const;
94 };
95
96
97 //------------------------------ParmNode---------------------------------------
98 // Incoming parameters
99 class ParmNode : public ProjNode {
100 static const char * const names[TypeFunc::Parms+1];
101 public:
102 ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
103 init_class_id(Class_Parm);
104 }
105 virtual int Opcode() const;
106 virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
107 virtual uint ideal_reg() const;
108 #ifndef PRODUCT
109 virtual void dump_spec(outputStream *st) const;
110 virtual void dump_compact_spec(outputStream *st) const;
111 virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
112 #endif
113 };
544
545 virtual uint size_of() const { return sizeof(*this); }
546
547 // Assumes that "this" is an argument to a safepoint node "s", and that
548 // "new_call" is being created to correspond to "s". But the difference
549 // between the start index of the jvmstates of "new_call" and "s" is
550 // "jvms_adj". Produce and return a SafePointScalarObjectNode that
551 // corresponds appropriately to "this" in "new_call". Assumes that
552 // "sosn_map" is a map, specific to the translation of "s" to "new_call",
553 // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
554 SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;
555
556 #ifndef PRODUCT
557 virtual void dump_spec(outputStream *st) const;
558 #endif
559 };
560
561
562 // Simple container for the outgoing projections of a call. Useful
563 // for serious surgery on calls.
564 class CallProjections {
565 public:
566 Node* fallthrough_proj;
567 Node* fallthrough_catchproj;
568 Node* fallthrough_memproj;
569 Node* fallthrough_ioproj;
570 Node* catchall_catchproj;
571 Node* catchall_memproj;
572 Node* catchall_ioproj;
573 Node* exobj;
574 uint nb_resproj;
575 Node* resproj[1]; // at least one projection
576
577 CallProjections(uint nbres) {
578 fallthrough_proj = NULL;
579 fallthrough_catchproj = NULL;
580 fallthrough_memproj = NULL;
581 fallthrough_ioproj = NULL;
582 catchall_catchproj = NULL;
583 catchall_memproj = NULL;
584 catchall_ioproj = NULL;
585 exobj = NULL;
586 nb_resproj = nbres;
587 resproj[0] = NULL;
588 for (uint i = 1; i < nb_resproj; i++) {
589 resproj[i] = NULL;
590 }
591 }
592
593 };
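// A minimal usage sketch for this variant (local names are illustrative): here
// extract_projections() allocates and returns the container, and multiple
// result projections are reachable through resproj[]:
//   CallProjections* projs = call->extract_projections(/*separate_io_proj=*/true);
//   for (uint i = 0; i < projs->nb_resproj; i++) {
//     Node* res = projs->resproj[i];
//     // rewire users of each result projection
//   }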
594
595 class CallGenerator;
596
597 //------------------------------CallNode---------------------------------------
598 // Call nodes now subsume the function of debug nodes at callsites, so they
599 // contain the functionality of a full scope chain of debug nodes.
600 class CallNode : public SafePointNode {
601 friend class VMStructs;
602
603 protected:
604 bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseTransform* phase);
605
606 public:
607 const TypeFunc* _tf; // Function type
608 address _entry_point; // Address of method being called
609 float _cnt; // Estimate of number of times called
610 CallGenerator* _generator; // corresponding CallGenerator for some late inline calls
611 const char* _name; // Printable name, if _method is NULL
612
613 CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
614 : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
615 _tf(tf),
616 _entry_point(addr),
617 _cnt(COUNT_UNKNOWN),
618 _generator(NULL),
619 _name(NULL)
620 {
621 init_class_id(Class_Call);
622 }
623
624 const TypeFunc* tf() const { return _tf; }
625 const address entry_point() const { return _entry_point; }
626 const float cnt() const { return _cnt; }
627 CallGenerator* generator() const { return _generator; }
628
629 void set_tf(const TypeFunc* tf) { _tf = tf; }
630 void set_entry_point(address p) { _entry_point = p; }
631 void set_cnt(float c) { _cnt = c; }
632 void set_generator(CallGenerator* cg) { _generator = cg; }
633
634 virtual const Type* bottom_type() const;
635 virtual const Type* Value(PhaseGVN* phase) const;
636 virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
637 virtual Node* Identity(PhaseGVN* phase) { return this; }
638 virtual bool cmp(const Node &n) const;
639 virtual uint size_of() const = 0;
640 virtual void calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
641 virtual Node* match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
642 virtual uint ideal_reg() const { return NotAMachineReg; }
643 // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
644 // for some macro nodes whose expansion does not have a safepoint on the fast path.
645 virtual bool guaranteed_safepoint() { return true; }
646 // For macro nodes, the JVMState gets modified during expansion. If calls
647 // use MachConstantBase, it gets modified during matching. So when cloning
648 // the node the JVMState must be deep cloned. Default is to shallow clone.
649 virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
650
651 // Returns true if the call may modify n
652 virtual bool may_modify(const TypeOopPtr* t_oop, PhaseTransform* phase);
653 // Does this node have a use of n other than in debug information?
654 bool has_non_debug_use(Node* n);
655 bool has_debug_use(Node* n);
656   // Returns the unique CheckCastPP of a call,
657   // or the result projection if there are several CheckCastPPs,
658   // or NULL if there is none.
659 Node* result_cast();
660   // Does this node return a pointer?
661 bool returns_pointer() const {
662 const TypeTuple* r = tf()->range_sig();
663 return (!tf()->returns_inline_type_as_fields() &&
664 r->cnt() > TypeFunc::Parms &&
665 r->field_at(TypeFunc::Parms)->isa_ptr());
666 }
667
668 // Collect all the interesting edges from a call for use in
669 // replacing the call by something else. Used by macro expansion
670 // and the late inlining support.
671 CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);
672
673 virtual uint match_edge(uint idx) const;
674
675 bool is_call_to_arraycopystub() const;
676
677 virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
678
679 #ifndef PRODUCT
680 virtual void dump_req(outputStream* st = tty) const;
681 virtual void dump_spec(outputStream* st) const;
682 #endif
683 };
684
685
686 //------------------------------CallJavaNode-----------------------------------
687 // Make a static or dynamic subroutine call node using Java calling
688 // convention. (The "Java" calling convention is the compiler's calling
689 // convention, as opposed to the interpreter's or that of native C.)
690 class CallJavaNode : public CallNode {
691 friend class VMStructs;
721 bool override_symbolic_info() const { return _override_symbolic_info; }
722 void set_arg_escape(bool f) { _arg_escape = f; }
723 bool arg_escape() const { return _arg_escape; }
724 void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
725
726 DEBUG_ONLY( bool validate_symbolic_info() const; )
727
728 #ifndef PRODUCT
729 virtual void dump_spec(outputStream *st) const;
730 virtual void dump_compact_spec(outputStream *st) const;
731 #endif
732 };
733
734 //------------------------------CallStaticJavaNode-----------------------------
735 // Make a direct subroutine call using Java calling convention (for static
736 // calls and optimized virtual calls, plus calls to wrappers for run-time
737 // routines); generates static stub.
738 class CallStaticJavaNode : public CallJavaNode {
739 virtual bool cmp( const Node &n ) const;
740 virtual uint size_of() const; // Size is bigger
741
742 bool remove_useless_allocation(PhaseGVN *phase, Node* ctl, Node* mem, Node* unc_arg);
743
744 public:
745 CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
746 : CallJavaNode(tf, addr, method) {
747 init_class_id(Class_CallStaticJava);
748 if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
749 init_flags(Flag_is_macro);
750 C->add_macro_node(this);
751 }
752 const TypeTuple *r = tf->range_sig();
753 if (InlineTypeReturnedAsFields &&
754 method != NULL &&
755 method->is_method_handle_intrinsic() &&
756 r->cnt() > TypeFunc::Parms &&
757 r->field_at(TypeFunc::Parms)->isa_oopptr() &&
758 r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
759 // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
760 init_flags(Flag_is_macro);
761 C->add_macro_node(this);
762 }
763 }
764 CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
765 : CallJavaNode(tf, addr, NULL) {
766 init_class_id(Class_CallStaticJava);
767 // This node calls a runtime stub, which often has narrow memory effects.
768 _adr_type = adr_type;
769 _name = name;
770 }
771
772 // If this is an uncommon trap, return the request code, else zero.
773 int uncommon_trap_request() const;
774 static int extract_uncommon_trap_request(const Node* call);
775
776 bool is_boxing_method() const {
777 return is_macro() && (method() != NULL) && method()->is_boxing_method();
778 }
779 // Late inlining modifies the JVMState, so we need to deep clone it
780   // when the call node is cloned (because it is a macro node).
781 virtual bool needs_deep_clone_jvms(Compile* C) {
782 return is_boxing_method() || CallNode::needs_deep_clone_jvms(C);
869 GrowableArray<VMReg> _arg_regs;
870 GrowableArray<VMReg> _ret_regs;
871 const int _shadow_space_bytes;
872 const bool _need_transition;
873
874 CallNativeNode(const TypeFunc* tf, address addr, const char* name,
875 const TypePtr* adr_type,
876 const GrowableArray<VMReg>& arg_regs,
877 const GrowableArray<VMReg>& ret_regs,
878 int shadow_space_bytes,
879 bool need_transition)
880 : CallNode(tf, addr, adr_type), _arg_regs(arg_regs),
881 _ret_regs(ret_regs), _shadow_space_bytes(shadow_space_bytes),
882 _need_transition(need_transition)
883 {
884 init_class_id(Class_CallNative);
885 _name = name;
886 }
887 virtual int Opcode() const;
888 virtual bool guaranteed_safepoint() { return _need_transition; }
889 virtual Node* match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
890 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
891 #ifndef PRODUCT
892 virtual void dump_spec(outputStream *st) const;
893 #endif
894 };
895
896 //------------------------------CallLeafNoFPNode-------------------------------
897 // A CallLeafNode that does not use floating point, or uses it in the same
898 // manner as the generated code.
899 class CallLeafNoFPNode : public CallLeafNode {
900 public:
901 CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
902 const TypePtr* adr_type)
903 : CallLeafNode(tf, addr, name, adr_type)
904 {
905 init_class_id(Class_CallLeafNoFP);
906 }
907 virtual int Opcode() const;
908 virtual uint match_edge(uint idx) const;
909 };
910
911 //------------------------------CallLeafVectorNode-------------------------------
912 // CallLeafNode but calling with vector calling convention instead.
913 class CallLeafVectorNode : public CallLeafNode {
914 private:
915 uint _num_bits;
916 protected:
917 virtual bool cmp( const Node &n ) const;
918 virtual uint size_of() const; // Size is bigger
919 public:
920 CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
921 const TypePtr* adr_type, uint num_bits)
922 : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
923 {
924 }
925 virtual int Opcode() const;
926 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
927 };
928
930 //------------------------------Allocate---------------------------------------
931 // High-level memory allocation
932 //
933 // AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
934 // get expanded into a code sequence containing a call. Unlike other CallNodes,
935 // they have 2 memory projections and 2 i_o projections (which are distinguished by
936 // the _is_io_use flag in the projection.) This is needed when expanding the node in
937 // order to differentiate the uses of the projection on the normal control path from
938 // those on the exception return path.
939 //
940 class AllocateNode : public CallNode {
941 public:
942 enum {
943 // Output:
944 RawAddress = TypeFunc::Parms, // the newly-allocated raw address
945 // Inputs:
946 AllocSize = TypeFunc::Parms, // size (in bytes) of the new object
947 KlassNode, // type (maybe dynamic) of the obj.
948 InitialTest, // slow-path test (may be constant)
949 ALength, // array length (or TOP if none)
950 InlineTypeNode, // InlineTypeNode if this is an inline type allocation
951 DefaultValue, // default value in case of non-flattened inline type array
952 RawDefaultValue, // same as above but as raw machine word
953 ValidLengthTest,
954 ParmLimit
955 };
956
957 static const TypeFunc* alloc_type(const Type* t) {
958 const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
959 fields[AllocSize] = TypeInt::POS;
960 fields[KlassNode] = TypeInstPtr::NOTNULL;
961 fields[InitialTest] = TypeInt::BOOL;
962 fields[ALength] = t; // length (can be a bad length)
963 fields[InlineTypeNode] = Type::BOTTOM;
964 fields[DefaultValue] = TypeInstPtr::NOTNULL;
965 fields[RawDefaultValue] = TypeX_X;
966 fields[ValidLengthTest] = TypeInt::BOOL;
967
968 const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
969
970 // create result type (range)
971 fields = TypeTuple::fields(1);
972 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
973
974 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
975
976 return TypeFunc::make(domain, range);
977 }
978
979 // Result of Escape Analysis
980 bool _is_scalar_replaceable;
981 bool _is_non_escaping;
982   // True when the MemBar for new is redundant with the MemBar at initializer exit
983 bool _is_allocation_MemBar_redundant;
984 bool _larval;
985
986 virtual uint size_of() const; // Size is bigger
987 AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
988 Node *size, Node *klass_node, Node *initial_test,
989 InlineTypeBaseNode* inline_type_node = NULL);
990 // Expansion modifies the JVMState, so we need to deep clone it
991 virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
992 virtual int Opcode() const;
993 virtual uint ideal_reg() const { return Op_RegP; }
994 virtual bool guaranteed_safepoint() { return false; }
995
996 // allocations do not modify their arguments
997 virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}
998
999 // Pattern-match a possible usage of AllocateNode.
1000 // Return null if no allocation is recognized.
1001 // The operand is the pointer produced by the (possible) allocation.
1002 // It must be a projection of the Allocate or its subsequent CastPP.
1003 // (Note: This function is defined in file graphKit.cpp, near
1004 // GraphKit::new_instance/new_array, whose output it recognizes.)
1005 // The 'ptr' may not have an offset unless the 'offset' argument is given.
1006 static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);
1007
1008 // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1009 // an offset, which is reported back to the caller.
1034
1035   // Return true if the allocation doesn't escape the thread, i.e. its escape
1036   // state is NoEscape or ArgEscape. InitializeNode._does_not_escape
1037   // is true when its allocation's escape state is NoEscape or
1038   // ArgEscape. In case the allocation's InitializeNode is NULL, check the
1039   // AllocateNode._is_non_escaping flag.
1040   // AllocateNode._is_non_escaping is true when its escape state is
1041   // NoEscape.
1042 bool does_not_escape_thread() {
1043 InitializeNode* init = NULL;
1044 return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
1045 }
1046
1047   // If the object doesn't escape in its <init> method and a memory barrier is
1048   // inserted at the exit of its <init>, the memory barrier for new is not necessary.
1049   // Invoke this method when the MemBar at the exit of the initializer
1050   // post-dominates the allocation node.
1051 void compute_MemBar_redundancy(ciMethod* initializer);
1052 bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1053
1054 Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
1055 };
1056
1057 //------------------------------AllocateArray---------------------------------
1058 //
1059 // High-level array allocation
1060 //
1061 class AllocateArrayNode : public AllocateNode {
1062 public:
1063 AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, Node* size, Node* klass_node,
1064 Node* initial_test, Node* count_val, Node* valid_length_test,
1065 Node* default_value, Node* raw_default_value)
1066 : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node, initial_test)
1067 {
1068 init_class_id(Class_AllocateArray);
1069 set_req(AllocateNode::ALength, count_val);
1070 set_req(AllocateNode::ValidLengthTest, valid_length_test);
1071 init_req(AllocateNode::DefaultValue, default_value);
1072 init_req(AllocateNode::RawDefaultValue, raw_default_value);
1073 }
1074 virtual int Opcode() const;
1075
1076   // Dig the length operand out of an array allocation site.
1077 Node* Ideal_length() {
1078 return in(AllocateNode::ALength);
1079 }
1080
1081   // Dig the length operand out of an array allocation site and narrow the
1082   // type with a CastII, if necessary.
1083 Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);
1084
1085 // Pattern-match a possible usage of AllocateArrayNode.
1086 // Return null if no allocation is recognized.
1087 static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
1088 AllocateNode* allo = Ideal_allocation(ptr, phase);
1089 return (allo == NULL || !allo->is_AllocateArray())
1090 ? NULL : allo->as_AllocateArray();
1091 }
1092 };
1173 // 0 - object to lock
1174 // 1 - a BoxLockNode
1175 // 2 - a FastLockNode
1176 //
1177 class LockNode : public AbstractLockNode {
1178 public:
1179
1180 static const TypeFunc *lock_type() {
1181 // create input type (domain)
1182 const Type **fields = TypeTuple::fields(3);
1183 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
1184 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
1185 fields[TypeFunc::Parms+2] = TypeInt::BOOL; // FastLock
1186 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
1187
1188 // create result type (range)
1189 fields = TypeTuple::fields(0);
1190
1191 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
1192
1193 return TypeFunc::make(domain, range);
1194 }
1195
1196 virtual int Opcode() const;
1197 virtual uint size_of() const; // Size is bigger
1198 LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
1199 init_class_id(Class_Lock);
1200 init_flags(Flag_is_macro);
1201 C->add_macro_node(this);
1202 }
1203 virtual bool guaranteed_safepoint() { return false; }
1204
1205 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1206 // Expansion modifies the JVMState, so we need to deep clone it
1207 virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1208
1209 bool is_nested_lock_region(); // Is this Lock nested?
1210 bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
1211 };
1212
1213 //------------------------------Unlock---------------------------------------