//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};
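
// A minimal usage sketch (hedged; the index arithmetic is an assumption based
// on the TypeFunc::Parms convention): incoming argument i is materialized as
// a projection of the StartNode,
//
//   Node* parm = gvn.transform(new ParmNode(start, TypeFunc::Parms + i));
//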

    assert(jvms != nullptr, "JVMS reference is null.");
    return jvms->scloff() + _merge_pointer_idx + 1;
  }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj". Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call". Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
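
  // A minimal cloning sketch (hedged; Dict, cmpkey and hashkey are assumed
  // from libadt/dict.hpp, and "smerge" is a hypothetical pointer to this
  // node):
  //
  //   Dict* sosn_map = new Dict(cmpkey, hashkey);
  //   bool new_node = false;
  //   SafePointScalarMergeNode* copy = smerge->clone(sosn_map, new_node);
  //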

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

// Simple container for the outgoing projections of a call. Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};
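
// A minimal usage sketch (hedged; assumes a CallNode* "call" in scope, and
// uses CallNode::extract_projections declared further below):
//
//   CallProjections projs;
//   call->extract_projections(&projs, /*separate_io_proj=*/false);
//   if (projs.fallthrough_memproj != nullptr) {
//     // rewire the call's memory users here
//   }
//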

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);

public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // Corresponding CallGenerator for some late inline calls
  const char*     _name;        // Printable name, if _method is null

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
    : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(nullptr),
      _name(nullptr)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()        const { return _tf; }
  address entry_point()       const { return _entry_point; }
  float cnt()                 const { return _cnt; }
  CallGenerator* generator()  const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type* bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const = 0;
  virtual void calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
  virtual Node* match(const ProjNode* proj, const Matcher* m);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. If the call is
  // late inlined, it also needs the full JVMState. So when cloning the
  // node the JVMState must be deep cloned. Default is to shallow clone.
  virtual bool needs_deep_clone_jvms(Compile* C) { return _generator != nullptr || C->needs_deep_clone_jvms(); }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node* n);
  // Returns the unique CheckCastPP of a call, or the result projection if
  // there are several CheckCastPPs, or null if there is none.
  Node* result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple* r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else. Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true) const;

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;
  bool is_call_to_multianewarray_stub() const;

  virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}

#ifndef PRODUCT
  virtual void dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
  virtual void dump_spec(outputStream* st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  void set_arg_escape(bool f) { _arg_escape = f; }
  bool arg_escape() const     { return _arg_escape; }
  void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
  void register_for_late_inline();

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
    : CallJavaNode(tf, addr, method) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
    : CallJavaNode(tf, addr, nullptr) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _name = name;
  }

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  bool is_uncommon_trap() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != nullptr) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned (because it is a macro node).
  virtual bool needs_deep_clone_jvms(Compile* C) {
    init_class_id(Class_CallLeafPure);
  }
  int Opcode() const override;
  Node* Ideal(PhaseGVN* phase, bool can_reshape) override;

  CallLeafPureNode* inline_call_leaf_pure_node(Node* control = nullptr) const;
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeafNoFP);
  }
  virtual int Opcode() const;
};

//------------------------------CallLeafVectorNode-------------------------------
// CallLeafNode but calling with vector calling convention instead.
class CallLeafVectorNode : public CallLeafNode {
private:
  uint _num_bits;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
                     const TypePtr* adr_type, uint num_bits)
    : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
  {
  }
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
};

// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call. Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection). This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
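// A minimal traversal sketch (hedged; DUIterator_Fast and ProjNode::_is_io_use
// are assumed from node.hpp/projNode.hpp): telling the two i_o projections
// apart when walking an AllocateNode's outputs:
//
//   for (DUIterator_Fast imax, i = alloc->fast_outs(imax); i < imax; i++) {
//     Node* use = alloc->fast_out(i);
//     if (use->is_Proj() && use->as_Proj()->_con == TypeFunc::I_O) {
//       bool on_exception_path = use->as_Proj()->_is_io_use;
//     }
//   }
//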
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms,   // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                      // type (maybe dynamic) of the obj.
    InitialTest,                    // slow-path test (may be constant)
    ALength,                        // array length (or TOP if none)
    ValidLengthTest,
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]       = TypeInt::POS;
    fields[KlassNode]       = TypeInstPtr::NOTNULL;
    fields[InitialTest]     = TypeInt::BOOL;
    fields[ALength]         = t;  // length (can be a bad length)
    fields[ValidLengthTest] = TypeInt::BOOL;

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }
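
  // A call-site sketch (hedged; mirrors how GraphKit-style code is expected
  // to build the TypeFunc for an array allocation, where the extra domain
  // slot carries the length type):
  //
  //   const TypeFunc* atype = AllocateArrayNode::alloc_type(TypeInt::INT);
  //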

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr);

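  // A pattern-matching sketch (hedged; "ptr" is a hypothetical oop-producing
  // node encountered by an optimization pass):
  //
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr);
  //   if (alloc != nullptr && alloc->_is_scalar_replaceable) {
  //     // "ptr" is (a cast of) the oop produced by this allocation
  //   }
  //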
  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.

  // If the object doesn't escape in its <init> method and there is a memory
  // barrier inserted at the exit of its <init>, the memory barrier for the
  // allocation is not necessary. Invoke this method when the MemBar at the
  // exit of the initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);

  NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
                    Node* initial_test, Node* count_val, Node* valid_length_test)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
    set_req(AllocateNode::ValidLengthTest, valid_length_test);
  }
  virtual int Opcode() const;

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary.
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
    AllocateNode* allo = Ideal_allocation(ptr);
    return (allo == nullptr || !allo->is_AllocateArray())
           ? nullptr : allo->as_AllocateArray();
  }
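
  // A combined-use sketch (hedged; "ptr" is a hypothetical node):
  //
  //   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr);
  //   if (alloc != nullptr) {
  //     Node* len = alloc->Ideal_length();  // raw length input, may need a CastII
  //   }
  //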
};

//==============================================================================
// Variant of the declarations above, extended with inline type support
// (e.g. domain_cc()/range_sig() TypeFunc accessors and InlineTypeNode
// allocation inputs).
//==============================================================================
//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

    assert(jvms != nullptr, "JVMS reference is null.");
    return jvms->scloff() + _merge_pointer_idx + 1;
  }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj". Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call". Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

// Simple container for the outgoing projections of a call. Useful
// for serious surgery on calls.
class CallProjections {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* exobj;
  uint  nb_resproj;
  Node* resproj[1]; // at least one projection

  CallProjections(uint nbres) {
    fallthrough_proj      = nullptr;
    fallthrough_catchproj = nullptr;
    fallthrough_memproj   = nullptr;
    fallthrough_ioproj    = nullptr;
    catchall_catchproj    = nullptr;
    catchall_memproj      = nullptr;
    catchall_ioproj       = nullptr;
    exobj                 = nullptr;
    nb_resproj            = nbres;
    resproj[0]            = nullptr;
    for (uint i = 1; i < nb_resproj; i++) {
      resproj[i] = nullptr;
    }
  }
};
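
// A minimal usage sketch (hedged; assumes a CallNode* "call" in scope; in
// this variant CallNode::extract_projections, declared further below,
// returns an object sized to hold nb_resproj result projections):
//
//   CallProjections* projs = call->extract_projections(/*separate_io_proj=*/false);
//   for (uint i = 0; i < projs->nb_resproj; i++) {
//     Node* res = projs->resproj[i];
//     // rewire each result projection here
//   }
//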

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);

public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // Corresponding CallGenerator for some late inline calls
  const char*     _name;        // Printable name, if _method is null

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
    : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(nullptr),
      _name(nullptr)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()        const { return _tf; }
  address entry_point()       const { return _entry_point; }
  float cnt()                 const { return _cnt; }
  CallGenerator* generator()  const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type* bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const = 0;
  virtual void calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
  virtual Node* match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. If the call is
  // late inlined, it also needs the full JVMState. So when cloning the
  // node the JVMState must be deep cloned. Default is to shallow clone.
  virtual bool needs_deep_clone_jvms(Compile* C) { return _generator != nullptr || C->needs_deep_clone_jvms(); }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node* n);
  bool has_debug_use(Node* n);
  // Returns the unique CheckCastPP of a call, or the result projection if
  // there are several CheckCastPPs, or null if there is none.
  Node* result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple* r = tf()->range_sig();
    return (!tf()->returns_inline_type_as_fields() &&
            r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else. Used by macro expansion
  // and the late inlining support.
  CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true) const;

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;
  bool is_call_to_multianewarray_stub() const;

  virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}

#ifndef PRODUCT
  virtual void dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
  virtual void dump_spec(outputStream* st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  void set_arg_escape(bool f) { _arg_escape = f; }
  bool arg_escape() const     { return _arg_escape; }
  void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
  void register_for_late_inline();

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};


//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg);

public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
    : CallJavaNode(tf, addr, method) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    const TypeTuple *r = tf->range_sig();
    if (InlineTypeReturnedAsFields &&
        method != nullptr &&
        method->is_method_handle_intrinsic() &&
        r->cnt() > TypeFunc::Parms &&
        r->field_at(TypeFunc::Parms)->isa_oopptr() &&
        r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
      // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
    : CallJavaNode(tf, addr, nullptr) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _name = name;
  }

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  bool is_uncommon_trap() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != nullptr) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to deep clone it
  // when the call node is cloned (because it is a macro node).
  virtual bool needs_deep_clone_jvms(Compile* C) {
    init_class_id(Class_CallLeafPure);
  }
  int Opcode() const override;
  Node* Ideal(PhaseGVN* phase, bool can_reshape) override;

  CallLeafPureNode* inline_call_leaf_pure_node(Node* control = nullptr) const;
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeafNoFP);
  }
  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------CallLeafVectorNode-------------------------------
// CallLeafNode but calling with vector calling convention instead.
class CallLeafVectorNode : public CallLeafNode {
private:
  uint _num_bits;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
                     const TypePtr* adr_type, uint num_bits)
    : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
  {
  }
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
};

// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call. Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection). This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms,   // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                      // type (maybe dynamic) of the obj.
    InitialTest,                    // slow-path test (may be constant)
    ALength,                        // array length (or TOP if none)
    ValidLengthTest,
    InlineType,                     // InlineTypeNode if this is an inline type allocation
    InitValue,                      // Init value for null-free inline type arrays
    RawInitValue,                   // Same as above but as raw machine word
    ParmLimit
  };
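
  // An access sketch (hedged; "alloc" is a hypothetical AllocateNode* for a
  // null-free inline type array):
  //
  //   Node* init_val = alloc->in(AllocateNode::InitValue);
  //   Node* raw_init = alloc->in(AllocateNode::RawInitValue);
  //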

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]       = TypeInt::POS;
    fields[KlassNode]       = TypeInstPtr::NOTNULL;
    fields[InitialTest]     = TypeInt::BOOL;
    fields[ALength]         = t;  // length (can be a bad length)
    fields[ValidLengthTest] = TypeInt::BOOL;
    fields[InlineType]      = Type::BOTTOM;
    fields[InitValue]       = TypeInstPtr::NOTNULL;
    fields[RawInitValue]    = TypeX_X;

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test,
               InlineTypeNode* inline_type_node = nullptr);
  // Expansion modifies the JVMState, so we need to deep clone it
  virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.

  // If the object doesn't escape in its <init> method and there is a memory
  // barrier inserted at the exit of its <init>, the memory barrier for the
  // allocation is not necessary. Invoke this method when the MemBar at the
  // exit of the initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);

  NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
                    Node* initial_test, Node* count_val, Node* valid_length_test,
                    Node* init_value, Node* raw_init_value)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
    set_req(AllocateNode::ValidLengthTest, valid_length_test);
    init_req(AllocateNode::InitValue, init_value);
    init_req(AllocateNode::RawInitValue, raw_init_value);
  }
  virtual uint size_of() const { return sizeof(*this); }
  virtual int Opcode() const;

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary.
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
    AllocateNode* allo = Ideal_allocation(ptr);
    return (allo == nullptr || !allo->is_AllocateArray())
           ? nullptr : allo->as_AllocateArray();
  }
};
