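  // Dominance helpers: determine whether 'dom' dominates every control on
  // which 'sub' depends. The maybe_ variant exposes the detailed DomResult;
  // all_controls_dominate() reduces it to "did we prove Dominate?".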
  static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
  static bool all_controls_dominate(Node* dom, Node* sub) {
    DomResult dom_result = maybe_all_controls_dominate(dom, sub);
    return dom_result == DomResult::Dominate;
  }

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
    return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
  }

#ifdef ASSERT
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
#endif

  // Return the barrier data of n, if available, or 0 otherwise.
  static uint8_t barrier_data(const Node* n);

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

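  // Per-node GC barrier information. The bits are interpreted by the active
  // GC's C2 barrier-set code (e.g., to record which barriers can be elided
  // on this access); this base class only stores and transports the value.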
  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }

  // ...

  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
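// Instances are normally created through LoadKlassNode::make() above, which
// emits this narrow variant (followed by a DecodeNKlass) when the klass field
// holds a compressed Klass pointer, and a plain LoadKlassNode otherwise.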
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
  // stores that can be reordered from those requiring release semantics to
  // adhere to the Java specification. The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);

public:
  // We must ensure that stores of object references will be visible

  // ...
};

//------------------------------GetAndSetPNode---------------------------
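// Atomically exchanges an object reference in memory with a new value and
// returns the previous value, as used by the Unsafe getAndSet intrinsics.
// GetAndSetNNode below is the corresponding compressed-oop (narrow) flavor.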
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
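  // True when the initialization value may be non-zero; such an area must be
  // filled word by word with that value instead of being bulk-zeroed (see the
  // constructor's initializer below).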
  bool _word_copy_only;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
    : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
      _word_copy_only(val->bottom_type()->isa_long() &&
                      (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }
  bool word_copy_only() const { return _word_copy_only; }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
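  // Illustrative sketch only (the names below are hypothetical, not part of
  // this interface): a caller expanding an allocation might clear the object
  // body with something like
  //   mem = ClearArrayNode::clear_memory(ctl, mem, obj, default_value, raw_default_value,
  //                                      header_size_in_bytes, size_in_bytes, phase);
  // where header_size_in_bytes is BytesPerInt-aligned, size_in_bytes is
  // BytesPerLong-aligned, and the returned node becomes the new memory state.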
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* raw_val,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return the allocation's input memory edge if it is a different instance,
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model. Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them. We insert a MemBar-Acquire after a FastLock or
// volatile-load. Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them. We insert a MemBar-Release
// before a FastUnlock or volatile-store. All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate them from any following volatile-load.
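// For example, a volatile store is bracketed roughly as
//   MemBarRelease; StoreX(field); MemBarVolatile;
// and a volatile load is followed by a MemBarAcquire. (Illustrative summary
// of the rules above; the exact shape is chosen by the parser/GraphKit.)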
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

  // ...

    TrailingLoadStore,
    LeadingLoadStore,
    TrailingPartialArrayCopy
  } _kind;

#ifdef ASSERT
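  // Leading and trailing membars that form a pair are tagged with the same
  // index, so the pairing can be verified in debug builds.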
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  // Factory method. Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = nullptr);

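  // Navigate from one membar of a leading/trailing pair to its partner.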
  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }
  void set_trailing_partial_array_copy() { _kind = TrailingPartialArrayCopy; }
  bool trailing_partial_array_copy() const { return _kind == TrailingPartialArrayCopy; }