  static bool all_controls_dominate(Node* dom, Node* sub);

  virtual const class TypePtr *adr_type() const; // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Returns NodeSentinel ((Node*)-1) to signal a short-circuit to null.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return nullptr;
#endif
  }

#ifdef ASSERT
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
#endif
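
  // Hedged usage sketch: raw_adr_type()/set_adr_type() let a caller copy the
  // debug-only _adr_type when cloning a MemNode; in product builds
  // raw_adr_type() returns nullptr, so the copy naturally becomes a no-op.
  // 'clone' and 'orig' are illustrative names:
  //
  //   #ifdef ASSERT
  //   clone->set_adr_type(orig->raw_adr_type());
  //   #endif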

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }
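
  // For instance, in the Load* subclasses, LoadBNode maps to Op_StoreB,
  // LoadINode to Op_StoreI, and LoadNKlassNode (below) to Op_StoreNKlass,
  // so a load can be re-issued as a store of the same width and kind.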

  // What is the type of the value in memory? (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }
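
  // Worked example (hedged): type2aelembytes yields the access width in
  // bytes, e.g. T_BYTE -> 1, T_INT -> 4, T_LONG -> 8, T_NARROWKLASS -> 4
  // (a compressed klass pointer). The extra 'true' argument on the debug
  // path appears to relax the helper's internal assert for special types
  // such as T_ADDRESS.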

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.

  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
};
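
// Hedged usage sketch for the factory above. The helpers basic_plus_adr and
// immutable_memory are assumed GraphKit context, and the names are
// illustrative: loading the klass pointer of an object 'obj':
//
//   Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
//   Node* klass = gvn.transform(
//       LoadKlassNode::make(gvn, nullptr, immutable_memory(), k_adr,
//                           TypeInstPtr::KLASS));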

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
  // stores that can be freely reordered from those requiring release
  // semantics to adhere to the Java specification. The required behaviour
  // is stored in this field.
  const MemOrd _mo;
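
  // For instance (hedged sketch): a plain field store would be built with
  // MemNode::unordered, while a store to a volatile field uses
  // MemNode::release so that preceding writes are ordered before it, e.g.
  //
  //   new StoreINode(ctl, mem, adr, adr_type, val, MemNode::release);
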
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);

public:
  // We must ensure that stores of object references will be visible

};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};
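
// Hedged note: these nodes implement an atomic swap. A getAndSet intrinsic
// on a reference is expected to lower to a GetAndSetPNode (or GetAndSetNNode
// when compressed oops are in use), yielding the previous value while
// installing the new one in a single atomic operation.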

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
  bool _word_copy_only;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
    : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
      _word_copy_only(val->bottom_type()->isa_long() &&
                      (!val->bottom_type()->is_long()->is_con() ||
                       val->bottom_type()->is_long()->get_con() != 0)) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }
  bool word_copy_only() const { return _word_copy_only; }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* raw_val,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
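
  // Worked example (hedged, illustrative numbers): zeroing the body of a new
  // int[6] whose payload starts after a 16-byte header gives
  // start_offset = 16 (16 % BytesPerInt == 0) and
  // end_offset = 16 + 6 * 4 = 40 (40 % BytesPerLong == 0), so the whole
  // region can be cleared with aligned word stores.
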
  // If *np is a ClearArray for a different instance, step *np past it to the
  // allocation's input memory edge and return true; return false if it
  // initializes the instance we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model. Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them. We insert a MemBar-Acquire after a FastLock or
// volatile-load. Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them. We insert a MemBar-Release
// before a FastUnlock or volatile-store. All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate them from any following volatile-load.
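//
// Hedged sketch of the placement just described, around a volatile store
// (StoreX/LoadX are illustrative placeholders):
//
//   MemBarRelease    // no preceding ref may move below the store
//   StoreX ...       // the volatile store itself
//   MemBarVolatile   // separates it from any following volatile load
//
// and around a volatile load:
//
//   LoadX ...        // the volatile load
//   MemBarAcquire    // no following ref may move above the load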
class MemBarNode: public MultiNode {
  virtual uint hash() const ;    // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;  // Always fail, except on self

  // Memory type this node is serializing. Usually either rawptr or bottom.
  const TypePtr* _adr_type;

  // How is this membar related to a nearby memory access?
  enum {
    Standalone,
    TrailingLoad,
    TrailingStore,
    LeadingStore,
    TrailingLoadStore,
    LeadingLoadStore,
    TrailingPartialArrayCopy
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  // Factory method. Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = nullptr);
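
  // Hedged usage sketch: a wide acquire barrier over all memory slices, and
  // a narrow release barrier over one alias class ordered after node 'prec'
  // ('alias_idx' and 'prec' are assumed to be in scope):
  //
  //   MemBarNode* wide   = MemBarNode::make(C, Op_MemBarAcquire);
  //   MemBarNode* narrow = MemBarNode::make(C, Op_MemBarRelease, alias_idx, prec);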

  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }
  void set_trailing_partial_array_copy() { _kind = TrailingPartialArrayCopy; }
  bool trailing_partial_array_copy() const { return _kind == TrailingPartialArrayCopy; }
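
  // Hedged note: leading/trailing membars come in pairs bracketing one access
  // (e.g. a MemBarRelease leading a volatile store and a MemBarVolatile
  // trailing it); trailing_membar() navigates from the leading barrier to its
  // partner and leading_membar() goes the other way, with the debug-only
  // _pair_idx available to cross-check the pairing.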