112 static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
113 static bool all_controls_dominate(Node* dom, Node* sub) {
114 DomResult dom_result = maybe_all_controls_dominate(dom, sub);
115 return dom_result == DomResult::Dominate;
116 }
117
118 virtual const class TypePtr *adr_type() const; // returns bottom_type of address
119
120 // Shared code for Ideal methods:
121 Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit null.
122
123 // Helper function for adr_type() implementations.
124 static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);
125
126 // Raw access function, to allow copying of adr_type efficiently in
127 // product builds and retain the debug info for debug builds.
128 const TypePtr *raw_adr_type() const {
// The _adr_type field is compiled in only for debug builds; product builds
// carry no copy and answer nullptr (see the comment above this accessor).
129 return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
130 }
131
132 // Return the barrier data of n, if available, or 0 otherwise.
133 static uint8_t barrier_data(const Node* n);
134
135 // Map a load or store opcode to its corresponding store opcode.
136 // (Return -1 if unknown.)
137 virtual int store_Opcode() const { return -1; }
138
139 // What is the type of the value in memory? (T_VOID mean "unspecified".)
140 // The returned type is a property of the value that is loaded/stored and
141 // not the memory that is accessed. For mismatched memory accesses
142 // they might differ. For instance, a value of type 'short' may be stored
143 // into an array of elements of type 'long'.
144 virtual BasicType value_basic_type() const = 0;
// Size in bytes of the value as it sits in memory, derived from the
// value's basic type via type2aelembytes().
145 virtual int memory_size() const {
146 #ifdef ASSERT
// NOTE(review): debug builds pass 'true' as the second argument —
// presumably to relax/enable checking inside type2aelembytes; confirm
// against its definition.
147 return type2aelembytes(value_basic_type(), true);
148 #else
149 return type2aelembytes(value_basic_type());
150 #endif
151 }
542
543 //------------------------------LoadPNode--------------------------------------
544 // Load a pointer from memory (either object or array)
545 class LoadPNode : public LoadNode {
546 public:
// c = control, mem = memory slice, adr = address, at = address type,
// t = type of the loaded pointer value, mo = required memory ordering.
547 LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
548 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
549 virtual int Opcode() const;
// The result lives in a pointer register; the matching store is StoreP.
550 virtual uint ideal_reg() const { return Op_RegP; }
551 virtual int store_Opcode() const { return Op_StoreP; }
// The in-memory value is a machine pointer.
552 virtual BasicType value_basic_type() const { return T_ADDRESS; }
553 };
554
555
556 //------------------------------LoadNNode--------------------------------------
557 // Load a narrow oop from memory (either object or array)
558 class LoadNNode : public LoadNode {
559 public:
// c = control, mem = memory slice, adr = address, at = address type,
// t = type of the loaded narrow oop, mo = required memory ordering.
560 LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
561 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
562 virtual int Opcode() const;
// Narrow oops live in narrow-oop registers; the matching store is StoreN.
563 virtual uint ideal_reg() const { return Op_RegN; }
564 virtual int store_Opcode() const { return Op_StoreN; }
565 virtual BasicType value_basic_type() const { return T_NARROWOOP; }
566 };
567
568 //------------------------------LoadKlassNode----------------------------------
569 // Load a Klass from an object
570 class LoadKlassNode : public LoadPNode {
571 private:
572 LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
573 : LoadPNode(nullptr, mem, adr, at, tk, mo) {}
574
575 public:
576 virtual int Opcode() const;
577 virtual const Type* Value(PhaseGVN* phase) const;
578 virtual Node* Identity(PhaseGVN* phase);
579
580 // Polymorphic factory method:
581 static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
589 // middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
590 // implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
591 // extract the actual class pointer. C2's type system is agnostic on whether the
592 // input address directly points into the class pointer.
593 class LoadNKlassNode : public LoadNNode {
594 private:
// Construction is private: instances are created only through the
// LoadKlassNode::make() factory declared as a friend here.
595 friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
// No control input is taken (passed as nullptr to LoadNNode).
596 LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
597 : LoadNNode(nullptr, mem, adr, at, tk, mo) {}
598
599 public:
600 virtual int Opcode() const;
601 virtual uint ideal_reg() const { return Op_RegN; }
602 virtual int store_Opcode() const { return Op_StoreNKlass; }
// The in-memory value is a narrow (compressed) klass pointer.
603 virtual BasicType value_basic_type() const { return T_NARROWKLASS; }
604
// Klass loads get their own Value/Identity: the loaded type can often be
// refined or the load folded away entirely (see the .cpp implementations).
605 virtual const Type* Value(PhaseGVN* phase) const;
606 virtual Node* Identity(PhaseGVN* phase);
607 };
608
609
610 //------------------------------StoreNode--------------------------------------
611 // Store value; requires Store, Address and Value
612 class StoreNode : public MemNode {
613 private:
614 // On platforms with weak memory ordering (e.g., PPC) we distinguish
615 // stores that can be reordered, and such requiring release semantics to
616 // adhere to the Java specification. The required behaviour is stored in
617 // this field.
618 const MemOrd _mo;
619 // Needed for proper cloning.
620 virtual uint size_of() const { return sizeof(*this); }
621 protected:
622 virtual bool cmp( const Node &n ) const;
623
624 Node *Ideal_masked_input (PhaseGVN *phase, uint mask);
625 Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);
626
627 public:
628 // We must ensure that stores of object references will be visible
629 // only after the object's initialization. So the callers of this
745 && StoreNode::cmp(n);
746 }
747 virtual uint size_of() const { return sizeof(*this); }
748 const bool _require_atomic_access; // is piecewise store forbidden?
749
750 public:
751 StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
752 : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
753 virtual int Opcode() const;
754 virtual BasicType value_basic_type() const { return T_LONG; }
755 bool require_atomic_access() const { return _require_atomic_access; }
756
757 #ifndef PRODUCT
758 virtual void dump_spec(outputStream *st) const {
759 StoreNode::dump_spec(st);
760 if (_require_atomic_access) st->print(" Atomic!");
761 }
762 #endif
763 };
764
765 //------------------------------StoreFNode-------------------------------------
766 // Store float to memory
767 class StoreFNode : public StoreNode {
768 public:
// c = control, mem = memory slice, adr = address, at = address type,
// val = float value to store, mo = required memory ordering.
769 StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
770 : StoreNode(c, mem, adr, at, val, mo) {}
771 virtual int Opcode() const;
772 virtual BasicType value_basic_type() const { return T_FLOAT; }
773 };
774
775 //------------------------------StoreDNode-------------------------------------
776 // Store double to memory
777 class StoreDNode : public StoreNode {
778 virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
779 virtual bool cmp( const Node &n ) const {
780 return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
781 && StoreNode::cmp(n);
782 }
783 virtual uint size_of() const { return sizeof(*this); }
784 const bool _require_atomic_access; // is piecewise store forbidden?
1099 };
1100
1101 //------------------------------GetAndSetPNode---------------------------
// Atomic exchange of a pointer value at 'adr': stores 'val' and produces
// the previous contents.
1102 class GetAndSetPNode : public LoadStoreNode {
1103 public:
// NOTE(review): the trailing '4' is the required-input count forwarded to
// LoadStoreNode — confirm against LoadStoreNode's constructor.
1104 GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1105 virtual int Opcode() const;
1106 };
1107
1108 //------------------------------GetAndSetNNode---------------------------
// Atomic exchange of a narrow oop value at 'adr': stores 'val' and
// produces the previous contents.
1109 class GetAndSetNNode : public LoadStoreNode {
1110 public:
// NOTE(review): the trailing '4' is the required-input count forwarded to
// LoadStoreNode — confirm against LoadStoreNode's constructor.
1111 GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1112 virtual int Opcode() const;
1113 };
1114
1115 //------------------------------ClearArray-------------------------------------
1116 class ClearArrayNode: public Node {
1117 private:
// Mixed into hash()/cmp() below, so GVN keeps "large" and "small" clears
// distinct even when their inputs match.
1118 bool _is_large;
// Out-of-line helper that forms the destination address node used by the
// clear_memory() factories.
1119 static Node* make_address(Node* dest, Node* offset, bool raw_base, PhaseGVN* phase);
1120 public:
// ctrl/arymem/word_cnt/base become this node's inputs, in that order.
1121 ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
1122 : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
1123 init_class_id(Class_ClearArray);
1124 }
1125 virtual int Opcode() const;
// Produces only a memory value, no data result.
1126 virtual const Type *bottom_type() const { return Type::MEMORY; }
1127 // ClearArray modifies array elements, and so affects only the
1128 // array memory addressed by the bottom_type of its base address.
1129 virtual const class TypePtr *adr_type() const;
1130 virtual Node* Identity(PhaseGVN* phase);
1131 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1132 virtual uint match_edge(uint idx) const;
1133 bool is_large() const { return _is_large; }
// size_of/hash/cmp account for the extra _is_large field so cloning and
// value-numbering treat it as part of this node's identity.
1134 virtual uint size_of() const { return sizeof(ClearArrayNode); }
1135 virtual uint hash() const { return Node::hash() + _is_large; }
1136 virtual bool cmp(const Node& n) const {
1137 return Node::cmp(n) && _is_large == ((ClearArrayNode&)n).is_large();
1138 }
1139
1140 // Clear the given area of an object or array.
1141 // The start offset must always be aligned mod BytesPerInt.
1142 // The end offset must always be aligned mod BytesPerLong.
1143 // Return the new memory.
// Three overloads: both offsets constant, constant start with variable end,
// and both offsets variable.
1144 static Node* clear_memory(Node* control, Node* mem, Node* dest,
1145 intptr_t start_offset,
1146 intptr_t end_offset,
1147 bool raw_base,
1148 PhaseGVN* phase);
1149 static Node* clear_memory(Node* control, Node* mem, Node* dest,
1150 intptr_t start_offset,
1151 Node* end_offset,
1152 bool raw_base,
1153 PhaseGVN* phase);
1154 static Node* clear_memory(Node* control, Node* mem, Node* dest,
1155 Node* start_offset,
1156 Node* end_offset,
1157 bool raw_base,
1158 PhaseGVN* phase);
1159 // Return allocation input memory edge if it is different instance
1160 // or itself if it is the one we are looking for.
1161 static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
1162
1163 private:
// Conservatively report false so this node is never treated as depending
// only on a dominating test (see Node::depends_only_on_test).
1164 virtual bool depends_only_on_test_impl() const { return false; }
1165 };
1166
1167 //------------------------------MemBar-----------------------------------------
1168 // There are different flavors of Memory Barriers to match the Java Memory
1169 // Model. Monitor-enter and volatile-load act as Acquires: no following ref
1170 // can be moved to before them. We insert a MemBar-Acquire after a FastLock or
1171 // volatile-load. Monitor-exit and volatile-store act as Release: no
1172 // preceding ref can be moved to after them. We insert a MemBar-Release
1173 // before a FastUnlock or volatile-store. All volatiles need to be
1174 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
1190 TrailingLoadStore,
1191 LeadingLoadStore,
1192 TrailingExpandedArrayCopy
1193 } _kind;
1194
1195 #ifdef ASSERT
1196 uint _pair_idx;
1197 #endif
1198
1199 public:
1200 enum {
1201 Precedent = TypeFunc::Parms // optional edge to force precedence
1202 };
1203 MemBarNode(Compile* C, int alias_idx, Node* precedent);
1204 virtual int Opcode() const = 0;
1205 virtual const class TypePtr *adr_type() const { return _adr_type; }
1206 virtual const Type* Value(PhaseGVN* phase) const;
1207 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1208 virtual uint match_edge(uint idx) const { return 0; }
1209 virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1210 virtual Node *match( const ProjNode *proj, const Matcher *m );
1211 // Factory method. Builds a wide or narrow membar.
1212 // Optional 'precedent' becomes an extra edge if not null.
1213 static MemBarNode* make(Compile* C, int opcode,
1214 int alias_idx = Compile::AliasIdxBot,
1215 Node* precedent = nullptr);
1216
1217 MemBarNode* trailing_membar() const;
1218 MemBarNode* leading_membar() const;
1219
1220 void set_trailing_load() { _kind = TrailingLoad; }
1221 bool trailing_load() const { return _kind == TrailingLoad; }
1222 bool trailing_store() const { return _kind == TrailingStore; }
1223 bool leading_store() const { return _kind == LeadingStore; }
1224 bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1225 bool leading_load_store() const { return _kind == LeadingLoadStore; }
1226 bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1227 bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1228 bool standalone() const { return _kind == Standalone; }
1229 void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
1230 bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }
|
112 static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
113 static bool all_controls_dominate(Node* dom, Node* sub) {
114 DomResult dom_result = maybe_all_controls_dominate(dom, sub);
115 return dom_result == DomResult::Dominate;
116 }
117
118 virtual const class TypePtr *adr_type() const; // returns bottom_type of address
119
120 // Shared code for Ideal methods:
121 Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit null.
122
123 // Helper function for adr_type() implementations.
124 static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);
125
126 // Raw access function, to allow copying of adr_type efficiently in
127 // product builds and retain the debug info for debug builds.
128 const TypePtr *raw_adr_type() const {
// The _adr_type field is compiled in only for debug builds; product builds
// carry no copy and answer nullptr (see the comment above this accessor).
129 return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
130 }
131
132 #ifdef ASSERT
// Debug-only mutator for _adr_type; the field itself exists only in debug
// builds (see raw_adr_type() above), so no product-build variant is needed.
133 void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
134 #endif
135
136 // Return the barrier data of n, if available, or 0 otherwise.
137 static uint8_t barrier_data(const Node* n);
138
139 // Map a load or store opcode to its corresponding store opcode.
140 // (Return -1 if unknown.)
141 virtual int store_Opcode() const { return -1; }
142
143 // What is the type of the value in memory? (T_VOID mean "unspecified".)
144 // The returned type is a property of the value that is loaded/stored and
145 // not the memory that is accessed. For mismatched memory accesses
146 // they might differ. For instance, a value of type 'short' may be stored
147 // into an array of elements of type 'long'.
148 virtual BasicType value_basic_type() const = 0;
// Size in bytes of the value as it sits in memory, derived from the
// value's basic type via type2aelembytes().
149 virtual int memory_size() const {
150 #ifdef ASSERT
// NOTE(review): debug builds pass 'true' as the second argument —
// presumably to relax/enable checking inside type2aelembytes; confirm
// against its definition.
151 return type2aelembytes(value_basic_type(), true);
152 #else
153 return type2aelembytes(value_basic_type());
154 #endif
155 }
546
547 //------------------------------LoadPNode--------------------------------------
548 // Load a pointer from memory (either object or array)
549 class LoadPNode : public LoadNode {
550 public:
// c = control, mem = memory slice, adr = address, at = address type,
// t = type of the loaded pointer value, mo = required memory ordering.
551 LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
552 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
553 virtual int Opcode() const;
// The result lives in a pointer register; the matching store is StoreP.
554 virtual uint ideal_reg() const { return Op_RegP; }
555 virtual int store_Opcode() const { return Op_StoreP; }
// The in-memory value is a machine pointer.
556 virtual BasicType value_basic_type() const { return T_ADDRESS; }
557 };
558
559
560 //------------------------------LoadNNode--------------------------------------
561 // Load a narrow oop from memory (either object or array)
562 class LoadNNode : public LoadNode {
563 public:
// c = control, mem = memory slice, adr = address, at = address type,
// t = type of the loaded narrow oop, mo = required memory ordering.
564 LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
565 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
// Narrow-oop loads get their own Ideal transform (see the .cpp).
566 virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
567 virtual int Opcode() const;
// Narrow oops live in narrow-oop registers; the matching store is StoreN.
568 virtual uint ideal_reg() const { return Op_RegN; }
569 virtual int store_Opcode() const { return Op_StoreN; }
570 virtual BasicType value_basic_type() const { return T_NARROWOOP; }
571 };
572
573 //------------------------------LoadKlassNode----------------------------------
574 // Load a Klass from an object
575 class LoadKlassNode : public LoadPNode {
576 private:
577 LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
578 : LoadPNode(nullptr, mem, adr, at, tk, mo) {}
579
580 public:
581 virtual int Opcode() const;
582 virtual const Type* Value(PhaseGVN* phase) const;
583 virtual Node* Identity(PhaseGVN* phase);
584
585 // Polymorphic factory method:
586 static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
594 // middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
595 // implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
596 // extract the actual class pointer. C2's type system is agnostic on whether the
597 // input address directly points into the class pointer.
598 class LoadNKlassNode : public LoadNNode {
599 private:
// Construction is private: instances are created only through the
// LoadKlassNode::make() factory declared as a friend here.
600 friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
// No control input is taken (passed as nullptr to LoadNNode).
601 LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
602 : LoadNNode(nullptr, mem, adr, at, tk, mo) {}
603
604 public:
605 virtual int Opcode() const;
606 virtual uint ideal_reg() const { return Op_RegN; }
607 virtual int store_Opcode() const { return Op_StoreNKlass; }
// The in-memory value is a narrow (compressed) klass pointer.
608 virtual BasicType value_basic_type() const { return T_NARROWKLASS; }
609
// Klass loads get their own Value/Identity: the loaded type can often be
// refined or the load folded away entirely (see the .cpp implementations).
610 virtual const Type* Value(PhaseGVN* phase) const;
611 virtual Node* Identity(PhaseGVN* phase);
612 };
613
614 //------------------------------StoreNode--------------------------------------
615 // Store value; requires Store, Address and Value
616 class StoreNode : public MemNode {
617 private:
618 // On platforms with weak memory ordering (e.g., PPC) we distinguish
619 // stores that can be reordered, and such requiring release semantics to
620 // adhere to the Java specification. The required behaviour is stored in
621 // this field.
622 const MemOrd _mo;
623 // Needed for proper cloning.
624 virtual uint size_of() const { return sizeof(*this); }
625 protected:
626 virtual bool cmp( const Node &n ) const;
627
628 Node *Ideal_masked_input (PhaseGVN *phase, uint mask);
629 Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);
630
631 public:
632 // We must ensure that stores of object references will be visible
633 // only after the object's initialization. So the callers of this
749 && StoreNode::cmp(n);
750 }
751 virtual uint size_of() const { return sizeof(*this); }
752 const bool _require_atomic_access; // is piecewise store forbidden?
753
754 public:
755 StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
756 : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
757 virtual int Opcode() const;
758 virtual BasicType value_basic_type() const { return T_LONG; }
759 bool require_atomic_access() const { return _require_atomic_access; }
760
761 #ifndef PRODUCT
762 virtual void dump_spec(outputStream *st) const {
763 StoreNode::dump_spec(st);
764 if (_require_atomic_access) st->print(" Atomic!");
765 }
766 #endif
767 };
768
769 // Special StoreL for flat stores that emits GC barriers for field at 'oop_off' in the backend
770 class StoreLSpecialNode : public StoreNode {
771
772 public:
// Like StoreLNode, but flags the access as mismatched and, when 'oop_off'
// is non-null, appends it as an extra input after ValueIn.
773 StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
774 : StoreNode(c, mem, adr, at, val, mo) {
775 set_mismatched_access();
776 if (oop_off != nullptr) {
777 add_req(oop_off);
778 }
779 }
780 virtual int Opcode() const;
781 virtual BasicType value_basic_type() const { return T_LONG; }
782
// Address, value, and the extra edge at ValueIn+1 (presumably the optional
// 'oop_off' input added above — confirm in the matcher) participate in matching.
783 virtual uint match_edge(uint idx) const { return idx == MemNode::Address ||
784 idx == MemNode::ValueIn ||
785 idx == MemNode::ValueIn + 1; }
786 };
787
788 //------------------------------StoreFNode-------------------------------------
789 // Store float to memory
790 class StoreFNode : public StoreNode {
791 public:
// c = control, mem = memory slice, adr = address, at = address type,
// val = float value to store, mo = required memory ordering.
792 StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
793 : StoreNode(c, mem, adr, at, val, mo) {}
794 virtual int Opcode() const;
795 virtual BasicType value_basic_type() const { return T_FLOAT; }
796 };
797
798 //------------------------------StoreDNode-------------------------------------
799 // Store double to memory
800 class StoreDNode : public StoreNode {
801 virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
802 virtual bool cmp( const Node &n ) const {
803 return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
804 && StoreNode::cmp(n);
805 }
806 virtual uint size_of() const { return sizeof(*this); }
807 const bool _require_atomic_access; // is piecewise store forbidden?
1122 };
1123
1124 //------------------------------GetAndSetPNode---------------------------
// Atomic exchange of a pointer value at 'adr': stores 'val' and produces
// the previous contents.
1125 class GetAndSetPNode : public LoadStoreNode {
1126 public:
// NOTE(review): the trailing '4' is the required-input count forwarded to
// LoadStoreNode — confirm against LoadStoreNode's constructor.
1127 GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1128 virtual int Opcode() const;
1129 };
1130
1131 //------------------------------GetAndSetNNode---------------------------
// Atomic exchange of a narrow oop value at 'adr': stores 'val' and
// produces the previous contents.
1132 class GetAndSetNNode : public LoadStoreNode {
1133 public:
// NOTE(review): the trailing '4' is the required-input count forwarded to
// LoadStoreNode — confirm against LoadStoreNode's constructor.
1134 GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1135 virtual int Opcode() const;
1136 };
1137
1138 //------------------------------ClearArray-------------------------------------
1139 class ClearArrayNode: public Node {
1140 private:
// Mixed into hash()/cmp() below, so GVN keeps "large" and "small" clears
// distinct even when their inputs match.
1141 bool _is_large;
// Derived in the constructor from the type of the 'val' input; exposed via
// word_copy_only() below.
1142 bool _word_copy_only;
// Out-of-line helper that forms the destination address node used by the
// clear_memory() factories.
1143 static Node* make_address(Node* dest, Node* offset, bool raw_base, PhaseGVN* phase);
1144 public:
// ctrl/arymem/word_cnt/base/val become this node's inputs, in that order.
// _word_copy_only is set when 'val' has a long type that is either not a
// constant or is a non-zero constant.
1145 ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
1146 : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
1147 _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
1148 init_class_id(Class_ClearArray);
1149 }
1150 virtual int Opcode() const;
// Produces only a memory value, no data result.
1151 virtual const Type *bottom_type() const { return Type::MEMORY; }
1152 // ClearArray modifies array elements, and so affects only the
1153 // array memory addressed by the bottom_type of its base address.
1154 virtual const class TypePtr *adr_type() const;
1155 virtual Node* Identity(PhaseGVN* phase);
1156 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1157 virtual uint match_edge(uint idx) const;
1158 bool is_large() const { return _is_large; }
1159 bool word_copy_only() const { return _word_copy_only; }
// size_of/hash/cmp account for _is_large so cloning and value-numbering
// treat it as part of this node's identity. _word_copy_only is not mixed in
// here — it is computed purely from the 'val' input (NOTE(review): confirm
// Node::hash/cmp already cover the inputs, making that redundant).
1160 virtual uint size_of() const { return sizeof(ClearArrayNode); }
1161 virtual uint hash() const { return Node::hash() + _is_large; }
1162 virtual bool cmp(const Node& n) const {
1163 return Node::cmp(n) && _is_large == ((ClearArrayNode&)n).is_large();
1164 }
1165
1166 // Clear the given area of an object or array.
1167 // The start offset must always be aligned mod BytesPerInt.
1168 // The end offset must always be aligned mod BytesPerLong.
1169 // Return the new memory.
// Three overloads: both offsets constant, constant start with variable end,
// and both offsets variable.
1170 static Node* clear_memory(Node* control, Node* mem, Node* dest,
1171 Node* val,
1172 Node* raw_val,
1173 intptr_t start_offset,
1174 intptr_t end_offset,
1175 bool raw_base,
1176 PhaseGVN* phase);
1177 static Node* clear_memory(Node* control, Node* mem, Node* dest,
1178 Node* val,
1179 Node* raw_val,
1180 intptr_t start_offset,
1181 Node* end_offset,
1182 bool raw_base,
1183 PhaseGVN* phase);
1184 static Node* clear_memory(Node* control, Node* mem, Node* dest,
1185 Node* raw_val,
1186 Node* start_offset,
1187 Node* end_offset,
1188 bool raw_base,
1189 PhaseGVN* phase);
1190 // Return allocation input memory edge if it is different instance
1191 // or itself if it is the one we are looking for.
1192 static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
1193
1194 private:
// Conservatively report false so this node is never treated as depending
// only on a dominating test (see Node::depends_only_on_test).
1195 virtual bool depends_only_on_test_impl() const { return false; }
1196 };
1197
1198 //------------------------------MemBar-----------------------------------------
1199 // There are different flavors of Memory Barriers to match the Java Memory
1200 // Model. Monitor-enter and volatile-load act as Acquires: no following ref
1201 // can be moved to before them. We insert a MemBar-Acquire after a FastLock or
1202 // volatile-load. Monitor-exit and volatile-store act as Release: no
1203 // preceding ref can be moved to after them. We insert a MemBar-Release
1204 // before a FastUnlock or volatile-store. All volatiles need to be
1205 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
1221 TrailingLoadStore,
1222 LeadingLoadStore,
1223 TrailingExpandedArrayCopy
1224 } _kind;
1225
1226 #ifdef ASSERT
1227 uint _pair_idx;
1228 #endif
1229
1230 public:
1231 enum {
1232 Precedent = TypeFunc::Parms // optional edge to force precedence
1233 };
1234 MemBarNode(Compile* C, int alias_idx, Node* precedent);
1235 virtual int Opcode() const = 0;
1236 virtual const class TypePtr *adr_type() const { return _adr_type; }
1237 virtual const Type* Value(PhaseGVN* phase) const;
1238 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1239 virtual uint match_edge(uint idx) const { return 0; }
1240 virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1241 virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
1242 // Factory method. Builds a wide or narrow membar.
1243 // Optional 'precedent' becomes an extra edge if not null.
1244 static MemBarNode* make(Compile* C, int opcode,
1245 int alias_idx = Compile::AliasIdxBot,
1246 Node* precedent = nullptr);
1247
1248 MemBarNode* trailing_membar() const;
1249 MemBarNode* leading_membar() const;
1250
1251 void set_trailing_load() { _kind = TrailingLoad; }
1252 bool trailing_load() const { return _kind == TrailingLoad; }
1253 bool trailing_store() const { return _kind == TrailingStore; }
1254 bool leading_store() const { return _kind == LeadingStore; }
1255 bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1256 bool leading_load_store() const { return _kind == LeadingLoadStore; }
1257 bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1258 bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1259 bool standalone() const { return _kind == Standalone; }
1260 void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
1261 bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }
|