  // Check whether 'dom' dominates all control inputs of 'sub'. The boolean
  // wrapper below treats any result other than DomResult::Dominate as
  // "does not dominate".
  static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
  static bool all_controls_dominate(Node* dom, Node* sub) {
    DomResult dom_result = maybe_all_controls_dominate(dom, sub);
    return dom_result == DomResult::Dominate;
  }

  virtual const class TypePtr *adr_type() const; // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit null.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and to retain the debug info in debug builds.
  const TypePtr *raw_adr_type() const {
    return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
  }

#ifdef ASSERT
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
#endif
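
  // A minimal sketch of how the DEBUG_ONLY / NOT_DEBUG macros used above
  // behave (simplified; the real definitions live in utilities/macros.hpp):
  //
  //   #ifdef ASSERT
  //     #define DEBUG_ONLY(code) code   // debug builds keep the expression
  //     #define NOT_DEBUG(code)         // ...and drop the product-only one
  //   #else
  //     #define DEBUG_ONLY(code)
  //     #define NOT_DEBUG(code)  code
  //   #endif
  //
  // so raw_adr_type() yields _adr_type in debug builds and nullptr in product.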

  // Return the barrier data of n, if available, or 0 otherwise.
  static uint8_t barrier_data(const Node* n);

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory? (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  // Size in bytes of this memory access, e.g. 8 for a T_LONG access.
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  uint8_t barrier_data() { return _barrier_data; }
  void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }

// ...

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};
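
// A hedged usage sketch of the load-to-store opcode mapping above; the helper
// 'writes_same_width' is hypothetical, not a HotSpot function:
//
//   bool writes_same_width(const LoadNode* ld, const StoreNode* st) {
//     // For a LoadPNode this compares st->Opcode() against Op_StoreP.
//     return ld->store_Opcode() == st->Opcode();
//   }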


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};
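
// Note (assumption, based on how compressed oops are handled in C2): the
// narrow oop loaded here is typically passed through a DecodeN node to
// rebuild the full-width pointer before being used as an address.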

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
private:
  LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
    : LoadPNode(nullptr, mem, adr, at, tk, mo) {}

public:
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  // ...
// implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
// extract the actual class pointer. C2's type system is agnostic on whether the
// input address directly points into the class pointer.
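// (Assumption: under compact object headers the narrow Klass lives in the
// upper bits of the mark word, so the backend recovers it with a shift rather
// than a load from a separate klass field.)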
class LoadNKlassNode : public LoadNNode {
private:
  friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
  LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
    : LoadNNode(nullptr, mem, adr, at, tk, mo) {}

public:
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // stores that can be freely reordered from those that require release
  // semantics (e.g., MemNode::release) to adhere to the Java specification.
  // The required behaviour is stored in this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);

public:
  // We must ensure that stores of object references will be visible
  // ...
           && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access; // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};
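
// A hedged construction sketch for the atomic case above; 'gvn', 'ctl',
// 'mem', 'adr', 'adr_type' and 'val' are assumed to come from the
// surrounding parse/ideal context, not fixed HotSpot names:
//
//   // A volatile Java long must not be torn into two 32-bit stores on
//   // platforms without native 64-bit store atomicity.
//   Node* st = new StoreLNode(ctl, mem, adr, adr_type, val,
//                             MemNode::release, /*require_atomic_access=*/true);
//   st = gvn.transform(st);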

// Special StoreL for flat stores that emits GC barriers for the field at
// 'oop_off' in the backend
class StoreLSpecialNode : public StoreNode {

public:
  StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {
    set_mismatched_access();
    if (oop_off != nullptr) {
      add_req(oop_off);
    }
  }
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }

  // The optional 'oop_off' input lands at edge ValueIn + 1, so it is
  // reported to the matcher as well.
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address ||
                                                   idx == MemNode::ValueIn ||
                                                   idx == MemNode::ValueIn + 1; }
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
           && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access; // is piecewise store forbidden?
  // ...
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};
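
// Note (assumption, based on MemNode's input layout): the trailing '4' passed
// to LoadStoreNode is the count of required inputs, i.e. Control, Memory,
// Address and ValueIn.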

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
  bool _word_copy_only;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
    : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
      _word_copy_only(val->bottom_type()->isa_long() &&
                      (!val->bottom_type()->is_long()->is_con() ||
                       val->bottom_type()->is_long()->get_con() != 0)) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }
  bool word_copy_only() const { return _word_copy_only; }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* raw_val,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return the allocation input memory edge if it is a different instance,
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
};
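
// A hedged usage sketch for initializing a new array body; 'gvn', 'ctl',
// 'mem', 'dest', 'header_size' and 'size_in_bytes' are assumed names from an
// allocation/initialization context, not HotSpot API:
//
//   // Clear from the (BytesPerInt-aligned) end of the header up to the
//   // (BytesPerLong-aligned) end of the object; null val/raw_val is assumed
//   // here to request a zero fill.
//   mem = ClearArrayNode::clear_memory(ctl, mem, dest,
//                                      /*val=*/nullptr, /*raw_val=*/nullptr,
//                                      header_size,    // intptr_t start offset
//                                      size_in_bytes,  // Node* end offset
//                                      &gvn);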

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model. Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them. We insert a MemBar-Acquire after a FastLock or
// volatile-load. Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them. We insert a MemBar-Release
// before a FastUnlock or volatile-store. All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate them from any following volatile-load.
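//
// For example (a sketch derived from the rules above, not the exact shape of
// the emitted graph):
//   volatile store:  MemBar-Release; Store(release); MemBar-Volatile
//   volatile load:   Load(acquire); MemBar-Acquire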
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self
  // ...
    TrailingLoadStore,
    LeadingLoadStore,
    TrailingPartialArrayCopy
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  // Factory method. Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = nullptr);
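
  // A hedged example of the factory above (Op_MemBarVolatile is a real membar
  // opcode; 'C' and 'alias_idx' are assumed context):
  //
  //   MemBarNode* wide   = MemBarNode::make(C, Op_MemBarVolatile);
  //   MemBarNode* narrow = MemBarNode::make(C, Op_MemBarVolatile, alias_idx);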

  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }
  void set_trailing_partial_array_copy() { _kind = TrailingPartialArrayCopy; }
  bool trailing_partial_array_copy() const { return _kind == TrailingPartialArrayCopy; }
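
  // Note (assumption, from the kind names and the debug-only _pair_idx field):
  // the leading/trailing kinds mark a barrier as one half of a matched pair
  // around a memory access, and trailing_membar()/leading_membar() navigate
  // from one half of the pair to the other.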