  static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
  static bool all_controls_dominate(Node* dom, Node* sub) {
    DomResult dom_result = maybe_all_controls_dominate(dom, sub);
    return dom_result == DomResult::Dominate;
  }

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
    return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
  }

#ifdef ASSERT
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
#endif

  // Return the barrier data of n, if available, or 0 otherwise.
  static uint8_t barrier_data(const Node* n);

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory? (T_VOID means "unspecified".)
  // The returned type is a property of the value that is loaded/stored and
  // not the memory that is accessed. For mismatched memory accesses
  // they might differ. For instance, a value of type 'short' may be stored
  // into an array of elements of type 'long'.
  virtual BasicType value_basic_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(value_basic_type(), true);
#else
    return type2aelembytes(value_basic_type());
#endif
  }
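  // For example (illustrative): a load of a Java 'short' reports T_SHORT here and a
  // memory_size() of 2, even when the access is mismatched and the underlying memory
  // has a wider declared element type.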

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType value_basic_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType value_basic_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
private:
  LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
    : LoadPNode(nullptr, mem, adr, at, tk, mo) {}

public:
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object. With compact object headers, the narrow
// Klass is loaded from the mark word of the object rather than from a separate
// klass field; in that case the load
// implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
// extract the actual class pointer. C2's type system is agnostic on whether the
// input address directly points into the class pointer.
class LoadNKlassNode : public LoadNNode {
private:
  friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
  LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
    : LoadNNode(nullptr, mem, adr, at, tk, mo) {}

public:
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType value_basic_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC) we distinguish stores
  // that can be freely reordered from those that require release semantics to
  // adhere to the Java specification. The required behaviour is stored in this
  // field.
  const MemOrd _mo;
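  // Illustrative: a store to a volatile Java field needs release semantics and is
  // created with MemNode::release, while an ordinary field store can use
  // MemNode::unordered.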
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);
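  // These helpers let narrow stores drop redundant narrowing of their value input:
  // e.g. a byte-sized store of (x & 0xFF), or of a value that was sign-extended
  // from a byte, can store x directly since only the low-order bits reach memory
  // (hedged summary; see the implementations for the exact patterns matched).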

public:
  // We must ensure that stores of object references will be visible

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access; // is piecewise store forbidden?
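  // Atomic (single-copy) access is required e.g. for volatile longs on 32-bit
  // platforms, where a plain 64-bit store could otherwise be split into two
  // 32-bit stores.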

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

// Special StoreL for flat stores that emits GC barriers for field at 'oop_off' in the backend
class StoreLSpecialNode : public StoreNode {

public:
  StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {
    set_mismatched_access();
    if (oop_off != nullptr) {
      add_req(oop_off);
    }
  }
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_LONG; }

  virtual uint match_edge(uint idx) const { return idx == MemNode::Address ||
                                                   idx == MemNode::ValueIn ||
                                                   idx == MemNode::ValueIn + 1; }
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access; // is piecewise store forbidden?
};

//------------------------------GetAndSetPNode---------------------------
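// Atomically exchange a pointer value in memory with 'val' and produce the previous
// value (e.g. backing Unsafe getAndSet on object references).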
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
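// Narrow-oop (compressed pointer) variant of GetAndSetPNode.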
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
  bool _word_copy_only;
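  // Presumably set when the fill value may be non-zero, so the clear must be done
  // with word-sized stores of 'val' rather than an arbitrary bulk zeroing (inferred
  // from the constructor below).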
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
    : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
      _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }
  bool word_copy_only() const { return _word_copy_only; }
  virtual uint size_of() const { return sizeof(ClearArrayNode); }
  virtual uint hash() const { return Node::hash() + _is_large; }
  virtual bool cmp(const Node& n) const {
    return Node::cmp(n) && _is_large == ((ClearArrayNode&)n).is_large();
  }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* raw_val,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
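  // Illustrative use: when an object or array allocation is expanded, the part of the
  // new body not covered by captured initializing stores is zeroed (or filled with
  // 'val') through these helpers; see InitializeNode for the typical caller.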
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model. Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them. We insert a MemBar-Acquire after a FastLock or
// volatile-load. Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them. We insert a MemBar-Release
// before a FastUnlock or volatile-store. All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
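// Illustrative placement (roughly, not literal IR): a volatile store is emitted as
//   MemBarRelease -> StoreX(..., MemNode::release) -> MemBarVolatile
// and a volatile load as
//   LoadX(..., MemNode::acquire) -> MemBarAcquire
// so that the ordering constraints described above surround the access.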
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self
  // Memory type this node is serializing. Usually either rawptr or bottom.
  const TypePtr* _adr_type;

  // How is this membar related to a nearby memory access?
  enum {
    Standalone,
    TrailingLoad,
    TrailingStore,
    LeadingStore,
    TrailingLoadStore,
    LeadingLoadStore,
    TrailingExpandedArrayCopy
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = nullptr);

  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }
  void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
  bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }