< prev index next >

src/hotspot/share/opto/memnode.hpp

Print this page

  // Does 'dom' dominate every control input of 'sub'?  The answer may be
  // indeterminate (see DomResult).
  static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
  // Boolean convenience wrapper: true only for a definite DomResult::Dominate.
  static bool all_controls_dominate(Node* dom, Node* sub) {
    DomResult dom_result = maybe_all_controls_dominate(dom, sub);
    return dom_result == DomResult::Dominate;
  }

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
    // _adr_type is only recorded in debug builds; product builds see nullptr.
    return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
  }

  // Return the barrier data of n, if available, or 0 otherwise.
  static uint8_t barrier_data(const Node* n);

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID mean "unspecified".)
  // The returned type is a property of the value that is loaded/stored and
  // not the memory that is accessed. For mismatched memory accesses
  // they might differ. For instance, a value of type 'short' may be stored
  // into an array of elements of type 'long'.
  virtual BasicType value_basic_type() const = 0;
  // Size in bytes of the accessed value, derived from value_basic_type().
  virtual int memory_size() const {
#ifdef ASSERT
    // Debug builds pass the extra flag to type2aelembytes -- presumably to
    // check/tolerate unexpected types; verify against type2aelembytes().
    return type2aelembytes(value_basic_type(), true);
#else
    return type2aelembytes(value_basic_type());
#endif
  }

 551 
//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }      // result lives in a pointer register
  virtual int store_Opcode() const { return Op_StoreP; }  // matching store opcode
  virtual BasicType value_basic_type() const { return T_ADDRESS; }
};
 563 
 564 
//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }      // narrow-oop register class
  virtual int store_Opcode() const { return Op_StoreN; }  // matching store opcode
  virtual BasicType value_basic_type() const { return T_NARROWOOP; }
};
 576 
 577 //------------------------------LoadKlassNode----------------------------------
 578 // Load a Klass from an object
 579 class LoadKlassNode : public LoadPNode {
 580 private:
 581   LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
 582     : LoadPNode(nullptr, mem, adr, at, tk, mo) {}
 583 
 584 public:
 585   virtual int Opcode() const;
 586   virtual const Type* Value(PhaseGVN* phase) const;
 587   virtual Node* Identity(PhaseGVN* phase);
 588 
 589   // Polymorphic factory method:
 590   static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,

 598 // middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
 599 // implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
 600 // extract the actual class pointer. C2's type system is agnostic on whether the
 601 // input address directly points into the class pointer.
class LoadNKlassNode : public LoadNNode {
private:
  // Construction is restricted to LoadKlassNode::make (private ctor + friend).
  friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
  LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
    : LoadNNode(nullptr, mem, adr, at, tk, mo) {}  // no control edge

public:
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }           // narrow-klass register class
  virtual int store_Opcode() const { return Op_StoreNKlass; }  // matching store opcode
  virtual BasicType value_basic_type() const { return T_NARROWKLASS; }

  // Type refinement and identity folding for klass loads.
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
};
 617 
 618 
//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // stores that can be reordered, and such requiring release semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  // GVN equality check; defined out of line (compares store-specific state).
  virtual bool cmp( const Node &n ) const;

  // Ideal-transform helpers for subclasses: presumably strip a redundant
  // mask/sign-extension of the stored value -- confirm in memnode.cpp.
  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  // 'require_atomic_access' forces the 64-bit store to be performed as a
  // single indivisible memory operation (no piecewise stores).
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  // Debug printing: tag atomic long stores in node dumps.
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
 773 



















//------------------------------StoreFNode-------------------------------------
// Store float to memory
// (Unlike StoreL/StoreD there is no require_atomic_access flag.)
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_FLOAT; }
};
 783 
//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  // _require_atomic_access participates in GVN hash/equality.
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

1108 };
1109 
//------------------------------GetAndSetPNode---------------------------
// Atomic get-and-set (exchange) of a pointer value.  The trailing '4' is
// the extra argument to the LoadStoreNode constructor -- presumably the
// required-input count; verify against LoadStoreNode.
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};
1116 
//------------------------------GetAndSetNNode---------------------------
// Atomic get-and-set (exchange) of a narrow-oop value; narrow-oop
// counterpart of GetAndSetPNode.
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};
1123 
//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;   // cleared region known to be large (selects expansion strategy)
  static Node* make_address(Node* dest, Node* offset, bool raw_base, PhaseGVN* phase);
public:
  // Inputs: control, array memory, word count, base address.
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
    : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }
  virtual uint size_of() const { return sizeof(ClearArrayNode); }
  // GVN: _is_large is the only node-local state, so it is the only extra
  // contribution to hash/equality beyond the edges.
  virtual uint hash() const { return Node::hash() + _is_large; }
  virtual bool cmp(const Node& n) const {
    return Node::cmp(n) && _is_large == ((ClearArrayNode&)n).is_large();
  }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  // (Overloads differ in whether offsets are compile-time constants or nodes.)
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            bool raw_base,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            bool raw_base,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            bool raw_base,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseValues* phase);

private:
  // The control edge is not merely a guarding test; this node must not float.
  virtual bool depends_only_on_test_impl() const { return false; }
};
1175 
1176 //------------------------------MemBar-----------------------------------------
1177 // There are different flavors of Memory Barriers to match the Java Memory
1178 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1179 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1180 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1181 // preceding ref can be moved to after them.  We insert a MemBar-Release
1182 // before a FastUnlock or volatile-store.  All volatiles need to be
1183 // serialized, so we follow all volatile-stores with a MemBar-Volatile to

    TrailingLoadStore,
    LeadingLoadStore,
    TrailingExpandedArrayCopy
  } _kind;   // role of this barrier relative to the access it fences

#ifdef ASSERT
  uint _pair_idx;   // debug-only id; presumably links leading/trailing pairs -- confirm
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }  // no inputs participate in matching
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = nullptr);

  // Navigation between a barrier and its paired partner.
  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  // Accessors/mutators for the barrier's role (_kind).
  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }
  void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
  bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }

  // Does 'dom' dominate every control input of 'sub'?  The answer may be
  // indeterminate (see DomResult).
  static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
  // Boolean convenience wrapper: true only for a definite DomResult::Dominate.
  static bool all_controls_dominate(Node* dom, Node* sub) {
    DomResult dom_result = maybe_all_controls_dominate(dom, sub);
    return dom_result == DomResult::Dominate;
  }

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
    // _adr_type is only recorded in debug builds; product builds see nullptr.
    return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
  }

#ifdef ASSERT
  // Debug-only setter for the recorded address type (see raw_adr_type()).
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
#endif

  // Return the barrier data of n, if available, or 0 otherwise.
  static uint8_t barrier_data(const Node* n);

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID mean "unspecified".)
  // The returned type is a property of the value that is loaded/stored and
  // not the memory that is accessed. For mismatched memory accesses
  // they might differ. For instance, a value of type 'short' may be stored
  // into an array of elements of type 'long'.
  virtual BasicType value_basic_type() const = 0;
  // Size in bytes of the accessed value, derived from value_basic_type().
  virtual int memory_size() const {
#ifdef ASSERT
    // Debug builds pass the extra flag to type2aelembytes -- presumably to
    // check/tolerate unexpected types; verify against type2aelembytes().
    return type2aelembytes(value_basic_type(), true);
#else
    return type2aelembytes(value_basic_type());
#endif
  }

 555 
//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }      // result lives in a pointer register
  virtual int store_Opcode() const { return Op_StoreP; }  // matching store opcode
  virtual BasicType value_basic_type() const { return T_ADDRESS; }
};
 567 
 568 
//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  // Narrow-oop-specific idealization (defined out of line).
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }      // narrow-oop register class
  virtual int store_Opcode() const { return Op_StoreN; }  // matching store opcode
  virtual BasicType value_basic_type() const { return T_NARROWOOP; }
};
 581 
 582 //------------------------------LoadKlassNode----------------------------------
 583 // Load a Klass from an object
 584 class LoadKlassNode : public LoadPNode {
 585 private:
 586   LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
 587     : LoadPNode(nullptr, mem, adr, at, tk, mo) {}
 588 
 589 public:
 590   virtual int Opcode() const;
 591   virtual const Type* Value(PhaseGVN* phase) const;
 592   virtual Node* Identity(PhaseGVN* phase);
 593 
 594   // Polymorphic factory method:
 595   static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,

 603 // middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
 604 // implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
 605 // extract the actual class pointer. C2's type system is agnostic on whether the
 606 // input address directly points into the class pointer.
class LoadNKlassNode : public LoadNNode {
private:
  // Construction is restricted to LoadKlassNode::make (private ctor + friend).
  friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
  LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
    : LoadNNode(nullptr, mem, adr, at, tk, mo) {}  // no control edge

public:
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }           // narrow-klass register class
  virtual int store_Opcode() const { return Op_StoreNKlass; }  // matching store opcode
  virtual BasicType value_basic_type() const { return T_NARROWKLASS; }

  // Type refinement and identity folding for klass loads.
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
};
 622 

//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC) we distinguish
  // stores that can be reordered, and such requiring release semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  // GVN equality check; defined out of line (compares store-specific state).
  virtual bool cmp( const Node &n ) const;

  // Ideal-transform helpers for subclasses: presumably strip a redundant
  // mask/sign-extension of the stored value -- confirm in memnode.cpp.
  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this

      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  // 'require_atomic_access' forces the 64-bit store to be performed as a
  // single indivisible memory operation (no piecewise stores).
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }

#ifndef PRODUCT
  // Debug printing: tag atomic long stores in node dumps.
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
 777 
// Special StoreL for flat stores that emits GC barriers for field at 'oop_off' in the backend
class StoreLSpecialNode : public StoreNode {

public:
  // 'oop_off' is optional; when present it is appended as an extra input
  // (matched as ValueIn + 1 below) identifying the oop field's offset.
  StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {
    // Flat store: the value's type does not match the memory being written.
    set_mismatched_access();
    if (oop_off != nullptr) {
      add_req(oop_off);
    }
  }
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_LONG; }

  // Address, value, and the optional oop-offset edge all participate in matching.
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address ||
                                                   idx == MemNode::ValueIn ||
                                                   idx == MemNode::ValueIn + 1; }
};
 796 
//------------------------------StoreFNode-------------------------------------
// Store float to memory
// (Unlike StoreL/StoreD there is no require_atomic_access flag.)
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType value_basic_type() const { return T_FLOAT; }
};
 806 
//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  // _require_atomic_access participates in GVN hash/equality.
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

1131 };
1132 
//------------------------------GetAndSetPNode---------------------------
// Atomic get-and-set (exchange) of a pointer value.  The trailing '4' is
// the extra argument to the LoadStoreNode constructor -- presumably the
// required-input count; verify against LoadStoreNode.
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};
1139 
//------------------------------GetAndSetNNode---------------------------
// Atomic get-and-set (exchange) of a narrow-oop value; narrow-oop
// counterpart of GetAndSetPNode.
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};
1146 
//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;   // cleared region known to be large (selects expansion strategy)
  // True when 'val' is a long whose type is not the constant zero, i.e. the
  // fill pattern may be non-zero and must be written as whole words.
  bool _word_copy_only;
  static Node* make_address(Node* dest, Node* offset, bool raw_base, PhaseGVN* phase);
public:
  // Inputs: control, array memory, word count, base address, fill value.
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
    : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
      _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }
  bool word_copy_only() const { return _word_copy_only; }
  virtual uint size_of() const { return sizeof(ClearArrayNode); }
  // GVN: only _is_large is hashed/compared explicitly; _word_copy_only is
  // derived from the 'val' input edge, which the base-class edge comparison
  // presumably already covers -- verify against Node::cmp.
  virtual uint hash() const { return Node::hash() + _is_large; }
  virtual bool cmp(const Node& n) const {
    return Node::cmp(n) && _is_large == ((ClearArrayNode&)n).is_large();
  }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  // (Overloads differ in whether offsets are compile-time constants or nodes.)
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            bool raw_base,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            Node* end_offset,
                            bool raw_base,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* raw_val,
                            Node* start_offset,
                            Node* end_offset,
                            bool raw_base,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseValues* phase);

private:
  // The control edge is not merely a guarding test; this node must not float.
  virtual bool depends_only_on_test_impl() const { return false; }
};
1206 
1207 //------------------------------MemBar-----------------------------------------
1208 // There are different flavors of Memory Barriers to match the Java Memory
1209 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1210 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1211 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1212 // preceding ref can be moved to after them.  We insert a MemBar-Release
1213 // before a FastUnlock or volatile-store.  All volatiles need to be
1214 // serialized, so we follow all volatile-stores with a MemBar-Volatile to

    TrailingLoadStore,
    LeadingLoadStore,
    TrailingExpandedArrayCopy
  } _kind;   // role of this barrier relative to the access it fences

#ifdef ASSERT
  uint _pair_idx;   // debug-only id; presumably links leading/trailing pairs -- confirm
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }  // no inputs participate in matching
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = nullptr);

  // Navigation between a barrier and its paired partner.
  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  // Accessors/mutators for the barrier's role (_kind).
  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }
  void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
  bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }
< prev index next >