src/hotspot/share/opto/memnode.hpp

 109   static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
 110   static bool all_controls_dominate(Node* dom, Node* sub) {
 111     DomResult dom_result = maybe_all_controls_dominate(dom, sub);
 112     return dom_result == DomResult::Dominate;
 113   }
 114 
 115   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
 116 
 117   // Shared code for Ideal methods:
 118   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.
 119 
 120   // Helper function for adr_type() implementations.
 121   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);
 122 
 123   // Raw access function, to allow copying of adr_type efficiently in
 124   // product builds and retain the debug info for debug builds.
 125   const TypePtr *raw_adr_type() const {
 126     return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
 127   }
 128 
 129   // Return the barrier data of n, if available, or 0 otherwise.
 130   static uint8_t barrier_data(const Node* n);
 131 
 132   // Map a load or store opcode to its corresponding store opcode.
 133   // (Return -1 if unknown.)
 134   virtual int store_Opcode() const { return -1; }
 135 
 136   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 137   // The returned type is a property of the value that is loaded/stored and
 138   // not the memory that is accessed. For mismatched memory accesses
 139   // they might differ. For instance, a value of type 'short' may be stored
 140   // into an array of elements of type 'long'.
 141   virtual BasicType value_basic_type() const = 0;
 142   virtual int memory_size() const {
 143 #ifdef ASSERT
 144     return type2aelembytes(value_basic_type(), true);
 145 #else
 146     return type2aelembytes(value_basic_type());
 147 #endif
 148   }

 500 
 501 //------------------------------LoadPNode--------------------------------------
 502 // Load a pointer from memory (either object or array)
 503 class LoadPNode : public LoadNode {
 504 public:
 505   LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
 506     : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
 507   virtual int Opcode() const;
 508   virtual uint ideal_reg() const { return Op_RegP; }
 509   virtual int store_Opcode() const { return Op_StoreP; }
 510   virtual BasicType value_basic_type() const { return T_ADDRESS; }
 511 };
 512 
 513 
 514 //------------------------------LoadNNode--------------------------------------
 515 // Load a narrow oop from memory (either object or array)
 516 class LoadNNode : public LoadNode {
 517 public:
 518   LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
 519     : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
 520   virtual int Opcode() const;
 521   virtual uint ideal_reg() const { return Op_RegN; }
 522   virtual int store_Opcode() const { return Op_StoreN; }
 523   virtual BasicType value_basic_type() const { return T_NARROWOOP; }
 524 };
 525 
 526 //------------------------------LoadKlassNode----------------------------------
 527 // Load a Klass from an object
 528 class LoadKlassNode : public LoadPNode {
 529 private:
 530   LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
 531     : LoadPNode(nullptr, mem, adr, at, tk, mo) {}
 532 
 533 public:
 534   virtual int Opcode() const;
 535   virtual const Type* Value(PhaseGVN* phase) const;
 536   virtual Node* Identity(PhaseGVN* phase);
 537   virtual bool depends_only_on_test() const { return true; }
 538 
 539   // Polymorphic factory method:

 549 // implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
 550 // extract the actual class pointer. C2's type system is agnostic on whether the
 551 // input address directly points into the class pointer.
 552 class LoadNKlassNode : public LoadNNode {
 553 private:
 554   friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
 555   LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
 556     : LoadNNode(nullptr, mem, adr, at, tk, mo) {}
 557 
 558 public:
 559   virtual int Opcode() const;
 560   virtual uint ideal_reg() const { return Op_RegN; }
 561   virtual int store_Opcode() const { return Op_StoreNKlass; }
 562   virtual BasicType value_basic_type() const { return T_NARROWKLASS; }
 563 
 564   virtual const Type* Value(PhaseGVN* phase) const;
 565   virtual Node* Identity(PhaseGVN* phase);
 566   virtual bool depends_only_on_test() const { return true; }
 567 };
 568 
 569 
 570 //------------------------------StoreNode--------------------------------------
 571 // Store value; requires Store, Address and Value
 572 class StoreNode : public MemNode {
 573 private:
 574   // On platforms with weak memory ordering (e.g., PPC) we distinguish
 575   // stores that can be reordered from those requiring release semantics to
 576   // adhere to the Java specification.  The required behaviour is stored in
 577   // this field.
 578   const MemOrd _mo;
 579   // Needed for proper cloning.
 580   virtual uint size_of() const { return sizeof(*this); }
 581 protected:
 582   virtual bool cmp( const Node &n ) const;
 583   virtual bool depends_only_on_test() const { return false; }
 584 
 585   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
 586   Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);
 587 
 588 public:
 589   // We must ensure that stores of object references will be visible

 703       && StoreNode::cmp(n);
 704   }
 705   virtual uint size_of() const { return sizeof(*this); }
 706   const bool _require_atomic_access;  // is piecewise store forbidden?
 707 
 708 public:
 709   StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
 710     : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
 711   virtual int Opcode() const;
 712   virtual BasicType value_basic_type() const { return T_LONG; }
 713   bool require_atomic_access() const { return _require_atomic_access; }
 714 
 715 #ifndef PRODUCT
 716   virtual void dump_spec(outputStream *st) const {
 717     StoreNode::dump_spec(st);
 718     if (_require_atomic_access)  st->print(" Atomic!");
 719   }
 720 #endif
 721 };
 722 
 723 //------------------------------StoreFNode-------------------------------------
 724 // Store float to memory
 725 class StoreFNode : public StoreNode {
 726 public:
 727   StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
 728     : StoreNode(c, mem, adr, at, val, mo) {}
 729   virtual int Opcode() const;
 730   virtual BasicType value_basic_type() const { return T_FLOAT; }
 731 };
 732 
 733 //------------------------------StoreDNode-------------------------------------
 734 // Store double to memory
 735 class StoreDNode : public StoreNode {
 736   virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
 737   virtual bool cmp( const Node &n ) const {
 738     return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
 739       && StoreNode::cmp(n);
 740   }
 741   virtual uint size_of() const { return sizeof(*this); }
 742   const bool _require_atomic_access;  // is piecewise store forbidden?

1058 };
1059 
1060 //------------------------------GetAndSetPNode---------------------------
1061 class GetAndSetPNode : public LoadStoreNode {
1062 public:
1063   GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1064   virtual int Opcode() const;
1065 };
1066 
1067 //------------------------------GetAndSetNNode---------------------------
1068 class GetAndSetNNode : public LoadStoreNode {
1069 public:
1070   GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1071   virtual int Opcode() const;
1072 };
1073 
1074 //------------------------------ClearArray-------------------------------------
1075 class ClearArrayNode: public Node {
1076 private:
1077   bool _is_large;

1078 public:
1079   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
1080     : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
1081     init_class_id(Class_ClearArray);
1082   }
1083   virtual int         Opcode() const;
1084   virtual const Type *bottom_type() const { return Type::MEMORY; }
1085   // ClearArray modifies array elements, and so affects only the
1086   // array memory addressed by the bottom_type of its base address.
1087   virtual const class TypePtr *adr_type() const;
1088   virtual Node* Identity(PhaseGVN* phase);
1089   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1090   virtual uint match_edge(uint idx) const;
1091   bool is_large() const { return _is_large; }
1092   virtual uint size_of() const { return sizeof(ClearArrayNode); }
1093   virtual uint hash() const { return Node::hash() + _is_large; }
1094   virtual bool cmp(const Node& n) const {
1095     return Node::cmp(n) && _is_large == ((ClearArrayNode&)n).is_large();
1096   }
1097 
1098   // Clear the given area of an object or array.
1099   // The start offset must always be aligned mod BytesPerInt.
1100   // The end offset must always be aligned mod BytesPerLong.
1101   // Return the new memory.
1102   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1103                             intptr_t start_offset,
1104                             intptr_t end_offset,
1105                             PhaseGVN* phase);
1106   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1107                             intptr_t start_offset,
1108                             Node* end_offset,
1109                             PhaseGVN* phase);
1110   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1111                             Node* start_offset,
1112                             Node* end_offset,
1113                             PhaseGVN* phase);
1114   // Return the allocation input memory edge if it is a different instance
1115   // or itself if it is the one we are looking for.
1116   static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
1117 };
1118 
1119 //------------------------------MemBar-----------------------------------------
1120 // There are different flavors of Memory Barriers to match the Java Memory
1121 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1122 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1123 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1124 // preceding ref can be moved to after them.  We insert a MemBar-Release
1125 // before a FastUnlock or volatile-store.  All volatiles need to be
1126 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
1127   // separate them from any following volatile-load.
1128 class MemBarNode: public MultiNode {
1129   virtual uint hash() const ;                  // { return NO_HASH; }
1130   virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

1142     TrailingLoadStore,
1143     LeadingLoadStore,
1144     TrailingExpandedArrayCopy
1145   } _kind;
1146 
1147 #ifdef ASSERT
1148   uint _pair_idx;
1149 #endif
1150 
1151 public:
1152   enum {
1153     Precedent = TypeFunc::Parms  // optional edge to force precedence
1154   };
1155   MemBarNode(Compile* C, int alias_idx, Node* precedent);
1156   virtual int Opcode() const = 0;
1157   virtual const class TypePtr *adr_type() const { return _adr_type; }
1158   virtual const Type* Value(PhaseGVN* phase) const;
1159   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1160   virtual uint match_edge(uint idx) const { return 0; }
1161   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1162   virtual Node *match( const ProjNode *proj, const Matcher *m );
1163   // Factory method.  Builds a wide or narrow membar.
1164   // Optional 'precedent' becomes an extra edge if not null.
1165   static MemBarNode* make(Compile* C, int opcode,
1166                           int alias_idx = Compile::AliasIdxBot,
1167                           Node* precedent = nullptr);
1168 
1169   MemBarNode* trailing_membar() const;
1170   MemBarNode* leading_membar() const;
1171 
1172   void set_trailing_load() { _kind = TrailingLoad; }
1173   bool trailing_load() const { return _kind == TrailingLoad; }
1174   bool trailing_store() const { return _kind == TrailingStore; }
1175   bool leading_store() const { return _kind == LeadingStore; }
1176   bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1177   bool leading_load_store() const { return _kind == LeadingLoadStore; }
1178   bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1179   bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1180   bool standalone() const { return _kind == Standalone; }
1181   void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
1182   bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }

 109   static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
 110   static bool all_controls_dominate(Node* dom, Node* sub) {
 111     DomResult dom_result = maybe_all_controls_dominate(dom, sub);
 112     return dom_result == DomResult::Dominate;
 113   }
 114 
 115   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
 116 
 117   // Shared code for Ideal methods:
 118   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.
 119 
 120   // Helper function for adr_type() implementations.
 121   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);
 122 
 123   // Raw access function, to allow copying of adr_type efficiently in
 124   // product builds and retain the debug info for debug builds.
 125   const TypePtr *raw_adr_type() const {
 126     return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
 127   }
 128 
 129 #ifdef ASSERT
 130   void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
 131 #endif
 132 
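
A standalone sketch of the build-mode trick raw_adr_type() relies on, with the macros defined locally to mimic the DEBUG_ONLY/NOT_DEBUG convention (an assumption about their behavior, not the real HotSpot headers): the function yields the stored _adr_type in debug builds and nullptr in product builds.

#include <cstdio>

// Local stand-ins: DEBUG_ONLY keeps its argument only when ASSERT is defined
// (debug builds), NOT_DEBUG only when it is not (product builds).
#ifdef ASSERT
#define DEBUG_ONLY(code) code
#define NOT_DEBUG(code)
#else
#define DEBUG_ONLY(code)
#define NOT_DEBUG(code) code
#endif

int main() {
  const char* adr_type = "debug-only info";
  // Same expression shape as raw_adr_type() above.
  const char* result = DEBUG_ONLY(adr_type) NOT_DEBUG(nullptr);
  printf("%s\n", result ? result : "(null in product builds)");
  return 0;
}
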
 133   // Return the barrier data of n, if available, or 0 otherwise.
 134   static uint8_t barrier_data(const Node* n);
 135 
 136   // Map a load or store opcode to its corresponding store opcode.
 137   // (Return -1 if unknown.)
 138   virtual int store_Opcode() const { return -1; }
 139 
 140   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 141   // The returned type is a property of the value that is loaded/stored and
 142   // not the memory that is accessed. For mismatched memory accesses
 143   // they might differ. For instance, a value of type 'short' may be stored
 144   // into an array of elements of type 'long'.
 145   virtual BasicType value_basic_type() const = 0;
 146   virtual int memory_size() const {
 147 #ifdef ASSERT
 148     return type2aelembytes(value_basic_type(), true);
 149 #else
 150     return type2aelembytes(value_basic_type());
 151 #endif
 152   }
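
A standalone sketch (not HotSpot code; BasicType and type2aelembytes are trimmed stand-ins) of the point made in the comment above: memory_size() follows the basic type of the stored value, so a mismatched access such as a 'short' written into 'long'-typed memory still moves only two bytes.

#include <cstdio>

enum BasicType { T_SHORT, T_LONG };          // trimmed stand-ins for HotSpot's BasicType
static int type2aelembytes(BasicType bt) {   // stand-in for the real type2aelembytes()
  return bt == T_SHORT ? 2 : 8;
}

int main() {
  // A 'short' value stored into an array of 'long' elements is a mismatched
  // access, but the access still moves 2 bytes: the size is a property of the
  // stored value, not of the memory it lands in.
  BasicType value_type = T_SHORT;
  printf("memory_size = %d bytes\n", type2aelembytes(value_type));  // prints 2
  return 0;
}
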

 504 
 505 //------------------------------LoadPNode--------------------------------------
 506 // Load a pointer from memory (either object or array)
 507 class LoadPNode : public LoadNode {
 508 public:
 509   LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
 510     : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
 511   virtual int Opcode() const;
 512   virtual uint ideal_reg() const { return Op_RegP; }
 513   virtual int store_Opcode() const { return Op_StoreP; }
 514   virtual BasicType value_basic_type() const { return T_ADDRESS; }
 515 };
 516 
 517 
 518 //------------------------------LoadNNode--------------------------------------
 519 // Load a narrow oop from memory (either object or array)
 520 class LoadNNode : public LoadNode {
 521 public:
 522   LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
 523     : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
 524   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 525   virtual int Opcode() const;
 526   virtual uint ideal_reg() const { return Op_RegN; }
 527   virtual int store_Opcode() const { return Op_StoreN; }
 528   virtual BasicType value_basic_type() const { return T_NARROWOOP; }
 529 };
 530 
 531 //------------------------------LoadKlassNode----------------------------------
 532 // Load a Klass from an object
 533 class LoadKlassNode : public LoadPNode {
 534 private:
 535   LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
 536     : LoadPNode(nullptr, mem, adr, at, tk, mo) {}
 537 
 538 public:
 539   virtual int Opcode() const;
 540   virtual const Type* Value(PhaseGVN* phase) const;
 541   virtual Node* Identity(PhaseGVN* phase);
 542   virtual bool depends_only_on_test() const { return true; }
 543 
 544   // Polymorphic factory method:

 554 // implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
 555 // extract the actual class pointer. C2's type system is agnostic on whether the
 556 // input address directly points into the class pointer.
 557 class LoadNKlassNode : public LoadNNode {
 558 private:
 559   friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
 560   LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
 561     : LoadNNode(nullptr, mem, adr, at, tk, mo) {}
 562 
 563 public:
 564   virtual int Opcode() const;
 565   virtual uint ideal_reg() const { return Op_RegN; }
 566   virtual int store_Opcode() const { return Op_StoreNKlass; }
 567   virtual BasicType value_basic_type() const { return T_NARROWKLASS; }
 568 
 569   virtual const Type* Value(PhaseGVN* phase) const;
 570   virtual Node* Identity(PhaseGVN* phase);
 571   virtual bool depends_only_on_test() const { return true; }
 572 };
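
A standalone sketch of the implicit shift described in the comment above; the shift amount and mark word value are hypothetical placeholders, not the real markWord constants.

#include <cstdint>
#include <cstdio>

int main() {
  const int klass_shift = 22;                     // hypothetical, stands in for the
                                                  // markWord::klass_shift_at_offset bits
  uint64_t mark_word    = 0x00000ABCDE000001ULL;  // hypothetical mark word; the narrow
                                                  // class pointer sits in its upper bits
  uint32_t narrow_klass = (uint32_t)(mark_word >> klass_shift);
  printf("narrow klass = 0x%x\n", narrow_klass);
  return 0;
}
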
 573 

 574 //------------------------------StoreNode--------------------------------------
 575 // Store value; requires Store, Address and Value
 576 class StoreNode : public MemNode {
 577 private:
 578   // On platforms with weak memory ordering (e.g., PPC) we distinguish
 579   // stores that can be reordered from those requiring release semantics to
 580   // adhere to the Java specification.  The required behaviour is stored in
 581   // this field.
 582   const MemOrd _mo;
 583   // Needed for proper cloning.
 584   virtual uint size_of() const { return sizeof(*this); }
 585 protected:
 586   virtual bool cmp( const Node &n ) const;
 587   virtual bool depends_only_on_test() const { return false; }
 588 
 589   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
 590   Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);
 591 
 592 public:
 593   // We must ensure that stores of object references will be visible

 707       && StoreNode::cmp(n);
 708   }
 709   virtual uint size_of() const { return sizeof(*this); }
 710   const bool _require_atomic_access;  // is piecewise store forbidden?
 711 
 712 public:
 713   StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
 714     : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
 715   virtual int Opcode() const;
 716   virtual BasicType value_basic_type() const { return T_LONG; }
 717   bool require_atomic_access() const { return _require_atomic_access; }
 718 
 719 #ifndef PRODUCT
 720   virtual void dump_spec(outputStream *st) const {
 721     StoreNode::dump_spec(st);
 722     if (_require_atomic_access)  st->print(" Atomic!");
 723   }
 724 #endif
 725 };
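
A standalone sketch (plain C++, not HotSpot code) of the hazard that the _require_atomic_access flag above guards against: if a 64-bit store may be done piecewise, a concurrent reader can observe a torn value, which Java forbids for volatile long and double.

#include <cstdint>
#include <cstring>
#include <cstdio>

// Writes a 64-bit value as two 32-bit halves, the "piecewise" store the flag
// forbids: between the two memcpy calls another thread could read a cell
// holding half old bits and half new bits (a torn value).
static void store_piecewise(uint64_t* dst, uint64_t val) {
  uint32_t halves[2];
  std::memcpy(halves, &val, sizeof(val));
  std::memcpy(reinterpret_cast<uint32_t*>(dst),     &halves[0], sizeof(uint32_t));
  std::memcpy(reinterpret_cast<uint32_t*>(dst) + 1, &halves[1], sizeof(uint32_t));
}

int main() {
  uint64_t cell = 0;
  store_piecewise(&cell, 0x1122334455667788ULL);
  printf("cell = 0x%llx\n", (unsigned long long)cell);
  return 0;
}
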
 726 
 727 // Special StoreL for flat stores that emits GC barriers for the field at 'oop_off' in the backend
 728 class StoreLSpecialNode : public StoreNode {
 729 
 730 public:
 731   StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
 732     : StoreNode(c, mem, adr, at, val, mo) {
 733     set_mismatched_access();
 734     if (oop_off != nullptr) {
 735       add_req(oop_off);
 736     }
 737   }
 738   virtual int Opcode() const;
 739   virtual BasicType value_basic_type() const { return T_LONG; }
 740 
 741   virtual uint match_edge(uint idx) const { return idx == MemNode::Address ||
 742                                                    idx == MemNode::ValueIn ||
 743                                                    idx == MemNode::ValueIn + 1; }
 744 };
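
A standalone sketch of the match_edge() override above. The edge indices are modeled on MemNode's conventional input order (Control, Memory, Address, ValueIn), an assumption here; the optional 'oop_off' input appended by the constructor lands at ValueIn + 1.

#include <cstdio>

enum { Control, Memory, Address, ValueIn };  // assumed stand-in for MemNode's edge indices

// Mirrors the override above: the matcher may fold the address, the stored
// value, and the trailing oop-offset edge into the machine node.
static bool match_edge(unsigned idx) {
  return idx == Address || idx == ValueIn || idx == ValueIn + 1;
}

int main() {
  for (unsigned idx = 0; idx <= ValueIn + 1; idx++) {
    printf("edge %u: %s\n", idx, match_edge(idx) ? "matched" : "not matched");
  }
  return 0;
}
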
 745 
 746 //------------------------------StoreFNode-------------------------------------
 747 // Store float to memory
 748 class StoreFNode : public StoreNode {
 749 public:
 750   StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
 751     : StoreNode(c, mem, adr, at, val, mo) {}
 752   virtual int Opcode() const;
 753   virtual BasicType value_basic_type() const { return T_FLOAT; }
 754 };
 755 
 756 //------------------------------StoreDNode-------------------------------------
 757 // Store double to memory
 758 class StoreDNode : public StoreNode {
 759   virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
 760   virtual bool cmp( const Node &n ) const {
 761     return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
 762       && StoreNode::cmp(n);
 763   }
 764   virtual uint size_of() const { return sizeof(*this); }
 765   const bool _require_atomic_access;  // is piecewise store forbidden?

1081 };
1082 
1083 //------------------------------GetAndSetPNode---------------------------
1084 class GetAndSetPNode : public LoadStoreNode {
1085 public:
1086   GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1087   virtual int Opcode() const;
1088 };
1089 
1090 //------------------------------GetAndSetNNode---------------------------
1091 class GetAndSetNNode : public LoadStoreNode {
1092 public:
1093   GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1094   virtual int Opcode() const;
1095 };
1096 
1097 //------------------------------ClearArray-------------------------------------
1098 class ClearArrayNode: public Node {
1099 private:
1100   bool _is_large;
1101   bool _word_copy_only;
1102 public:
1103   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
1104     : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
1105       _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
1106     init_class_id(Class_ClearArray);
1107   }
1108   virtual int         Opcode() const;
1109   virtual const Type *bottom_type() const { return Type::MEMORY; }
1110   // ClearArray modifies array elements, and so affects only the
1111   // array memory addressed by the bottom_type of its base address.
1112   virtual const class TypePtr *adr_type() const;
1113   virtual Node* Identity(PhaseGVN* phase);
1114   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1115   virtual uint match_edge(uint idx) const;
1116   bool is_large() const { return _is_large; }
1117   bool word_copy_only() const { return _word_copy_only; }
1118   virtual uint size_of() const { return sizeof(ClearArrayNode); }
1119   virtual uint hash() const { return Node::hash() + _is_large; }
1120   virtual bool cmp(const Node& n) const {
1121     return Node::cmp(n) && _is_large == ((ClearArrayNode&)n).is_large();
1122   }
1123 
1124   // Clear the given area of an object or array.
1125   // The start offset must always be aligned mod BytesPerInt.
1126   // The end offset must always be aligned mod BytesPerLong.
1127   // Return the new memory.
1128   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1129                             Node* val,
1130                             Node* raw_val,
1131                             intptr_t start_offset,
1132                             intptr_t end_offset,
1133                             PhaseGVN* phase);
1134   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1135                             Node* val,
1136                             Node* raw_val,
1137                             intptr_t start_offset,
1138                             Node* end_offset,
1139                             PhaseGVN* phase);
1140   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1141                             Node* raw_val,
1142                             Node* start_offset,
1143                             Node* end_offset,
1144                             PhaseGVN* phase);
1145   // Return the allocation input memory edge if it is a different instance
1146   // or itself if it is the one we are looking for.
1147   static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
1148 };
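
A standalone sketch of the _word_copy_only predicate computed in the constructor above: the clear must stay word-sized whenever the fill value is a long that is not known to be zero, i.e. either a non-constant value or a nonzero constant. The FillValue struct is a stand-in for what bottom_type() reports about 'val'.

#include <cstdio>

struct FillValue {            // stand-in for the queries on val->bottom_type()
  bool is_long;               // isa_long() != nullptr
  bool is_con;                // is_long()->is_con()
  long long con;              // is_long()->get_con(), when is_con
};

static bool word_copy_only(const FillValue& v) {
  return v.is_long && (!v.is_con || v.con != 0);
}

int main() {
  printf("%d\n", word_copy_only({true, true,  0}));   // 0: constant zero fill, byte-wise clear is fine
  printf("%d\n", word_copy_only({true, true, 42}));   // 1: nonzero pattern, copy whole words
  printf("%d\n", word_copy_only({true, false, 0}));   // 1: unknown value, copy whole words
  return 0;
}
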
1149 
1150 //------------------------------MemBar-----------------------------------------
1151 // There are different flavors of Memory Barriers to match the Java Memory
1152 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1153 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1154 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1155 // preceding ref can be moved to after them.  We insert a MemBar-Release
1156 // before a FastUnlock or volatile-store.  All volatiles need to be
1157 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
1158   // separate them from any following volatile-load.
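
The placement rules above, as a rough standalone C++ analogue using <atomic> fences (a sketch of the ordering intent, not how C2 actually emits or schedules these barrier nodes): release before a volatile store, a full barrier after it, and acquire after a volatile load.

#include <atomic>
#include <cstdio>

std::atomic<int> shared{0};
int plain = 0;

void volatile_store(int v) {
  plain = v;                                            // ordinary store
  std::atomic_thread_fence(std::memory_order_release);  // ~ MemBar-Release before the store
  shared.store(v, std::memory_order_relaxed);           // the volatile store itself
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ MemBar-Volatile after the store,
                                                        //   separating it from later volatile loads
}

int volatile_load() {
  int v = shared.load(std::memory_order_relaxed);       // the volatile load itself
  std::atomic_thread_fence(std::memory_order_acquire);  // ~ MemBar-Acquire after the load
  return v + plain;
}

int main() {
  volatile_store(1);
  printf("%d\n", volatile_load());
  return 0;
}
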
1159 class MemBarNode: public MultiNode {
1160   virtual uint hash() const ;                  // { return NO_HASH; }
1161   virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

1173     TrailingLoadStore,
1174     LeadingLoadStore,
1175     TrailingExpandedArrayCopy
1176   } _kind;
1177 
1178 #ifdef ASSERT
1179   uint _pair_idx;
1180 #endif
1181 
1182 public:
1183   enum {
1184     Precedent = TypeFunc::Parms  // optional edge to force precedence
1185   };
1186   MemBarNode(Compile* C, int alias_idx, Node* precedent);
1187   virtual int Opcode() const = 0;
1188   virtual const class TypePtr *adr_type() const { return _adr_type; }
1189   virtual const Type* Value(PhaseGVN* phase) const;
1190   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1191   virtual uint match_edge(uint idx) const { return 0; }
1192   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1193   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
1194   // Factory method.  Builds a wide or narrow membar.
1195   // Optional 'precedent' becomes an extra edge if not null.
1196   static MemBarNode* make(Compile* C, int opcode,
1197                           int alias_idx = Compile::AliasIdxBot,
1198                           Node* precedent = nullptr);
1199 
1200   MemBarNode* trailing_membar() const;
1201   MemBarNode* leading_membar() const;
1202 
1203   void set_trailing_load() { _kind = TrailingLoad; }
1204   bool trailing_load() const { return _kind == TrailingLoad; }
1205   bool trailing_store() const { return _kind == TrailingStore; }
1206   bool leading_store() const { return _kind == LeadingStore; }
1207   bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1208   bool leading_load_store() const { return _kind == LeadingLoadStore; }
1209   bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1210   bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1211   bool standalone() const { return _kind == Standalone; }
1212   void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
1213   bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }