src/hotspot/share/opto/memnode.hpp

 109   static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
 110   static bool all_controls_dominate(Node* dom, Node* sub) {
 111     DomResult dom_result = maybe_all_controls_dominate(dom, sub);
 112     return dom_result == DomResult::Dominate;
 113   }
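As a side note on the wrapper above: only a definite Dominate answer becomes true; every other DomResult outcome conservatively reports false. A self-contained sketch (enum values assumed for illustration, not quoted from node.hpp):

    #include <cassert>

    // Assumed tri-state shape; only a definite answer counts as success.
    enum class DomResult { NotDominate, Dominate, EncounteredDeadCode };

    static bool all_controls_dominate_model(DomResult r) {
      return r == DomResult::Dominate;   // "maybe" outcomes collapse to false
    }

    int main() {
      assert( all_controls_dominate_model(DomResult::Dominate));
      assert(!all_controls_dominate_model(DomResult::NotDominate));
    }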
 114 
 115   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
 116 
 117   // Shared code for Ideal methods:
 118   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.
 119 
 120   // Helper function for adr_type() implementations.
 121   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);
 122 
 123   // Raw access function, to allow copying of adr_type efficiently in
 124   // product builds and retain the debug info for debug builds.
 125   const TypePtr *raw_adr_type() const {
 126     return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
 127   }
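For readers outside HotSpot: raw_adr_type() leans on the DEBUG_ONLY/NOT_DEBUG macro pair; a minimal sketch of the usual convention (debug builds keep the first argument, product builds the second):

    // Under ASSERT the field is kept; in product builds it compiles away.
    #ifdef ASSERT
    #define DEBUG_ONLY(code) code
    #define NOT_DEBUG(code)
    #else
    #define DEBUG_ONLY(code)
    #define NOT_DEBUG(code) code
    #endif
    // raw_adr_type() thus yields _adr_type in debug and nullptr in product.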
 128 
 129   // Return the barrier data of n, if available, or 0 otherwise.
 130   static uint8_t barrier_data(const Node* n);
 131 
 132   // Map a load or store opcode to its corresponding store opcode.
 133   // (Return -1 if unknown.)
 134   virtual int store_Opcode() const { return -1; }
 135 
 136   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 137   // The returned type is a property of the value that is loaded/stored and
 138   // not the memory that is accessed. For mismatched memory accesses
 139   // they might differ. For instance, a value of type 'short' may be stored
 140   // into an array of elements of type 'long'.
 141   virtual BasicType value_basic_type() const = 0;
 142   virtual int memory_size() const {
 143 #ifdef ASSERT
 144     return type2aelembytes(value_basic_type(), true);
 145 #else
 146     return type2aelembytes(value_basic_type());
 147 #endif
 148   }
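To make the mismatched-access remark concrete, a toy model (stand-in names, not HotSpot code): the reported size tracks the value's type, not the container's.

    #include <cassert>

    enum BasicTypeModel { T_SHORT_M = 2, T_LONG_M = 8 };  // bytes per value

    static int memory_size_model(BasicTypeModel value_type) {
      return (int)value_type;            // models type2aelembytes()
    }

    int main() {
      // Hypothetical short value stored into a long[] slot: the access is
      // mismatched, yet the node still reports the 2 bytes of the short.
      assert(memory_size_model(T_SHORT_M) == 2);
    }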

 248   // If the load is from Field memory and the pointer is non-null, it might be possible to
 249   // zero out the control input.
 250   // If the offset is constant and the base is an object allocation,
 251   // try to hook me up to the exact initializing store.
 252   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 253 
 254   // Return true if it's possible to split the Load through a Phi merging the bases
 255   bool can_split_through_phi_base(PhaseGVN *phase);
 256 
 257   // Split instance field load through Phi.
 258   Node* split_through_phi(PhaseGVN *phase, bool ignore_missing_instance_id = false);
 259 
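A plain-C++ analogy (not C2 IR) of what splitting a load through a Phi achieves: Load(Phi(a, b)) becomes Phi(Load(a), Load(b)), so each arm can fold on its own.

    // Before: one load through a merged base.
    int load_of_phi(bool c, const int* a, const int* b) {
      const int* p = c ? a : b;   // Phi merging the two bases
      return *p;                  // Load through the Phi
    }

    // After the split: a Phi of two independent loads.
    int phi_of_loads(bool c, const int* a, const int* b) {
      return c ? *a : *b;
    }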
 260   // Recover original value from boxed values
 261   Node *eliminate_autobox(PhaseIterGVN *igvn);
 262 
 263   // Compute a new Type for this node.  Basically we just do the pre-check,
 264   // then call the virtual add() to set the type.
 265   virtual const Type* Value(PhaseGVN* phase) const;
 266 
 267   // Common methods for LoadKlass and LoadNKlass nodes.
 268   const Type* klass_value_common(PhaseGVN* phase) const;
 269   Node* klass_identity_common(PhaseGVN* phase);
 270 
 271   virtual uint ideal_reg() const;
 272   virtual const Type *bottom_type() const;
 273   // Following method is copied from TypeNode:
 274   void set_type(const Type* t) {
 275     assert(t != nullptr, "sanity");
 276     DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
 277     *(const Type**)&_type = t;   // cast away const-ness
 278     // If this node is in the hash table, make sure it doesn't need a rehash.
 279     assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
 280   }
 281   const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };
 282 
 283   // Do not match memory edge
 284   virtual uint match_edge(uint idx) const;
 285 
 286   // Map a load opcode to its corresponding store opcode.
 287   virtual int store_Opcode() const = 0;
 288 

 500 
 501 //------------------------------LoadPNode--------------------------------------
 502 // Load a pointer from memory (either object or array)
 503 class LoadPNode : public LoadNode {
 504 public:
 505   LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
 506     : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
 507   virtual int Opcode() const;
 508   virtual uint ideal_reg() const { return Op_RegP; }
 509   virtual int store_Opcode() const { return Op_StoreP; }
 510   virtual BasicType value_basic_type() const { return T_ADDRESS; }
 511 };
 512 
 513 
 514 //------------------------------LoadNNode--------------------------------------
 515 // Load a narrow oop from memory (either object or array)
 516 class LoadNNode : public LoadNode {
 517 public:
 518   LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
 519     : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
 520   virtual int Opcode() const;
 521   virtual uint ideal_reg() const { return Op_RegN; }
 522   virtual int store_Opcode() const { return Op_StoreN; }
 523   virtual BasicType value_basic_type() const { return T_NARROWOOP; }
 524 };
 525 
 526 //------------------------------LoadKlassNode----------------------------------
 527 // Load a Klass from an object
 528 class LoadKlassNode : public LoadPNode {
 529 private:
 530   LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
 531     : LoadPNode(nullptr, mem, adr, at, tk, mo) {}
 532 
 533 public:
 534   virtual int Opcode() const;
 535   virtual const Type* Value(PhaseGVN* phase) const;
 536   virtual Node* Identity(PhaseGVN* phase);
 537   virtual bool depends_only_on_test() const { return true; }
 538 
 539   // Polymorphic factory method:
 540   static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
 541                     const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
 542 };
 543 
 544 //------------------------------LoadNKlassNode---------------------------------
 545 // Load a narrow Klass from an object.
 546 // With compact headers, the input address (adr) does not point at the exact
 547 // header position where the (narrow) class pointer is located, but into the
 548 // middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
 549 // implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
 550 // extract the actual class pointer. C2's type system is agnostic on whether the
 551 // input address directly points into the class pointer.
 552 class LoadNKlassNode : public LoadNNode {
 553 private:
 554   friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
 555   LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
 556     : LoadNNode(nullptr, mem, adr, at, tk, mo) {}
 557 
 558 public:
 559   virtual int Opcode() const;
 560   virtual uint ideal_reg() const { return Op_RegN; }
 561   virtual int store_Opcode() const { return Op_StoreNKlass; }
 562   virtual BasicType value_basic_type() const { return T_NARROWKLASS; }
 563 
 564   virtual const Type* Value(PhaseGVN* phase) const;
 565   virtual Node* Identity(PhaseGVN* phase);
 566   virtual bool depends_only_on_test() const { return true; }
 567 };
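A self-contained model of the implicit shift described in the class comment above; the shift amount here is illustrative, the real one being markWord::klass_shift_at_offset:

    #include <cassert>
    #include <cstdint>

    // With compact headers the narrow class pointer sits in the upper bits
    // of the word loaded at klass_offset_in_bytes(); shifting recovers it.
    static uint32_t extract_narrow_klass(uint32_t raw_bits, unsigned shift) {
      return raw_bits >> shift;
    }

    int main() {
      assert(extract_narrow_klass(0xABCD0000u, 16) == 0x0000ABCDu);
    }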
 568 
 569 
 570 //------------------------------StoreNode--------------------------------------
 571 // Store value; requires Store, Address and Value
 572 class StoreNode : public MemNode {
 573 private:
 574   // On platforms with weak memory ordering (e.g., PPC) we distinguish
 575   // stores that can be reordered from those requiring release semantics to
 576   // adhere to the Java specification.  The required behaviour is stored in
 577   // this field.
 578   const MemOrd _mo;
 579   // Needed for proper cloning.
 580   virtual uint size_of() const { return sizeof(*this); }
 581 protected:
 582   virtual bool cmp( const Node &n ) const;
 583   virtual bool depends_only_on_test() const { return false; }
 584 
 585   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
 586   Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);
 587 
 588 public:
 589   // We must ensure that stores of object references will be visible

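The MemOrd distinction in the StoreNode comment above maps roughly onto the standard C++ memory model; an analogy, not what C2 actually emits:

    #include <atomic>

    std::atomic<int> g_flag{0};
    int g_data = 0;

    void publish() {
      g_data = 42;                                 // plain, reorderable store
      g_flag.store(1, std::memory_order_release);  // store requiring release
    }                                              // semantics (e.g. volatile)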
 703       && StoreNode::cmp(n);
 704   }
 705   virtual uint size_of() const { return sizeof(*this); }
 706   const bool _require_atomic_access;  // is piecewise store forbidden?
 707 
 708 public:
 709   StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
 710     : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
 711   virtual int Opcode() const;
 712   virtual BasicType value_basic_type() const { return T_LONG; }
 713   bool require_atomic_access() const { return _require_atomic_access; }
 714 
 715 #ifndef PRODUCT
 716   virtual void dump_spec(outputStream *st) const {
 717     StoreNode::dump_spec(st);
 718     if (_require_atomic_access)  st->print(" Atomic!");
 719   }
 720 #endif
 721 };
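On "is piecewise store forbidden?": without require_atomic_access, a 64-bit store may in principle be performed as two 32-bit halves on a 32-bit target; Java volatile longs must rule that out, since a concurrent reader could observe a torn value. A sketch of the split the flag forbids:

    #include <cstdint>
    #include <cstring>

    // The degenerate form: two half-stores with a window between them
    // during which another thread can observe a mixed (torn) value.
    void store_piecewise(uint64_t* p, uint64_t v) {
      std::memcpy(p, &v, 4);                         // low half lands first
      std::memcpy((char*)p + 4, (char*)&v + 4, 4);   // high half lands later
    }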
 722 
 723 //------------------------------StoreFNode-------------------------------------
 724 // Store float to memory
 725 class StoreFNode : public StoreNode {
 726 public:
 727   StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
 728     : StoreNode(c, mem, adr, at, val, mo) {}
 729   virtual int Opcode() const;
 730   virtual BasicType value_basic_type() const { return T_FLOAT; }
 731 };
 732 
 733 //------------------------------StoreDNode-------------------------------------
 734 // Store double to memory
 735 class StoreDNode : public StoreNode {
 736   virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
 737   virtual bool cmp( const Node &n ) const {
 738     return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
 739       && StoreNode::cmp(n);
 740   }
 741   virtual uint size_of() const { return sizeof(*this); }
 742   const bool _require_atomic_access;  // is piecewise store forbidden?

1058 };
1059 
1060 //------------------------------GetAndSetPNode---------------------------
1061 class GetAndSetPNode : public LoadStoreNode {
1062 public:
1063   GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1064   virtual int Opcode() const;
1065 };
1066 
1067 //------------------------------GetAndSetNNode---------------------------
1068 class GetAndSetNNode : public LoadStoreNode {
1069 public:
1070   GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1071   virtual int Opcode() const;
1072 };
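GetAndSetP/GetAndSetN perform an atomic swap of a pointer/narrow-oop slot, returning the previous value; in standard C++ terms (an analogy):

    #include <atomic>

    // Atomically install a new pointer and hand back the old one.
    int* get_and_set(std::atomic<int*>& slot, int* new_val) {
      return slot.exchange(new_val);
    }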
1073 
1074 //------------------------------ClearArray-------------------------------------
1075 class ClearArrayNode: public Node {
1076 private:
1077   bool _is_large;
1078 public:
1079   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
1080     : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
1081     init_class_id(Class_ClearArray);
1082   }
1083   virtual int         Opcode() const;
1084   virtual const Type *bottom_type() const { return Type::MEMORY; }
1085   // ClearArray modifies array elements, and so affects only the
1086   // array memory addressed by the bottom_type of its base address.
1087   virtual const class TypePtr *adr_type() const;
1088   virtual Node* Identity(PhaseGVN* phase);
1089   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1090   virtual uint match_edge(uint idx) const;
1091   bool is_large() const { return _is_large; }
1092 
1093   // Clear the given area of an object or array.
1094   // The start offset must always be aligned mod BytesPerInt.
1095   // The end offset must always be aligned mod BytesPerLong.
1096   // Return the new memory.
1097   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1098                             intptr_t start_offset,
1099                             intptr_t end_offset,
1100                             PhaseGVN* phase);
1101   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1102                             intptr_t start_offset,
1103                             Node* end_offset,
1104                             PhaseGVN* phase);
1105   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1106                             Node* start_offset,
1107                             Node* end_offset,
1108                             PhaseGVN* phase);
 1109   // Return the allocation's input memory edge if it is a different instance,
 1110   // or the node itself if it is the one we are looking for.
1111   static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
1112 };
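A worked instance of clear_memory()'s alignment contract, assuming the usual BytesPerInt == 4 and BytesPerLong == 8:

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t BytesPerInt = 4, BytesPerLong = 8;
      intptr_t start_offset = 12, end_offset = 24;
      assert(start_offset % BytesPerInt  == 0);  // 12 is int-aligned: legal
      assert(end_offset   % BytesPerLong == 0);  // 24 is long-aligned: legal
      // An end_offset of 20 would violate the contract (20 % 8 != 0).
    }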
1113 
1114 //------------------------------MemBar-----------------------------------------
1115 // There are different flavors of Memory Barriers to match the Java Memory
1116 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1117 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1118 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1119 // preceding ref can be moved to after them.  We insert a MemBar-Release
1120 // before a FastUnlock or volatile-store.  All volatiles need to be
1121 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
 1122   // separate them from any following volatile-load.
1123 class MemBarNode: public MultiNode {
1124   virtual uint hash() const ;                  // { return NO_HASH; }
1125   virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

1137     TrailingLoadStore,
1138     LeadingLoadStore,
1139     TrailingExpandedArrayCopy
1140   } _kind;
1141 
1142 #ifdef ASSERT
1143   uint _pair_idx;
1144 #endif
1145 
1146 public:
1147   enum {
1148     Precedent = TypeFunc::Parms  // optional edge to force precedence
1149   };
1150   MemBarNode(Compile* C, int alias_idx, Node* precedent);
1151   virtual int Opcode() const = 0;
1152   virtual const class TypePtr *adr_type() const { return _adr_type; }
1153   virtual const Type* Value(PhaseGVN* phase) const;
1154   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1155   virtual uint match_edge(uint idx) const { return 0; }
1156   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1157   virtual Node *match( const ProjNode *proj, const Matcher *m );
1158   // Factory method.  Builds a wide or narrow membar.
1159   // Optional 'precedent' becomes an extra edge if not null.
1160   static MemBarNode* make(Compile* C, int opcode,
1161                           int alias_idx = Compile::AliasIdxBot,
1162                           Node* precedent = nullptr);
1163 
1164   MemBarNode* trailing_membar() const;
1165   MemBarNode* leading_membar() const;
1166 
1167   void set_trailing_load() { _kind = TrailingLoad; }
1168   bool trailing_load() const { return _kind == TrailingLoad; }
1169   bool trailing_store() const { return _kind == TrailingStore; }
1170   bool leading_store() const { return _kind == LeadingStore; }
1171   bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1172   bool leading_load_store() const { return _kind == LeadingLoadStore; }
1173   bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1174   bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1175   bool standalone() const { return _kind == Standalone; }
1176   void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
1177   bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }

--- new version ---

 109   static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
 110   static bool all_controls_dominate(Node* dom, Node* sub) {
 111     DomResult dom_result = maybe_all_controls_dominate(dom, sub);
 112     return dom_result == DomResult::Dominate;
 113   }
 114 
 115   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
 116 
 117   // Shared code for Ideal methods:
 118   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.
 119 
 120   // Helper function for adr_type() implementations.
 121   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);
 122 
 123   // Raw access function, to allow copying of adr_type efficiently in
 124   // product builds and retain the debug info for debug builds.
 125   const TypePtr *raw_adr_type() const {
 126     return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
 127   }
 128 
 129 #ifdef ASSERT
 130   void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
 131 #endif
 132 
 133   // Return the barrier data of n, if available, or 0 otherwise.
 134   static uint8_t barrier_data(const Node* n);
 135 
 136   // Map a load or store opcode to its corresponding store opcode.
 137   // (Return -1 if unknown.)
 138   virtual int store_Opcode() const { return -1; }
 139 
 140   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 141   // The returned type is a property of the value that is loaded/stored and
 142   // not the memory that is accessed. For mismatched memory accesses
 143   // they might differ. For instance, a value of type 'short' may be stored
 144   // into an array of elements of type 'long'.
 145   virtual BasicType value_basic_type() const = 0;
 146   virtual int memory_size() const {
 147 #ifdef ASSERT
 148     return type2aelembytes(value_basic_type(), true);
 149 #else
 150     return type2aelembytes(value_basic_type());
 151 #endif
 152   }

 252   // If the load is from Field memory and the pointer is non-null, it might be possible to
 253   // zero out the control input.
 254   // If the offset is constant and the base is an object allocation,
 255   // try to hook me up to the exact initializing store.
 256   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 257 
 258   // Return true if it's possible to split the Load through a Phi merging the bases
 259   bool can_split_through_phi_base(PhaseGVN *phase);
 260 
 261   // Split instance field load through Phi.
 262   Node* split_through_phi(PhaseGVN *phase, bool ignore_missing_instance_id = false);
 263 
 264   // Recover original value from boxed values
 265   Node *eliminate_autobox(PhaseIterGVN *igvn);
 266 
 267   // Compute a new Type for this node.  Basically we just do the pre-check,
 268   // then call the virtual add() to set the type.
 269   virtual const Type* Value(PhaseGVN* phase) const;
 270 
 271   // Common methods for LoadKlass and LoadNKlass nodes.
 272   const Type* klass_value_common(PhaseGVN* phase, bool fold_for_arrays) const;
 273   Node* klass_identity_common(PhaseGVN* phase);
 274 
 275   virtual uint ideal_reg() const;
 276   virtual const Type *bottom_type() const;
 277   // Following method is copied from TypeNode:
 278   void set_type(const Type* t) {
 279     assert(t != nullptr, "sanity");
 280     DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
 281     *(const Type**)&_type = t;   // cast away const-ness
 282     // If this node is in the hash table, make sure it doesn't need a rehash.
 283     assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
 284   }
 285   const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };
 286 
 287   // Do not match memory edge
 288   virtual uint match_edge(uint idx) const;
 289 
 290   // Map a load opcode to its corresponding store opcode.
 291   virtual int store_Opcode() const = 0;
 292 

 504 
 505 //------------------------------LoadPNode--------------------------------------
 506 // Load a pointer from memory (either object or array)
 507 class LoadPNode : public LoadNode {
 508 public:
 509   LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
 510     : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
 511   virtual int Opcode() const;
 512   virtual uint ideal_reg() const { return Op_RegP; }
 513   virtual int store_Opcode() const { return Op_StoreP; }
 514   virtual BasicType value_basic_type() const { return T_ADDRESS; }
 515 };
 516 
 517 
 518 //------------------------------LoadNNode--------------------------------------
 519 // Load a narrow oop from memory (either object or array)
 520 class LoadNNode : public LoadNode {
 521 public:
 522   LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
 523     : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
 524   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 525   virtual int Opcode() const;
 526   virtual uint ideal_reg() const { return Op_RegN; }
 527   virtual int store_Opcode() const { return Op_StoreN; }
 528   virtual BasicType value_basic_type() const { return T_NARROWOOP; }
 529 };
 530 
 531 //------------------------------LoadKlassNode----------------------------------
 532 // Load a Klass from an object
 533 class LoadKlassNode : public LoadPNode {
 534   bool _fold_for_arrays;
 535 
 536   virtual uint size_of() const { return sizeof(*this); }
 537   virtual uint hash() const { return LoadNode::hash() + _fold_for_arrays; }
 538   virtual bool cmp( const Node &n ) const {
 539     return _fold_for_arrays == ((LoadKlassNode&)n)._fold_for_arrays && LoadNode::cmp(n);
 540   }
 541 
 542 private:
 543   LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo, bool fold_for_arrays)
 544     : LoadPNode(nullptr, mem, adr, at, tk, mo), _fold_for_arrays(fold_for_arrays) {}
 545 
 546 public:
 547   virtual int Opcode() const;
 548   virtual const Type* Value(PhaseGVN* phase) const;
 549   virtual Node* Identity(PhaseGVN* phase);
 550   virtual bool depends_only_on_test() const { return true; }
 551 
 552   // Polymorphic factory method:
 553   static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
 554                     const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT, bool fold_for_arrays = true);
 555 };
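The hash()/cmp() overrides above fold the new flag into node identity so GVN never commons two LoadKlass nodes that differ only in _fold_for_arrays; a self-contained sketch of that invariant (names hypothetical):

    #include <cassert>
    #include <cstddef>

    struct NodeModel {
      int  base;                       // stands in for inherited node state
      bool fold_for_arrays;
      size_t hash() const { return (size_t)base + (fold_for_arrays ? 1 : 0); }
      bool cmp(const NodeModel& n) const {
        return fold_for_arrays == n.fold_for_arrays && base == n.base;
      }
    };

    int main() {
      NodeModel a{42, true}, b{42, false};
      assert(!a.cmp(b));   // equal except for the flag: never value-numbered together
    }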
 556 
 557 //------------------------------LoadNKlassNode---------------------------------
 558 // Load a narrow Klass from an object.
 559 // With compact headers, the input address (adr) does not point at the exact
 560 // header position where the (narrow) class pointer is located, but into the
 561 // middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
 562 // implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
 563 // extract the actual class pointer. C2's type system is agnostic on whether the
 564 // input address directly points into the class pointer.
 565 class LoadNKlassNode : public LoadNNode {
 566   bool _fold_for_arrays;
 567 
 568   virtual uint size_of() const { return sizeof(*this); }
 569   virtual uint hash() const { return LoadNode::hash() + _fold_for_arrays; }
 570   virtual bool cmp( const Node &n ) const {
 571     return _fold_for_arrays == ((LoadNKlassNode&)n)._fold_for_arrays && LoadNode::cmp(n);
 572   }
 573 
 574 private:
 575   friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*, bool fold_for_arrays);
 576   LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo, bool fold_for_arrays)
 577     : LoadNNode(nullptr, mem, adr, at, tk, mo), _fold_for_arrays(fold_for_arrays) {}
 578 
 579 public:
 580   virtual int Opcode() const;
 581   virtual uint ideal_reg() const { return Op_RegN; }
 582   virtual int store_Opcode() const { return Op_StoreNKlass; }
 583   virtual BasicType value_basic_type() const { return T_NARROWKLASS; }
 584 
 585   virtual const Type* Value(PhaseGVN* phase) const;
 586   virtual Node* Identity(PhaseGVN* phase);
 587   virtual bool depends_only_on_test() const { return true; }
 588 };
 589 
 590 //------------------------------StoreNode--------------------------------------
 591 // Store value; requires Store, Address and Value
 592 class StoreNode : public MemNode {
 593 private:
 594   // On platforms with weak memory ordering (e.g., PPC) we distinguish
 595   // stores that can be reordered from those requiring release semantics to
 596   // adhere to the Java specification.  The required behaviour is stored in
 597   // this field.
 598   const MemOrd _mo;
 599   // Needed for proper cloning.
 600   virtual uint size_of() const { return sizeof(*this); }
 601 protected:
 602   virtual bool cmp( const Node &n ) const;
 603   virtual bool depends_only_on_test() const { return false; }
 604 
 605   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
 606   Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);
 607 
 608 public:
 609   // We must ensure that stores of object references will be visible

 723       && StoreNode::cmp(n);
 724   }
 725   virtual uint size_of() const { return sizeof(*this); }
 726   const bool _require_atomic_access;  // is piecewise store forbidden?
 727 
 728 public:
 729   StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
 730     : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
 731   virtual int Opcode() const;
 732   virtual BasicType value_basic_type() const { return T_LONG; }
 733   bool require_atomic_access() const { return _require_atomic_access; }
 734 
 735 #ifndef PRODUCT
 736   virtual void dump_spec(outputStream *st) const {
 737     StoreNode::dump_spec(st);
 738     if (_require_atomic_access)  st->print(" Atomic!");
 739   }
 740 #endif
 741 };
 742 
 743 // Special StoreL for flat stores that emits GC barriers for field at 'oop_off' in the backend
 744 class StoreLSpecialNode : public StoreNode {
 745 
 746 public:
 747   StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
 748     : StoreNode(c, mem, adr, at, val, mo) {
 749     set_mismatched_access();
 750     if (oop_off != nullptr) {
 751       add_req(oop_off);
 752     }
 753   }
 754   virtual int Opcode() const;
 755   virtual BasicType value_basic_type() const { return T_LONG; }
 756 
 757   virtual uint match_edge(uint idx) const { return idx == MemNode::Address ||
 758                                                    idx == MemNode::ValueIn ||
 759                                                    idx == MemNode::ValueIn + 1; }
 760 };
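A sketch of the edge indexing that match_edge() above relies on, following the MemNode input convention (Control, Memory, Address, ValueIn) with the optional oop_off appended directly after ValueIn:

    #include <cassert>

    enum MemEdgeModel { Control, Memory, Address, ValueIn };  // indices 0..3

    static bool match_edge_model(unsigned idx) {
      return idx == Address || idx == ValueIn || idx == ValueIn + 1;
    }

    int main() {
      assert(!match_edge_model(Memory));       // the memory edge never matches
      assert( match_edge_model(ValueIn + 1));  // the appended oop_off edge does
    }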
 761 
 762 //------------------------------StoreFNode-------------------------------------
 763 // Store float to memory
 764 class StoreFNode : public StoreNode {
 765 public:
 766   StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
 767     : StoreNode(c, mem, adr, at, val, mo) {}
 768   virtual int Opcode() const;
 769   virtual BasicType value_basic_type() const { return T_FLOAT; }
 770 };
 771 
 772 //------------------------------StoreDNode-------------------------------------
 773 // Store double to memory
 774 class StoreDNode : public StoreNode {
 775   virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
 776   virtual bool cmp( const Node &n ) const {
 777     return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
 778       && StoreNode::cmp(n);
 779   }
 780   virtual uint size_of() const { return sizeof(*this); }
 781   const bool _require_atomic_access;  // is piecewise store forbidden?

1097 };
1098 
1099 //------------------------------GetAndSetPNode---------------------------
1100 class GetAndSetPNode : public LoadStoreNode {
1101 public:
1102   GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1103   virtual int Opcode() const;
1104 };
1105 
1106 //------------------------------GetAndSetNNode---------------------------
1107 class GetAndSetNNode : public LoadStoreNode {
1108 public:
1109   GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1110   virtual int Opcode() const;
1111 };
1112 
1113 //------------------------------ClearArray-------------------------------------
1114 class ClearArrayNode: public Node {
1115 private:
1116   bool _is_large;
1117   bool _word_copy_only;
1118 public:
1119   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
1120     : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
1121       _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
1122     init_class_id(Class_ClearArray);
1123   }
1124   virtual int         Opcode() const;
1125   virtual const Type *bottom_type() const { return Type::MEMORY; }
1126   // ClearArray modifies array elements, and so affects only the
1127   // array memory addressed by the bottom_type of its base address.
1128   virtual const class TypePtr *adr_type() const;
1129   virtual Node* Identity(PhaseGVN* phase);
1130   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1131   virtual uint match_edge(uint idx) const;
1132   bool is_large() const { return _is_large; }
1133   bool word_copy_only() const { return _word_copy_only; }
1134 
1135   // Clear the given area of an object or array.
1136   // The start offset must always be aligned mod BytesPerInt.
1137   // The end offset must always be aligned mod BytesPerLong.
1138   // Return the new memory.
1139   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1140                             Node* val,
1141                             Node* raw_val,
1142                             intptr_t start_offset,
1143                             intptr_t end_offset,
1144                             PhaseGVN* phase);
1145   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1146                             Node* val,
1147                             Node* raw_val,
1148                             intptr_t start_offset,
1149                             Node* end_offset,
1150                             PhaseGVN* phase);
1151   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1152                             Node* raw_val,
1153                             Node* start_offset,
1154                             Node* end_offset,
1155                             PhaseGVN* phase);
 1156   // Return the allocation's input memory edge if it is a different instance,
 1157   // or the node itself if it is the one we are looking for.
1158   static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
1159 };
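Restated as a standalone predicate, the _word_copy_only initializer above is true exactly when the fill value is a long that is not known to be zero (a constant-zero fill can still use byte-wise clearing):

    #include <cassert>
    #include <optional>

    // models: isa_long() && (!is_con() || get_con() != 0)
    static bool word_copy_only(bool is_long, std::optional<long> constant) {
      return is_long && (!constant.has_value() || *constant != 0);
    }

    int main() {
      assert(!word_copy_only(true, 0L));            // constant zero fill
      assert( word_copy_only(true, 7L));            // nonzero constant fill
      assert( word_copy_only(true, std::nullopt));  // fill value unknown
    }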
1160 
1161 //------------------------------MemBar-----------------------------------------
1162 // There are different flavors of Memory Barriers to match the Java Memory
1163 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1164 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1165 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1166 // preceding ref can be moved to after them.  We insert a MemBar-Release
1167 // before a FastUnlock or volatile-store.  All volatiles need to be
1168 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
 1169   // separate them from any following volatile-load.
1170 class MemBarNode: public MultiNode {
1171   virtual uint hash() const ;                  // { return NO_HASH; }
1172   virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

1184     TrailingLoadStore,
1185     LeadingLoadStore,
1186     TrailingExpandedArrayCopy
1187   } _kind;
1188 
1189 #ifdef ASSERT
1190   uint _pair_idx;
1191 #endif
1192 
1193 public:
1194   enum {
1195     Precedent = TypeFunc::Parms  // optional edge to force precedence
1196   };
1197   MemBarNode(Compile* C, int alias_idx, Node* precedent);
1198   virtual int Opcode() const = 0;
1199   virtual const class TypePtr *adr_type() const { return _adr_type; }
1200   virtual const Type* Value(PhaseGVN* phase) const;
1201   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1202   virtual uint match_edge(uint idx) const { return 0; }
1203   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1204   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
1205   // Factory method.  Builds a wide or narrow membar.
1206   // Optional 'precedent' becomes an extra edge if not null.
1207   static MemBarNode* make(Compile* C, int opcode,
1208                           int alias_idx = Compile::AliasIdxBot,
1209                           Node* precedent = nullptr);
1210 
1211   MemBarNode* trailing_membar() const;
1212   MemBarNode* leading_membar() const;
1213 
1214   void set_trailing_load() { _kind = TrailingLoad; }
1215   bool trailing_load() const { return _kind == TrailingLoad; }
1216   bool trailing_store() const { return _kind == TrailingStore; }
1217   bool leading_store() const { return _kind == LeadingStore; }
1218   bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1219   bool leading_load_store() const { return _kind == LeadingLoadStore; }
1220   bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1221   bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1222   bool standalone() const { return _kind == Standalone; }
1223   void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
1224   bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }
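The barrier placement in the MemBar comment above corresponds roughly to this standard C++ pattern (an analogy; C2 of course emits its own IR nodes):

    #include <atomic>

    std::atomic<int> v{0};
    int plain = 0;

    int volatile_load() {
      int x = v.load(std::memory_order_acquire);  // MemBar-Acquire after it:
      return x + plain;                           // later refs can't float up
    }

    void volatile_store() {
      plain = 1;                                  // earlier refs can't sink down
      v.store(1, std::memory_order_release);      // MemBar-Release before it
      std::atomic_thread_fence(std::memory_order_seq_cst);
      // MemBar-Volatile analogue: separates this volatile-store from any
      // following volatile-load.
    }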