
src/hotspot/share/opto/memnode.hpp

Old version (before the patch; the new version follows further below):

 109   static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
 110   static bool all_controls_dominate(Node* dom, Node* sub) {
 111     DomResult dom_result = maybe_all_controls_dominate(dom, sub);
 112     return dom_result == DomResult::Dominate;
 113   }
 114 
 115   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
 116 
 117   // Shared code for Ideal methods:
 118   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.
 119 
 120   // Helper function for adr_type() implementations.
 121   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);
 122 
 123   // Raw access function, to allow copying of adr_type efficiently in
 124   // product builds while retaining the debug info for debug builds.
 125   const TypePtr *raw_adr_type() const {
 126     return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
 127   }
 128 
 129   // Return the barrier data of n, if available, or 0 otherwise.
 130   static uint8_t barrier_data(const Node* n);
 131 
 132   // Map a load or store opcode to its corresponding store opcode.
 133   // (Return -1 if unknown.)
 134   virtual int store_Opcode() const { return -1; }
 135 
 136   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 137   virtual BasicType memory_type() const = 0;
 138   virtual int memory_size() const {
 139 #ifdef ASSERT
 140     return type2aelembytes(memory_type(), true);
 141 #else
 142     return type2aelembytes(memory_type());
 143 #endif
 144   }
 145 
 146   uint8_t barrier_data() { return _barrier_data; }
 147   void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }
 148 

 244   // If the load is from Field memory and the pointer is non-null, it might be possible to
 245   // zero out the control input.
 246   // If the offset is constant and the base is an object allocation,
 247   // try to hook me up to the exact initializing store.
 248   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 249 
 250   // Return true if it's possible to split the Load through a Phi merging the bases
 251   bool can_split_through_phi_base(PhaseGVN *phase);
 252 
 253   // Split instance field load through Phi.
 254   Node* split_through_phi(PhaseGVN *phase, bool ignore_missing_instance_id = false);
 255 
 256   // Recover original value from boxed values
 257   Node *eliminate_autobox(PhaseIterGVN *igvn);
 258 
 259   // Compute a new Type for this node.  Basically we just do the pre-check,
 260   // then call the virtual add() to set the type.
 261   virtual const Type* Value(PhaseGVN* phase) const;
 262 
 263   // Common methods for LoadKlass and LoadNKlass nodes.
 264   const Type* klass_value_common(PhaseGVN* phase) const;
 265   Node* klass_identity_common(PhaseGVN* phase);
 266 
 267   virtual uint ideal_reg() const;
 268   virtual const Type *bottom_type() const;
 269   // Following method is copied from TypeNode:
 270   void set_type(const Type* t) {
 271     assert(t != nullptr, "sanity");
 272     debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
 273     *(const Type**)&_type = t;   // cast away const-ness
 274     // If this node is in the hash table, make sure it doesn't need a rehash.
 275     assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
 276   }
 277   const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };
 278 
 279   // Do not match memory edge
 280   virtual uint match_edge(uint idx) const;
 281 
 282   // Map a load opcode to its corresponding store opcode.
 283   virtual int store_Opcode() const = 0;
 284 

 496 
 497 //------------------------------LoadPNode--------------------------------------
 498 // Load a pointer from memory (either object or array)
 499 class LoadPNode : public LoadNode {
 500 public:
 501   LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
 502     : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
 503   virtual int Opcode() const;
 504   virtual uint ideal_reg() const { return Op_RegP; }
 505   virtual int store_Opcode() const { return Op_StoreP; }
 506   virtual BasicType memory_type() const { return T_ADDRESS; }
 507 };
 508 
 509 
 510 //------------------------------LoadNNode--------------------------------------
 511 // Load a narrow oop from memory (either object or array)
 512 class LoadNNode : public LoadNode {
 513 public:
 514   LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
 515     : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
 516   virtual int Opcode() const;
 517   virtual uint ideal_reg() const { return Op_RegN; }
 518   virtual int store_Opcode() const { return Op_StoreN; }
 519   virtual BasicType memory_type() const { return T_NARROWOOP; }
 520 };
 521 
 522 //------------------------------LoadKlassNode----------------------------------
 523 // Load a Klass from an object
 524 class LoadKlassNode : public LoadPNode {
 525 private:
 526   LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo)
 527     : LoadPNode(nullptr, mem, adr, at, tk, mo) {}
 528 
 529 public:
 530   virtual int Opcode() const;
 531   virtual const Type* Value(PhaseGVN* phase) const;
 532   virtual Node* Identity(PhaseGVN* phase);
 533   virtual bool depends_only_on_test() const { return true; }
 534 
 535   // Polymorphic factory method:
 536   static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
 537                     const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
 538 };
 539 
 540 //------------------------------LoadNKlassNode---------------------------------
 541 // Load a narrow Klass from an object.
 542 // With compact headers, the input address (adr) does not point at the exact
 543 // header position where the (narrow) class pointer is located, but into the
 544 // middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
 545 // implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
 546   // extract the actual class pointer. C2's type system is agnostic about
 547   // whether the input address points directly at the class pointer.
 548 class LoadNKlassNode : public LoadNNode {
 549 private:
 550   friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*);
 551   LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo)
 552     : LoadNNode(nullptr, mem, adr, at, tk, mo) {}
 553 
 554 public:
 555   virtual int Opcode() const;
 556   virtual uint ideal_reg() const { return Op_RegN; }
 557   virtual int store_Opcode() const { return Op_StoreNKlass; }
 558   virtual BasicType memory_type() const { return T_NARROWKLASS; }
 559 
 560   virtual const Type* Value(PhaseGVN* phase) const;
 561   virtual Node* Identity(PhaseGVN* phase);
 562   virtual bool depends_only_on_test() const { return true; }
 563 };
 564 
 565 
 566 //------------------------------StoreNode--------------------------------------
 567 // Store value; requires Store, Address and Value
 568 class StoreNode : public MemNode {
 569 private:
 570   // On platforms with weak memory ordering (e.g., PPC) we distinguish
 571   // stores that can be reordered from those that require release semantics
 572   // to adhere to the Java specification.  The required behaviour is stored
 573   // in this field.
 574   const MemOrd _mo;
 575   // Needed for proper cloning.
 576   virtual uint size_of() const { return sizeof(*this); }
 577 protected:
 578   virtual bool cmp( const Node &n ) const;
 579   virtual bool depends_only_on_test() const { return false; }
 580 
 581   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
 582   Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);
 583 
 584 public:
 585   // We must ensure that stores of object references will be visible

 699       && StoreNode::cmp(n);
 700   }
 701   virtual uint size_of() const { return sizeof(*this); }
 702   const bool _require_atomic_access;  // is piecewise store forbidden?
 703 
 704 public:
 705   StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
 706     : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
 707   virtual int Opcode() const;
 708   virtual BasicType memory_type() const { return T_LONG; }
 709   bool require_atomic_access() const { return _require_atomic_access; }
 710 
 711 #ifndef PRODUCT
 712   virtual void dump_spec(outputStream *st) const {
 713     StoreNode::dump_spec(st);
 714     if (_require_atomic_access)  st->print(" Atomic!");
 715   }
 716 #endif
 717 };
 718 
 719 //------------------------------StoreFNode-------------------------------------
 720 // Store float to memory
 721 class StoreFNode : public StoreNode {
 722 public:
 723   StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
 724     : StoreNode(c, mem, adr, at, val, mo) {}
 725   virtual int Opcode() const;
 726   virtual BasicType memory_type() const { return T_FLOAT; }
 727 };
 728 
 729 //------------------------------StoreDNode-------------------------------------
 730 // Store double to memory
 731 class StoreDNode : public StoreNode {
 732   virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
 733   virtual bool cmp( const Node &n ) const {
 734     return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
 735       && StoreNode::cmp(n);
 736   }
 737   virtual uint size_of() const { return sizeof(*this); }
 738   const bool _require_atomic_access;  // is piecewise store forbidden?

1054 };
1055 
1056 //------------------------------GetAndSetPNode---------------------------
1057 class GetAndSetPNode : public LoadStoreNode {
1058 public:
1059   GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1060   virtual int Opcode() const;
1061 };
1062 
1063 //------------------------------GetAndSetNNode---------------------------
1064 class GetAndSetNNode : public LoadStoreNode {
1065 public:
1066   GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1067   virtual int Opcode() const;
1068 };
1069 
1070 //------------------------------ClearArray-------------------------------------
1071 class ClearArrayNode: public Node {
1072 private:
1073   bool _is_large;
1074 public:
1075   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
1076     : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
1077     init_class_id(Class_ClearArray);
1078   }
1079   virtual int         Opcode() const;
1080   virtual const Type *bottom_type() const { return Type::MEMORY; }
1081   // ClearArray modifies array elements, and so affects only the
1082   // array memory addressed by the bottom_type of its base address.
1083   virtual const class TypePtr *adr_type() const;
1084   virtual Node* Identity(PhaseGVN* phase);
1085   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1086   virtual uint match_edge(uint idx) const;
1087   bool is_large() const { return _is_large; }
1088 
1089   // Clear the given area of an object or array.
1090   // The start offset must always be aligned mod BytesPerInt.
1091   // The end offset must always be aligned mod BytesPerLong.
1092   // Return the new memory.
1093   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1094                             intptr_t start_offset,
1095                             intptr_t end_offset,
1096                             PhaseGVN* phase);
1097   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1098                             intptr_t start_offset,
1099                             Node* end_offset,
1100                             PhaseGVN* phase);
1101   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1102                             Node* start_offset,
1103                             Node* end_offset,
1104                             PhaseGVN* phase);
1105   // Return the allocation's input memory edge if it is a different instance,
1106   // or the node itself if it is the one we are looking for.
1107   static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
1108 };
1109 
1110 //------------------------------MemBar-----------------------------------------
1111 // There are different flavors of Memory Barriers to match the Java Memory
1112 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1113 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1114 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1115 // preceding ref can be moved to after them.  We insert a MemBar-Release
1116 // before a FastUnlock or volatile-store.  All volatiles need to be
1117   // serialized, so we follow all volatile-stores with a MemBar-Volatile to
1118   // separate them from any following volatile-load.
1119 class MemBarNode: public MultiNode {
1120   virtual uint hash() const ;                  // { return NO_HASH; }
1121   virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

1133     TrailingLoadStore,
1134     LeadingLoadStore,
1135     TrailingExpandedArrayCopy
1136   } _kind;
1137 
1138 #ifdef ASSERT
1139   uint _pair_idx;
1140 #endif
1141 
1142 public:
1143   enum {
1144     Precedent = TypeFunc::Parms  // optional edge to force precedence
1145   };
1146   MemBarNode(Compile* C, int alias_idx, Node* precedent);
1147   virtual int Opcode() const = 0;
1148   virtual const class TypePtr *adr_type() const { return _adr_type; }
1149   virtual const Type* Value(PhaseGVN* phase) const;
1150   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1151   virtual uint match_edge(uint idx) const { return 0; }
1152   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1153   virtual Node *match( const ProjNode *proj, const Matcher *m );
1154   // Factory method.  Builds a wide or narrow membar.
1155   // Optional 'precedent' becomes an extra edge if not null.
1156   static MemBarNode* make(Compile* C, int opcode,
1157                           int alias_idx = Compile::AliasIdxBot,
1158                           Node* precedent = nullptr);
1159 
1160   MemBarNode* trailing_membar() const;
1161   MemBarNode* leading_membar() const;
1162 
1163   void set_trailing_load() { _kind = TrailingLoad; }
1164   bool trailing_load() const { return _kind == TrailingLoad; }
1165   bool trailing_store() const { return _kind == TrailingStore; }
1166   bool leading_store() const { return _kind == LeadingStore; }
1167   bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1168   bool leading_load_store() const { return _kind == LeadingLoadStore; }
1169   bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1170   bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1171   bool standalone() const { return _kind == Standalone; }
1172   void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
1173   bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }

New version (with the patch applied):

 109   static DomResult maybe_all_controls_dominate(Node* dom, Node* sub);
 110   static bool all_controls_dominate(Node* dom, Node* sub) {
 111     DomResult dom_result = maybe_all_controls_dominate(dom, sub);
 112     return dom_result == DomResult::Dominate;
 113   }
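
As a hedged usage sketch (not part of this file): a caller that wants to hoist a memory operation can consult the boolean wrapper and treat any non-Dominate answer conservatively. The node names below are hypothetical.

    // Hypothetical caller; `load_ctrl` and `mem_ctrl` are assumed Node*.
    if (MemNode::all_controls_dominate(load_ctrl, mem_ctrl)) {
      // Every control path to `mem_ctrl` passes through `load_ctrl`,
      // so the transformation may proceed.
    } else {
      // Any other DomResult; be conservative and do nothing.
    }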
 114 
 115   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
 116 
 117   // Shared code for Ideal methods:
 118   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit null.
 119 
 120   // Helper function for adr_type() implementations.
 121   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);
 122 
 123   // Raw access function, to allow copying of adr_type efficiently in
 124   // product builds while retaining the debug info for debug builds.
 125   const TypePtr *raw_adr_type() const {
 126     return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
 127   }
 128 
 129 #ifdef ASSERT
 130   void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
 131 #endif
 132 
 133   // Return the barrier data of n, if available, or 0 otherwise.
 134   static uint8_t barrier_data(const Node* n);
 135 
 136   // Map a load or store opcode to its corresponding store opcode.
 137   // (Return -1 if unknown.)
 138   virtual int store_Opcode() const { return -1; }
 139 
 140   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 141   virtual BasicType memory_type() const = 0;
 142   virtual int memory_size() const {
 143 #ifdef ASSERT
 144     return type2aelembytes(memory_type(), true);
 145 #else
 146     return type2aelembytes(memory_type());
 147 #endif
 148   }
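
For illustration (the values follow from type2aelembytes, which maps a BasicType to its element size in bytes):

    // Illustrative only:
    //   LoadNNode::memory_size()  == 4   (T_NARROWOOP)
    //   StoreLNode::memory_size() == 8   (T_LONG)
    // The ASSERT-only `true` argument presumably relaxes an internal
    // sanity check in debug builds; the returned size is the same.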
 149 
 150   uint8_t barrier_data() { return _barrier_data; }
 151   void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }
 152 

 248   // If the load is from Field memory and the pointer is non-null, it might be possible to
 249   // zero out the control input.
 250   // If the offset is constant and the base is an object allocation,
 251   // try to hook me up to the exact initializing store.
 252   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 253 
 254   // Return true if it's possible to split the Load through a Phi merging the bases
 255   bool can_split_through_phi_base(PhaseGVN *phase);
 256 
 257   // Split instance field load through Phi.
 258   Node* split_through_phi(PhaseGVN *phase, bool ignore_missing_instance_id = false);
 259 
 260   // Recover original value from boxed values
 261   Node *eliminate_autobox(PhaseIterGVN *igvn);
 262 
 263   // Compute a new Type for this node.  Basically we just do the pre-check,
 264   // then call the virtual add() to set the type.
 265   virtual const Type* Value(PhaseGVN* phase) const;
 266 
 267   // Common methods for LoadKlass and LoadNKlass nodes.
 268   const Type* klass_value_common(PhaseGVN* phase, bool fold_for_arrays) const;
 269   Node* klass_identity_common(PhaseGVN* phase);
 270 
 271   virtual uint ideal_reg() const;
 272   virtual const Type *bottom_type() const;
 273   // Following method is copied from TypeNode:
 274   void set_type(const Type* t) {
 275     assert(t != nullptr, "sanity");
 276     debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
 277     *(const Type**)&_type = t;   // cast away const-ness
 278     // If this node is in the hash table, make sure it doesn't need a rehash.
 279     assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
 280   }
 281   const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };
 282 
 283   // Do not match memory edge
 284   virtual uint match_edge(uint idx) const;
 285 
 286   // Map a load opcode to its corresponding store opcode.
 287   virtual int store_Opcode() const = 0;
 288 

 500 
 501 //------------------------------LoadPNode--------------------------------------
 502 // Load a pointer from memory (either object or array)
 503 class LoadPNode : public LoadNode {
 504 public:
 505   LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
 506     : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
 507   virtual int Opcode() const;
 508   virtual uint ideal_reg() const { return Op_RegP; }
 509   virtual int store_Opcode() const { return Op_StoreP; }
 510   virtual BasicType memory_type() const { return T_ADDRESS; }
 511 };
 512 
 513 
 514 //------------------------------LoadNNode--------------------------------------
 515 // Load a narrow oop from memory (either object or array)
 516 class LoadNNode : public LoadNode {
 517 public:
 518   LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
 519     : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
 520   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 521   virtual int Opcode() const;
 522   virtual uint ideal_reg() const { return Op_RegN; }
 523   virtual int store_Opcode() const { return Op_StoreN; }
 524   virtual BasicType memory_type() const { return T_NARROWOOP; }
 525 };
 526 
 527 //------------------------------LoadKlassNode----------------------------------
 528 // Load a Klass from an object
 529 class LoadKlassNode : public LoadPNode {
 530   bool _fold_for_arrays;
 531 
 532   virtual uint size_of() const { return sizeof(*this); }
 533   virtual uint hash() const { return LoadNode::hash() + _fold_for_arrays; }
 534   virtual bool cmp( const Node &n ) const {
 535     return _fold_for_arrays == ((LoadKlassNode&)n)._fold_for_arrays && LoadNode::cmp(n);
 536   }
 537 
 538 private:
 539   LoadKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, MemOrd mo, bool fold_for_arrays)
 540     : LoadPNode(nullptr, mem, adr, at, tk, mo), _fold_for_arrays(fold_for_arrays) {}
 541 
 542 public:
 543   virtual int Opcode() const;
 544   virtual const Type* Value(PhaseGVN* phase) const;
 545   virtual Node* Identity(PhaseGVN* phase);
 546   virtual bool depends_only_on_test() const { return true; }
 547 
 548   // Polymorphic factory method:
 549   static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
 550                     const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT, bool fold_for_arrays = true);
 551 };
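
A hedged sketch of the factory in use; `gvn`, `mem`, `adr`, and `at` are assumed to be in scope. Since the constructor is private, the factory is the only public way to build the node, so callers cannot bypass its choice of node variant:

    // Hypothetical call site: load a klass with array folding disabled.
    Node* k = LoadKlassNode::make(gvn, mem, adr, at,
                                  TypeInstKlassPtr::OBJECT,
                                  /*fold_for_arrays=*/false);

Note that _fold_for_arrays participates in hash() and cmp(), so GVN keeps otherwise-identical klass loads with different folding behavior distinct.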
 552 
 553 //------------------------------LoadNKlassNode---------------------------------
 554 // Load a narrow Klass from an object.
 555 // With compact headers, the input address (adr) does not point at the exact
 556 // header position where the (narrow) class pointer is located, but into the
 557 // middle of the mark word (see oopDesc::klass_offset_in_bytes()). This node
 558 // implicitly shifts the loaded value (markWord::klass_shift_at_offset bits) to
 559 // extract the actual class pointer. C2's type system is agnostic on whether the
 560 // input address directly points into the class pointer.
 561 class LoadNKlassNode : public LoadNNode {
 562   bool _fold_for_arrays;
 563 
 564   virtual uint size_of() const { return sizeof(*this); }
 565   virtual uint hash() const { return LoadNode::hash() + _fold_for_arrays; }
 566   virtual bool cmp( const Node &n ) const {
 567     return _fold_for_arrays == ((LoadNKlassNode&)n)._fold_for_arrays && LoadNode::cmp(n);
 568   }
 569 
 570 private:
 571   friend Node* LoadKlassNode::make(PhaseGVN&, Node*, Node*, const TypePtr*, const TypeKlassPtr*, bool fold_for_arrays);
 572   LoadNKlassNode(Node* mem, Node* adr, const TypePtr* at, const TypeNarrowKlass* tk, MemOrd mo, bool fold_for_arrays)
 573     : LoadNNode(nullptr, mem, adr, at, tk, mo), _fold_for_arrays(fold_for_arrays) {}
 574 
 575 public:
 576   virtual int Opcode() const;
 577   virtual uint ideal_reg() const { return Op_RegN; }
 578   virtual int store_Opcode() const { return Op_StoreNKlass; }
 579   virtual BasicType memory_type() const { return T_NARROWKLASS; }
 580 
 581   virtual const Type* Value(PhaseGVN* phase) const;
 582   virtual Node* Identity(PhaseGVN* phase);
 583   virtual bool depends_only_on_test() const { return true; }
 584 };
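
Conceptually (a sketch of the comment above, not the actual matcher code): with compact headers the narrow class pointer occupies the upper bits of the mark word, so the emitted load behaves roughly like the following, where `raw_bits_at` is a hypothetical helper:

    // Pseudo-code for the implicit shift described above.
    narrowKlass nk =
        (narrowKlass)(raw_bits_at(adr) >> markWord::klass_shift_at_offset);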
 585 
 586 //------------------------------StoreNode--------------------------------------
 587 // Store value; requires Store, Address and Value
 588 class StoreNode : public MemNode {
 589 private:
 590   // On platforms with weak memory ordering (e.g., PPC) we distinguish
 591   // stores that can be reordered from those that require release semantics
 592   // to adhere to the Java specification.  The required behaviour is stored
 593   // in this field.
 594   const MemOrd _mo;
 595   // Needed for proper cloning.
 596   virtual uint size_of() const { return sizeof(*this); }
 597 protected:
 598   virtual bool cmp( const Node &n ) const;
 599   virtual bool depends_only_on_test() const { return false; }
 600 
 601   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
 602   Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);
 603 
 604 public:
 605   // We must ensure that stores of object references will be visible

 719       && StoreNode::cmp(n);
 720   }
 721   virtual uint size_of() const { return sizeof(*this); }
 722   const bool _require_atomic_access;  // is piecewise store forbidden?
 723 
 724 public:
 725   StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
 726     : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
 727   virtual int Opcode() const;
 728   virtual BasicType memory_type() const { return T_LONG; }
 729   bool require_atomic_access() const { return _require_atomic_access; }
 730 
 731 #ifndef PRODUCT
 732   virtual void dump_spec(outputStream *st) const {
 733     StoreNode::dump_spec(st);
 734     if (_require_atomic_access)  st->print(" Atomic!");
 735   }
 736 #endif
 737 };
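
A hedged example of why the flag exists: a volatile Java long must never be written as two 32-bit halves on a 32-bit platform. A construction site might look like this, with `c`, `mem`, `adr`, `at`, and `val` assumed in scope and `MemNode::release` assumed as the ordering constant:

    // Hypothetical: request an indivisible 64-bit store.
    StoreLNode* st = new StoreLNode(c, mem, adr, at, val, MemNode::release,
                                    /*require_atomic_access=*/true);

Since cmp() (and the matching hash(), elided above) folds in _require_atomic_access, GVN will not merge an atomic store with a non-atomic one.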
 738 
 739 // Special StoreL for flat stores that emits GC barriers for the field at 'oop_off' in the backend
 740 class StoreLSpecialNode : public StoreNode {
 741 
 742 public:
 743   StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
 744     : StoreNode(c, mem, adr, at, val, mo) {
 745     set_mismatched_access();
 746     if (oop_off != nullptr) {
 747       add_req(oop_off);
 748     }
 749   }
 750   virtual int Opcode() const;
 751   virtual BasicType memory_type() const { return T_LONG; }
 752 
 753   virtual uint match_edge(uint idx) const { return idx == MemNode::Address ||
 754                                                    idx == MemNode::ValueIn ||
 755                                                    idx == MemNode::ValueIn + 1; }
 756 };
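
The widened match_edge() follows from the constructor: when `oop_off` is non-null it is appended with add_req(), landing at index MemNode::ValueIn + 1, so the matcher must consider that edge as well. A hedged sketch of construction, with all arguments assumed in scope and `MemNode::unordered` assumed as the ordering constant:

    // Hypothetical: flat store of a long covering an oop field at `oop_off`;
    // pass nullptr for `oop_off` when no oop field is involved.
    Node* st = new StoreLSpecialNode(c, mem, adr, at, val, oop_off,
                                     MemNode::unordered);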
 757 
 758 //------------------------------StoreFNode-------------------------------------
 759 // Store float to memory
 760 class StoreFNode : public StoreNode {
 761 public:
 762   StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
 763     : StoreNode(c, mem, adr, at, val, mo) {}
 764   virtual int Opcode() const;
 765   virtual BasicType memory_type() const { return T_FLOAT; }
 766 };
 767 
 768 //------------------------------StoreDNode-------------------------------------
 769 // Store double to memory
 770 class StoreDNode : public StoreNode {
 771   virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
 772   virtual bool cmp( const Node &n ) const {
 773     return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
 774       && StoreNode::cmp(n);
 775   }
 776   virtual uint size_of() const { return sizeof(*this); }
 777   const bool _require_atomic_access;  // is piecewise store forbidden?

1093 };
1094 
1095 //------------------------------GetAndSetPNode---------------------------
1096 class GetAndSetPNode : public LoadStoreNode {
1097 public:
1098   GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1099   virtual int Opcode() const;
1100 };
1101 
1102 //------------------------------GetAndSetNNode---------------------------
1103 class GetAndSetNNode : public LoadStoreNode {
1104 public:
1105   GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1106   virtual int Opcode() const;
1107 };
1108 
1109 //------------------------------ClearArray-------------------------------------
1110 class ClearArrayNode: public Node {
1111 private:
1112   bool _is_large;
1113   bool _word_copy_only;
1114 public:
1115   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
1116     : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
1117       _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
1118     init_class_id(Class_ClearArray);
1119   }
1120   virtual int         Opcode() const;
1121   virtual const Type *bottom_type() const { return Type::MEMORY; }
1122   // ClearArray modifies array elements, and so affects only the
1123   // array memory addressed by the bottom_type of its base address.
1124   virtual const class TypePtr *adr_type() const;
1125   virtual Node* Identity(PhaseGVN* phase);
1126   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1127   virtual uint match_edge(uint idx) const;
1128   bool is_large() const { return _is_large; }
1129   bool word_copy_only() const { return _word_copy_only; }
1130 
1131   // Clear the given area of an object or array.
1132   // The start offset must always be aligned mod BytesPerInt.
1133   // The end offset must always be aligned mod BytesPerLong.
1134   // Return the new memory.
1135   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1136                             Node* val,
1137                             Node* raw_val,
1138                             intptr_t start_offset,
1139                             intptr_t end_offset,
1140                             PhaseGVN* phase);
1141   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1142                             Node* val,
1143                             Node* raw_val,
1144                             intptr_t start_offset,
1145                             Node* end_offset,
1146                             PhaseGVN* phase);
1147   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1148                             Node* raw_val,
1149                             Node* start_offset,
1150                             Node* end_offset,
1151                             PhaseGVN* phase);
1152   // Return the allocation's input memory edge if it is a different instance,
1153   // or the node itself if it is the one we are looking for.
1154   static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
1155 };
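
Two notes on the patched class. First, _word_copy_only is derived in the constructor: it is set when `val` is a long that is either non-constant or a non-zero constant, i.e., whenever the fill pattern is only meaningful at word granularity. Second, a hedged sketch of the constant-offset clear_memory() overload during allocation expansion; `ctl`, `rawmem`, `obj`, `header_size`, `size_in_bytes`, and `phase` are hypothetical names, and per the comment above the start offset must be BytesPerInt-aligned and the end offset BytesPerLong-aligned:

    // Hypothetical: zero out the body of a fresh allocation
    // (nullptr val/raw_val presumably select a plain zero fill).
    rawmem = ClearArrayNode::clear_memory(ctl, rawmem, obj,
                                          nullptr /* val */,
                                          nullptr /* raw_val */,
                                          header_size   /* start_offset */,
                                          size_in_bytes /* end_offset */,
                                          phase);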
1156 
1157 //------------------------------MemBar-----------------------------------------
1158 // There are different flavors of Memory Barriers to match the Java Memory
1159 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1160 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1161 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1162 // preceding ref can be moved to after them.  We insert a MemBar-Release
1163 // before a FastUnlock or volatile-store.  All volatiles need to be
1164   // serialized, so we follow all volatile-stores with a MemBar-Volatile to
1165   // separate them from any following volatile-load.
1166 class MemBarNode: public MultiNode {
1167   virtual uint hash() const ;                  // { return NO_HASH; }
1168   virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

1180     TrailingLoadStore,
1181     LeadingLoadStore,
1182     TrailingExpandedArrayCopy
1183   } _kind;
1184 
1185 #ifdef ASSERT
1186   uint _pair_idx;
1187 #endif
1188 
1189 public:
1190   enum {
1191     Precedent = TypeFunc::Parms  // optional edge to force precedence
1192   };
1193   MemBarNode(Compile* C, int alias_idx, Node* precedent);
1194   virtual int Opcode() const = 0;
1195   virtual const class TypePtr *adr_type() const { return _adr_type; }
1196   virtual const Type* Value(PhaseGVN* phase) const;
1197   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1198   virtual uint match_edge(uint idx) const { return 0; }
1199   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1200   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
1201   // Factory method.  Builds a wide or narrow membar.
1202   // Optional 'precedent' becomes an extra edge if not null.
1203   static MemBarNode* make(Compile* C, int opcode,
1204                           int alias_idx = Compile::AliasIdxBot,
1205                           Node* precedent = nullptr);
1206 
1207   MemBarNode* trailing_membar() const;
1208   MemBarNode* leading_membar() const;
1209 
1210   void set_trailing_load() { _kind = TrailingLoad; }
1211   bool trailing_load() const { return _kind == TrailingLoad; }
1212   bool trailing_store() const { return _kind == TrailingStore; }
1213   bool leading_store() const { return _kind == LeadingStore; }
1214   bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1215   bool leading_load_store() const { return _kind == LeadingLoadStore; }
1216   bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1217   bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1218   bool standalone() const { return _kind == Standalone; }
1219   void set_trailing_expanded_array_copy() { _kind = TrailingExpandedArrayCopy; }
1220   bool trailing_expanded_array_copy() const { return _kind == TrailingExpandedArrayCopy; }
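
Finally, a hedged sketch of MemBarNode::make(); `C` is the Compile object and Op_MemBarAcquire is assumed here purely for illustration:

    // Hypothetical: a wide acquire barrier with the default alias index
    // and no precedent edge.
    MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire);

The _kind machinery then lets paired barriers find each other: a node marked with set_trailing_load() answers trailing(), and leading_membar()/trailing_membar() navigate between the members of a pair.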