
src/share/vm/opto/memnode.hpp

  72   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
  73     : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
  74     init_class_id(Class_Mem);
  75     debug_only(_adr_type=at; adr_type();)
  76   }
  77 
  78   static bool check_if_adr_maybe_raw(Node* adr);
  79 
  80 public:
  81   // Helpers for the optimizer.  Documented in memnode.cpp.
  82   static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
  83                                       Node* p2, AllocateNode* a2,
  84                                       PhaseTransform* phase);
  85   static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
  86 
  87   static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  88   static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  89   // This one should probably be a phase-specific function:
  90   static bool all_controls_dominate(Node* dom, Node* sub);
  91 
  92   // Find any cast-away of null-ness and keep its control.
  93   static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  94   virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
  95 
  96   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
  97 
  98   // Shared code for Ideal methods:
  99   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.
 100 
 101   // Helper function for adr_type() implementations.
 102   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
 103 
 104   // Raw access function, to allow copying of adr_type efficiently in
 105   // product builds and retain the debug info for debug builds.
 106   const TypePtr *raw_adr_type() const {
 107 #ifdef ASSERT
 108     return _adr_type;
 109 #else
 110     return 0;
 111 #endif
 112   }
 113 






 114   // Map a load or store opcode to its corresponding store opcode.
 115   // (Return -1 if unknown.)
 116   virtual int store_Opcode() const { return -1; }
 117 
  118   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 119   virtual BasicType memory_type() const = 0;
 120   virtual int memory_size() const {
 121 #ifdef ASSERT
 122     return type2aelembytes(memory_type(), true);
 123 #else
 124     return type2aelembytes(memory_type());
 125 #endif
 126   }
 127 
 128   // Search through memory states which precede this node (load or store).
 129   // Look for an exact match for the address, with no intervening
 130   // aliased stores.
 131   Node* find_previous_store(PhaseTransform* phase);
 132 
 133   // Can this node (load or store) accurately see a stored value in


 241     assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
 242   }
 243   const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
 244 
 245   // Do not match memory edge
 246   virtual uint match_edge(uint idx) const;
 247 
 248   // Map a load opcode to its corresponding store opcode.
 249   virtual int store_Opcode() const = 0;
 250 
 251   // Check if the load's memory input is a Phi node with the same control.
 252   bool is_instance_field_load_with_local_phi(Node* ctrl);
 253 
 254 #ifndef PRODUCT
 255   virtual void dump_spec(outputStream *st) const;
 256 #endif
 257 #ifdef ASSERT
 258   // Helper function to allow a raw load without control edge for some cases
 259   static bool is_immutable_value(Node* adr);
 260 #endif
















 261 protected:
 262   const Type* load_array_final_field(const TypeKlassPtr *tkls,
 263                                      ciKlass* klass) const;
 264   // depends_only_on_test is almost always true, and needs to be almost always
 265   // true to enable key hoisting & commoning optimizations.  However, for the
 266   // special case of RawPtr loads from TLS top & end, and other loads performed by
 267   // GC barriers, the control edge carries the dependence preventing hoisting past
 268   // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
 269   // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
 270   // which produce results (new raw memory state) inside of loops preventing all
 271   // manner of other optimizations).  Basically, it's ugly but so is the alternative.
 272   // See comment in macro.cpp, around line 125 expand_allocate_common().
 273   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
 274 };
 275 
 276 //------------------------------LoadBNode--------------------------------------
 277 // Load a byte (8bits signed) from memory
 278 class LoadBNode : public LoadNode {
 279 public:
 280   LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)


 569   // zero out the control input.
 570   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 571 
 572   // Compute a new Type for this node.  Basically we just do the pre-check,
 573   // then call the virtual add() to set the type.
 574   virtual const Type *Value( PhaseTransform *phase ) const;
 575 
 576   // Check for identity function on memory (Load then Store at same address)
 577   virtual Node *Identity( PhaseTransform *phase );
 578 
 579   // Do not match memory edge
 580   virtual uint match_edge(uint idx) const;
 581 
 582   virtual const Type *bottom_type() const;  // returns Type::MEMORY
 583 
 584   // Map a store opcode to its corresponding own opcode, trivially.
 585   virtual int store_Opcode() const { return Opcode(); }
 586 
 587   // have all possible loads of the value stored been optimized away?
 588   bool value_never_loaded(PhaseTransform *phase) const;


 589 };
 590 
 591 //------------------------------StoreBNode-------------------------------------
 592 // Store byte to memory
 593 class StoreBNode : public StoreNode {
 594 public:
 595   StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
 596     : StoreNode(c, mem, adr, at, val, mo) {}
 597   virtual int Opcode() const;
 598   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 599   virtual BasicType memory_type() const { return T_BYTE; }
 600 };
 601 
 602 //------------------------------StoreCNode-------------------------------------
 603 // Store char/short to memory
 604 class StoreCNode : public StoreNode {
 605 public:
 606   StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
 607     : StoreNode(c, mem, adr, at, val, mo) {}
 608   virtual int Opcode() const;


 772 #ifndef PRODUCT
 773   virtual void dump_spec(outputStream *st) const {};
 774 #endif
 775 };
 776 
 777 //------------------------------LoadStoreNode---------------------------
 778 // Note: is_Mem() method returns 'true' for this class.
 779 class LoadStoreNode : public Node {
 780 private:
 781   const Type* const _type;      // What kind of value is loaded?
 782   const TypePtr* _adr_type;     // What kind of memory is being addressed?
 783   virtual uint size_of() const; // Size is bigger
 784 public:
 785   LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
 786   virtual bool depends_only_on_test() const { return false; }
 787   virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
 788 
 789   virtual const Type *bottom_type() const { return _type; }
 790   virtual uint ideal_reg() const;
 791   virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address



 792 
 793   bool result_not_used() const;

 794 };
 795 
 796 class LoadStoreConditionalNode : public LoadStoreNode {
 797 public:
 798   enum {
 799     ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
 800   };
 801   LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
 802 };
 803 
 804 //------------------------------StorePConditionalNode---------------------------
 805 // Conditionally store pointer to memory, if no change since prior
 806 // load-locked.  Sets flags for success or failure of the store.
 807 class StorePConditionalNode : public LoadStoreConditionalNode {
 808 public:
 809   StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
 810   virtual int Opcode() const;
 811   // Produces flags
 812   virtual uint ideal_reg() const { return Op_RegFlags; }
 813 };


1027   virtual const Type *Value(PhaseTransform *phase) const;
1028 };
1029 
1030 //------------------------------MemBar-----------------------------------------
1031 // There are different flavors of Memory Barriers to match the Java Memory
1032 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1033 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1034 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1035 // preceding ref can be moved to after them.  We insert a MemBar-Release
1036 // before a FastUnlock or volatile-store.  All volatiles need to be
1037 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
1038 // separate it from any following volatile-load.
1039 class MemBarNode: public MultiNode {
1040   virtual uint hash() const ;                  // { return NO_HASH; }
1041   virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
1042 
1043   virtual uint size_of() const { return sizeof(*this); }
1044   // Memory type this node is serializing.  Usually either rawptr or bottom.
1045   const TypePtr* _adr_type;
1046 














1047 public:
1048   enum {
1049     Precedent = TypeFunc::Parms  // optional edge to force precedence
1050   };
1051   MemBarNode(Compile* C, int alias_idx, Node* precedent);
1052   virtual int Opcode() const = 0;
1053   virtual const class TypePtr *adr_type() const { return _adr_type; }
1054   virtual const Type *Value( PhaseTransform *phase ) const;
1055   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1056   virtual uint match_edge(uint idx) const { return 0; }
1057   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1058   virtual Node *match( const ProjNode *proj, const Matcher *m );
1059   // Factory method.  Builds a wide or narrow membar.
1060   // Optional 'precedent' becomes an extra edge if not null.
1061   static MemBarNode* make(Compile* C, int opcode,
1062                           int alias_idx = Compile::AliasIdxBot,
1063                           Node* precedent = NULL);


















1064 };
1065 
1066 // "Acquire" - no following ref can move before (but earlier refs can
1067 // follow, like an early Load stalled in cache).  Requires multi-cpu
1068 // visibility.  Inserted after a volatile load.
1069 class MemBarAcquireNode: public MemBarNode {
1070 public:
1071   MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
1072     : MemBarNode(C, alias_idx, precedent) {}
1073   virtual int Opcode() const;
1074 };
1075 
1076 // "Acquire" - no following ref can move before (but earlier refs can
1077 // follow, like an early Load stalled in cache).  Requires multi-cpu
1078 // visibility.  Inserted independent of any load, as required
1079 // for intrinsic sun.misc.Unsafe.loadFence().
1080 class LoadFenceNode: public MemBarNode {
1081 public:
1082   LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
1083     : MemBarNode(C, alias_idx, precedent) {}




  72   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
  73     : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
  74     init_class_id(Class_Mem);
  75     debug_only(_adr_type=at; adr_type();)
  76   }
  77 
  78   static bool check_if_adr_maybe_raw(Node* adr);
  79 
  80 public:
  81   // Helpers for the optimizer.  Documented in memnode.cpp.
  82   static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
  83                                       Node* p2, AllocateNode* a2,
  84                                       PhaseTransform* phase);
  85   static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
  86 
  87   static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  88   static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  89   // This one should probably be a phase-specific function:
  90   static bool all_controls_dominate(Node* dom, Node* sub);
  91 




  92   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
  93 
  94   // Shared code for Ideal methods:
  95   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.
  96 
  97   // Helper function for adr_type() implementations.
  98   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
  99 
 100   // Raw access function, to allow copying of adr_type efficiently in
 101   // product builds and retain the debug info for debug builds.
 102   const TypePtr *raw_adr_type() const {
 103 #ifdef ASSERT
 104     return _adr_type;
 105 #else
 106     return 0;
 107 #endif
 108   }
 109 
 110 #ifdef ASSERT
 111   void set_raw_adr_type(const TypePtr *t) {
 112     _adr_type = t;
 113   }
 114 #endif
 115 
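
Note: _adr_type is stored only in ASSERT builds (see the debug_only(...) initialization in the constructor above), which is why raw_adr_type() returns 0 in product builds and why the new set_raw_adr_type() above is ASSERT-only. A minimal standalone sketch of that debug-only-field idiom, with a hypothetical class name:

// Illustrative sketch only, not HotSpot code.
class DebugOnlyFieldSketch {
#ifdef ASSERT
  const TypePtr* _adr_type;            // retained for verification in debug builds
#endif
public:
  const TypePtr* raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;                  // debug builds return the cached copy
#else
    return 0;                          // product builds carry no copy at all
#endif
  }
#ifdef ASSERT
  void set_raw_adr_type(const TypePtr* t) { _adr_type = t; }
#endif
};
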
 116   // Map a load or store opcode to its corresponding store opcode.
 117   // (Return -1 if unknown.)
 118   virtual int store_Opcode() const { return -1; }
 119 
  120   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 121   virtual BasicType memory_type() const = 0;
 122   virtual int memory_size() const {
 123 #ifdef ASSERT
 124     return type2aelembytes(memory_type(), true);
 125 #else
 126     return type2aelembytes(memory_type());
 127 #endif
 128   }
 129 
 130   // Search through memory states which precede this node (load or store).
 131   // Look for an exact match for the address, with no intervening
 132   // aliased stores.
 133   Node* find_previous_store(PhaseTransform* phase);
 134 
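
A hedged sketch of the search find_previous_store() performs, per the comment above: walk the memory chain backwards, return an exact address match, and give up at any store that may alias. The may_alias() helper is a hypothetical stand-in for the detailed checks in memnode.cpp.

// Hypothetical alias test, standing in for the real checks in memnode.cpp.
static bool may_alias(StoreNode* st, Node* adr, PhaseTransform* phase);

static Node* sketch_find_previous_store(MemNode* access, PhaseTransform* phase) {
  Node* adr = access->in(MemNode::Address);
  Node* mem = access->in(MemNode::Memory);
  while (mem != NULL && mem->is_Store()) {
    StoreNode* st = mem->as_Store();
    if (st->in(MemNode::Address) == adr) {
      return st;                       // exact match, no aliased store in between
    }
    if (may_alias(st, adr, phase)) {
      return NULL;                     // intervening store may alias: give up
    }
    mem = st->in(MemNode::Memory);     // provably independent store: keep walking
  }
  return NULL;
}
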
 135   // Can this node (load or store) accurately see a stored value in


 243     assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
 244   }
 245   const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
 246 
 247   // Do not match memory edge
 248   virtual uint match_edge(uint idx) const;
 249 
 250   // Map a load opcode to its corresponding store opcode.
 251   virtual int store_Opcode() const = 0;
 252 
 253   // Check if the load's memory input is a Phi node with the same control.
 254   bool is_instance_field_load_with_local_phi(Node* ctrl);
 255 
 256 #ifndef PRODUCT
 257   virtual void dump_spec(outputStream *st) const;
 258 #endif
 259 #ifdef ASSERT
 260   // Helper function to allow a raw load without control edge for some cases
 261   static bool is_immutable_value(Node* adr);
 262 #endif
 263 
 264   virtual bool is_g1_marking_load() const {
 265     const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active());
 266     return in(2)->is_AddP() && in(2)->in(2)->Opcode() == Op_ThreadLocal
 267       && in(2)->in(3)->is_Con()
 268       && in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset;
 269   }
 270 
 271   virtual bool is_shenandoah_state_load() const {
 272     if (!UseShenandoahGC) return false;
 273     const int state_offset = in_bytes(JavaThread::gc_state_offset());
 274     return in(2)->is_AddP() && in(2)->in(2)->Opcode() == Op_ThreadLocal
 275       && in(2)->in(3)->is_Con()
 276       && in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
 277   }
 278 
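
Both new predicates above test the same node shape: a load whose address is the current thread's base plus a known constant offset. A restatement of that shape using HotSpot's named input indices (MemNode::Address == 2 on a load; AddPNode::Address == 2 and AddPNode::Offset == 3), under a hypothetical function name:

static bool sketch_is_thread_local_load_at(const Node* load, intptr_t offset) {
  Node* addr = load->in(MemNode::Address);                 // the in(2) above
  return addr->is_AddP()
      && addr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal
      && addr->in(AddPNode::Offset)->is_Con()
      && addr->in(AddPNode::Offset)->bottom_type()->is_intptr_t()->get_con() == offset;
}
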
 279 protected:
 280   const Type* load_array_final_field(const TypeKlassPtr *tkls,
 281                                      ciKlass* klass) const;
 282   // depends_only_on_test is almost always true, and needs to be almost always
 283   // true to enable key hoisting & commoning optimizations.  However, for the
 284   // special case of RawPtr loads from TLS top & end, and other loads performed by
 285   // GC barriers, the control edge carries the dependence preventing hoisting past
 286   // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
 287   // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
 288   // which produce results (new raw memory state) inside of loops preventing all
 289   // manner of other optimizations).  Basically, it's ugly but so is the alternative.
 290   // See comment in macro.cpp, around line 125 expand_allocate_common().
 291   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
 292 };
 293 
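
An illustrative C++ fragment (not HotSpot code) of the hoisting and commoning that depends_only_on_test enables, as described in the comment above:

static int sketch_commoning(int* p, bool c) {
  if (p == NULL) return 0;
  // Both arms load *p.  Because the load depends only on the dominating
  // p != NULL test, the optimizer may hoist it above the 'c' diamond and
  // load once.  A load pinned by its control edge (e.g. a raw TLS load
  // that must not float past a Safepoint) forgoes this optimization.
  return c ? *p + 1 : *p - 1;
}
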
 294 //------------------------------LoadBNode--------------------------------------
 295 // Load a byte (8bits signed) from memory
 296 class LoadBNode : public LoadNode {
 297 public:
 298   LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)


 587   // zero out the control input.
 588   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 589 
 590   // Compute a new Type for this node.  Basically we just do the pre-check,
 591   // then call the virtual add() to set the type.
 592   virtual const Type *Value( PhaseTransform *phase ) const;
 593 
 594   // Check for identity function on memory (Load then Store at same address)
 595   virtual Node *Identity( PhaseTransform *phase );
 596 
 597   // Do not match memory edge
 598   virtual uint match_edge(uint idx) const;
 599 
 600   virtual const Type *bottom_type() const;  // returns Type::MEMORY
 601 
 602   // Map a store opcode to its corresponding own opcode, trivially.
 603   virtual int store_Opcode() const { return Opcode(); }
 604 
 605   // have all possible loads of the value stored been optimized away?
 606   bool value_never_loaded(PhaseTransform *phase) const;
 607 
 608   MemBarNode* trailing_membar() const;
 609 };
 610 
 611 //------------------------------StoreBNode-------------------------------------
 612 // Store byte to memory
 613 class StoreBNode : public StoreNode {
 614 public:
 615   StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
 616     : StoreNode(c, mem, adr, at, val, mo) {}
 617   virtual int Opcode() const;
 618   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 619   virtual BasicType memory_type() const { return T_BYTE; }
 620 };
 621 
 622 //------------------------------StoreCNode-------------------------------------
 623 // Store char/short to memory
 624 class StoreCNode : public StoreNode {
 625 public:
 626   StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
 627     : StoreNode(c, mem, adr, at, val, mo) {}
 628   virtual int Opcode() const;


 792 #ifndef PRODUCT
 793   virtual void dump_spec(outputStream *st) const {};
 794 #endif
 795 };
 796 
 797 //------------------------------LoadStoreNode---------------------------
 798 // Note: is_Mem() method returns 'true' for this class.
 799 class LoadStoreNode : public Node {
 800 private:
 801   const Type* const _type;      // What kind of value is loaded?
 802   const TypePtr* _adr_type;     // What kind of memory is being addressed?
 803   virtual uint size_of() const; // Size is bigger
 804 public:
 805   LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
 806   virtual bool depends_only_on_test() const { return false; }
 807   virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
 808 
 809   virtual const Type *bottom_type() const { return _type; }
 810   virtual uint ideal_reg() const;
 811   virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address
 812   void set_adr_type(const TypePtr *t) {
 813     _adr_type = t;
 814   }
 815 
 816   bool result_not_used() const;
 817   MemBarNode* trailing_membar() const;
 818 };
 819 
 820 class LoadStoreConditionalNode : public LoadStoreNode {
 821 public:
 822   enum {
 823     ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
 824   };
 825   LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
 826 };
 827 
 828 //------------------------------StorePConditionalNode---------------------------
 829 // Conditionally store pointer to memory, if no change since prior
 830 // load-locked.  Sets flags for success or failure of the store.
 831 class StorePConditionalNode : public LoadStoreConditionalNode {
 832 public:
 833   StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
 834   virtual int Opcode() const;
 835   // Produces flags
 836   virtual uint ideal_reg() const { return Op_RegFlags; }
 837 };
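
A hedged sketch of the semantics only (the node itself merely describes the operation to the matcher): the store succeeds only if memory is unchanged since the paired load-locked. Compare-and-swap is a close analogy, modulo ABA; real LL/SC fails on any intervening write.

#include <stdint.h>

static bool sketch_store_conditional(intptr_t* addr, intptr_t loaded, intptr_t newval) {
  // Succeeds iff *addr still holds the value the load-locked observed.
  return __sync_bool_compare_and_swap(addr, loaded, newval);  // GCC builtin
}
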


1051   virtual const Type *Value(PhaseTransform *phase) const;
1052 };
1053 
1054 //------------------------------MemBar-----------------------------------------
1055 // There are different flavors of Memory Barriers to match the Java Memory
1056 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1057 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1058 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1059 // preceding ref can be moved to after them.  We insert a MemBar-Release
1060 // before a FastUnlock or volatile-store.  All volatiles need to be
1061 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
1062 // separate it from any following volatile-load.
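
As a rough analogy in standard C++ (an illustration, not HotSpot's actual barrier expansion), the rules above map a volatile load to a load-acquire and a volatile store to a release-store followed by a full fence:

#include <atomic>

static std::atomic<int> shared_field;

static int like_volatile_load() {
  return shared_field.load(std::memory_order_acquire);    // load, then "MemBar-Acquire"
}

static void like_volatile_store(int x) {
  shared_field.store(x, std::memory_order_release);       // "MemBar-Release", then store
  std::atomic_thread_fence(std::memory_order_seq_cst);    // "MemBar-Volatile" vs. later volatile loads
}
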
1063 class MemBarNode: public MultiNode {
1064   virtual uint hash() const ;                  // { return NO_HASH; }
1065   virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
1066 
1067   virtual uint size_of() const { return sizeof(*this); }
1068   // Memory type this node is serializing.  Usually either rawptr or bottom.
1069   const TypePtr* _adr_type;
1070 
1071   // How is this membar related to a nearby memory access?
1072   enum {
1073     Standalone,
1074     TrailingLoad,
1075     TrailingStore,
1076     LeadingStore,
1077     TrailingLoadStore,
1078     LeadingLoadStore
1079   } _kind;
1080 
1081 #ifdef ASSERT
1082   uint _pair_idx;
1083 #endif
1084 
1085 public:
1086   enum {
1087     Precedent = TypeFunc::Parms  // optional edge to force precedence
1088   };
1089   MemBarNode(Compile* C, int alias_idx, Node* precedent);
1090   virtual int Opcode() const = 0;
1091   virtual const class TypePtr *adr_type() const { return _adr_type; }
1092   virtual const Type *Value( PhaseTransform *phase ) const;
1093   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1094   virtual uint match_edge(uint idx) const { return 0; }
1095   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1096   virtual Node *match( const ProjNode *proj, const Matcher *m );
1097   // Factory method.  Builds a wide or narrow membar.
1098   // Optional 'precedent' becomes an extra edge if not null.
1099   static MemBarNode* make(Compile* C, int opcode,
1100                           int alias_idx = Compile::AliasIdxBot,
1101                           Node* precedent = NULL);
1102 
1103   MemBarNode* trailing_membar() const;
1104   MemBarNode* leading_membar() const;
1105 
1106   void set_trailing_load() { _kind = TrailingLoad; }
1107   bool trailing_load() const { return _kind == TrailingLoad; }
1108   bool trailing_store() const { return _kind == TrailingStore; }
1109   bool leading_store() const { return _kind == LeadingStore; }
1110   bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1111   bool leading_load_store() const { return _kind == LeadingLoadStore; }
1112   bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1113   bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1114   bool standalone() const { return _kind == Standalone; }
1115 
1116   static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
1117   static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);
1118 
1119   void remove(PhaseIterGVN *igvn);
1120 };
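
A minimal usage sketch of the pairing API added above, assuming a leading/trailing pair that brackets a volatile store (conceptually MemBarRelease ... StoreX ... MemBarVolatile):

static void sketch_pair_store_membars(MemBarNode* leading, MemBarNode* trailing) {
  MemBarNode::set_store_pair(leading, trailing);       // record the relationship
  assert(leading->leading_store(),   "kind set on the leading membar");
  assert(trailing->trailing_store(), "kind set on the trailing membar");
  assert(!leading->standalone() && !trailing->standalone(), "no longer standalone");
}
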
1121 
1122 // "Acquire" - no following ref can move before (but earlier refs can
1123 // follow, like an early Load stalled in cache).  Requires multi-cpu
1124 // visibility.  Inserted after a volatile load.
1125 class MemBarAcquireNode: public MemBarNode {
1126 public:
1127   MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
1128     : MemBarNode(C, alias_idx, precedent) {}
1129   virtual int Opcode() const;
1130 };
1131 
1132 // "Acquire" - no following ref can move before (but earlier refs can
1133 // follow, like an early Load stalled in cache).  Requires multi-cpu
1134 // visibility.  Inserted independent of any load, as required
1135 // for intrinsic sun.misc.Unsafe.loadFence().
1136 class LoadFenceNode: public MemBarNode {
1137 public:
1138   LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
1139     : MemBarNode(C, alias_idx, precedent) {}

