
src/share/vm/opto/memnode.hpp

Old version:
  72   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
  73     : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
  74     init_class_id(Class_Mem);
  75     debug_only(_adr_type=at; adr_type();)
  76   }
  77 
  78   static bool check_if_adr_maybe_raw(Node* adr);
  79 
  80 public:
  81   // Helpers for the optimizer.  Documented in memnode.cpp.
  82   static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
  83                                       Node* p2, AllocateNode* a2,
  84                                       PhaseTransform* phase);
  85   static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
  86 
  87   static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  88   static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  89   // This one should probably be a phase-specific function:
  90   static bool all_controls_dominate(Node* dom, Node* sub);
  91 
  92   // Find any cast-away of null-ness and keep its control.
  93   static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  94   virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
  95 
  96   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
  97 
  98   // Shared code for Ideal methods:
  99   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Returns NodeSentinel ((Node*)-1) when the caller should just return NULL.
 100 
 101   // Helper function for adr_type() implementations.
 102   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
 103 
 104   // Raw access function, to allow copying of adr_type efficiently in
 105   // product builds and retain the debug info for debug builds.
 106   const TypePtr *raw_adr_type() const {
 107 #ifdef ASSERT
 108     return _adr_type;
 109 #else
 110     return 0;
 111 #endif
 112   }
 113 
 114   // Map a load or store opcode to its corresponding store opcode.
 115   // (Return -1 if unknown.)
 116   virtual int store_Opcode() const { return -1; }
 117 
 118   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 119   virtual BasicType memory_type() const = 0;
 120   virtual int memory_size() const {
 121 #ifdef ASSERT
 122     return type2aelembytes(memory_type(), true);
 123 #else
 124     return type2aelembytes(memory_type());
 125 #endif
 126   }
 127 
 128   // Search through memory states which precede this node (load or store).
 129   // Look for an exact match for the address, with no intervening
 130   // aliased stores.
 131   Node* find_previous_store(PhaseTransform* phase);
 132 
 133   // Can this node (load or store) accurately see a stored value in


 241     assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
 242   }
 243   const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
 244 
 245   // Do not match memory edge
 246   virtual uint match_edge(uint idx) const;
 247 
 248   // Map a load opcode to its corresponding store opcode.
 249   virtual int store_Opcode() const = 0;
 250 
 251   // Check if the load's memory input is a Phi node with the same control.
 252   bool is_instance_field_load_with_local_phi(Node* ctrl);
 253 
 254 #ifndef PRODUCT
 255   virtual void dump_spec(outputStream *st) const;
 256 #endif
 257 #ifdef ASSERT
 258   // Helper function to allow a raw load without control edge for some cases
 259   static bool is_immutable_value(Node* adr);
 260 #endif
 261 protected:
 262   const Type* load_array_final_field(const TypeKlassPtr *tkls,
 263                                      ciKlass* klass) const;
 264   // depends_only_on_test is almost always true, and needs to be almost always
 265   // true to enable key hoisting & commoning optimizations.  However, for the
 266   // special case of RawPtr loads from TLS top & end, and other loads performed by
 267   // GC barriers, the control edge carries the dependence preventing hoisting past
 268   // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
 269   // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
 270   // which produce results (new raw memory state) inside of loops preventing all
 271   // manner of other optimizations).  Basically, it's ugly but so is the alternative.
 272   // See comment in macro.cpp, around line 125 expand_allocate_common().
 273   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
 274 };
 275 
 276 //------------------------------LoadBNode--------------------------------------
 277 // Load a byte (8 bits, signed) from memory
 278 class LoadBNode : public LoadNode {
 279 public:
 280   LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)


 774 #ifndef PRODUCT
 775   virtual void dump_spec(outputStream *st) const {};
 776 #endif
 777 };
 778 
 779 //------------------------------LoadStoreNode---------------------------
 780 // Note: is_Mem() method returns 'true' for this class.
 781 class LoadStoreNode : public Node {
 782 private:
 783   const Type* const _type;      // What kind of value is loaded?
 784   const TypePtr* _adr_type;     // What kind of memory is being addressed?
 785   virtual uint size_of() const; // Size is bigger
 786 public:
 787   LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
 788   virtual bool depends_only_on_test() const { return false; }
 789   virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
 790 
 791   virtual const Type *bottom_type() const { return _type; }
 792   virtual uint ideal_reg() const;
 793   virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address
 794 
 795   bool result_not_used() const;
 796   MemBarNode* trailing_membar() const;
 797 };
 798 
 799 class LoadStoreConditionalNode : public LoadStoreNode {
 800 public:
 801   enum {
 802     ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
 803   };
 804   LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
 805 };
 806 
 807 //------------------------------StorePConditionalNode---------------------------
 808 // Conditionally store pointer to memory, if no change since prior
 809 // load-locked.  Sets flags for success or failure of the store.
 810 class StorePConditionalNode : public LoadStoreConditionalNode {
 811 public:
 812   StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
 813   virtual int Opcode() const;

New version:
  72   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
  73     : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
  74     init_class_id(Class_Mem);
  75     debug_only(_adr_type=at; adr_type();)
  76   }
  77 
  78   static bool check_if_adr_maybe_raw(Node* adr);
  79 
  80 public:
  81   // Helpers for the optimizer.  Documented in memnode.cpp.
  82   static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
  83                                       Node* p2, AllocateNode* a2,
  84                                       PhaseTransform* phase);
  85   static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
  86 
  87   static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  88   static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  89   // This one should probably be a phase-specific function:
  90   static bool all_controls_dominate(Node* dom, Node* sub);
  91 
  92   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
  93 
  94   // Shared code for Ideal methods:
  95   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Returns NodeSentinel ((Node*)-1) when the caller should just return NULL.
  96 
  97   // Helper function for adr_type() implementations.
  98   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
  99 
 100   // Raw access function, to allow copying of adr_type efficiently in
 101   // product builds and retain the debug info for debug builds.
 102   const TypePtr *raw_adr_type() const {
 103 #ifdef ASSERT
 104     return _adr_type;
 105 #else
 106     return 0;
 107 #endif
 108   }
 109 
 110 #ifdef ASSERT
 111   void set_raw_adr_type(const TypePtr *t) {
 112     _adr_type = t;
 113   }
 114 #endif
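The ASSERT-only setter above pairs with raw_adr_type(): a pass that rewires or replaces a memory node can keep the debug-build _adr_type cross-check consistent. A minimal sketch, assuming two hypothetical MemNode pointers old_mem and new_mem:

  #ifdef ASSERT
    // Carry the debug-only address type over to the replacement node,
    // so its adr_type() verification in debug builds still holds.
    new_mem->set_raw_adr_type(old_mem->raw_adr_type());
  #endif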
 115 
 116   // Map a load or store opcode to its corresponding store opcode.
 117   // (Return -1 if unknown.)
 118   virtual int store_Opcode() const { return -1; }
 119 
 120   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 121   virtual BasicType memory_type() const = 0;
 122   virtual int memory_size() const {
 123 #ifdef ASSERT
 124     return type2aelembytes(memory_type(), true);
 125 #else
 126     return type2aelembytes(memory_type());
 127 #endif
 128   }
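For a concrete sense of memory_size(), here is a sketch assuming HotSpot's standard type2aelembytes table (not part of this file):

  // e.g. for a LoadINode:
  //   memory_type() == T_INT
  //   memory_size() == type2aelembytes(T_INT) == 4   // bytes
  // The extra 'true' argument in debug builds merely relaxes an assert
  // so that T_ADDRESS-typed raw accesses do not trip it.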
 129 
 130   // Search through memory states which precede this node (load or store).
 131   // Look for an exact match for the address, with no intervening
 132   // aliased stores.
 133   Node* find_previous_store(PhaseTransform* phase);
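A hedged sketch of a typical call site; the names st and phase are illustrative, not taken from this file:

  Node* prev = st->find_previous_store(phase);
  if (prev != NULL && prev->is_Store()) {
    // 'prev' writes exactly this address with no intervening aliased
    // store, so a load here could read prev->in(MemNode::ValueIn)
    // instead of going through memory.
  }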
 134 
 135   // Can this node (load or store) accurately see a stored value in


 243     assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
 244   }
 245   const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
 246 
 247   // Do not match memory edge
 248   virtual uint match_edge(uint idx) const;
 249 
 250   // Map a load opcode to its corresponding store opcode.
 251   virtual int store_Opcode() const = 0;
 252 
 253   // Check if the load's memory input is a Phi node with the same control.
 254   bool is_instance_field_load_with_local_phi(Node* ctrl);
 255 
 256 #ifndef PRODUCT
 257   virtual void dump_spec(outputStream *st) const;
 258 #endif
 259 #ifdef ASSERT
 260   // Helper function to allow a raw load without control edge for some cases
 261   static bool is_immutable_value(Node* adr);
 262 #endif
 263 
 264   virtual bool is_g1_marking_load() const {
 265     const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active());
 266     return in(2)->is_AddP() && in(2)->in(2)->Opcode() == Op_ThreadLocal
 267       && in(2)->in(3)->is_Con()
 268       && in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset;
 269   }
 270 
 271   virtual bool is_shenandoah_state_load() const {
 272     if (!UseShenandoahGC) return false;
 273     const int state_offset = in_bytes(JavaThread::gc_state_offset());
 274     return in(2)->is_AddP() && in(2)->in(2)->Opcode() == Op_ThreadLocal
 275       && in(2)->in(3)->is_Con()
 276       && in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
 277   }
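Both predicates above match the same address shape; in(2) is this load's MemNode::Address input, and for the AddP node in(2)/in(3) are its Address/Offset inputs:

  // Load*                                  (this node)
  //   in(2) = AddP
  //            in(2) = ThreadLocal         (current thread base)
  //            in(3) = ConX(offset)        (constant byte offset of the
  //                                         marking / gc-state field)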
 278 
 279 protected:
 280   const Type* load_array_final_field(const TypeKlassPtr *tkls,
 281                                      ciKlass* klass) const;
 282   // depends_only_on_test is almost always true, and needs to be almost always
 283   // true to enable key hoisting & commoning optimizations.  However, for the
 284   // special case of RawPtr loads from TLS top & end, and other loads performed by
 285   // GC barriers, the control edge carries the dependence preventing hoisting past
 286   // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
 287   // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
 288   // which produce results (new raw memory state) inside of loops preventing all
 289   // manner of other optimizations).  Basically, it's ugly but so is the alternative.
 290   // See comment in macro.cpp, around line 125 expand_allocate_common().
 291   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
 292 };
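A minimal illustration of the depends_only_on_test() rule above, assuming ld names some LoadNode*:

  // Ordinary field load: adr_type() != TypeRawPtr::BOTTOM, so
  // ld->depends_only_on_test() is true and the load may be hoisted or
  // commoned once its controlling test dominates it.
  //
  // Raw TLS load (e.g. the G1 marking or Shenandoah gc-state byte
  // above): adr_type() == TypeRawPtr::BOTTOM, so the result is false
  // and the control edge pins the load below any Safepoint.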
 293 
 294 //------------------------------LoadBNode--------------------------------------
 295 // Load a byte (8 bits, signed) from memory
 296 class LoadBNode : public LoadNode {
 297 public:
 298   LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)


 792 #ifndef PRODUCT
 793   virtual void dump_spec(outputStream *st) const {};
 794 #endif
 795 };
 796 
 797 //------------------------------LoadStoreNode---------------------------
 798 // Note: is_Mem() method returns 'true' for this class.
 799 class LoadStoreNode : public Node {
 800 private:
 801   const Type* const _type;      // What kind of value is loaded?
 802   const TypePtr* _adr_type;     // What kind of memory is being addressed?
 803   virtual uint size_of() const; // Size is bigger
 804 public:
 805   LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
 806   virtual bool depends_only_on_test() const { return false; }
 807   virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
 808 
 809   virtual const Type *bottom_type() const { return _type; }
 810   virtual uint ideal_reg() const;
 811   virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address
 812   void set_adr_type(const TypePtr *t) {
 813     _adr_type = t;
 814   }
 815 
 816   bool result_not_used() const;
 817   MemBarNode* trailing_membar() const;
 818 };
 819 
 820 class LoadStoreConditionalNode : public LoadStoreNode {
 821 public:
 822   enum {
 823     ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
 824   };
 825   LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
 826 };
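Input layout of a LoadStoreConditionalNode, per the MemNode input enum (Control=0, Memory=1, Address=2, ValueIn=3):

  // in(MemNode::Control)   control
  // in(MemNode::Memory)    memory state
  // in(MemNode::Address)   location to update
  // in(MemNode::ValueIn)   new value to store
  // in(ExpectedIn)         expected/prior value (index 4), e.g. the
  //                        load-locked result for StorePConditional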
 827 
 828 //------------------------------StorePConditionalNode---------------------------
 829 // Conditionally store pointer to memory, if no change since prior
 830 // load-locked.  Sets flags for success or failure of the store.
 831 class StorePConditionalNode : public LoadStoreConditionalNode {
 832 public:
 833   StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
 834   virtual int Opcode() const;

