src/hotspot/share/opto/memnode.hpp (old version)

  94   static bool all_controls_dominate(Node* dom, Node* sub);
  95 
  96   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
  97 
  98   // Shared code for Ideal methods:
  99   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Returns NodeSentinel ((Node*)-1) when the caller should short-circuit to NULL.
 100 
 101   // Helper function for adr_type() implementations.
 102   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
 103 
 104   // Raw access function, to allow copying of adr_type efficiently in
 105   // product builds and retain the debug info for debug builds.
 106   const TypePtr *raw_adr_type() const {
 107 #ifdef ASSERT
 108     return _adr_type;
 109 #else
 110     return 0;
 111 #endif
 112   }
 113 
 114   // Map a load or store opcode to its corresponding store opcode.
 115   // (Return -1 if unknown.)
 116   virtual int store_Opcode() const { return -1; }
 117 
  118   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 119   virtual BasicType memory_type() const = 0;
 120   virtual int memory_size() const {
 121 #ifdef ASSERT
 122     return type2aelembytes(memory_type(), true);
 123 #else
 124     return type2aelembytes(memory_type());
 125 #endif
 126   }
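
A minimal sketch of how these two hooks combine (the helper below is
hypothetical, not part of this file): an optimization can ask whether a
memory node has a store counterpart of the same width, treating -1 as
the "unknown" sentinel.

  // Hypothetical helper: does this memory node have a matching store form?
  static bool has_store_counterpart(const MemNode* n) {
    return n->store_Opcode() != -1;
  }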
 127 
 128   // Search through memory states which precede this node (load or store).
 129   // Look for an exact match for the address, with no intervening
 130   // aliased stores.
 131   Node* find_previous_store(PhaseTransform* phase);
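
A usage sketch, assuming a load node 'ld' and a PhaseGVN* 'phase' (names
are illustrative): when an exact preceding store to the same address is
found, the load can read the stored value directly.

  Node* prev = ld->find_previous_store(phase);
  if (prev != NULL && prev->is_Store()) {
    // The stored value is the store's ValueIn edge.
    Node* value = prev->in(MemNode::ValueIn);
  }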
 132 
 133   // Can this node (load or store) accurately see a stored value in


 512   static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
 513                     const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
 514 };
 515 
 516 //------------------------------LoadNKlassNode---------------------------------
 517 // Load a narrow Klass from an object.
 518 class LoadNKlassNode : public LoadNNode {
 519 public:
 520   LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
 521     : LoadNNode(c, mem, adr, at, tk, mo) {}
 522   virtual int Opcode() const;
 523   virtual uint ideal_reg() const { return Op_RegN; }
 524   virtual int store_Opcode() const { return Op_StoreNKlass; }
 525   virtual BasicType memory_type() const { return T_NARROWKLASS; }
 526 
 527   virtual const Type* Value(PhaseGVN* phase) const;
 528   virtual Node* Identity(PhaseGVN* phase);
 529   virtual bool depends_only_on_test() const { return true; }
 530 };
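
For context, a hedged sketch of how klass loads are created: callers go
through the LoadKlassNode::make factory above, which is expected to pick
the narrow variant when compressed class pointers are in use ('mem' and
'adr' are assumed to be a memory state and a klass-field address).

  Node* k = LoadKlassNode::make(gvn, NULL, mem, adr, TypeInstPtr::KLASS);
  // With UseCompressedClassPointers, 'k' ends up as a DecodeNKlass over
  // a LoadNKlass rather than a plain LoadKlass.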
 531 



























 532 
 533 //------------------------------StoreNode--------------------------------------
 534 // Store value; requires Store, Address and Value
 535 class StoreNode : public MemNode {
 536 private:
  537   // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  538   // between stores that can be reordered and those that require release
  539   // semantics to adhere to the Java specification.  The required behaviour
  540   // is stored in this field.
 541   const MemOrd _mo;
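       // Example (hedged, names illustrative): a volatile field write is
       // created with MemNode::release so it cannot be reordered past the
       // barrier sequence, e.g.
       //   StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_INT, MemNode::release);
       // while an ordinary field write passes MemNode::unordered.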
 542   // Needed for proper cloning.
 543   virtual uint size_of() const { return sizeof(*this); }
 544 protected:
 545   virtual bool cmp( const Node &n ) const;
 546   virtual bool depends_only_on_test() const { return false; }
 547 
 548   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
 549   Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);
 550 
 551 public:


1088 };
1089 
1090 //------------------------------GetAndSetPNode---------------------------
1091 class GetAndSetPNode : public LoadStoreNode {
1092 public:
1093   GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1094   virtual int Opcode() const;
1095 };
1096 
1097 //------------------------------GetAndSetNNode---------------------------
1098 class GetAndSetNNode : public LoadStoreNode {
1099 public:
1100   GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1101   virtual int Opcode() const;
1102 };
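
A hedged sketch of how such a node is used (variable names are
illustrative): the node atomically installs a new value and returns the
previous one, which is why it is modeled as a LoadStoreNode.

  Node* oldval = gvn.transform(
      new GetAndSetPNode(ctl, mem, adr, newval, adr_type, value_type));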
1103 
1104 //------------------------------ClearArray-------------------------------------
1105 class ClearArrayNode: public Node {
1106 private:
 1107   bool _is_large;
 1108 public:
 1109   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
 1110     : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
 1111     init_class_id(Class_ClearArray);
 1112   }
1113   virtual int         Opcode() const;
1114   virtual const Type *bottom_type() const { return Type::MEMORY; }
1115   // ClearArray modifies array elements, and so affects only the
1116   // array memory addressed by the bottom_type of its base address.
1117   virtual const class TypePtr *adr_type() const;
1118   virtual Node* Identity(PhaseGVN* phase);
1119   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1120   virtual uint match_edge(uint idx) const;
 1121   bool is_large() const { return _is_large; }
 1122 
1123   // Clear the given area of an object or array.
1124   // The start offset must always be aligned mod BytesPerInt.
1125   // The end offset must always be aligned mod BytesPerLong.
1126   // Return the new memory.
 1127   static Node* clear_memory(Node* control, Node* mem, Node* dest,
 1128                             intptr_t start_offset,
 1129                             intptr_t end_offset,
 1130                             PhaseGVN* phase);
 1131   static Node* clear_memory(Node* control, Node* mem, Node* dest,
 1132                             intptr_t start_offset,
 1133                             Node* end_offset,
 1134                             PhaseGVN* phase);
 1135   static Node* clear_memory(Node* control, Node* mem, Node* dest,
 1136                             Node* start_offset,
 1137                             Node* end_offset,
 1138                             PhaseGVN* phase);
 1139   // Return the allocation's input memory edge if it is a different instance,
 1140   // or the node itself if it is the one we are looking for.
1141   static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
1142 };
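
A usage sketch under the stated alignment contract (variable names are
hypothetical): zeroing the body of a freshly allocated array, from the
end of the object header up to the array size.

  mem = ClearArrayNode::clear_memory(control, mem, new_array,
                                     header_size,    // aligned mod BytesPerInt
                                     size_in_bytes,  // aligned mod BytesPerLong
                                     phase);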
1143 
1144 //------------------------------MemBar-----------------------------------------
 1145 // There are different flavors of Memory Barriers to match the Java Memory
 1146 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
 1147 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
 1148 // volatile-load.  Monitor-exit and volatile-store act as Releases: no
 1149 // preceding ref can be moved to after them.  We insert a MemBar-Release
 1150 // before a FastUnlock or volatile-store.  All volatiles need to be
 1151 // serialized, so we follow each volatile-store with a MemBar-Volatile to
 1152 // separate it from any following volatile-load.
1153 class MemBarNode: public MultiNode {
1154   virtual uint hash() const ;                  // { return NO_HASH; }
1155   virtual bool cmp( const Node &n ) const ;    // Always fail, except on self


1166     LeadingStore,
1167     TrailingLoadStore,
1168     LeadingLoadStore
1169   } _kind;
1170 
1171 #ifdef ASSERT
1172   uint _pair_idx;
1173 #endif
1174 
1175 public:
1176   enum {
1177     Precedent = TypeFunc::Parms  // optional edge to force precedence
1178   };
1179   MemBarNode(Compile* C, int alias_idx, Node* precedent);
1180   virtual int Opcode() const = 0;
1181   virtual const class TypePtr *adr_type() const { return _adr_type; }
1182   virtual const Type* Value(PhaseGVN* phase) const;
1183   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1184   virtual uint match_edge(uint idx) const { return 0; }
1185   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1186   virtual Node *match( const ProjNode *proj, const Matcher *m );
1187   // Factory method.  Builds a wide or narrow membar.
1188   // Optional 'precedent' becomes an extra edge if not null.
1189   static MemBarNode* make(Compile* C, int opcode,
1190                           int alias_idx = Compile::AliasIdxBot,
1191                           Node* precedent = NULL);
1192 
1193   MemBarNode* trailing_membar() const;
1194   MemBarNode* leading_membar() const;
1195 
1196   void set_trailing_load() { _kind = TrailingLoad; }
1197   bool trailing_load() const { return _kind == TrailingLoad; }
1198   bool trailing_store() const { return _kind == TrailingStore; }
1199   bool leading_store() const { return _kind == LeadingStore; }
1200   bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1201   bool leading_load_store() const { return _kind == LeadingLoadStore; }
1202   bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1203   bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1204   bool standalone() const { return _kind == Standalone; }
1205 
1206   static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);

src/hotspot/share/opto/memnode.hpp (new version)

  94   static bool all_controls_dominate(Node* dom, Node* sub);
  95 
  96   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
  97 
  98   // Shared code for Ideal methods:
  99   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Returns NodeSentinel ((Node*)-1) when the caller should short-circuit to NULL.
 100 
 101   // Helper function for adr_type() implementations.
 102   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
 103 
 104   // Raw access function, to allow copying of adr_type efficiently in
 105   // product builds and retain the debug info for debug builds.
 106   const TypePtr *raw_adr_type() const {
 107 #ifdef ASSERT
 108     return _adr_type;
 109 #else
 110     return 0;
 111 #endif
 112   }
 113 
 114 #ifdef ASSERT
 115   void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
 116 #endif
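       // Example (hedged): together with raw_adr_type() above, this lets a
       // clone carry over the debug-only address type; the statement
       // compiles away entirely in product builds:
       //   DEBUG_ONLY(clone->set_adr_type(orig->raw_adr_type());)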
 117 
 118   // Map a load or store opcode to its corresponding store opcode.
 119   // (Return -1 if unknown.)
 120   virtual int store_Opcode() const { return -1; }
 121 
  122   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 123   virtual BasicType memory_type() const = 0;
 124   virtual int memory_size() const {
 125 #ifdef ASSERT
 126     return type2aelembytes(memory_type(), true);
 127 #else
 128     return type2aelembytes(memory_type());
 129 #endif
 130   }
 131 
 132   // Search through memory states which precede this node (load or store).
 133   // Look for an exact match for the address, with no intervening
 134   // aliased stores.
 135   Node* find_previous_store(PhaseTransform* phase);
 136 
 137   // Can this node (load or store) accurately see a stored value in


 516   static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
 517                     const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
 518 };
 519 
 520 //------------------------------LoadNKlassNode---------------------------------
 521 // Load a narrow Klass from an object.
 522 class LoadNKlassNode : public LoadNNode {
 523 public:
 524   LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
 525     : LoadNNode(c, mem, adr, at, tk, mo) {}
 526   virtual int Opcode() const;
 527   virtual uint ideal_reg() const { return Op_RegN; }
 528   virtual int store_Opcode() const { return Op_StoreNKlass; }
 529   virtual BasicType memory_type() const { return T_NARROWKLASS; }
 530 
 531   virtual const Type* Value(PhaseGVN* phase) const;
 532   virtual Node* Identity(PhaseGVN* phase);
 533   virtual bool depends_only_on_test() const { return true; }
 534 };
 535 
  536 // Retrieve the null free property from an array klass. This is
  537 // treated a bit like a field that would be read from the klass
  538 // structure at runtime, except that the implementation encodes the
  539 // property as a bit in the klass header field of the array. This
  540 // implementation detail is hidden under this node so it makes no
  541 // difference to high level optimizations. At final graph reshaping
  542 // time, this node is turned into the actual logical operations that
  543 // extract the property from the klass pointer. For this to work
  544 // correctly, GetNullFreePropertyNode must take a LoadKlass/LoadNKlass
  545 // input. The Ideal transformation splits GetNullFreePropertyNode
  546 // through phis, and Value returns a constant if the node's input is
  547 // a constant. Together, these two guarantee that
  548 // GetNullFreePropertyNode does indeed have a LoadKlass/LoadNKlass
  549 // input at final graph reshaping time.
 550 class GetNullFreePropertyNode : public Node {
 551 public:
 552   GetNullFreePropertyNode(Node* klass) : Node(NULL, klass) {}
 553   virtual int Opcode() const;
 554   virtual const Type* Value(PhaseGVN* phase) const;
 555   virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
 556   virtual const Type* bottom_type() const {
 557     if (in(1)->bottom_type()->isa_klassptr()) {
 558       return TypeLong::LONG;
 559     }
 560     return TypeInt::INT;
 561   }
 562 };
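
A hedged construction sketch (assuming a GVN context 'gvn' and a klass
load 'k', e.g. from LoadKlassNode::make): the node must consume the klass
load so that final graph reshaping can expand it into the bit extraction
on the klass header described above.

  Node* nf = gvn.transform(new GetNullFreePropertyNode(k));
  // Per bottom_type() above: TypeLong::LONG for a full LoadKlass input,
  // TypeInt::INT for a narrow LoadNKlass input.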
 563 
 564 //------------------------------StoreNode--------------------------------------
 565 // Store value; requires Store, Address and Value
 566 class StoreNode : public MemNode {
 567 private:
  568   // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  569   // between stores that can be reordered and those that require release
  570   // semantics to adhere to the Java specification.  The required behaviour
  571   // is stored in this field.
 572   const MemOrd _mo;
 573   // Needed for proper cloning.
 574   virtual uint size_of() const { return sizeof(*this); }
 575 protected:
 576   virtual bool cmp( const Node &n ) const;
 577   virtual bool depends_only_on_test() const { return false; }
 578 
 579   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
 580   Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);
 581 
 582 public:


1119 };
1120 
1121 //------------------------------GetAndSetPNode---------------------------
1122 class GetAndSetPNode : public LoadStoreNode {
1123 public:
1124   GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1125   virtual int Opcode() const;
1126 };
1127 
1128 //------------------------------GetAndSetNNode---------------------------
1129 class GetAndSetNNode : public LoadStoreNode {
1130 public:
1131   GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1132   virtual int Opcode() const;
1133 };
1134 
1135 //------------------------------ClearArray-------------------------------------
1136 class ClearArrayNode: public Node {
1137 private:
1138   bool _is_large;
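       // True when 'val' is a long fill pattern not known to be zero (see
       // the constructor below); such an array presumably cannot be
       // block-cleared and must be filled word by word.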
1139   bool _word_copy_only;
1140 public:
1141   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
1142     : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
1143       _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
1144     init_class_id(Class_ClearArray);
1145   }
1146   virtual int         Opcode() const;
1147   virtual const Type *bottom_type() const { return Type::MEMORY; }
1148   // ClearArray modifies array elements, and so affects only the
1149   // array memory addressed by the bottom_type of its base address.
1150   virtual const class TypePtr *adr_type() const;
1151   virtual Node* Identity(PhaseGVN* phase);
1152   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1153   virtual uint match_edge(uint idx) const;
1154   bool is_large() const { return _is_large; }
1155   bool word_copy_only() const { return _word_copy_only; }
1156 
1157   // Clear the given area of an object or array.
1158   // The start offset must always be aligned mod BytesPerInt.
1159   // The end offset must always be aligned mod BytesPerLong.
1160   // Return the new memory.
1161   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1162                             Node* val,
1163                             Node* raw_val,
1164                             intptr_t start_offset,
1165                             intptr_t end_offset,
1166                             PhaseGVN* phase);
1167   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1168                             Node* val,
1169                             Node* raw_val,
1170                             intptr_t start_offset,
1171                             Node* end_offset,
1172                             PhaseGVN* phase);
1173   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1174                             Node* raw_val,
1175                             Node* start_offset,
1176                             Node* end_offset,
1177                             PhaseGVN* phase);
 1178   // Return the allocation's input memory edge if it is a different instance,
 1179   // or the node itself if it is the one we are looking for.
1180   static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
1181 };
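
A usage sketch for the extended overloads (hedged: 'val' and 'raw_val'
are taken to carry the fill pattern, with NULL presumably meaning plain
zeroing as before; variable names are hypothetical).

  mem = ClearArrayNode::clear_memory(control, mem, new_array,
                                     val, raw_val,
                                     header_size, size_in_bytes, phase);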
1182 
1183 //------------------------------MemBar-----------------------------------------
 1184 // There are different flavors of Memory Barriers to match the Java Memory
 1185 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
 1186 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
 1187 // volatile-load.  Monitor-exit and volatile-store act as Releases: no
 1188 // preceding ref can be moved to after them.  We insert a MemBar-Release
 1189 // before a FastUnlock or volatile-store.  All volatiles need to be
 1190 // serialized, so we follow each volatile-store with a MemBar-Volatile to
 1191 // separate it from any following volatile-load.
1192 class MemBarNode: public MultiNode {
1193   virtual uint hash() const ;                  // { return NO_HASH; }
1194   virtual bool cmp( const Node &n ) const ;    // Always fail, except on self


1205     LeadingStore,
1206     TrailingLoadStore,
1207     LeadingLoadStore
1208   } _kind;
1209 
1210 #ifdef ASSERT
1211   uint _pair_idx;
1212 #endif
1213 
1214 public:
1215   enum {
1216     Precedent = TypeFunc::Parms  // optional edge to force precedence
1217   };
1218   MemBarNode(Compile* C, int alias_idx, Node* precedent);
1219   virtual int Opcode() const = 0;
1220   virtual const class TypePtr *adr_type() const { return _adr_type; }
1221   virtual const Type* Value(PhaseGVN* phase) const;
1222   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1223   virtual uint match_edge(uint idx) const { return 0; }
1224   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1225   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
1226   // Factory method.  Builds a wide or narrow membar.
1227   // Optional 'precedent' becomes an extra edge if not null.
1228   static MemBarNode* make(Compile* C, int opcode,
1229                           int alias_idx = Compile::AliasIdxBot,
1230                           Node* precedent = NULL);
1231 
1232   MemBarNode* trailing_membar() const;
1233   MemBarNode* leading_membar() const;
1234 
1235   void set_trailing_load() { _kind = TrailingLoad; }
1236   bool trailing_load() const { return _kind == TrailingLoad; }
1237   bool trailing_store() const { return _kind == TrailingStore; }
1238   bool leading_store() const { return _kind == LeadingStore; }
1239   bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1240   bool leading_load_store() const { return _kind == LeadingLoadStore; }
1241   bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1242   bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1243   bool standalone() const { return _kind == Standalone; }
1244 
1245   static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
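
An illustration of the pairing these accessors track (hedged, following
the class comment above): a volatile store is bracketed by a leading and
a trailing barrier, and set_store_pair() records the pairing so each
barrier can later find the other via leading_membar()/trailing_membar().

  MemBar-Release     <- leading membar
  StoreX ...         <- the volatile store itself
  MemBar-Volatile    <- trailing membar, separating it from any
                        following volatile load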

