src/hotspot/share/opto/memnode.hpp

 109   static bool all_controls_dominate(Node* dom, Node* sub);
 110 
 111   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
 112 
 113   // Shared code for Ideal methods:
 114   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.
 115 
 116   // Helper function for adr_type() implementations.
 117   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
 118 
 119   // Raw access function, to allow copying of adr_type efficiently in
 120   // product builds and retain the debug info for debug builds.
 121   const TypePtr *raw_adr_type() const {
 122 #ifdef ASSERT
 123     return _adr_type;
 124 #else
 125     return 0;
 126 #endif
 127   }
 128 
 129   // Map a load or store opcode to its corresponding store opcode.
 130   // (Return -1 if unknown.)
 131   virtual int store_Opcode() const { return -1; }
 132 
 133   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 134   virtual BasicType memory_type() const = 0;
 135   virtual int memory_size() const {
 136 #ifdef ASSERT
 137     return type2aelembytes(memory_type(), true);
 138 #else
 139     return type2aelembytes(memory_type());
 140 #endif
 141   }
 142 
 143   uint8_t barrier_data() { return _barrier_data; }
 144   void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }
 145 
 146   // Search through memory states which precede this node (load or store).
 147   // Look for an exact match for the address, with no intervening
 148   // aliased stores.
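
The search described above can be pictured with a hypothetical, simplified memory chain (a sketch only, not HotSpot's real data structures nor the actual declaration this comment documents): walk the chain of preceding stores and return the first one whose address matches exactly, giving up as soon as a possibly-aliasing store intervenes.

    #include <cstdint>

    struct Store {                 // hypothetical, simplified memory-state node
      const Store* prev;           // preceding memory state
      intptr_t     adr;            // address written by this store
      bool         may_alias;      // conservatively true if aliasing is unknown
    };

    // Return the closest preceding store to exactly 'adr', or nullptr if an
    // intervening store might alias that address.
    const Store* find_previous_store(const Store* mem, intptr_t adr) {
      for (const Store* s = mem; s != nullptr; s = s->prev) {
        if (s->adr == adr) return s;        // exact match for the address
        if (s->may_alias)  return nullptr;  // intervening aliased store: give up
      }
      return nullptr;
    }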

 530   static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
 531                     const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
 532 };
 533 
 534 //------------------------------LoadNKlassNode---------------------------------
 535 // Load a narrow Klass from an object.
 536 class LoadNKlassNode : public LoadNNode {
 537 public:
 538   LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
 539     : LoadNNode(c, mem, adr, at, tk, mo) {}
 540   virtual int Opcode() const;
 541   virtual uint ideal_reg() const { return Op_RegN; }
 542   virtual int store_Opcode() const { return Op_StoreNKlass; }
 543   virtual BasicType memory_type() const { return T_NARROWKLASS; }
 544 
 545   virtual const Type* Value(PhaseGVN* phase) const;
 546   virtual Node* Identity(PhaseGVN* phase);
 547   virtual bool depends_only_on_test() const { return true; }
 548 };
 549 
 550 
 551 //------------------------------StoreNode--------------------------------------
 552 // Store value; requires Store, Address and Value
 553 class StoreNode : public MemNode {
 554 private:
 555   // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
 556   // stores that can be reordered from those requiring release semantics to
 557   // adhere to the Java specification.  The required behaviour is stored in
 558   // this field.
 559   const MemOrd _mo;
 560   // Needed for proper cloning.
 561   virtual uint size_of() const { return sizeof(*this); }
 562 protected:
 563   virtual bool cmp( const Node &n ) const;
 564   virtual bool depends_only_on_test() const { return false; }
 565 
 566   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
 567   Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);
 568 
 569 public:
 570   // We must ensure that stores of object references will be visible

1114 };
1115 
1116 //------------------------------GetAndSetPNode---------------------------
1117 class GetAndSetPNode : public LoadStoreNode {
1118 public:
1119   GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1120   virtual int Opcode() const;
1121 };
1122 
1123 //------------------------------GetAndSetNNode---------------------------
1124 class GetAndSetNNode : public LoadStoreNode {
1125 public:
1126   GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1127   virtual int Opcode() const;
1128 };
1129 
1130 //------------------------------ClearArray-------------------------------------
1131 class ClearArrayNode: public Node {
1132 private:
1133   bool _is_large;
1134 public:
1135   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
1136     : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
1137     init_class_id(Class_ClearArray);
1138   }
1139   virtual int         Opcode() const;
1140   virtual const Type *bottom_type() const { return Type::MEMORY; }
1141   // ClearArray modifies array elements, and so affects only the
1142   // array memory addressed by the bottom_type of its base address.
1143   virtual const class TypePtr *adr_type() const;
1144   virtual Node* Identity(PhaseGVN* phase);
1145   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1146   virtual uint match_edge(uint idx) const;
1147   bool is_large() const { return _is_large; }
1148 
1149   // Clear the given area of an object or array.
1150   // The start offset must always be aligned mod BytesPerInt.
1151   // The end offset must always be aligned mod BytesPerLong.
1152   // Return the new memory.
1153   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1154                             intptr_t start_offset,
1155                             intptr_t end_offset,
1156                             PhaseGVN* phase);
1157   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1158                             intptr_t start_offset,
1159                             Node* end_offset,
1160                             PhaseGVN* phase);
1161   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1162                             Node* start_offset,
1163                             Node* end_offset,
1164                             PhaseGVN* phase);
1165   // Return allocation input memory edge if it is different instance
1166   // or itself if it is the one we are looking for.
1167   static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
1168 };
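
A minimal standalone sketch (not the HotSpot implementation; the constants merely mirror BytesPerInt/BytesPerLong) of how a caller could honor the alignment contract stated above: the start offset only has to be int-aligned and the end offset long-aligned, so at most one leading 32-bit clear is needed before the remainder can be zeroed in 64-bit words.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static const intptr_t BytesPerInt  = 4;   // mirrors HotSpot's BytesPerInt
    static const intptr_t BytesPerLong = 8;   // mirrors HotSpot's BytesPerLong

    // Zero [start_offset, end_offset) of 'base' under the alignment rules above.
    void clear_range(char* base, intptr_t start_offset, intptr_t end_offset) {
      assert(start_offset % BytesPerInt  == 0);
      assert(end_offset   % BytesPerLong == 0);
      if (start_offset < end_offset && (start_offset % BytesPerLong) != 0) {
        std::memset(base + start_offset, 0, BytesPerInt);   // one leading 32-bit clear
        start_offset += BytesPerInt;
      }
      std::memset(base + start_offset, 0, end_offset - start_offset);  // long-aligned body
    }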
1169 
1170 //------------------------------MemBar-----------------------------------------
1171 // There are different flavors of Memory Barriers to match the Java Memory
1172 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1173 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1174 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1175 // preceding ref can be moved to after them.  We insert a MemBar-Release
1176 // before a FastUnlock or volatile-store.  All volatiles need to be
1177 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
1178 // separate them from any following volatile-load.
1179 class MemBarNode: public MultiNode {
1180   virtual uint hash() const ;                  // { return NO_HASH; }
1181   virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

1193     TrailingLoadStore,
1194     LeadingLoadStore,
1195     TrailingPartialArrayCopy
1196   } _kind;
1197 
1198 #ifdef ASSERT
1199   uint _pair_idx;
1200 #endif
1201 
1202 public:
1203   enum {
1204     Precedent = TypeFunc::Parms  // optional edge to force precedence
1205   };
1206   MemBarNode(Compile* C, int alias_idx, Node* precedent);
1207   virtual int Opcode() const = 0;
1208   virtual const class TypePtr *adr_type() const { return _adr_type; }
1209   virtual const Type* Value(PhaseGVN* phase) const;
1210   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1211   virtual uint match_edge(uint idx) const { return 0; }
1212   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1213   virtual Node *match( const ProjNode *proj, const Matcher *m );
1214   // Factory method.  Builds a wide or narrow membar.
1215   // Optional 'precedent' becomes an extra edge if not null.
1216   static MemBarNode* make(Compile* C, int opcode,
1217                           int alias_idx = Compile::AliasIdxBot,
1218                           Node* precedent = NULL);
1219 
1220   MemBarNode* trailing_membar() const;
1221   MemBarNode* leading_membar() const;
1222 
1223   void set_trailing_load() { _kind = TrailingLoad; }
1224   bool trailing_load() const { return _kind == TrailingLoad; }
1225   bool trailing_store() const { return _kind == TrailingStore; }
1226   bool leading_store() const { return _kind == LeadingStore; }
1227   bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1228   bool leading_load_store() const { return _kind == LeadingLoadStore; }
1229   bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1230   bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1231   bool standalone() const { return _kind == Standalone; }
1232   void set_trailing_partial_array_copy() { _kind = TrailingPartialArrayCopy; }
1233   bool trailing_partial_array_copy() const { return _kind == TrailingPartialArrayCopy; }
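
As an illustration of the acquire/release placement described in the MemBar comment above, here is a minimal C++11 analogy (a sketch only, not HotSpot code and not the JVM's actual barrier machinery): a volatile-style load is followed by an acquire fence, and a volatile-style store is preceded by a release fence and followed by a full fence, corresponding to MemBar-Acquire, MemBar-Release and MemBar-Volatile.

    #include <atomic>

    std::atomic<int> field{0};     // stands in for a Java volatile field

    int volatile_load_like() {
      int v = field.load(std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);  // ~ MemBar-Acquire after the load
      return v;
    }

    void volatile_store_like(int v) {
      std::atomic_thread_fence(std::memory_order_release);  // ~ MemBar-Release before the store
      field.store(v, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ MemBar-Volatile after the store
    }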

1325 public:
1326   MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
1327     : MemBarNode(C, alias_idx, precedent) {}
1328   virtual int Opcode() const;
1329   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1330 };
1331 
1332 class OnSpinWaitNode: public MemBarNode {
1333 public:
1334   OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
1335     : MemBarNode(C, alias_idx, precedent) {}
1336   virtual int Opcode() const;
1337 };
1338 
1339 //------------------------------BlackholeNode----------------------------
1340 // Blackhole all arguments. This node would carry the effects on its arguments
1341 // through the compiler, and would be finally matched to nothing.
1342 class BlackholeNode : public MemBarNode {
1343 public:
1344   BlackholeNode(Compile* C, int alias_idx, Node* precedent)
1345     : MemBarNode(C, alias_idx, precedent) {}
1346   virtual int   Opcode() const;
1347   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1348   const RegMask &in_RegMask(uint idx) const {
1349     // Fake the incoming arguments mask for blackholes: accept all registers
1350     // and all stack slots. This would avoid any redundant register moves
1351     // for blackhole inputs.
1352     return RegMask::All;
1353   }
1354 #ifndef PRODUCT
1355   virtual void format(PhaseRegAlloc* ra, outputStream* st) const;
1356 #endif
1357 };
1358 
1359 // Isolation of object setup after an AllocateNode and before next safepoint.
1360 // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
1361 class InitializeNode: public MemBarNode {
1362   friend class AllocateNode;
1363 
1364   enum {
1365     Incomplete    = 0,

 109   static bool all_controls_dominate(Node* dom, Node* sub);
 110 
 111   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
 112 
 113   // Shared code for Ideal methods:
 114   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.
 115 
 116   // Helper function for adr_type() implementations.
 117   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
 118 
 119   // Raw access function, to allow copying of adr_type efficiently in
 120   // product builds and retain the debug info for debug builds.
 121   const TypePtr *raw_adr_type() const {
 122 #ifdef ASSERT
 123     return _adr_type;
 124 #else
 125     return 0;
 126 #endif
 127   }
 128 
 129 #ifdef ASSERT
 130   void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
 131 #endif
 132 
 133   // Map a load or store opcode to its corresponding store opcode.
 134   // (Return -1 if unknown.)
 135   virtual int store_Opcode() const { return -1; }
 136 
 137   // What is the type of the value in memory?  (T_VOID means "unspecified".)
 138   virtual BasicType memory_type() const = 0;
 139   virtual int memory_size() const {
 140 #ifdef ASSERT
 141     return type2aelembytes(memory_type(), true);
 142 #else
 143     return type2aelembytes(memory_type());
 144 #endif
 145   }
 146 
 147   uint8_t barrier_data() { return _barrier_data; }
 148   void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }
 149 
 150   // Search through memory states which precede this node (load or store).
 151   // Look for an exact match for the address, with no intervening
 152   // aliased stores.

 534   static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
 535                     const TypeKlassPtr* tk = TypeInstKlassPtr::OBJECT);
 536 };
 537 
 538 //------------------------------LoadNKlassNode---------------------------------
 539 // Load a narrow Klass from an object.
 540 class LoadNKlassNode : public LoadNNode {
 541 public:
 542   LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
 543     : LoadNNode(c, mem, adr, at, tk, mo) {}
 544   virtual int Opcode() const;
 545   virtual uint ideal_reg() const { return Op_RegN; }
 546   virtual int store_Opcode() const { return Op_StoreNKlass; }
 547   virtual BasicType memory_type() const { return T_NARROWKLASS; }
 548 
 549   virtual const Type* Value(PhaseGVN* phase) const;
 550   virtual Node* Identity(PhaseGVN* phase);
 551   virtual bool depends_only_on_test() const { return true; }
 552 };
 553 
 554 //------------------------------StoreNode--------------------------------------
 555 // Store value; requires Store, Address and Value
 556 class StoreNode : public MemNode {
 557 private:
 558   // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
 559   // stores that can be reordered from those requiring release semantics to
 560   // adhere to the Java specification.  The required behaviour is stored in
 561   // this field.
 562   const MemOrd _mo;
 563   // Needed for proper cloning.
 564   virtual uint size_of() const { return sizeof(*this); }
 565 protected:
 566   virtual bool cmp( const Node &n ) const;
 567   virtual bool depends_only_on_test() const { return false; }
 568 
 569   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
 570   Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);
 571 
 572 public:
 573   // We must ensure that stores of object references will be visible

1117 };
1118 
1119 //------------------------------GetAndSetPNode---------------------------
1120 class GetAndSetPNode : public LoadStoreNode {
1121 public:
1122   GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1123   virtual int Opcode() const;
1124 };
1125 
1126 //------------------------------GetAndSetNNode---------------------------
1127 class GetAndSetNNode : public LoadStoreNode {
1128 public:
1129   GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
1130   virtual int Opcode() const;
1131 };
1132 
1133 //------------------------------ClearArray-------------------------------------
1134 class ClearArrayNode: public Node {
1135 private:
1136   bool _is_large;
1137   bool _word_copy_only;
1138 public:
1139   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
1140     : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
1141       _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
1142     init_class_id(Class_ClearArray);
1143   }
1144   virtual int         Opcode() const;
1145   virtual const Type *bottom_type() const { return Type::MEMORY; }
1146   // ClearArray modifies array elements, and so affects only the
1147   // array memory addressed by the bottom_type of its base address.
1148   virtual const class TypePtr *adr_type() const;
1149   virtual Node* Identity(PhaseGVN* phase);
1150   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1151   virtual uint match_edge(uint idx) const;
1152   bool is_large() const { return _is_large; }
1153   bool word_copy_only() const { return _word_copy_only; }
1154 
1155   // Clear the given area of an object or array.
1156   // The start offset must always be aligned mod BytesPerInt.
1157   // The end offset must always be aligned mod BytesPerLong.
1158   // Return the new memory.
1159   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1160                             Node* val,
1161                             Node* raw_val,
1162                             intptr_t start_offset,
1163                             intptr_t end_offset,
1164                             PhaseGVN* phase);
1165   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1166                             Node* val,
1167                             Node* raw_val,
1168                             intptr_t start_offset,
1169                             Node* end_offset,
1170                             PhaseGVN* phase);
1171   static Node* clear_memory(Node* control, Node* mem, Node* dest,
1172                             Node* raw_val,
1173                             Node* start_offset,
1174                             Node* end_offset,
1175                             PhaseGVN* phase);
1176   // Return allocation input memory edge if it is different instance
1177   // or itself if it is the one we are looking for.
1178   static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
1179 };
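
A hypothetical standalone restatement (not HotSpot code) of the _word_copy_only condition computed in the constructor above: the flag is set when the fill value is a long that is not known to be the constant zero, i.e. the area cannot be cleared with narrower-than-word stores and must be filled a word at a time.

    #include <cstdint>
    #include <optional>

    // 'known_constant' is empty when the fill value is not a compile-time constant.
    bool word_copy_only(bool value_is_long, std::optional<int64_t> known_constant) {
      return value_is_long && (!known_constant.has_value() || *known_constant != 0);
    }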
1180 
1181 //------------------------------MemBar-----------------------------------------
1182 // There are different flavors of Memory Barriers to match the Java Memory
1183 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
1184 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
1185 // volatile-load.  Monitor-exit and volatile-store act as Release: no
1186 // preceding ref can be moved to after them.  We insert a MemBar-Release
1187 // before a FastUnlock or volatile-store.  All volatiles need to be
1188 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
1189 // separate them from any following volatile-load.
1190 class MemBarNode: public MultiNode {
1191   virtual uint hash() const ;                  // { return NO_HASH; }
1192   virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

1204     TrailingLoadStore,
1205     LeadingLoadStore,
1206     TrailingPartialArrayCopy
1207   } _kind;
1208 
1209 #ifdef ASSERT
1210   uint _pair_idx;
1211 #endif
1212 
1213 public:
1214   enum {
1215     Precedent = TypeFunc::Parms  // optional edge to force precedence
1216   };
1217   MemBarNode(Compile* C, int alias_idx, Node* precedent);
1218   virtual int Opcode() const = 0;
1219   virtual const class TypePtr *adr_type() const { return _adr_type; }
1220   virtual const Type* Value(PhaseGVN* phase) const;
1221   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1222   virtual uint match_edge(uint idx) const { return 0; }
1223   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
1224   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
1225   // Factory method.  Builds a wide or narrow membar.
1226   // Optional 'precedent' becomes an extra edge if not null.
1227   static MemBarNode* make(Compile* C, int opcode,
1228                           int alias_idx = Compile::AliasIdxBot,
1229                           Node* precedent = NULL);
1230 
1231   MemBarNode* trailing_membar() const;
1232   MemBarNode* leading_membar() const;
1233 
1234   void set_trailing_load() { _kind = TrailingLoad; }
1235   bool trailing_load() const { return _kind == TrailingLoad; }
1236   bool trailing_store() const { return _kind == TrailingStore; }
1237   bool leading_store() const { return _kind == LeadingStore; }
1238   bool trailing_load_store() const { return _kind == TrailingLoadStore; }
1239   bool leading_load_store() const { return _kind == LeadingLoadStore; }
1240   bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
1241   bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
1242   bool standalone() const { return _kind == Standalone; }
1243   void set_trailing_partial_array_copy() { _kind = TrailingPartialArrayCopy; }
1244   bool trailing_partial_array_copy() const { return _kind == TrailingPartialArrayCopy; }

1336 public:
1337   MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
1338     : MemBarNode(C, alias_idx, precedent) {}
1339   virtual int Opcode() const;
1340   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1341 };
1342 
1343 class OnSpinWaitNode: public MemBarNode {
1344 public:
1345   OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
1346     : MemBarNode(C, alias_idx, precedent) {}
1347   virtual int Opcode() const;
1348 };
1349 
1350 //------------------------------BlackholeNode----------------------------
1351 // Blackhole all arguments. This node would carry the effects on its arguments
1352 // through the compiler, and would be finally matched to nothing.
1353 class BlackholeNode : public MemBarNode {
1354 public:
1355   BlackholeNode(Compile* C, int alias_idx, Node* precedent)
1356     : MemBarNode(C, alias_idx, precedent) {
1357     init_class_id(Class_Blackhole);
1358   }
1359   virtual int   Opcode() const;
1360   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1361   const RegMask &in_RegMask(uint idx) const {
1362     // Fake the incoming arguments mask for blackholes: accept all registers
1363     // and all stack slots. This would avoid any redundant register moves
1364     // for blackhole inputs.
1365     return RegMask::All;
1366   }
1367 #ifndef PRODUCT
1368   virtual void format(PhaseRegAlloc* ra, outputStream* st) const;
1369 #endif
1370 };
1371 
1372 // Isolation of object setup after an AllocateNode and before next safepoint.
1373 // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
1374 class InitializeNode: public MemBarNode {
1375   friend class AllocateNode;
1376 
1377   enum {
1378     Incomplete    = 0,