src/share/vm/opto/memnode.hpp

@@ -87,14 +87,10 @@
   static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
   static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
   // This one should probably be a phase-specific function:
   static bool all_controls_dominate(Node* dom, Node* sub);
 
-  // Find any cast-away of null-ness and keep its control.
-  static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
-  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
-
   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
 
   // Shared code for Ideal methods:
   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.
 

@@ -109,10 +105,16 @@
 #else
     return 0;
 #endif
   }
 
+#ifdef ASSERT
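+  // ASSERT-only setter for the debug-only recorded address type (_adr_type).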
+  void set_raw_adr_type(const TypePtr *t) {
+    _adr_type = t;
+  }
+#endif
+
   // Map a load or store opcode to its corresponding store opcode.
   // (Return -1 if unknown.)
   virtual int store_Opcode() const { return -1; }
 
   // What is the type of the value in memory?  (T_VOID means "unspecified".)

@@ -256,10 +258,26 @@
 #endif
 #ifdef ASSERT
   // Helper function to allow a raw load without control edge for some cases
   static bool is_immutable_value(Node* adr);
 #endif
+
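+  // Recognize a load of the thread-local SATB marking-active flag:
+  // the address is an AddP of ThreadLocal plus the constant queue active-flag offset.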
+  virtual bool is_g1_marking_load() const {
+    const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active());
+    return in(2)->is_AddP() && in(2)->in(2)->Opcode() == Op_ThreadLocal
+      && in(2)->in(3)->is_Con()
+      && in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset;
+  }
+
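+  // Recognize a load of the thread-local Shenandoah gc-state byte,
+  // matched the same way against JavaThread::gc_state_offset().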
+  virtual bool is_shenandoah_state_load() const {
+    if (!UseShenandoahGC) return false;
+    const int state_offset = in_bytes(JavaThread::gc_state_offset());
+    return in(2)->is_AddP() && in(2)->in(2)->Opcode() == Op_ThreadLocal
+      && in(2)->in(3)->is_Con()
+      && in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
+  }
+
 protected:
   const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                      ciKlass* klass) const;
   // depends_only_on_test is almost always true, and needs to be almost always
   // true to enable key hoisting & commoning optimizations.  However, for the

@@ -584,10 +602,12 @@
   // Map a store opcode to its corresponding own opcode, trivially.
   virtual int store_Opcode() const { return Opcode(); }
 
   // have all possible loads of the value stored been optimized away?
   bool value_never_loaded(PhaseTransform *phase) const;
+
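+  // Find the memory barrier that trails this store (see the
+  // leading/trailing membar pairing in MemBarNode below).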
+  MemBarNode* trailing_membar() const;
 };
 
 //------------------------------StoreBNode-------------------------------------
 // Store byte to memory
 class StoreBNode : public StoreNode {

@@ -787,12 +807,16 @@
   virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
 
   virtual const Type *bottom_type() const { return _type; }
   virtual uint ideal_reg() const;
   virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address
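+  // Allow the recorded address type to be updated after construction.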
+  void set_adr_type(const TypePtr *t) {
+    _adr_type = t;
+  }
 
   bool result_not_used() const;
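+  // Find the memory barrier that trails this load-store.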
+  MemBarNode* trailing_membar() const;
 };
 
 class LoadStoreConditionalNode : public LoadStoreNode {
 public:
   enum {

@@ -1042,10 +1066,24 @@
 
   virtual uint size_of() const { return sizeof(*this); }
   // Memory type this node is serializing.  Usually either rawptr or bottom.
   const TypePtr* _adr_type;
 
+  // How is this membar related to a nearby memory access?
+  enum {
+    Standalone,
+    TrailingLoad,
+    TrailingStore,
+    LeadingStore,
+    TrailingLoadStore,
+    LeadingLoadStore
+  } _kind;
+
+#ifdef ASSERT
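+  // Debug-only index tying a leading membar to its trailing counterpart.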
+  uint _pair_idx;
+#endif
+
 public:
   enum {
     Precedent = TypeFunc::Parms  // optional edge to force precedence
   };
   MemBarNode(Compile* C, int alias_idx, Node* precedent);

@@ -1059,10 +1097,28 @@
   // Factory method.  Builds a wide or narrow membar.
   // Optional 'precedent' becomes an extra edge if not null.
   static MemBarNode* make(Compile* C, int opcode,
                           int alias_idx = Compile::AliasIdxBot,
                           Node* precedent = NULL);
+
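+  // Navigate from one membar of a leading/trailing pair to the other.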
+  MemBarNode* trailing_membar() const;
+  MemBarNode* leading_membar() const;
+
+  void set_trailing_load() { _kind = TrailingLoad; }
+  bool trailing_load() const { return _kind == TrailingLoad; }
+  bool trailing_store() const { return _kind == TrailingStore; }
+  bool leading_store() const { return _kind == LeadingStore; }
+  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
+  bool leading_load_store() const { return _kind == LeadingLoadStore; }
+  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
+  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
+  bool standalone() const { return _kind == Standalone; }
+
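+  // Record 'leading' and 'trailing' as the barrier pair around a store
+  // or load-store access.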
+  static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
+  static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);
+
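+  // Remove this membar from the graph, bypassing its control and
+  // memory projections.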
+  void remove(PhaseIterGVN *igvn);
 };
 
 // "Acquire" - no following ref can move before (but earlier refs can
 // follow, like an early Load stalled in cache).  Requires multi-cpu
 // visibility.  Inserted after a volatile load.