src/hotspot/share/opto/memnode.hpp

@@ -124,10 +124,14 @@
    // product builds and retain the debug info for debug builds.
    const TypePtr *raw_adr_type() const {
      return DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
    }
  
+ #ifdef ASSERT
+   void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
+ #endif
+ 
    // Return the barrier data of n, if available, or 0 otherwise.
    static uint8_t barrier_data(const Node* n);
  
    // Map a load or store opcode to its corresponding store opcode.
    // (Return -1 if unknown.)
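
Note on the new debug-only setter: it complements raw_adr_type() above, which
returns the recorded _adr_type in debug builds and nullptr in product builds.
A minimal usage sketch (the call site and variable names are hypothetical):

    #ifdef ASSERT
      // Keep the debug-only recorded address type consistent after the
      // node's address input has been rewired to a new slice.
      mem_node->set_adr_type(new_adr_type);
    #endif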

@@ -511,10 +515,11 @@
  // Load a narrow oop from memory (either object or array)
  class LoadNNode : public LoadNode {
  public:
    LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
      : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
+   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
    virtual int Opcode() const;
    virtual uint ideal_reg() const { return Op_RegN; }
    virtual int store_Opcode() const { return Op_StoreN; }
    virtual BasicType memory_type() const { return T_NARROWOOP; }
  };
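
The new Ideal() declaration lets LoadN nodes apply their own ideal-graph
transformations during (I)GVN. A typical shape for such an override, shown
only as a hedged sketch (the LoadN-specific rewrite itself is elided):

    Node* LoadNNode::Ideal(PhaseGVN* phase, bool can_reshape) {
      // Hypothetical: try a LoadN-specific rewrite here first, returning
      // the replacement node if one applies ...
      // ... otherwise fall back to the transformations shared by all loads.
      return LoadNode::Ideal(phase, can_reshape);
    }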

@@ -560,11 +565,10 @@
    virtual const Type* Value(PhaseGVN* phase) const;
    virtual Node* Identity(PhaseGVN* phase);
    virtual bool depends_only_on_test() const { return true; }
  };
  
- 
  //------------------------------StoreNode--------------------------------------
  // Store value; requires Store, Address and Value
  class StoreNode : public MemNode {
  private:
    // On platforms with weak memory ordering (e.g., PPC) we distinguish

@@ -714,10 +718,29 @@
      if (_require_atomic_access)  st->print(" Atomic!");
    }
  #endif
  };
  
+ // Special StoreL for flat stores that emits a GC barrier for the oop field at offset 'oop_off' in the backend
+ class StoreLSpecialNode : public StoreNode {
+ public:
+   StoreLSpecialNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val, Node* oop_off, MemOrd mo)
+     : StoreNode(c, mem, adr, at, val, mo) {
+     set_mismatched_access();
+     if (oop_off != nullptr) {
+       add_req(oop_off);
+     }
+   }
+   virtual int Opcode() const;
+   virtual BasicType memory_type() const { return T_LONG; }
+ 
+   virtual uint match_edge(uint idx) const { return idx == MemNode::Address ||
+                                                    idx == MemNode::ValueIn ||
+                                                    idx == MemNode::ValueIn + 1; }
+ };
+ 
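A construction sketch for the new node (the surrounding locals such as
'ctrl', 'mem', 'payload' and 'gvn' are assumptions, not code from this
change). When 'oop_off' is non-null it is added as an extra input, which is
why match_edge() above also matches MemNode::ValueIn + 1:

    // Flat store of a 64-bit payload; 'oop_off' tells the backend where an
    // oop sits inside the payload so it can emit the required GC barrier.
    Node* st = new StoreLSpecialNode(ctrl, mem, adr, adr_type, payload,
                                     oop_off, MemNode::unordered);
    st = gvn.transform(st);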
  //------------------------------StoreFNode-------------------------------------
  // Store float to memory
  class StoreFNode : public StoreNode {
  public:
    StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)

@@ -1069,13 +1092,15 @@
  
  //------------------------------ClearArray-------------------------------------
  class ClearArrayNode: public Node {
  private:
    bool _is_large;
+   bool _word_copy_only;
  public:
-   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
-     : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
+   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
+     : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
+       _word_copy_only(val->bottom_type()->isa_long() &&
+                       (!val->bottom_type()->is_long()->is_con() ||
+                        val->bottom_type()->is_long()->get_con() != 0)) {
      init_class_id(Class_ClearArray);
    }
    virtual int         Opcode() const;
    virtual const Type *bottom_type() const { return Type::MEMORY; }
    // ClearArray modifies array elements, and so affects only the

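
The _word_copy_only flag derives from the new 'val' input: it is set when the
initialization value is a long that is not provably the constant zero, so the
clear must copy whole words of that value rather than plain zeroing. A
standalone model of the predicate (TypeLong is reduced to a hypothetical
struct for illustration):

    struct LongConst { bool is_con; long con; };  // stand-in for TypeLong
    static bool word_copy_only(const LongConst* t) {
      // t != nullptr <=> val is a long; word-copy unless it is constant 0.
      return t != nullptr && (!t->is_con || t->con != 0);
    }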
@@ -1083,24 +1108,30 @@
    virtual const class TypePtr *adr_type() const;
    virtual Node* Identity(PhaseGVN* phase);
    virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
    virtual uint match_edge(uint idx) const;
    bool is_large() const { return _is_large; }
+   bool word_copy_only() const { return _word_copy_only; }
  
    // Clear the given area of an object or array.
    // The start offset must always be aligned mod BytesPerInt.
    // The end offset must always be aligned mod BytesPerLong.
    // Return the new memory.
    static Node* clear_memory(Node* control, Node* mem, Node* dest,
+                             Node* val,
+                             Node* raw_val,
                              intptr_t start_offset,
                              intptr_t end_offset,
                              PhaseGVN* phase);
    static Node* clear_memory(Node* control, Node* mem, Node* dest,
+                             Node* val,
+                             Node* raw_val,
                              intptr_t start_offset,
                              Node* end_offset,
                              PhaseGVN* phase);
    static Node* clear_memory(Node* control, Node* mem, Node* dest,
+                             Node* raw_val,
                              Node* start_offset,
                              Node* end_offset,
                              PhaseGVN* phase);
    // Return allocation input memory edge if it is different instance
    // or itself if it is the one we are looking for.

@@ -1148,11 +1179,11 @@
    virtual const class TypePtr *adr_type() const { return _adr_type; }
    virtual const Type* Value(PhaseGVN* phase) const;
    virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
    virtual uint match_edge(uint idx) const { return 0; }
    virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
-   virtual Node *match( const ProjNode *proj, const Matcher *m );
+   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
    // Factory method.  Builds a wide or narrow membar.
    // Optional 'precedent' becomes an extra edge if not null.
    static MemBarNode* make(Compile* C, int opcode,
                            int alias_idx = Compile::AliasIdxBot,
                            Node* precedent = nullptr);
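
match() now also receives the register mask selected by the matcher for the
projection being matched, so subclasses overriding this hook must follow the
widened signature. A hypothetical override shape:

    // Hypothetical subclass kept in sync with the new three-argument hook.
    class HypotheticalMemBarNode : public MemBarNode {
      virtual Node* match(const ProjNode* proj, const Matcher* m,
                          const RegMask* mask) {
        return MemBarNode::match(proj, m, mask);  // defer to the base class
      }
    };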