
src/hotspot/share/opto/memnode.cpp

@@ -21,11 +21,13 @@
   * or visit www.oracle.com if you need additional information or have any
   * questions.
   *
   */
  
+ #include "ci/ciFlatArrayKlass.hpp"
  #include "classfile/javaClasses.hpp"
+ #include "classfile/systemDictionary.hpp"
  #include "compiler/compileLog.hpp"
  #include "gc/shared/barrierSet.hpp"
  #include "gc/shared/c2/barrierSetC2.hpp"
  #include "gc/shared/tlab_globals.hpp"
  #include "memory/allocation.inline.hpp"

@@ -36,10 +38,11 @@
  #include "opto/cfgnode.hpp"
  #include "opto/regalloc.hpp"
  #include "opto/compile.hpp"
  #include "opto/connode.hpp"
  #include "opto/convertnode.hpp"
+ #include "opto/inlinetypenode.hpp"
  #include "opto/loopnode.hpp"
  #include "opto/machnode.hpp"
  #include "opto/matcher.hpp"
  #include "opto/memnode.hpp"
  #include "opto/mempointer.hpp"

@@ -231,10 +234,12 @@
          ->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
        if (t_oop->isa_aryptr()) {
          mem_t = mem_t->is_aryptr()
                       ->cast_to_stable(t_oop->is_aryptr()->is_stable())
                       ->cast_to_size(t_oop->is_aryptr()->size())
+                      ->cast_to_not_flat(t_oop->is_aryptr()->is_not_flat())
+                      ->cast_to_not_null_free(t_oop->is_aryptr()->is_not_null_free())
                       ->with_offset(t_oop->is_aryptr()->offset())
                       ->is_aryptr();
        }
        do_split = mem_t == t_oop;
      }

@@ -257,11 +262,11 @@
      assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
      bool consistent =  adr_check == nullptr || adr_check->empty() ||
                         phase->C->must_alias(adr_check, alias_idx );
      // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
      if( !consistent && adr_check != nullptr && !adr_check->empty() &&
-                tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
+         tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
          adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
          ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
            adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
            adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
        // don't assert if it is dead code.

@@ -1010,11 +1015,11 @@
      bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
      bool is_stable_ary = FoldStableValues &&
                           (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
                           tp->isa_aryptr()->is_stable();
  
-     return (eliminate_boxing && non_volatile) || is_stable_ary;
+     return (eliminate_boxing && non_volatile) || is_stable_ary || tp->is_inlinetypeptr();
    }
  
    return false;
  }
  

@@ -1067,11 +1072,11 @@
        const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
        BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
        if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
  
        uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
-       uint shift  = exact_log2(type2aelembytes(ary_elem));
+       uint shift  = ary_t->is_flat() ? ary_t->flat_log_elem_size() : exact_log2(type2aelembytes(ary_elem));
  
        Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
  #ifdef _LP64
        diff = phase->transform(new ConvI2LNode(diff));
  #endif

@@ -1091,10 +1096,21 @@
      return ld;
    }
    return nullptr;
  }
  
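+ // Read a field value directly from an InlineTypeNode base instead of loading it
+ // from memory. Returns the field value, or null if the access does not qualify.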
+ static Node* see_through_inline_type(PhaseValues* phase, const MemNode* load, Node* base, int offset) {
+   if (!load->is_mismatched_access() && base != nullptr && base->is_InlineType() && offset > oopDesc::klass_offset_in_bytes()) {
+     InlineTypeNode* vt = base->as_InlineType();
+     assert(!vt->is_larval(), "must not load from a larval object");
+     Node* value = vt->field_value_by_offset(offset, true);
+     assert(value != nullptr, "must see some value");
+     return value;
+   }
+ 
+   return nullptr;
+ }
  
  //---------------------------can_see_stored_value------------------------------
  // This routine exists to make sure this set of tests is done the same
  // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
  // will change the graph shape in a way which makes memory alive twice at the

@@ -1103,10 +1119,19 @@
  // of aliasing.
  Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
    Node* ld_adr = in(MemNode::Address);
    intptr_t ld_off = 0;
    Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
+   // Try to see through an InlineTypeNode
+   // LoadN is special because the input is not compressed
+   if (Opcode() != Op_LoadN) {
+     Node* value = see_through_inline_type(phase, this, ld_base, ld_off);
+     if (value != nullptr) {
+       return value;
+     }
+   }
+ 
    Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
    const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
    Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr;
    // This is more general than load from boxing objects.
    if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {

@@ -1186,11 +1211,11 @@
          return nullptr;
        }
        // LoadVector/StoreVector needs additional check to ensure the types match.
        if (st->is_StoreVector()) {
          const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
-         const TypeVect* out_vt = as_LoadVector()->vect_type();
+         const TypeVect* out_vt = is_Load() ? as_LoadVector()->vect_type() : as_StoreVector()->vect_type();
          if (in_vt != out_vt) {
            return nullptr;
          }
        }
        return st->in(MemNode::ValueIn);

@@ -1204,10 +1229,16 @@
          (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
        // return a zero value for the load's basic type
        // (This is one of the few places where a generic PhaseTransform
        // can create new nodes.  Think of it as lazily manifesting
        // virtually pre-existing constants.)
+       Node* init_value = ld_alloc->in(AllocateNode::InitValue);
+       if (init_value != nullptr) {
+         // TODO 8350865 Is this correct for non-all-zero init values? Don't we need field_value_by_offset?
+         return init_value;
+       }
+       assert(ld_alloc->in(AllocateNode::RawInitValue) == nullptr, "init value must be set if raw init value is set");
        if (memory_type() != T_VOID) {
          if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
            // If ReduceBulkZeroing is disabled, we need to check if the allocation does not belong to an
            // ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
            // by the ArrayCopyNode.

@@ -1864,10 +1895,11 @@
    Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
    if (base != nullptr
        && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
      // Check for useless control edge in some common special cases
      if (in(MemNode::Control) != nullptr
+         && !(phase->type(address)->is_inlinetypeptr() && is_mismatched_access())
          && can_remove_control()
          && phase->type(base)->higher_equal(TypePtr::NOTNULL)
          && all_controls_dominate(base, phase->C->start())) {
        // A method-invariant, non-null address (constant or 'this' argument).
        set_req(MemNode::Control, nullptr);

@@ -2062,10 +2094,11 @@
      // In fact, that could have been the original type of p1, and p1 could have
      // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
      // expression (LShiftL quux 3) independently optimized to the constant 8.
      if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
          && (_type->isa_vect() == nullptr)
+         && !ary->is_flat()
          && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
        // t might actually be lower than _type, if _type is a unique
        // concrete subclass of abstract class t.
        if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
          const Type* jt = t->join_speculative(_type);

@@ -2097,20 +2130,24 @@
    } else if (tp->base() == Type::InstPtr) {
      assert( off != Type::OffsetBot ||
              // arrays can be cast to Objects
              !tp->isa_instptr() ||
              tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
+             // Default value load
+             tp->is_instptr()->instance_klass() == ciEnv::current()->Class_klass() ||
              // unsafe field access may not have a constant offset
              C->has_unsafe_access(),
              "Field accesses must be precise" );
      // For oop loads, we expect the _type to be precise.
  
-     // Optimize loads from constant fields.
      const TypeInstPtr* tinst = tp->is_instptr();
+     BasicType bt = memory_type();
+ 
+     // Optimize loads from constant fields.
      ciObject* const_oop = tinst->const_oop();
      if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
-       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());
+       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt);
        if (con_type != nullptr) {
          return con_type;
        }
      }
    } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {

@@ -2153,11 +2190,11 @@
          // The field is Klass::_super_check_offset.  Return its (constant) value.
          // (Folds up type checking code.)
          assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
          return TypeInt::make(klass->super_check_offset());
        }
-       if (UseCompactObjectHeaders) {
+       if (UseCompactObjectHeaders) { // TODO: Should EnableValhalla also take this path?
          if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
            // The field is Klass::_prototype_header. Return its (constant) value.
            assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
            return TypeX::make(klass->prototype_header());
          }

@@ -2231,18 +2268,31 @@
      // so just return a zero of the appropriate type -
      // except if it is vectorized - then we have no zero constant.
      Node *mem = in(MemNode::Memory);
      if (mem->is_Parm() && mem->in(0)->is_Start()) {
        assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
+       // TODO 8350865 This is needed for flat array accesses; somehow the memory of the loads bypasses the intrinsic.
+       // Run TestArrays.test6 in Scenario4; we need more tests for this. TestBasicFunctionality::test20 also needs this.
+       if (tp->isa_aryptr() && tp->is_aryptr()->is_flat() && !UseFieldFlattening) {
+         return _type;
+       }
        return Type::get_zero_type(_type->basic_type());
      }
    }
- 
    if (!UseCompactObjectHeaders) {
      Node* alloc = is_new_object_mark_load();
      if (alloc != nullptr) {
-       return TypeX::make(markWord::prototype().value());
+       if (EnableValhalla) {
+         // The mark word may contain property bits (inline, flat, null-free)
+         Node* klass_node = alloc->in(AllocateNode::KlassNode);
+         const TypeKlassPtr* tkls = phase->type(klass_node)->isa_klassptr();
+         if (tkls != nullptr && tkls->is_loaded() && tkls->klass_is_exact()) {
+           return TypeX::make(tkls->exact_klass()->prototype_header());
+         }
+       } else {
+         return TypeX::make(markWord::prototype().value());
+       }
      }
    }
  
    return _type;
  }

@@ -2387,10 +2437,23 @@
      return TypeInt::make((con << 16) >> 16);
    }
    return LoadNode::Value(phase);
  }
  
+ Node* LoadNNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+   // Loading from an InlineType: find the field input and make an EncodeP
+   Node* addr = in(Address);
+   intptr_t offset;
+   Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset);
+   Node* value = see_through_inline_type(phase, this, base, offset);
+   if (value != nullptr) {
+     return new EncodePNode(value, type());
+   }
+ 
+   return LoadNode::Ideal(phase, can_reshape);
+ }
+ 
  //=============================================================================
  //----------------------------LoadKlassNode::make------------------------------
  // Polymorphic factory method:
  Node* LoadKlassNode::make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
    // sanity check the alias category against the created node type

@@ -2431,28 +2494,37 @@
      if (ik == phase->C->env()->Class_klass()
          && (offset == java_lang_Class::klass_offset() ||
              offset == java_lang_Class::array_klass_offset())) {
        // We are loading a special hidden field from a Class mirror object,
        // the field which points to the VM's Klass metaobject.
-       ciType* t = tinst->java_mirror_type();
+       bool is_null_free_array = false;
+       ciType* t = tinst->java_mirror_type(&is_null_free_array);
        // java_mirror_type returns non-null for compile-time Class constants.
        if (t != nullptr) {
          // constant oop => constant klass
          if (offset == java_lang_Class::array_klass_offset()) {
            if (t->is_void()) {
              // We cannot create a void array.  Since void is a primitive type return null
              // klass.  Users of this result need to do a null check on the returned klass.
              return TypePtr::NULL_PTR;
            }
-           return TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
+           const TypeKlassPtr* tklass = TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
+           if (is_null_free_array) {
+             tklass = tklass->is_aryklassptr()->cast_to_null_free();
+           }
+           return tklass;
          }
          if (!t->is_klass()) {
            // a primitive Class (e.g., int.class) has null for a klass field
            return TypePtr::NULL_PTR;
          }
          // Fold up the load of the hidden field
-         return TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
+         const TypeKlassPtr* tklass = TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
+         if (is_null_free_array) {
+           tklass = tklass->is_aryklassptr()->cast_to_null_free();
+         }
+         return tklass;
        }
        // non-constant mirror, so we can't tell what's going on
      }
      if (!tinst->is_loaded())
        return _type;             // Bail out if not loaded

@@ -2460,11 +2532,11 @@
        return tinst->as_klass_type(true);
      }
    }
  
    // Check for loading klass from an array
-   const TypeAryPtr *tary = tp->isa_aryptr();
+   const TypeAryPtr* tary = tp->isa_aryptr();
    if (tary != nullptr &&
        tary->offset() == oopDesc::klass_offset_in_bytes()) {
      return tary->as_klass_type(true);
    }
  

@@ -3376,12 +3448,12 @@
  
    Node* mem     = in(MemNode::Memory);
    Node* address = in(MemNode::Address);
    Node* value   = in(MemNode::ValueIn);
    // Back-to-back stores to same address?  Fold em up.  Generally
-   // unsafe if I have intervening uses.
-   {
+   // unsafe if I have intervening uses...
+   if (phase->C->get_adr_type(phase->C->get_alias_index(adr_type())) != TypeAryPtr::INLINES) {
      Node* st = mem;
      // If Store 'st' has more than one use, we cannot fold 'st' away.
      // For example, 'st' might be the final state at a conditional
      // return.  Or, 'st' might be used by some node which is live at
      // the same time 'st' is live, which might be unschedulable.  So,

@@ -3397,10 +3469,12 @@
               st->Opcode() == Op_StoreVectorScatter ||
               Opcode() == Op_StoreVectorScatter ||
               phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
               (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
               (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
+              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) ||
+              (st->adr_type()->isa_aryptr() && st->adr_type()->is_aryptr()->is_flat()) || // TODO 8343835
               (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
               "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
  
        if (st->in(MemNode::Address)->eqv_uncast(address) &&
            st->as_Store()->memory_size() <= this->memory_size()) {

@@ -3534,18 +3608,18 @@
    }
  
    // Store of zero anywhere into a freshly-allocated object?
    // Then the store is useless.
    // (It must already have been captured by the InitializeNode.)
-   if (result == this &&
-       ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
+   if (result == this && ReduceFieldZeroing) {
      // a newly allocated object is already all-zeroes everywhere
-     if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
+     if (mem->is_Proj() && mem->in(0)->is_Allocate() &&
+         (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::InitValue) == val)) {
        result = mem;
      }
  
-     if (result == this) {
+     if (result == this && phase->type(val)->is_zero_type()) {
        // the store may also apply to zero-bits in an earlier object
        Node* prev_mem = find_previous_store(phase);
        // Steps (a), (b):  Walk past independent stores to find an exact match.
        if (prev_mem != nullptr) {
          Node* prev_val = can_see_stored_value(prev_mem, phase);

@@ -4045,11 +4119,11 @@
    if (size <= 0 || size % unit != 0)  return nullptr;
    intptr_t count = size / unit;
    // Length too long; communicate this to matchers and assemblers.
    // Assemblers are responsible to produce fast hardware clears for it.
    if (size > InitArrayShortSize) {
-     return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
+     return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true);
    } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
      return nullptr;
    }
    if (!IdealizeClearArrayNode) return nullptr;
    Node *mem = in(1);

@@ -4063,18 +4137,18 @@
    else              atp = atp->add_offset(Type::OffsetBot);
    // Get base for derived pointer purposes
    if( adr->Opcode() != Op_AddP ) Unimplemented();
    Node *base = adr->in(1);
  
-   Node *zero = phase->makecon(TypeLong::ZERO);
+   Node *val = in(4);
    Node *off  = phase->MakeConX(BytesPerLong);
-   mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
+   mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
    count--;
    while( count-- ) {
      mem = phase->transform(mem);
      adr = phase->transform(new AddPNode(base,adr,off));
-     mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
+     mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
    }
    return mem;
  }
  
  //----------------------------step_through----------------------------------

@@ -4104,31 +4178,40 @@
  }
  
  //----------------------------clear_memory-------------------------------------
  // Generate code to initialize object storage to zero.
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+                                    Node* val,
+                                    Node* raw_val,
                                     intptr_t start_offset,
                                     Node* end_offset,
                                     PhaseGVN* phase) {
    intptr_t offset = start_offset;
  
    int unit = BytesPerLong;
    if ((offset % unit) != 0) {
      Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
      adr = phase->transform(adr);
      const TypePtr* atp = TypeRawPtr::BOTTOM;
-     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+     if (val != nullptr) {
+       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
+       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
+     } else {
+       assert(raw_val == nullptr, "val must not be null if raw_val is set");
+       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+     }
      mem = phase->transform(mem);
      offset += BytesPerInt;
    }
    assert((offset % unit) == 0, "");
  
    // Initialize the remaining stuff, if any, with a ClearArray.
-   return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
+   return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase);
  }
  
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+                                    Node* raw_val,
                                     Node* start_offset,
                                     Node* end_offset,
                                     PhaseGVN* phase) {
    if (start_offset == end_offset) {
      // nothing to do

@@ -4147,15 +4230,20 @@
    }
  
    // Bulk clear double-words
    Node* zsize = phase->transform(new SubXNode(zend, zbase) );
    Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
-   mem = new ClearArrayNode(ctl, mem, zsize, adr, false);
+   if (raw_val == nullptr) {
+     raw_val = phase->MakeConX(0);
+   }
+   mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false);
    return phase->transform(mem);
  }
  
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+                                    Node* val,
+                                    Node* raw_val,
                                     intptr_t start_offset,
                                     intptr_t end_offset,
                                     PhaseGVN* phase) {
    if (start_offset == end_offset) {
      // nothing to do

@@ -4166,18 +4254,24 @@
    intptr_t done_offset = end_offset;
    if ((done_offset % BytesPerLong) != 0) {
      done_offset -= BytesPerInt;
    }
    if (done_offset > start_offset) {
-     mem = clear_memory(ctl, mem, dest,
+     mem = clear_memory(ctl, mem, dest, val, raw_val,
                         start_offset, phase->MakeConX(done_offset), phase);
    }
    if (done_offset < end_offset) { // emit the final 32-bit store
      Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
      adr = phase->transform(adr);
      const TypePtr* atp = TypeRawPtr::BOTTOM;
-     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+     if (val != nullptr) {
+       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
+       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
+     } else {
+       assert(raw_val == nullptr, "val must not be null if raw_val is set");
+       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+     }
      mem = phase->transform(mem);
      done_offset += BytesPerInt;
    }
    assert(done_offset == end_offset, "");
    return mem;

@@ -4319,11 +4413,11 @@
    return TypeTuple::MEMBAR;
  }
  
  //------------------------------match------------------------------------------
  // Construct projections for memory.
- Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
+ Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) {
    switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::Memory:
      return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
    }

@@ -4606,11 +4700,13 @@
  
  // convenience function
  // return false if the init contains any stores already
  bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
    InitializeNode* init = initialization();
-   if (init == nullptr || init->is_complete())  return false;
+   if (init == nullptr || init->is_complete()) {
+     return false;
+   }
    init->remove_extra_zeroes();
    // for now, if this allocation has already collected any inits, bail:
    if (init->is_non_zero())  return false;
    init->set_complete(phase);
    return true;

@@ -4790,10 +4886,16 @@
                  // after the InitializeNode. We check the control of the
                  // object/array that is loaded from. If it's the same as
                  // the store control then we cannot capture the store.
                  assert(!n->is_Store(), "2 stores to same slice on same control?");
                  Node* base = other_adr;
+                 if (base->is_Phi()) {
+                   // In rare cases, base may be a PhiNode that reads the same
+                   // memory slice between the InitializeNode and the store.
+                   failed = true;
+                   break;
+                 }
                  assert(base->is_AddP(), "should be addp but is %s", base->Name());
                  base = base->in(AddPNode::Base);
                  if (base != nullptr) {
                    base = base->uncast();
                    if (base->is_Proj() && base->in(0) == alloc) {

@@ -5376,10 +5478,12 @@
        if (zeroes_needed > zeroes_done) {
          intptr_t zsize = zeroes_needed - zeroes_done;
          // Do some incremental zeroing on rawmem, in parallel with inits.
          zeroes_done = align_down(zeroes_done, BytesPerInt);
          rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
+                                               allocation()->in(AllocateNode::InitValue),
+                                               allocation()->in(AllocateNode::RawInitValue),
                                                zeroes_done, zeroes_needed,
                                                phase);
          zeroes_done = zeroes_needed;
          if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
            do_zeroing = false;   // leave the hole, next time

@@ -5435,10 +5539,12 @@
            zeroes_done = size_limit;
        }
      }
      if (zeroes_done < size_limit) {
        rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
+                                             allocation()->in(AllocateNode::InitValue),
+                                             allocation()->in(AllocateNode::RawInitValue),
                                              zeroes_done, size_in_bytes, phase);
      }
    }
  
    set_complete(phase);