
src/hotspot/share/opto/memnode.cpp

@@ -21,11 +21,13 @@
   * questions.
   *
   */
  
  #include "precompiled.hpp"
+ #include "ci/ciFlatArrayKlass.hpp"
  #include "classfile/javaClasses.hpp"
+ #include "classfile/systemDictionary.hpp"
  #include "compiler/compileLog.hpp"
  #include "gc/shared/barrierSet.hpp"
  #include "gc/shared/c2/barrierSetC2.hpp"
  #include "gc/shared/tlab_globals.hpp"
  #include "memory/allocation.inline.hpp"

@@ -36,10 +38,11 @@
  #include "opto/cfgnode.hpp"
  #include "opto/regalloc.hpp"
  #include "opto/compile.hpp"
  #include "opto/connode.hpp"
  #include "opto/convertnode.hpp"
+ #include "opto/inlinetypenode.hpp"
  #include "opto/loopnode.hpp"
  #include "opto/machnode.hpp"
  #include "opto/matcher.hpp"
  #include "opto/memnode.hpp"
  #include "opto/mulnode.hpp"

@@ -229,20 +232,23 @@
          ->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
        if (t_oop->is_aryptr()) {
          mem_t = mem_t->is_aryptr()
                       ->cast_to_stable(t_oop->is_aryptr()->is_stable())
                       ->cast_to_size(t_oop->is_aryptr()->size())
+                      ->cast_to_not_flat(t_oop->is_aryptr()->is_not_flat())
+                      ->cast_to_not_null_free(t_oop->is_aryptr()->is_not_null_free())
                       ->with_offset(t_oop->is_aryptr()->offset())
                       ->is_aryptr();
        }
        do_split = mem_t == t_oop;
      }
      if (do_split) {
        // clone the Phi with our address type
        result = mphi->split_out_instance(t_adr, igvn);
      } else {
-       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
+       // TODO 8325106
+       // assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
      }
    }
    return result;
  }
  

@@ -255,11 +261,11 @@
      assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
      bool consistent =  adr_check == nullptr || adr_check->empty() ||
                         phase->C->must_alias(adr_check, alias_idx );
      // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
      if( !consistent && adr_check != nullptr && !adr_check->empty() &&
-                tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
+         tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
          adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
          ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
            adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
            adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
        // don't assert if it is dead code.

@@ -975,11 +981,11 @@
      bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
      bool is_stable_ary = FoldStableValues &&
                           (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
                           tp->isa_aryptr()->is_stable();
  
-     return (eliminate_boxing && non_volatile) || is_stable_ary;
+     return (eliminate_boxing && non_volatile) || is_stable_ary || (tp != nullptr && tp->is_inlinetypeptr());
    }
  
    return false;
  }
  

@@ -1023,11 +1029,11 @@
        const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
        BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
        if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
  
        uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
-       uint shift  = exact_log2(type2aelembytes(ary_elem));
+       uint shift  = ary_t->is_flat() ? ary_t->flat_log_elem_size() : exact_log2(type2aelembytes(ary_elem));
  
        Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
  #ifdef _LP64
        diff = phase->transform(new ConvI2LNode(diff));
  #endif

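
For a flat array the element stride is not simply type2aelembytes(ary_elem), so the shift above comes from the array type's flat_log_elem_size(). A minimal standalone sketch of the offset arithmetic that shift feeds into (the header size, shift values and helper name are made up for illustration):

#include <cassert>
#include <cstdint>

// Hypothetical helper: byte offset of element 'index', given the array header
// size and the log2 of the per-element size (the "shift" computed above).
static intptr_t element_offset(unsigned header, unsigned shift, intptr_t index) {
  return (intptr_t)header + (index << shift);
}

int main() {
  // A regular int[]: element size 4 bytes, so shift == exact_log2(4) == 2.
  assert(element_offset(16, 2, 3) == 16 + 12);
  // A flat array whose elements occupy 16 bytes: shift == 4 (made-up value).
  assert(element_offset(16, 4, 3) == 16 + 48);
  return 0;
}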
@@ -1144,11 +1150,11 @@
          return nullptr;
        }
        // LoadVector/StoreVector needs additional check to ensure the types match.
        if (st->is_StoreVector()) {
          const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
-         const TypeVect* out_vt = as_LoadVector()->vect_type();
+         const TypeVect* out_vt = is_Load() ? as_LoadVector()->vect_type() : as_StoreVector()->vect_type();
          if (in_vt != out_vt) {
            return nullptr;
          }
        }
        return st->in(MemNode::ValueIn);

@@ -1162,10 +1168,15 @@
          (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
        // return a zero value for the load's basic type
        // (This is one of the few places where a generic PhaseTransform
        // can create new nodes.  Think of it as lazily manifesting
        // virtually pre-existing constants.)
+       Node* default_value = ld_alloc->in(AllocateNode::DefaultValue);
+       if (default_value != nullptr) {
+         return default_value;
+       }
+       assert(ld_alloc->in(AllocateNode::RawDefaultValue) == nullptr, "must not have a raw default value without an oop default value");
        if (memory_type() != T_VOID) {
          if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
            // If ReduceBulkZeroing is disabled, we need to check if the allocation does not belong to an
            // ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
            // by the ArrayCopyNode.

@@ -1229,10 +1240,27 @@
  }
  
  //------------------------------Identity---------------------------------------
  // Loads are identity if previous store is to same address
  Node* LoadNode::Identity(PhaseGVN* phase) {
+   // Loading from an InlineType? The InlineType has the values of
+   // all fields as input. Look for the field with matching offset.
+   Node* addr = in(Address);
+   intptr_t offset;
+   Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset);
+   if (base != nullptr && base->is_InlineType() && offset > oopDesc::klass_offset_in_bytes()) {
+     Node* value = base->as_InlineType()->field_value_by_offset((int)offset, true);
+     if (value != nullptr) {
+       if (Opcode() == Op_LoadN) {
+         // Encode oop value if we are loading a narrow oop
+         assert(!phase->type(value)->isa_narrowoop(), "should already be decoded");
+         value = phase->transform(new EncodePNode(value, bottom_type()));
+       }
+       return value;
+     }
+   }
+ 
    // If the previous store-maker is the right kind of Store, and the store is
    // to the same address, then we are equal to the value stored.
    Node* mem = in(Memory);
    Node* value = can_see_stored_value(mem, phase);
    if( value ) {

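
The Identity change above answers a load whose base is an InlineType node by looking up the field value that is already an input of that node, instead of walking the memory graph. A rough standalone analogy (the struct, offsets and helper are hypothetical, not the HotSpot API):

#include <cassert>
#include <cstddef>

// Hypothetical flattened value whose field values are all known directly,
// loosely analogous to an InlineTypeNode with one input per field.
struct PointValue {
  int x;
  int y;
  // Analogue of field_value_by_offset(): return the field stored at 'offset'.
  int field_value_by_offset(std::size_t offset) const {
    if (offset == offsetof(PointValue, x)) return x;
    if (offset == offsetof(PointValue, y)) return y;
    return 0;  // unknown offset
  }
};

int main() {
  PointValue p{3, 4};
  // A "load" at the offset of y folds straight to the known field value.
  assert(p.field_value_by_offset(offsetof(PointValue, y)) == 4);
  return 0;
}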
@@ -1987,10 +2015,11 @@
      // In fact, that could have been the original type of p1, and p1 could have
      // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
      // expression (LShiftL quux 3) independently optimized to the constant 8.
      if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
          && (_type->isa_vect() == nullptr)
+         && !ary->is_flat()
          && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
        // t might actually be lower than _type, if _type is a unique
        // concrete subclass of abstract class t.
        if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
          const Type* jt = t->join_speculative(_type);

@@ -2022,20 +2051,24 @@
    } else if (tp->base() == Type::InstPtr) {
      assert( off != Type::OffsetBot ||
              // arrays can be cast to Objects
              !tp->isa_instptr() ||
              tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
+             // Default value load
+             tp->is_instptr()->instance_klass() == ciEnv::current()->Class_klass() ||
              // unsafe field access may not have a constant offset
              C->has_unsafe_access(),
              "Field accesses must be precise" );
      // For oop loads, we expect the _type to be precise.
  
-     // Optimize loads from constant fields.
      const TypeInstPtr* tinst = tp->is_instptr();
+     BasicType bt = memory_type();
+ 
+     // Optimize loads from constant fields.
      ciObject* const_oop = tinst->const_oop();
      if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
-       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());
+       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt);
        if (con_type != nullptr) {
          return con_type;
        }
      }
    } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {

@@ -2046,25 +2079,42 @@
             // also allow array-loading from the primary supertype
             // array during subtype checks
             Opcode() == Op_LoadKlass,
             "Field accesses must be precise");
      // For klass/static loads, we expect the _type to be precise
-   } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
-     /* With mirrors being an indirect in the Klass*
-      * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
-      * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
-      *
-      * So check the type and klass of the node before the LoadP.
-      */
-     Node* adr2 = adr->in(MemNode::Address);
-     const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
-     if (tkls != nullptr && !StressReflectiveCode) {
-       if (tkls->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
-         ciKlass* klass = tkls->exact_klass();
-         assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
-         assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
-         return TypeInstPtr::make(klass->java_mirror());
+   } else if (tp->base() == Type::RawPtr && !StressReflectiveCode) {
+     if (adr->is_Load() && off == 0) {
+       /* With mirrors being an indirect in the Klass*
+        * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
+        * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
+        *
+        * So check the type and klass of the node before the LoadP.
+        */
+       Node* adr2 = adr->in(MemNode::Address);
+       const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
+       if (tkls != nullptr) {
+         if (tkls->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
+           ciKlass* klass = tkls->exact_klass();
+           assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
+           assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
+           return TypeInstPtr::make(klass->java_mirror());
+         }
+       }
+     } else {
+       // Check for a load of the default value offset from the InlineKlassFixedBlock:
+       // LoadI(LoadP(inline_klass, adr_inlineklass_fixed_block_offset), default_value_offset_offset)
+       // TODO 8325106 remove?
+       intptr_t offset = 0;
+       Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
+       if (base != nullptr && base->is_Load() && offset == in_bytes(InlineKlass::default_value_offset_offset())) {
+         const TypeKlassPtr* tkls = phase->type(base->in(MemNode::Address))->isa_klassptr();
+         if (tkls != nullptr && tkls->is_loaded() && tkls->klass_is_exact() && tkls->exact_klass()->is_inlinetype() &&
+             tkls->offset() == in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())) {
+           assert(base->Opcode() == Op_LoadP, "must load an oop from klass");
+           assert(Opcode() == Op_LoadI, "must load an int from fixed block");
+           return TypeInt::make(tkls->exact_klass()->as_inline_klass()->default_value_offset());
+         }
        }
      }
    }
  
    const TypeKlassPtr *tkls = tp->isa_klassptr();

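
The pattern recognized here is LoadI(AddP(LoadP(inline_klass, adr_inlineklass_fixed_block_offset), default_value_offset_offset)); once the inline klass is known exactly, both loads fold to a constant. A self-contained sketch of that two-level fold (the structs are stand-ins, not the real InlineKlassFixedBlock layout):

#include <cassert>

// Stand-ins for the two-load pattern: a klass points at a fixed block, and
// the fixed block holds the offset of the default value inside the mirror.
struct FixedBlock { int default_value_offset; };
struct Klass      { const FixedBlock* fixed_block; };

// When the klass is known exactly, both loads fold to a compile-time constant.
static int fold_default_value_offset(const Klass& exact_klass) {
  return exact_klass.fixed_block->default_value_offset;  // LoadI(LoadP(...))
}

int main() {
  FixedBlock fb{128};
  Klass k{&fb};
  assert(fold_default_value_offset(k) == 128);
  return 0;
}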
@@ -2166,14 +2216,22 @@
      if (mem->is_Parm() && mem->in(0)->is_Start()) {
        assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
        return Type::get_zero_type(_type->basic_type());
      }
    }
- 
    Node* alloc = is_new_object_mark_load();
    if (alloc != nullptr) {
-     return TypeX::make(markWord::prototype().value());
+     if (EnableValhalla) {
+       // The mark word may contain property bits (inline, flat, null-free)
+       Node* klass_node = alloc->in(AllocateNode::KlassNode);
+       const TypeKlassPtr* tkls = phase->type(klass_node)->isa_klassptr();
+       if (tkls != nullptr && tkls->is_loaded() && tkls->klass_is_exact()) {
+         return TypeX::make(tkls->exact_klass()->prototype_header().value());
+       }
+     } else {
+       return TypeX::make(markWord::prototype().value());
+     }
    }
  
    return _type;
  }
  

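
With Valhalla the constant mark word of a freshly allocated object can carry klass-specific property bits, so the fold uses the exact klass's prototype_header() instead of the global markWord::prototype(). A self-contained sketch of OR-ing property bits into a prototype word (bit positions are invented for illustration and do not match the real markWord layout):

#include <cassert>
#include <cstdint>

// Invented bit layout, for illustration only (not the real markWord format).
constexpr uint64_t kUnlockedPattern = 0x1;
constexpr uint64_t kInlineTypeBit   = 1ull << 2;
constexpr uint64_t kFlatArrayBit    = 1ull << 3;

// Per-klass prototype: the base pattern plus whatever properties the klass has.
static uint64_t prototype_header(bool is_inline_type, bool is_flat_array) {
  uint64_t mark = kUnlockedPattern;
  if (is_inline_type) mark |= kInlineTypeBit;
  if (is_flat_array)  mark |= kFlatArrayBit;
  return mark;
}

int main() {
  // A plain identity class keeps the base pattern...
  assert(prototype_header(false, false) == kUnlockedPattern);
  // ...while a value class folds its property bit into the constant mark word.
  assert((prototype_header(true, false) & kInlineTypeBit) != 0);
  return 0;
}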
@@ -2320,11 +2378,12 @@
  }
  
  //=============================================================================
  //----------------------------LoadKlassNode::make------------------------------
  // Polymorphic factory method:
- Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
+ Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
+                           const TypeKlassPtr* tk) {
    // sanity check the alias category against the created node type
    const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
    assert(adr_type != nullptr, "expecting TypeKlassPtr");
  #ifdef _LP64
    if (adr_type->is_ptr_to_narrowklass()) {

@@ -2396,11 +2455,11 @@
        return tinst->as_klass_type(true);
      }
    }
  
    // Check for loading klass from an array
-   const TypeAryPtr *tary = tp->isa_aryptr();
+   const TypeAryPtr* tary = tp->isa_aryptr();
    if (tary != nullptr &&
        tary->offset() == oopDesc::klass_offset_in_bytes()) {
      return tary->as_klass_type(true);
    }
  

@@ -2672,11 +2731,11 @@
    Node* value   = in(MemNode::ValueIn);
    // Back-to-back stores to same address?  Fold em up.  Generally
    // unsafe if I have intervening uses...  Also disallowed for StoreCM
    // since they must follow each StoreP operation.  Redundant StoreCMs
    // are eliminated just before matching in final_graph_reshape.
-   {
+   if (phase->C->get_adr_type(phase->C->get_alias_index(adr_type())) != TypeAryPtr::INLINES) {
      Node* st = mem;
      // If Store 'st' has more than one use, we cannot fold 'st' away.
      // For example, 'st' might be the final state at a conditional
      // return.  Or, 'st' might be used by some node which is live at
      // the same time 'st' is live, which might be unschedulable.  So,

@@ -2692,10 +2751,11 @@
               st->Opcode() == Op_StoreVectorScatter ||
               Opcode() == Op_StoreVectorScatter ||
               phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
               (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
               (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
+              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) ||
               (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
               "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
  
        if (st->in(MemNode::Address)->eqv_uncast(address) &&
            st->as_Store()->memory_size() <= this->memory_size()) {

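
The back-to-back store fold above only removes the earlier store when the later store writes the same address and at least as many bytes, and the new guard keeps the flat-array INLINES slice out of it entirely. A small sketch of just that address/size condition (addresses and sizes are arbitrary):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Sketch of the folding condition: the earlier store can be bypassed only if
// the later store writes the same address and at least as many bytes.
static bool can_fold_away_earlier_store(uintptr_t earlier_adr, std::size_t earlier_size,
                                        uintptr_t later_adr, std::size_t later_size) {
  return earlier_adr == later_adr && earlier_size <= later_size;
}

int main() {
  // StoreI followed by StoreL at the same address: the StoreI is dead.
  assert(can_fold_away_earlier_store(0x1000, 4, 0x1000, 8));
  // StoreL followed by StoreI: the later store does not cover the earlier one.
  assert(!can_fold_away_earlier_store(0x1000, 8, 0x1000, 4));
  return 0;
}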
@@ -2788,18 +2848,18 @@
    }
  
    // Store of zero anywhere into a freshly-allocated object?
    // Then the store is useless.
    // (It must already have been captured by the InitializeNode.)
-   if (result == this &&
-       ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
+   if (result == this && ReduceFieldZeroing) {
      // a newly allocated object is already all-zeroes everywhere
-     if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
+     if (mem->is_Proj() && mem->in(0)->is_Allocate() &&
+         (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::DefaultValue) == val)) {
        result = mem;
      }
  
-     if (result == this) {
+     if (result == this && phase->type(val)->is_zero_type()) {
        // the store may also apply to zero-bits in an earlier object
        Node* prev_mem = find_previous_store(phase);
        // Steps (a), (b):  Walk past independent stores to find an exact match.
        if (prev_mem != nullptr) {
          Node* prev_val = can_see_stored_value(prev_mem, phase);

@@ -2980,13 +3040,17 @@
    Node* progress = StoreNode::Ideal(phase, can_reshape);
    if (progress != nullptr) return progress;
  
    Node* my_store = in(MemNode::OopStore);
    if (my_store->is_MergeMem()) {
-     Node* mem = my_store->as_MergeMem()->memory_at(oop_alias_idx());
-     set_req_X(MemNode::OopStore, mem, phase);
-     return this;
+     if (oop_alias_idx() != phase->C->get_alias_index(TypeAryPtr::INLINES) ||
+         phase->C->flat_accesses_share_alias()) {
+       // The alias that was recorded is no longer accurate enough.
+       Node* mem = my_store->as_MergeMem()->memory_at(oop_alias_idx());
+       set_req_X(MemNode::OopStore, mem, phase);
+       return this;
+     }
    }
  
    return nullptr;
  }
  

@@ -3153,11 +3217,11 @@
    if (size <= 0 || size % unit != 0)  return nullptr;
    intptr_t count = size / unit;
    // Length too long; communicate this to matchers and assemblers.
    // Assemblers are responsible to produce fast hardware clears for it.
    if (size > InitArrayShortSize) {
-     return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
+     return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true);
    } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
      return nullptr;
    }
    if (!IdealizeClearArrayNode) return nullptr;
    Node *mem = in(1);

@@ -3171,18 +3235,18 @@
    else              atp = atp->add_offset(Type::OffsetBot);
    // Get base for derived pointer purposes
    if( adr->Opcode() != Op_AddP ) Unimplemented();
    Node *base = adr->in(1);
  
-   Node *zero = phase->makecon(TypeLong::ZERO);
+   Node *val = in(4);
    Node *off  = phase->MakeConX(BytesPerLong);
-   mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
+   mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
    count--;
    while( count-- ) {
      mem = phase->transform(mem);
      adr = phase->transform(new AddPNode(base,adr,off));
-     mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
+     mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
    }
    return mem;
  }
  
  //----------------------------step_through----------------------------------

@@ -3212,31 +3276,40 @@
  }
  
  //----------------------------clear_memory-------------------------------------
  // Generate code to initialize object storage to zero.
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+                                    Node* val,
+                                    Node* raw_val,
                                     intptr_t start_offset,
                                     Node* end_offset,
                                     PhaseGVN* phase) {
    intptr_t offset = start_offset;
  
    int unit = BytesPerLong;
    if ((offset % unit) != 0) {
      Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
      adr = phase->transform(adr);
      const TypePtr* atp = TypeRawPtr::BOTTOM;
-     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+     if (val != nullptr) {
+       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
+       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
+     } else {
+       assert(raw_val == nullptr, "raw_val must be null when val is null");
+       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+     }
      mem = phase->transform(mem);
      offset += BytesPerInt;
    }
    assert((offset % unit) == 0, "");
  
    // Initialize the remaining stuff, if any, with a ClearArray.
-   return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
+   return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase);
  }
  
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+                                    Node* raw_val,
                                     Node* start_offset,
                                     Node* end_offset,
                                     PhaseGVN* phase) {
    if (start_offset == end_offset) {
      // nothing to do

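
For a null-free flat array, object initialization fills the payload with the element's default value rather than with zeroes, which is why clear_memory() now takes val/raw_val and stores that value instead of a zero constant. A standalone sketch of the difference in plain C++ (not HotSpot nodes):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Fill 'words' 64-bit slots at 'dst' with 'val'. With val == 0 this is plain
// zero-clearing; a non-zero value initializes every slot to that pattern,
// which is what a null-free flat array needs for its default element value.
static void fill_words(uint64_t* dst, std::size_t words, uint64_t val) {
  for (std::size_t i = 0; i < words; i++) {
    dst[i] = val;
  }
}

int main() {
  std::vector<uint64_t> storage(4, 0xDEADBEEF);
  fill_words(storage.data(), storage.size(), 0);     // ordinary zeroing
  assert(storage[3] == 0);
  fill_words(storage.data(), storage.size(), 0x2A);  // default-value fill
  assert(storage[0] == 0x2A && storage[3] == 0x2A);
  return 0;
}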
@@ -3255,15 +3328,20 @@
    }
  
    // Bulk clear double-words
    Node* zsize = phase->transform(new SubXNode(zend, zbase) );
    Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
-   mem = new ClearArrayNode(ctl, mem, zsize, adr, false);
+   if (raw_val == nullptr) {
+     raw_val = phase->MakeConX(0);
+   }
+   mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false);
    return phase->transform(mem);
  }
  
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+                                    Node* val,
+                                    Node* raw_val,
                                     intptr_t start_offset,
                                     intptr_t end_offset,
                                     PhaseGVN* phase) {
    if (start_offset == end_offset) {
      // nothing to do

@@ -3274,18 +3352,24 @@
    intptr_t done_offset = end_offset;
    if ((done_offset % BytesPerLong) != 0) {
      done_offset -= BytesPerInt;
    }
    if (done_offset > start_offset) {
-     mem = clear_memory(ctl, mem, dest,
+     mem = clear_memory(ctl, mem, dest, val, raw_val,
                         start_offset, phase->MakeConX(done_offset), phase);
    }
    if (done_offset < end_offset) { // emit the final 32-bit store
      Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
      adr = phase->transform(adr);
      const TypePtr* atp = TypeRawPtr::BOTTOM;
-     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+     if (val != nullptr) {
+       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
+       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
+     } else {
+       assert(raw_val == nullptr, "raw_val must be null when val is null");
+       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+     }
      mem = phase->transform(mem);
      done_offset += BytesPerInt;
    }
    assert(done_offset == end_offset, "");
    return mem;

@@ -3426,11 +3510,11 @@
    return TypeTuple::MEMBAR;
  }
  
  //------------------------------match------------------------------------------
  // Construct projections for memory.
- Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
+ Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) {
    switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::Memory:
      return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
    }

@@ -3713,11 +3797,13 @@
  
  // convenience function
  // return false if the init contains any stores already
  bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
    InitializeNode* init = initialization();
-   if (init == nullptr || init->is_complete())  return false;
+   if (init == nullptr || init->is_complete()) {
+     return false;
+   }
    init->remove_extra_zeroes();
    // for now, if this allocation has already collected any inits, bail:
    if (init->is_non_zero())  return false;
    init->set_complete(phase);
    return true;

@@ -3891,10 +3977,16 @@
                  // after the InitializeNode. We check the control of the
                  // object/array that is loaded from. If it's the same as
                  // the store control then we cannot capture the store.
                  assert(!n->is_Store(), "2 stores to same slice on same control?");
                  Node* base = other_adr;
+                 if (base->is_Phi()) {
+                   // In rare cases, base may be a PhiNode that reads from the
+                   // same memory slice between the InitializeNode and the store.
+                   failed = true;
+                   break;
+                 }
                  assert(base->is_AddP(), "should be addp but is %s", base->Name());
                  base = base->in(AddPNode::Base);
                  if (base != nullptr) {
                    base = base->uncast();
                    if (base->is_Proj() && base->in(0) == alloc) {

@@ -4477,10 +4569,12 @@
        if (zeroes_needed > zeroes_done) {
          intptr_t zsize = zeroes_needed - zeroes_done;
          // Do some incremental zeroing on rawmem, in parallel with inits.
          zeroes_done = align_down(zeroes_done, BytesPerInt);
          rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
+                                               allocation()->in(AllocateNode::DefaultValue),
+                                               allocation()->in(AllocateNode::RawDefaultValue),
                                                zeroes_done, zeroes_needed,
                                                phase);
          zeroes_done = zeroes_needed;
          if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
            do_zeroing = false;   // leave the hole, next time

@@ -4536,10 +4630,12 @@
            zeroes_done = size_limit;
        }
      }
      if (zeroes_done < size_limit) {
        rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
+                                             allocation()->in(AllocateNode::DefaultValue),
+                                             allocation()->in(AllocateNode::RawDefaultValue),
                                              zeroes_done, size_in_bytes, phase);
      }
    }
  
    set_complete(phase);