
src/hotspot/share/opto/memnode.cpp

*** 21,11 ***
--- 21,13 ---
   * or visit www.oracle.com if you need additional information or have any
   * questions.
   *
   */
  
+ #include "ci/ciFlatArrayKlass.hpp"
  #include "classfile/javaClasses.hpp"
+ #include "classfile/systemDictionary.hpp"
  #include "compiler/compileLog.hpp"
  #include "gc/shared/barrierSet.hpp"
  #include "gc/shared/c2/barrierSetC2.hpp"
  #include "gc/shared/tlab_globals.hpp"
  #include "memory/allocation.inline.hpp"

*** 36,10 ***
--- 38,11 ---
  #include "opto/cfgnode.hpp"
  #include "opto/regalloc.hpp"
  #include "opto/compile.hpp"
  #include "opto/connode.hpp"
  #include "opto/convertnode.hpp"
+ #include "opto/inlinetypenode.hpp"
  #include "opto/loopnode.hpp"
  #include "opto/machnode.hpp"
  #include "opto/matcher.hpp"
  #include "opto/memnode.hpp"
  #include "opto/mempointer.hpp"

*** 138,35 ***
  
  extern void print_alias_types();
  
  #endif
  
! Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) {
!   assert((t_oop != nullptr), "sanity");
    bool is_instance = t_oop->is_known_instance_field();
!   bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() &&
!                              (load != nullptr) && load->is_Load() &&
!                              (phase->is_IterGVN() != nullptr);
!   if (!(is_instance || is_boxed_value_load))
!     return mchain;  // don't try to optimize non-instance types
    uint instance_id = t_oop->instance_id();
!   Node *start_mem = phase->C->start()->proj_out_or_null(TypeFunc::Memory);
!   Node *prev = nullptr;
-   Node *result = mchain;
    while (prev != result) {
      prev = result;
!     if (result == start_mem)
!       break;  // hit one of our sentinels
      // skip over a call which does not affect this memory slice
      if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
!       Node *proj_in = result->in(0);
        if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
!         break;  // hit one of our sentinels
        } else if (proj_in->is_Call()) {
          // ArrayCopyNodes processed here as well
!         CallNode *call = proj_in->as_Call();
!         if (!call->may_modify(t_oop, phase)) { // returns false for instances
            result = call->in(TypeFunc::Memory);
          }
        } else if (proj_in->is_Initialize()) {
          AllocateNode* alloc = proj_in->as_Initialize()->allocation();
          // Stop if this is the initialization for the object instance which
--- 141,188 ---
  
  extern void print_alias_types();
  
  #endif
  
! // If call is a constructor call on receiver, returns the class which declares the target method,
! // else returns nullptr. This information can then be used to deduce if call modifies a field of
+ // receiver. Specifically, if the constructor is declared in a subclass of the class declaring the
+ // field, then the field is set inside the constructor (via the super() call chain), else the field
+ // must be set before the constructor invocation. E.g. a field Super.x will be set during the
+ // execution of Sub::<init>, while a field Sub.y must be set before Super::<init> is invoked.
+ static ciInstanceKlass* find_constructor_call_method_holder(Node* call, Node* receiver) {
+   if (!call->is_CallJava()) {
+     return nullptr;
+   }
+ 
+   ciMethod* target = call->as_CallJava()->method();
+   if (target == nullptr || !target->is_object_constructor()) {
+     return nullptr;
+   }
+ 
+   assert(call->req() > TypeFunc::Parms, "constructor must have at least 1 argument");
+   Node* parm = call->in(TypeFunc::Parms)->uncast();
+   receiver = receiver->uncast();
+   if (parm == receiver || (parm->is_InlineType() && parm->as_InlineType()->get_oop()->uncast() == receiver)) {
+     return target->holder();
+   }
+ 
+   return nullptr;
+ }
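
To make the ordering rule above concrete, here is a minimal standalone sketch (the toy Klass type and ctor_sets_field helper are hypothetical, not the HotSpot ci* API) of the subclass test that decides whether a constructor writes a given field:

  #include <cassert>
  #include <string>

  struct Klass {
    std::string name;
    const Klass* super;  // nullptr at the root of the hierarchy
    bool is_subclass_of(const Klass* other) const {
      for (const Klass* k = this; k != nullptr; k = k->super) {
        if (k == other) return true;
      }
      return false;
    }
  };

  // A constructor declared in ctor_holder writes a field declared in
  // field_holder iff ctor_holder is a subclass of field_holder.
  bool ctor_sets_field(const Klass* ctor_holder, const Klass* field_holder) {
    return ctor_holder->is_subclass_of(field_holder);
  }

  int main() {
    Klass object{"Object", nullptr};
    Klass super_k{"Super", &object};
    Klass sub_k{"Sub", &super_k};
    assert(ctor_sets_field(&sub_k, &super_k));   // Super.x is set during Sub::<init>
    assert(!ctor_sets_field(&super_k, &sub_k));  // Sub.y is set before Super::<init>
    return 0;
  }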
+ 
+ // Find the memory output corresponding to the fall-through path of a call
+ static Node* find_call_fallthrough_mem_output(CallNode* call) {
+   ResourceMark rm;
+   CallProjections* projs = call->extract_projections(false, false);
+   Node* res = projs->fallthrough_memproj;
+   assert(res != nullptr, "must have a fallthrough mem output");
+   return res;
+ }
+ 
+ // Try to find a better memory input for a load from a strict final field of an object that is
+ // allocated in the current compilation unit, or is the first parameter when we are in a
+ // constructor
+ static Node* optimize_strict_final_load_memory_from_local_object(ciField* field, ProjNode* base_uncasted) {
+   if (!EnableValhalla) {
+     // This method depends on the fact that strict fields are set before the invocation of
+     // super(); it is not clear that this holds without Valhalla
+     return nullptr;
+   }
+ 
+   // The node that can be passed into a constructor: the Parm itself, or the result oop of the AllocateNode
+   Node* base = base_uncasted;
+   if (!base_uncasted->is_Parm()) {
+     assert(base_uncasted->_con == AllocateNode::RawAddress && base_uncasted->in(0)->is_Allocate(), "must be the RawAddress of an AllocateNode");
+     base = base_uncasted->in(0)->as_Allocate()->result_cast();
+     assert(base != nullptr && base->in(1) == base_uncasted, "must find a valid base");
+   }
+ 
+   // Try to see if there is a constructor call on the base
+   for (DUIterator_Fast imax, i = base->fast_outs(imax); i < imax; i++) {
+     Node* out = base->fast_out(i);
+     ciInstanceKlass* target_holder = find_constructor_call_method_holder(out, base);
+     if (target_holder == nullptr) {
+       continue;
+     } else if (target_holder->is_subclass_of(field->holder())) {
+       return find_call_fallthrough_mem_output(out->as_CallJava());
+     } else {
+       Node* res = out->in(TypeFunc::Memory);
+       assert(res != nullptr, "should have a memory input");
+       return res;
+     }
+   }
+ 
+   return nullptr;
+ }
+ 
+ // Try to find a better memory input for a load from a strict final field
+ static Node* try_optimize_strict_final_load_memory(PhaseGVN* phase, ciField* field, Node* adr, ProjNode*& base_local) {
+   intptr_t offset = 0;
+   Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
+   if (base == nullptr) {
+     return nullptr;
+   }
+ 
+   Node* base_uncasted = base->uncast();
+   if (base_uncasted->is_Proj()) {
+     MultiNode* multi = base_uncasted->in(0)->as_Multi();
+     if (multi->is_Allocate()) {
+       // The result of an AllocateNode, try to find the constructor call
+       base_local = base_uncasted->as_Proj();
+       return optimize_strict_final_load_memory_from_local_object(field, base_uncasted->as_Proj());
+     } else if (multi->is_Call()) {
+       // The oop is returned from a call, the memory can be the fallthrough output of the call
+       return find_call_fallthrough_mem_output(multi->as_Call());
+     } else if (multi->is_Start()) {
+       // The oop is a parameter
+       if (phase->C->method()->is_object_constructor() && base_uncasted->as_Proj()->_con == TypeFunc::Parms) {
+         // The receiver of a constructor is similar to the result of an AllocateNode
+         base_local = base_uncasted->as_Proj();
+         return optimize_strict_final_load_memory_from_local_object(field, base_uncasted->as_Proj());
+       } else {
+         // Use the start memory otherwise
+         return multi->proj_out(TypeFunc::Memory);
+       }
+     }
+   }
+ 
+   return nullptr;
+ }
+ 
+ // Whether a call can modify a strict final field of base_local, given that base_local is allocated
+ // inside the current compilation unit, or is the first parameter when the compilation root is a
+ // constructor. This is equivalent to asking whether base_local is the receiver of the constructor
+ // invocation call and the class declaring the target method is a subclass of the class declaring
+ // the field.
+ static bool call_can_modify_local_object(ciField* field, CallNode* call, Node* base_local) {
+   ciInstanceKlass* target_holder = find_constructor_call_method_holder(call, base_local);
+   return target_holder != nullptr && target_holder->is_subclass_of(field->holder());
+ }
+ 
+ Node* MemNode::optimize_simple_memory_chain(Node* mchain, const TypeOopPtr* t_oop, Node* load, PhaseGVN* phase) {
+   assert(t_oop != nullptr, "sanity");
    bool is_instance = t_oop->is_known_instance_field();
! 
!   ciField* field = phase->C->alias_type(t_oop)->field();
!   bool is_strict_final_load = false;
! 
!   // After macro expansion, an allocation may become a call; changing the memory input to the
+   // memory output of that call would be illegal. As a result, disallow this transformation after
+   // macro expansion.
+   if (phase->is_IterGVN() && phase->C->allow_macro_nodes() && load != nullptr && load->is_Load() && !load->as_Load()->is_mismatched_access()) {
+     if (EnableValhalla) {
+       if (field != nullptr && (field->holder()->is_inlinetype() || field->holder()->is_abstract_value_klass())) {
+         is_strict_final_load = true;
+       }
+ #ifdef ASSERT
+       if (t_oop->is_inlinetypeptr() && t_oop->inline_klass()->contains_field_offset(t_oop->offset())) {
+         assert(is_strict_final_load, "sanity check for basic cases");
+       }
+ #endif
+     } else {
+       is_strict_final_load = field != nullptr && t_oop->is_ptr_to_boxed_value();
+     }
+   }
+ 
+   if (!is_instance && !is_strict_final_load) {
+     return mchain;
+   }
+ 
+   Node* result = mchain;
+   ProjNode* base_local = nullptr;
+ 
+   if (is_strict_final_load) {
+     Node* adr = load->in(MemNode::Address);
+     assert(phase->type(adr) == t_oop, "inconsistent type");
+     Node* tmp = try_optimize_strict_final_load_memory(phase, field, adr, base_local);
+     if (tmp != nullptr) {
+       result = tmp;
+     }
+   }
+ 
    uint instance_id = t_oop->instance_id();
!   Node* start_mem = phase->C->start()->proj_out_or_null(TypeFunc::Memory);
!   Node* prev = nullptr;
    while (prev != result) {
      prev = result;
!     if (result == start_mem) {
!       // start_mem is the earliest memory possible
+       break;
+     }
+ 
      // skip over a call which does not affect this memory slice
      if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
!       Node* proj_in = result->in(0);
        if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
!         // This is the allocation that creates the object we are loading from
+         break;
        } else if (proj_in->is_Call()) {
          // ArrayCopyNodes processed here as well
!         CallNode* call = proj_in->as_Call();
!         if (!call->may_modify(t_oop, phase)) {
+           result = call->in(TypeFunc::Memory);
+         } else if (is_strict_final_load && base_local != nullptr && !call_can_modify_local_object(field, call, base_local)) {
            result = call->in(TypeFunc::Memory);
          }
        } else if (proj_in->is_Initialize()) {
          AllocateNode* alloc = proj_in->as_Initialize()->allocation();
          // Stop if this is the initialization for the object instance which

*** 174,15 ***
          if ((alloc == nullptr) || (alloc->_idx == instance_id)) {
            break;
          }
          if (is_instance) {
            result = proj_in->in(TypeFunc::Memory);
!         } else if (is_boxed_value_load) {
            Node* klass = alloc->in(AllocateNode::KlassNode);
            const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
            if (tklass->klass_is_exact() && !tklass->exact_klass()->equals(t_oop->is_instptr()->exact_klass())) {
!             result = proj_in->in(TypeFunc::Memory); // not related allocation
            }
          }
        } else if (proj_in->is_MemBar()) {
          ArrayCopyNode* ac = nullptr;
          if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase, ac)) {
--- 330,19 ---
          if ((alloc == nullptr) || (alloc->_idx == instance_id)) {
            break;
          }
          if (is_instance) {
            result = proj_in->in(TypeFunc::Memory);
!         } else if (is_strict_final_load) {
            Node* klass = alloc->in(AllocateNode::KlassNode);
            const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
            if (tklass->klass_is_exact() && !tklass->exact_klass()->equals(t_oop->is_instptr()->exact_klass())) {
!             // Allocation of another type, so it must be a different object
+             result = proj_in->in(TypeFunc::Memory);
+           } else if (base_local != nullptr && (base_local->is_Parm() || base_local->in(0) != alloc)) {
+             // Allocation of another object
+             result = proj_in->in(TypeFunc::Memory);
            }
          }
        } else if (proj_in->is_MemBar()) {
          ArrayCopyNode* ac = nullptr;
          if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase, ac)) {

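The loop above climbs the memory graph to a fixpoint, skipping producers that provably cannot write the loaded slice and stopping at a sentinel. A rough standalone model of that walk (toy MemState type; the real code also distinguishes Proj, Initialize and MemBar inputs):

  struct MemState {
    MemState* input;      // previous memory state, nullptr at start memory
    bool      may_modify; // can this producer write the slice being loaded?
    bool      sentinel;   // the allocation of the loaded object, or start memory
  };

  MemState* walk_chain(MemState* mem) {
    MemState* prev = nullptr;
    MemState* result = mem;
    while (prev != result) {        // iterate until no further progress
      prev = result;
      if (result->sentinel) break;  // cannot climb past the allocation
      if (!result->may_modify && result->input != nullptr) {
        result = result->input;     // an unrelated write, skip over it
      }
    }
    return result;
  }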
*** 231,10 ***
--- 391,12 ---
          ->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
        if (t_oop->isa_aryptr()) {
          mem_t = mem_t->is_aryptr()
                       ->cast_to_stable(t_oop->is_aryptr()->is_stable())
                       ->cast_to_size(t_oop->is_aryptr()->size())
+                      ->cast_to_not_flat(t_oop->is_aryptr()->is_not_flat())
+                      ->cast_to_not_null_free(t_oop->is_aryptr()->is_not_null_free())
                       ->with_offset(t_oop->is_aryptr()->offset())
                       ->is_aryptr();
        }
        do_split = mem_t == t_oop;
      }

*** 257,11 ***
      assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
      bool consistent =  adr_check == nullptr || adr_check->empty() ||
                         phase->C->must_alias(adr_check, alias_idx );
      // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
      if( !consistent && adr_check != nullptr && !adr_check->empty() &&
!                tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
          adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
          ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
            adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
            adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
        // don't assert if it is dead code.
--- 419,11 ---
      assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
      bool consistent =  adr_check == nullptr || adr_check->empty() ||
                         phase->C->must_alias(adr_check, alias_idx );
      // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
      if( !consistent && adr_check != nullptr && !adr_check->empty() &&
!         tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
          adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
          ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
            adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
            adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
        // don't assert if it is dead code.

*** 1010,11 ***
      bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
      bool is_stable_ary = FoldStableValues &&
                           (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
                           tp->isa_aryptr()->is_stable();
  
!     return (eliminate_boxing && non_volatile) || is_stable_ary;
    }
  
    return false;
  }
  
--- 1172,11 ---
      bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
      bool is_stable_ary = FoldStableValues &&
                           (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
                           tp->isa_aryptr()->is_stable();
  
!     return (eliminate_boxing && non_volatile) || is_stable_ary || tp->is_inlinetypeptr();
    }
  
    return false;
  }
  

*** 1067,11 ***
        const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
        BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
        if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
  
        uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
!       uint shift  = exact_log2(type2aelembytes(ary_elem));
  
        Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
  #ifdef _LP64
        diff = phase->transform(new ConvI2LNode(diff));
  #endif
--- 1229,11 ---
        const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
        BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
        if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
  
        uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
!       uint shift  = ary_t->is_flat() ? ary_t->flat_log_elem_size() : exact_log2(type2aelembytes(ary_elem));
  
        Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
  #ifdef _LP64
        diff = phase->transform(new ConvI2LNode(diff));
  #endif

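The shift above feeds the usual base-plus-scaled-index address form. A small sketch of the arithmetic (element_offset is illustrative only): a flat array derives the shift from its packed element layout rather than from the element's basic type.

  #include <cstdint>

  // Byte offset of element i: header + (i << shift). For a flat array the
  // shift is log2 of the flat element size, which may span several fields.
  intptr_t element_offset(intptr_t header, intptr_t index, int shift) {
    return header + (index << shift);
  }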
*** 1091,10 ***
--- 1253,21 ---
      return ld;
    }
    return nullptr;
  }
  
+ static Node* see_through_inline_type(PhaseValues* phase, const MemNode* load, Node* base, int offset) {
+   if (!load->is_mismatched_access() && base != nullptr && base->is_InlineType() && offset > oopDesc::klass_offset_in_bytes()) {
+     InlineTypeNode* vt = base->as_InlineType();
+     assert(!vt->is_larval(), "must not load from a larval object");
+     Node* value = vt->field_value_by_offset(offset, true);
+     assert(value != nullptr, "must see some value");
+     return value;
+   }
+ 
+   return nullptr;
+ }
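
A standalone sketch of the idea (the ToyInlineType type is hypothetical, not the C2 InlineTypeNode): a scalarized value object carries its field values as inputs, so a load at a known offset can be folded to the corresponding value directly.

  #include <map>

  struct ToyNode {};

  struct ToyInlineType {
    std::map<int, ToyNode*> fields_by_offset;  // field offset -> value node
    ToyNode* field_value_by_offset(int offset) const {
      auto it = fields_by_offset.find(offset);
      return it == fields_by_offset.end() ? nullptr : it->second;
    }
  };

  // Mirrors see_through_inline_type: if the load's base is a scalarized value
  // object and the offset names one of its fields, return that field's value.
  ToyNode* see_through(const ToyInlineType* base, int offset) {
    return base == nullptr ? nullptr : base->field_value_by_offset(offset);
  }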
  
  //---------------------------can_see_stored_value------------------------------
  // This routine exists to make sure this set of tests is done the same
  // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
  // will change the graph shape in a way which makes memory alive twice at the

*** 1103,10 ***
--- 1276,19 ---
  // of aliasing.
  Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
    Node* ld_adr = in(MemNode::Address);
    intptr_t ld_off = 0;
    Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
+   // Try to see through an InlineTypeNode
+   // LoadN is special because it must produce a compressed value; it is handled in LoadNNode::Ideal instead
+   if (Opcode() != Op_LoadN) {
+     Node* value = see_through_inline_type(phase, this, ld_base, ld_off);
+     if (value != nullptr) {
+       return value;
+     }
+   }
+ 
    Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
    const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
    Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr;
    // This is more general than load from boxing objects.
    if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {

*** 1186,11 ***
          return nullptr;
        }
        // LoadVector/StoreVector needs additional check to ensure the types match.
        if (st->is_StoreVector()) {
          const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
!         const TypeVect* out_vt = as_LoadVector()->vect_type();
          if (in_vt != out_vt) {
            return nullptr;
          }
        }
        return st->in(MemNode::ValueIn);
--- 1368,11 ---
          return nullptr;
        }
        // LoadVector/StoreVector needs additional check to ensure the types match.
        if (st->is_StoreVector()) {
          const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
!         const TypeVect* out_vt = is_Load() ? as_LoadVector()->vect_type() : as_StoreVector()->vect_type();
          if (in_vt != out_vt) {
            return nullptr;
          }
        }
        return st->in(MemNode::ValueIn);

*** 1204,10 ***
--- 1386,16 ---
          (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
        // return a zero value for the load's basic type
        // (This is one of the few places where a generic PhaseTransform
        // can create new nodes.  Think of it as lazily manifesting
        // virtually pre-existing constants.)
+       Node* init_value = ld_alloc->in(AllocateNode::InitValue);
+       if (init_value != nullptr) {
+         // TODO 8350865 Is this correct for non-all-zero init values? Don't we need field_value_by_offset?
+         return init_value;
+       }
+       assert(ld_alloc->in(AllocateNode::RawInitValue) == nullptr, "raw init value must be null when init value is null");
        if (memory_type() != T_VOID) {
          if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
            // If ReduceBulkZeroing is disabled, we need to check if the allocation does not belong to an
            // ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
            // by the ArrayCopyNode.

*** 1864,10 ***
--- 2052,11 ---
    Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
    if (base != nullptr
        && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
      // Check for useless control edge in some common special cases
      if (in(MemNode::Control) != nullptr
+         && !(phase->type(address)->is_inlinetypeptr() && is_mismatched_access())
          && can_remove_control()
          && phase->type(base)->higher_equal(TypePtr::NOTNULL)
          && all_controls_dominate(base, phase->C->start())) {
        // A method-invariant, non-null address (constant or 'this' argument).
        set_req(MemNode::Control, nullptr);

*** 1960,11 ***
        set_req_X(MemNode::Memory, prev_mem, phase);
        return this;
      }
    }
  
!   return progress ? this : nullptr;
  }
  
  // Helper to recognize certain Klass fields which are invariant across
  // some group of array types (e.g., int[] or all T[] where T < Object).
  const Type*
--- 2149,18 ---
        set_req_X(MemNode::Memory, prev_mem, phase);
        return this;
      }
    }
  
!   if (progress) {
+     return this;
+   }
+ 
+   if (!can_reshape) {
+     phase->record_for_igvn(this);
+   }
+   return nullptr;
  }
  
  // Helper to recognize certain Klass fields which are invariant across
  // some group of array types (e.g., int[] or all T[] where T < Object).
  const Type*

*** 2062,10 ***
--- 2258,11 ---
      // In fact, that could have been the original type of p1, and p1 could have
      // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
      // expression (LShiftL quux 3) independently optimized to the constant 8.
      if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
          && (_type->isa_vect() == nullptr)
+         && !ary->is_flat()
          && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
        // t might actually be lower than _type, if _type is a unique
        // concrete subclass of abstract class t.
        if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
          const Type* jt = t->join_speculative(_type);

*** 2097,20 ***
    } else if (tp->base() == Type::InstPtr) {
      assert( off != Type::OffsetBot ||
              // arrays can be cast to Objects
              !tp->isa_instptr() ||
              tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
              // unsafe field access may not have a constant offset
              C->has_unsafe_access(),
              "Field accesses must be precise" );
      // For oop loads, we expect the _type to be precise.
  
-     // Optimize loads from constant fields.
      const TypeInstPtr* tinst = tp->is_instptr();
      ciObject* const_oop = tinst->const_oop();
      if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
!       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());
        if (con_type != nullptr) {
          return con_type;
        }
      }
    } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
--- 2294,24 ---
    } else if (tp->base() == Type::InstPtr) {
      assert( off != Type::OffsetBot ||
              // arrays can be cast to Objects
              !tp->isa_instptr() ||
              tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
+             // Default value load
+             tp->is_instptr()->instance_klass() == ciEnv::current()->Class_klass() ||
              // unsafe field access may not have a constant offset
              C->has_unsafe_access(),
              "Field accesses must be precise" );
      // For oop loads, we expect the _type to be precise.
  
      const TypeInstPtr* tinst = tp->is_instptr();
+     BasicType bt = memory_type();
+ 
+     // Optimize loads from constant fields.
      ciObject* const_oop = tinst->const_oop();
      if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
!       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt);
        if (con_type != nullptr) {
          return con_type;
        }
      }
    } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {

*** 2153,11 ***
          // The field is Klass::_super_check_offset.  Return its (constant) value.
          // (Folds up type checking code.)
          assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
          return TypeInt::make(klass->super_check_offset());
        }
!       if (UseCompactObjectHeaders) {
          if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
            // The field is Klass::_prototype_header. Return its (constant) value.
            assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
            return TypeX::make(klass->prototype_header());
          }
--- 2354,11 ---
          // The field is Klass::_super_check_offset.  Return its (constant) value.
          // (Folds up type checking code.)
          assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
          return TypeInt::make(klass->super_check_offset());
        }
!       if (UseCompactObjectHeaders) { // TODO: Should EnableValhalla also take this path?
          if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
            // The field is Klass::_prototype_header. Return its (constant) value.
            assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
            return TypeX::make(klass->prototype_header());
          }

*** 2231,18 ***
      // so just return a zero of the appropriate type -
      // except if it is vectorized - then we have no zero constant.
      Node *mem = in(MemNode::Memory);
      if (mem->is_Parm() && mem->in(0)->is_Start()) {
        assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
        return Type::get_zero_type(_type->basic_type());
      }
    }
- 
    if (!UseCompactObjectHeaders) {
      Node* alloc = is_new_object_mark_load();
      if (alloc != nullptr) {
!       return TypeX::make(markWord::prototype().value());
      }
    }
  
    return _type;
  }
--- 2432,31 ---
      // so just return a zero of the appropriate type -
      // except if it is vectorized - then we have no zero constant.
      Node *mem = in(MemNode::Memory);
      if (mem->is_Parm() && mem->in(0)->is_Start()) {
        assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
+       // TODO 8350865 This is needed for flat array accesses; somehow the memory of the loads bypasses the intrinsic.
+       // Run TestArrays.test6 in Scenario4. We need more tests for this; TestBasicFunctionality::test20 also needs this.
+       if (tp->isa_aryptr() && tp->is_aryptr()->is_flat() && !UseFieldFlattening) {
+         return _type;
+       }
        return Type::get_zero_type(_type->basic_type());
      }
    }
    if (!UseCompactObjectHeaders) {
      Node* alloc = is_new_object_mark_load();
      if (alloc != nullptr) {
!       if (EnableValhalla) {
+         // The mark word may contain property bits (inline, flat, null-free)
+         Node* klass_node = alloc->in(AllocateNode::KlassNode);
+         const TypeKlassPtr* tkls = phase->type(klass_node)->isa_klassptr();
+         if (tkls != nullptr && tkls->is_loaded() && tkls->klass_is_exact()) {
+           return TypeX::make(tkls->exact_klass()->prototype_header());
+         }
+       } else {
+         return TypeX::make(markWord::prototype().value());
+       }
      }
    }
  
    return _type;
  }

*** 2387,10 ***
--- 2601,23 ---
      return TypeInt::make((con << 16) >> 16);
    }
    return LoadNode::Value(phase);
  }
  
+ Node* LoadNNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+   // When loading from an InlineType, find the corresponding field value and wrap it in an EncodeP
+   Node* addr = in(Address);
+   intptr_t offset;
+   Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset);
+   Node* value = see_through_inline_type(phase, this, base, offset);
+   if (value != nullptr) {
+     return new EncodePNode(value, type());
+   }
+ 
+   return LoadNode::Ideal(phase, can_reshape);
+ }
+ 
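The EncodeP above is needed because LoadN yields a compressed reference while the value held by the InlineTypeNode is an uncompressed oop. A sketch of the narrow-oop view (kHeapBase and kShift are assumed encoding parameters, not the VM's actual values):

  #include <cstdint>

  const uintptr_t kHeapBase = 0x800000000ULL;  // assumed compressed-oop base
  const int       kShift    = 3;               // assumed compressed-oop shift

  uint32_t encode(uintptr_t oop) {
    return (uint32_t)((oop - kHeapBase) >> kShift);  // what a LoadN/StoreN sees
  }
  uintptr_t decode(uint32_t narrow) {
    return ((uintptr_t)narrow << kShift) + kHeapBase;  // back to a full oop
  }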
  //=============================================================================
  //----------------------------LoadKlassNode::make------------------------------
  // Polymorphic factory method:
  Node* LoadKlassNode::make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
    // sanity check the alias category against the created node type

*** 2431,28 ***
      if (ik == phase->C->env()->Class_klass()
          && (offset == java_lang_Class::klass_offset() ||
              offset == java_lang_Class::array_klass_offset())) {
        // We are loading a special hidden field from a Class mirror object,
        // the field which points to the VM's Klass metaobject.
!       ciType* t = tinst->java_mirror_type();
        // java_mirror_type returns non-null for compile-time Class constants.
        if (t != nullptr) {
          // constant oop => constant klass
          if (offset == java_lang_Class::array_klass_offset()) {
            if (t->is_void()) {
              // We cannot create a void array.  Since void is a primitive type return null
              // klass.  Users of this result need to do a null check on the returned klass.
              return TypePtr::NULL_PTR;
            }
!           return TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
          }
          if (!t->is_klass()) {
            // a primitive Class (e.g., int.class) has null for a klass field
            return TypePtr::NULL_PTR;
          }
          // Fold up the load of the hidden field
!         return TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
        }
        // non-constant mirror, so we can't tell what's going on
      }
      if (!tinst->is_loaded())
        return _type;             // Bail out if not loaded
--- 2658,37 ---
      if (ik == phase->C->env()->Class_klass()
          && (offset == java_lang_Class::klass_offset() ||
              offset == java_lang_Class::array_klass_offset())) {
        // We are loading a special hidden field from a Class mirror object,
        // the field which points to the VM's Klass metaobject.
!       bool is_null_free_array = false;
+       ciType* t = tinst->java_mirror_type(&is_null_free_array);
        // java_mirror_type returns non-null for compile-time Class constants.
        if (t != nullptr) {
          // constant oop => constant klass
          if (offset == java_lang_Class::array_klass_offset()) {
            if (t->is_void()) {
              // We cannot create a void array.  Since void is a primitive type return null
              // klass.  Users of this result need to do a null check on the returned klass.
              return TypePtr::NULL_PTR;
            }
!           const TypeKlassPtr* tklass = TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
+           if (is_null_free_array) {
+             tklass = tklass->is_aryklassptr()->cast_to_null_free();
+           }
+           return tklass;
          }
          if (!t->is_klass()) {
            // a primitive Class (e.g., int.class) has null for a klass field
            return TypePtr::NULL_PTR;
          }
          // Fold up the load of the hidden field
!         const TypeKlassPtr* tklass = TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
+         if (is_null_free_array) {
+           tklass = tklass->is_aryklassptr()->cast_to_null_free();
+         }
+         return tklass;
        }
        // non-constant mirror, so we can't tell what's going on
      }
      if (!tinst->is_loaded())
        return _type;             // Bail out if not loaded

*** 2460,11 ***
        return tinst->as_klass_type(true);
      }
    }
  
    // Check for loading klass from an array
!   const TypeAryPtr *tary = tp->isa_aryptr();
    if (tary != nullptr &&
        tary->offset() == oopDesc::klass_offset_in_bytes()) {
      return tary->as_klass_type(true);
    }
  
--- 2696,11 ---
        return tinst->as_klass_type(true);
      }
    }
  
    // Check for loading klass from an array
!   const TypeAryPtr* tary = tp->isa_aryptr();
    if (tary != nullptr &&
        tary->offset() == oopDesc::klass_offset_in_bytes()) {
      return tary->as_klass_type(true);
    }
  

*** 3376,12 ***
  
    Node* mem     = in(MemNode::Memory);
    Node* address = in(MemNode::Address);
    Node* value   = in(MemNode::ValueIn);
    // Back-to-back stores to same address?  Fold em up.  Generally
!   // unsafe if I have intervening uses.
!   {
      Node* st = mem;
      // If Store 'st' has more than one use, we cannot fold 'st' away.
      // For example, 'st' might be the final state at a conditional
      // return.  Or, 'st' might be used by some node which is live at
      // the same time 'st' is live, which might be unschedulable.  So,
--- 3612,12 ---
  
    Node* mem     = in(MemNode::Memory);
    Node* address = in(MemNode::Address);
    Node* value   = in(MemNode::ValueIn);
    // Back-to-back stores to same address?  Fold em up.  Generally
!   // unsafe if I have intervening uses...
!   if (phase->C->get_adr_type(phase->C->get_alias_index(adr_type())) != TypeAryPtr::INLINES) {
      Node* st = mem;
      // If Store 'st' has more than one use, we cannot fold 'st' away.
      // For example, 'st' might be the final state at a conditional
      // return.  Or, 'st' might be used by some node which is live at
      // the same time 'st' is live, which might be unschedulable.  So,

*** 3397,10 ***
--- 3633,12 ---
               st->Opcode() == Op_StoreVectorScatter ||
               Opcode() == Op_StoreVectorScatter ||
               phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
               (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
               (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
+              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) ||
+              (st->adr_type()->isa_aryptr() && st->adr_type()->is_aryptr()->is_flat()) || // TODO 8343835
               (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
               "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
  
        if (st->in(MemNode::Address)->eqv_uncast(address) &&
            st->as_Store()->memory_size() <= this->memory_size()) {

*** 3534,18 ***
    }
  
    // Store of zero anywhere into a freshly-allocated object?
    // Then the store is useless.
    // (It must already have been captured by the InitializeNode.)
!   if (result == this &&
-       ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
      // a newly allocated object is already all-zeroes everywhere
!     if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
        result = mem;
      }
  
!     if (result == this) {
        // the store may also apply to zero-bits in an earlier object
        Node* prev_mem = find_previous_store(phase);
        // Steps (a), (b):  Walk past independent stores to find an exact match.
        if (prev_mem != nullptr) {
          Node* prev_val = can_see_stored_value(prev_mem, phase);
--- 3772,18 ---
    }
  
    // Store of zero anywhere into a freshly-allocated object?
    // Then the store is useless.
    // (It must already have been captured by the InitializeNode.)
!   if (result == this && ReduceFieldZeroing) {
      // a newly allocated object is already all-zeroes everywhere
!     if (mem->is_Proj() && mem->in(0)->is_Allocate() &&
+         (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::InitValue) == val)) {
        result = mem;
      }
  
!     if (result == this && phase->type(val)->is_zero_type()) {
        // the store may also apply to zero-bits in an earlier object
        Node* prev_mem = find_previous_store(phase);
        // Steps (a), (b):  Walk past independent stores to find an exact match.
        if (prev_mem != nullptr) {
          Node* prev_val = can_see_stored_value(prev_mem, phase);

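The extended test treats a store into fresh memory as useless not only when it writes zero but also when it writes the allocation's init value back. A one-line model of the decision (store_is_useless is illustrative):

  // A store into a freshly allocated object is dead if the location already
  // holds the stored value: zero for ordinary allocations, or the init value
  // for null-free value-class arrays.
  bool store_is_useless(bool fresh, long stored, long initial) {
    return fresh && stored == initial;
  }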
*** 4045,11 ***
    if (size <= 0 || size % unit != 0)  return nullptr;
    intptr_t count = size / unit;
    // Length too long; communicate this to matchers and assemblers.
    // Assemblers are responsible to produce fast hardware clears for it.
    if (size > InitArrayShortSize) {
!     return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
    } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
      return nullptr;
    }
    if (!IdealizeClearArrayNode) return nullptr;
    Node *mem = in(1);
--- 4283,11 ---
    if (size <= 0 || size % unit != 0)  return nullptr;
    intptr_t count = size / unit;
    // Length too long; communicate this to matchers and assemblers.
    // Assemblers are responsible to produce fast hardware clears for it.
    if (size > InitArrayShortSize) {
!     return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true);
    } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
      return nullptr;
    }
    if (!IdealizeClearArrayNode) return nullptr;
    Node *mem = in(1);

*** 4063,18 ***
    else              atp = atp->add_offset(Type::OffsetBot);
    // Get base for derived pointer purposes
    if( adr->Opcode() != Op_AddP ) Unimplemented();
    Node *base = adr->in(1);
  
!   Node *zero = phase->makecon(TypeLong::ZERO);
    Node *off  = phase->MakeConX(BytesPerLong);
!   mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
    count--;
    while( count-- ) {
      mem = phase->transform(mem);
      adr = phase->transform(new AddPNode(base,adr,off));
!     mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
    }
    return mem;
  }
  
  //----------------------------step_through----------------------------------
--- 4301,18 ---
    else              atp = atp->add_offset(Type::OffsetBot);
    // Get base for derived pointer purposes
    if( adr->Opcode() != Op_AddP ) Unimplemented();
    Node *base = adr->in(1);
  
!   Node *val = in(4);
    Node *off  = phase->MakeConX(BytesPerLong);
!   mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
    count--;
    while( count-- ) {
      mem = phase->transform(mem);
      adr = phase->transform(new AddPNode(base,adr,off));
!     mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
    }
    return mem;
  }
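
With Valhalla, "clearing" a null-free array can mean filling it with a non-zero default bit pattern, which is what the StoreL chain above emits. A standalone sketch of the resulting double-word fill (fill_words and init_word are illustrative, not HotSpot code):

  #include <cstddef>
  #include <cstdint>

  // Fill the byte range [start, end) of base with init_word, one 64-bit
  // store per double-word, mirroring the BytesPerLong loop above.
  void fill_words(uint64_t* base, size_t start, size_t end, uint64_t init_word) {
    for (size_t off = start; off < end; off += sizeof(uint64_t)) {
      base[off / sizeof(uint64_t)] = init_word;
    }
  }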
  
  //----------------------------step_through----------------------------------

*** 4104,31 ***
  }
  
  //----------------------------clear_memory-------------------------------------
  // Generate code to initialize object storage to zero.
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                     intptr_t start_offset,
                                     Node* end_offset,
                                     PhaseGVN* phase) {
    intptr_t offset = start_offset;
  
    int unit = BytesPerLong;
    if ((offset % unit) != 0) {
      Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
      adr = phase->transform(adr);
      const TypePtr* atp = TypeRawPtr::BOTTOM;
!     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
      mem = phase->transform(mem);
      offset += BytesPerInt;
    }
    assert((offset % unit) == 0, "");
  
    // Initialize the remaining stuff, if any, with a ClearArray.
!   return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
  }
  
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                     Node* start_offset,
                                     Node* end_offset,
                                     PhaseGVN* phase) {
    if (start_offset == end_offset) {
      // nothing to do
--- 4342,40 ---
  }
  
  //----------------------------clear_memory-------------------------------------
  // Generate code to initialize object storage to zero.
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+                                    Node* val,
+                                    Node* raw_val,
                                     intptr_t start_offset,
                                     Node* end_offset,
                                     PhaseGVN* phase) {
    intptr_t offset = start_offset;
  
    int unit = BytesPerLong;
    if ((offset % unit) != 0) {
      Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
      adr = phase->transform(adr);
      const TypePtr* atp = TypeRawPtr::BOTTOM;
!     if (val != nullptr) {
+       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
+       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
+     } else {
+       assert(raw_val == nullptr, "raw_val must be null when val is null");
+       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+     }
      mem = phase->transform(mem);
      offset += BytesPerInt;
    }
    assert((offset % unit) == 0, "");
  
    // Initialize the remaining stuff, if any, with a ClearArray.
!   return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase);
  }
  
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+                                    Node* raw_val,
                                     Node* start_offset,
                                     Node* end_offset,
                                     PhaseGVN* phase) {
    if (start_offset == end_offset) {
      // nothing to do

*** 4147,15 ***
    }
  
    // Bulk clear double-words
    Node* zsize = phase->transform(new SubXNode(zend, zbase) );
    Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
!   mem = new ClearArrayNode(ctl, mem, zsize, adr, false);
    return phase->transform(mem);
  }
  
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                     intptr_t start_offset,
                                     intptr_t end_offset,
                                     PhaseGVN* phase) {
    if (start_offset == end_offset) {
      // nothing to do
--- 4394,20 ---
    }
  
    // Bulk clear double-words
    Node* zsize = phase->transform(new SubXNode(zend, zbase) );
    Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
!   if (raw_val == nullptr) {
+     raw_val = phase->MakeConX(0);
+   }
+   mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false);
    return phase->transform(mem);
  }
  
  Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+                                    Node* val,
+                                    Node* raw_val,
                                     intptr_t start_offset,
                                     intptr_t end_offset,
                                     PhaseGVN* phase) {
    if (start_offset == end_offset) {
      // nothing to do

*** 4166,18 ***
    intptr_t done_offset = end_offset;
    if ((done_offset % BytesPerLong) != 0) {
      done_offset -= BytesPerInt;
    }
    if (done_offset > start_offset) {
!     mem = clear_memory(ctl, mem, dest,
                         start_offset, phase->MakeConX(done_offset), phase);
    }
    if (done_offset < end_offset) { // emit the final 32-bit store
      Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
      adr = phase->transform(adr);
      const TypePtr* atp = TypeRawPtr::BOTTOM;
!     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
      mem = phase->transform(mem);
      done_offset += BytesPerInt;
    }
    assert(done_offset == end_offset, "");
    return mem;
--- 4418,24 ---
    intptr_t done_offset = end_offset;
    if ((done_offset % BytesPerLong) != 0) {
      done_offset -= BytesPerInt;
    }
    if (done_offset > start_offset) {
!     mem = clear_memory(ctl, mem, dest, val, raw_val,
                         start_offset, phase->MakeConX(done_offset), phase);
    }
    if (done_offset < end_offset) { // emit the final 32-bit store
      Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
      adr = phase->transform(adr);
      const TypePtr* atp = TypeRawPtr::BOTTOM;
!     if (val != nullptr) {
+       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
+       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
+     } else {
+       assert(raw_val == nullptr, "raw_val must be null when val is null");
+       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+     }
      mem = phase->transform(mem);
      done_offset += BytesPerInt;
    }
    assert(done_offset == end_offset, "");
    return mem;

*** 4319,11 ***
    return TypeTuple::MEMBAR;
  }
  
  //------------------------------match------------------------------------------
  // Construct projections for memory.
! Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
    switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::Memory:
      return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
    }
--- 4577,11 ---
    return TypeTuple::MEMBAR;
  }
  
  //------------------------------match------------------------------------------
  // Construct projections for memory.
! Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) {
    switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::Memory:
      return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
    }

*** 4606,11 ***
  
  // convenience function
  // return false if the init contains any stores already
  bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
    InitializeNode* init = initialization();
!   if (init == nullptr || init->is_complete())  return false;
    init->remove_extra_zeroes();
    // for now, if this allocation has already collected any inits, bail:
    if (init->is_non_zero())  return false;
    init->set_complete(phase);
    return true;
--- 4864,13 ---
  
  // convenience function
  // return false if the init contains any stores already
  bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
    InitializeNode* init = initialization();
!   if (init == nullptr || init->is_complete()) {
+     return false;
+   }
    init->remove_extra_zeroes();
    // for now, if this allocation has already collected any inits, bail:
    if (init->is_non_zero())  return false;
    init->set_complete(phase);
    return true;

*** 4790,10 ***
--- 5050,16 ---
                  // after the InitializeNode. We check the control of the
                  // object/array that is loaded from. If it's the same as
                  // the store control then we cannot capture the store.
                  assert(!n->is_Store(), "2 stores to same slice on same control?");
                  Node* base = other_adr;
+                 if (base->is_Phi()) {
+                   // In rare cases, base may be a PhiNode that reads
+                   // the same memory slice between the InitializeNode and the store.
+                   failed = true;
+                   break;
+                 }
                  assert(base->is_AddP(), "should be addp but is %s", base->Name());
                  base = base->in(AddPNode::Base);
                  if (base != nullptr) {
                    base = base->uncast();
                    if (base->is_Proj() && base->in(0) == alloc) {

*** 5376,10 ***
--- 5642,12 ---
        if (zeroes_needed > zeroes_done) {
          intptr_t zsize = zeroes_needed - zeroes_done;
          // Do some incremental zeroing on rawmem, in parallel with inits.
          zeroes_done = align_down(zeroes_done, BytesPerInt);
          rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
+                                               allocation()->in(AllocateNode::InitValue),
+                                               allocation()->in(AllocateNode::RawInitValue),
                                                zeroes_done, zeroes_needed,
                                                phase);
          zeroes_done = zeroes_needed;
          if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
            do_zeroing = false;   // leave the hole, next time

*** 5435,10 ***
--- 5703,12 ---
            zeroes_done = size_limit;
        }
      }
      if (zeroes_done < size_limit) {
        rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
+                                             allocation()->in(AllocateNode::InitValue),
+                                             allocation()->in(AllocateNode::RawInitValue),
                                              zeroes_done, size_in_bytes, phase);
      }
    }
  
    set_complete(phase);