
src/hotspot/share/opto/memnode.cpp


   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"

  26 #include "classfile/javaClasses.hpp"

  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "gc/shared/tlab_globals.hpp"
  31 #include "memory/allocation.inline.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "oops/objArrayKlass.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/arraycopynode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/regalloc.hpp"
  38 #include "opto/compile.hpp"
  39 #include "opto/connode.hpp"
  40 #include "opto/convertnode.hpp"

  41 #include "opto/loopnode.hpp"
  42 #include "opto/machnode.hpp"
  43 #include "opto/matcher.hpp"
  44 #include "opto/memnode.hpp"
  45 #include "opto/mulnode.hpp"
  46 #include "opto/narrowptrnode.hpp"
  47 #include "opto/phaseX.hpp"
  48 #include "opto/regmask.hpp"
  49 #include "opto/rootnode.hpp"
  50 #include "opto/vectornode.hpp"
  51 #include "utilities/align.hpp"
  52 #include "utilities/copy.hpp"
  53 #include "utilities/macros.hpp"
  54 #include "utilities/powerOfTwo.hpp"
  55 #include "utilities/vmError.hpp"
  56 
  57 // Portions of code courtesy of Clifford Click
  58 
  59 // Optimization - Graph Style
  60 

 223       // clone the Phi with our address type
 224       result = mphi->split_out_instance(t_adr, igvn);
 225     } else {
 226       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
 227     }
 228   }
 229   return result;
 230 }
 231 
 232 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
 233   uint alias_idx = phase->C->get_alias_index(tp);
 234   Node *mem = mmem;
 235 #ifdef ASSERT
 236   {
 237     // Check that current type is consistent with the alias index used during graph construction
 238     assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
 239     bool consistent =  adr_check == NULL || adr_check->empty() ||
 240                        phase->C->must_alias(adr_check, alias_idx );
 241     // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
 242     if( !consistent && adr_check != NULL && !adr_check->empty() &&
 243                tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
 244         adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
 245         ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
 246           adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
 247           adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
 248       // don't assert if it is dead code.
 249       consistent = true;
 250     }
 251     if( !consistent ) {
 252       st->print("alias_idx==%d, adr_check==", alias_idx);
 253       if( adr_check == NULL ) {
 254         st->print("NULL");
 255       } else {
 256         adr_check->dump();
 257       }
 258       st->cr();
 259       print_alias_types();
 260       assert(consistent, "adr_check must match alias idx");
 261     }
 262   }
 263 #endif

 871          "use LoadKlassNode instead");
 872   assert(!(adr_type->isa_aryptr() &&
 873            adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
 874          "use LoadRangeNode instead");
 875   // Check control edge of raw loads
 876   assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
 877           // oop will be recorded in oop map if load crosses safepoint
 878           rt->isa_oopptr() || is_immutable_value(adr),
 879           "raw memory operations should have control edge");
 880   LoadNode* load = NULL;
 881   switch (bt) {
 882   case T_BOOLEAN: load = new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 883   case T_BYTE:    load = new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 884   case T_INT:     load = new LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 885   case T_CHAR:    load = new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 886   case T_SHORT:   load = new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 887   case T_LONG:    load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency); break;
 888   case T_FLOAT:   load = new LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
 889   case T_DOUBLE:  load = new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
 890   case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency); break;

 891   case T_OBJECT:
 892 #ifdef _LP64
 893     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
 894       load = new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency);
 895     } else
 896 #endif
 897     {
 898       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
 899       load = new LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
 900     }
 901     break;
 902   default:
 903     ShouldNotReachHere();
 904     break;
 905   }
 906   assert(load != NULL, "LoadNode should have been created");
 907   if (unaligned) {
 908     load->set_unaligned_access();
 909   }
 910   if (mismatched) {

 998 
 999     LoadNode* ld = clone()->as_Load();
1000     Node* addp = in(MemNode::Address)->clone();
1001     if (ac->as_ArrayCopy()->is_clonebasic()) {
1002       assert(ld_alloc != NULL, "need an alloc");
1003       assert(addp->is_AddP(), "address must be addp");
1004       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1005       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1006       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1007       addp->set_req(AddPNode::Base, src);
1008       addp->set_req(AddPNode::Address, src);
1009     } else {
1010       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1011              ac->as_ArrayCopy()->is_copyof_validated() ||
1012              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1013       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1014       addp->set_req(AddPNode::Base, src);
1015       addp->set_req(AddPNode::Address, src);
1016 
1017       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1018       BasicType ary_elem  = ary_t->klass()->as_array_klass()->element_type()->basic_type();
1019       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1020       uint shift  = exact_log2(type2aelembytes(ary_elem));




1021 
1022       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1023 #ifdef _LP64
1024       diff = phase->transform(new ConvI2LNode(diff));
1025 #endif
1026       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1027 
1028       Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1029       addp->set_req(AddPNode::Offset, offset);
1030     }
1031     addp = phase->transform(addp);
1032 #ifdef ASSERT
1033     const TypePtr* adr_type = phase->type(addp)->is_ptr();
1034     ld->_adr_type = adr_type;
1035 #endif
1036     ld->set_req(MemNode::Address, addp);
1037     ld->set_req(0, ctl);
1038     ld->set_req(MemNode::Memory, mem);
1039     // load depends on the tests that validate the arraycopy
1040     ld->_control_dependency = UnknownControl;
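
The adjustment above rebases the cloned load from the arraycopy's destination onto its source: the new AddP offset is the old one plus (SrcPos - DestPos) shifted by log2 of the element size. A standalone sketch of that arithmetic (illustrative only, not C2 code; the function name and parameters are invented for the example):

#include <cstdint>

// Given a byte offset into the destination array, compute the matching byte
// offset into the source array of the arraycopy.
intptr_t rebase_offset(intptr_t dest_byte_offset,         // AddPNode::Offset on the dest side
                       int32_t src_pos, int32_t dest_pos, // ArrayCopyNode::SrcPos / DestPos
                       int log2_elem_size) {              // exact_log2(type2aelembytes(elem))
  intptr_t diff = (intptr_t)(src_pos - dest_pos);         // SubI, then ConvI2L on 64-bit
  return dest_byte_offset + (diff << log2_elem_size);     // LShiftX + AddX
}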

1121         // Same base, same offset.
1122         // Possible improvement for arrays: check index value instead of absolute offset.
1123 
1124         // At this point we have proven something like this setup:
1125         //   B = << base >>
1126         //   L =  LoadQ(AddP(Check/CastPP(B), #Off))
1127         //   S = StoreQ(AddP(             B , #Off), V)
1128         // (Actually, we haven't yet proven the Q's are the same.)
1129         // In other words, we are loading from a casted version of
1130         // the same pointer-and-offset that we stored to.
1131         // Casted version may carry a dependency and it is respected.
1132         // Thus, we are able to replace L by V.
1133       }
1134       // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1135       if (store_Opcode() != st->Opcode()) {
1136         return NULL;
1137       }
1138       // LoadVector/StoreVector needs additional check to ensure the types match.
1139       if (st->is_StoreVector()) {
1140         const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
1141         const TypeVect* out_vt = as_LoadVector()->vect_type();
1142         if (in_vt != out_vt) {
1143           return NULL;
1144         }
1145       }
1146       return st->in(MemNode::ValueIn);
1147     }
1148 
1149     // A load from a freshly-created object always returns zero.
1150     // (This can happen after LoadNode::Ideal resets the load's memory input
1151     // to find_captured_store, which returned InitializeNode::zero_memory.)
1152     if (st->is_Proj() && st->in(0)->is_Allocate() &&
1153         (st->in(0) == ld_alloc) &&
1154         (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1155       // return a zero value for the load's basic type
1156       // (This is one of the few places where a generic PhaseTransform
1157       // can create new nodes.  Think of it as lazily manifesting
1158       // virtually pre-existing constants.)






1159       if (memory_type() != T_VOID) {
1160         if (ReduceBulkZeroing || find_array_copy_clone(phase, ld_alloc, in(MemNode::Memory)) == NULL) {
1161           // If ReduceBulkZeroing is disabled, we need to check if the allocation does not belong to an
1162           // ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
1163           // by the ArrayCopyNode.
1164           return phase->zerocon(memory_type());
1165         }
1166       } else {
1167         // TODO: materialize all-zero vector constant
1168         assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1169       }
1170     }
1171 
1172     // A load from an initialization barrier can match a captured store.
1173     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1174       InitializeNode* init = st->in(0)->as_Initialize();
1175       AllocateNode* alloc = init->allocation();
1176       if ((alloc != NULL) && (alloc == ld_alloc)) {
1177         // examine a captured store value
1178         st = init->find_captured_store(ld_off, memory_size(), phase);

1206 //----------------------is_instance_field_load_with_local_phi------------------
1207 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1208   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1209       in(Address)->is_AddP() ) {
1210     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1211     // Only instances and boxed values.
1212     if( t_oop != NULL &&
1213         (t_oop->is_ptr_to_boxed_value() ||
1214          t_oop->is_known_instance_field()) &&
1215         t_oop->offset() != Type::OffsetBot &&
1216         t_oop->offset() != Type::OffsetTop) {
1217       return true;
1218     }
1219   }
1220   return false;
1221 }
1222 
1223 //------------------------------Identity---------------------------------------
1224 // Loads are identity if previous store is to same address
1225 Node* LoadNode::Identity(PhaseGVN* phase) {



























1226   // If the previous store-maker is the right kind of Store, and the store is
1227   // to the same address, then we are equal to the value stored.
1228   Node* mem = in(Memory);
1229   Node* value = can_see_stored_value(mem, phase);
1230   if( value ) {
1231     // byte, short & char stores truncate naturally.
1232     // A load has to load the truncated value which requires
1233     // some sort of masking operation and that requires an
1234     // Ideal call instead of an Identity call.
1235     if (memory_size() < BytesPerInt) {
1236       // If the input to the store does not fit with the load's result type,
1237       // it must be truncated via an Ideal call.
1238       if (!phase->type(value)->higher_equal(phase->type(this)))
1239         return this;
1240     }
1241     // (This works even when value is a Con, but LoadNode::Value
1242     // usually runs first, producing the singleton type of the Con.)
1243     return value;
1244   }
1245 

1908       }
1909     }
1910 
1911     // Don't do this for integer types. There is only potential profit if
1912     // the element type t is lower than _type; that is, for int types, if _type is
1913     // more restrictive than t.  This only happens here if one is short and the other
1914     // char (both 16 bits), and in those cases we've made an intentional decision
1915     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
1916     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
1917     //
1918     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
1919     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
1920     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
1921     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
1922     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
1923     // In fact, that could have been the original type of p1, and p1 could have
1924     // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
1925     // expression (LShiftL quux 3) independently optimized to the constant 8.
1926     if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
1927         && (_type->isa_vect() == NULL)

1928         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
1929       // t might actually be lower than _type, if _type is a unique
1930       // concrete subclass of abstract class t.
1931       if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
1932         const Type* jt = t->join_speculative(_type);
1933         // In any case, do not allow the join, per se, to empty out the type.
1934         if (jt->empty() && !t->empty()) {
 1935           // This can happen if an interface-typed array narrows to a class type.
1936           jt = _type;
1937         }
1938 #ifdef ASSERT
1939         if (phase->C->eliminate_boxing() && adr->is_AddP()) {
1940           // The pointers in the autobox arrays are always non-null
1941           Node* base = adr->in(AddPNode::Base);
1942           if ((base != NULL) && base->is_DecodeN()) {
1943             // Get LoadN node which loads IntegerCache.cache field
1944             base = base->in(1);
1945           }
1946           if ((base != NULL) && base->is_Con()) {
1947             const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
1948             if ((base_type != NULL) && base_type->is_autobox_cache()) {
1949               // It could be narrow oop
1950               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
1951             }
1952           }
1953         }
1954 #endif
1955         return jt;
1956       }
1957     }
1958   } else if (tp->base() == Type::InstPtr) {
1959     assert( off != Type::OffsetBot ||
1960             // arrays can be cast to Objects
1961             tp->is_oopptr()->klass()->is_java_lang_Object() ||

1962             // unsafe field access may not have a constant offset
1963             C->has_unsafe_access(),
1964             "Field accesses must be precise" );
1965     // For oop loads, we expect the _type to be precise.
1966 
1967     // Optimize loads from constant fields.
1968     const TypeInstPtr* tinst = tp->is_instptr();



1969     ciObject* const_oop = tinst->const_oop();
1970     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) {
1971       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());


















1972       if (con_type != NULL) {
1973         return con_type;
1974       }
1975     }
1976   } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
1977     assert( off != Type::OffsetBot ||
1978             // arrays can be cast to Objects

1979             tp->is_klassptr()->klass()->is_java_lang_Object() ||
1980             // also allow array-loading from the primary supertype
1981             // array during subtype checks
1982             Opcode() == Op_LoadKlass,
1983             "Field accesses must be precise" );
1984     // For klass/static loads, we expect the _type to be precise
1985   } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
 1986     /* With mirrors being an indirection in the Klass*
1987      * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
1988      * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
1989      *
1990      * So check the type and klass of the node before the LoadP.
1991      */
1992     Node* adr2 = adr->in(MemNode::Address);
1993     const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
1994     if (tkls != NULL && !StressReflectiveCode) {
1995       ciKlass* klass = tkls->klass();
1996       if (klass->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
1997         assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
1998         assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
1999         return TypeInstPtr::make(klass->java_mirror());
















2000       }
2001     }
2002   }
2003 
2004   const TypeKlassPtr *tkls = tp->isa_klassptr();
2005   if (tkls != NULL && !StressReflectiveCode) {
2006     ciKlass* klass = tkls->klass();
2007     if (klass->is_loaded() && tkls->klass_is_exact()) {
2008       // We are loading a field from a Klass metaobject whose identity
2009       // is known at compile time (the type is "exact" or "precise").
2010       // Check for fields we know are maintained as constants by the VM.
2011       if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2012         // The field is Klass::_super_check_offset.  Return its (constant) value.
2013         // (Folds up type checking code.)
2014         assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2015         return TypeInt::make(klass->super_check_offset());
2016       }
2017       // Compute index into primary_supers array
2018       juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2019       // Check for overflowing; use unsigned compare to handle the negative case.
2020       if( depth < ciKlass::primary_super_limit() ) {
2021         // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2022         // (Folds up type checking code.)
2023         assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2024         ciKlass *ss = klass->super_of_depth(depth);
2025         return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
2026       }
2027       const Type* aift = load_array_final_field(tkls, klass);
2028       if (aift != NULL)  return aift;
2029     }
2030 
2031     // We can still check if we are loading from the primary_supers array at a
2032     // shallow enough depth.  Even though the klass is not exact, entries less
2033     // than or equal to its super depth are correct.
2034     if (klass->is_loaded() ) {
2035       ciType *inner = klass;
2036       while( inner->is_obj_array_klass() )
2037         inner = inner->as_obj_array_klass()->base_element_type();
2038       if( inner->is_instance_klass() &&
2039           !inner->as_instance_klass()->flags().is_interface() ) {
2040         // Compute index into primary_supers array
2041         juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2042         // Check for overflowing; use unsigned compare to handle the negative case.
2043         if( depth < ciKlass::primary_super_limit() &&
2044             depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
2045           // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2046           // (Folds up type checking code.)
2047           assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2048           ciKlass *ss = klass->super_of_depth(depth);
2049           return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
2050         }
2051       }
2052     }
2053 
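
The depth computation above deliberately uses an unsigned comparison: if the load's offset lies below Klass::_primary_supers, the subtraction goes negative and wraps to a huge unsigned value, so one bound check rejects it. A standalone illustration of the trick (plain C++; the constants are invented for the example):

#include <cstdio>

int main() {
  const unsigned limit = 8;        // stands in for ciKlass::primary_super_limit()
  int byte_offset     = -16;       // an offset before the supers array => negative numerator
  unsigned depth = (unsigned)byte_offset / sizeof(void*);
  // The negative offset wraps to a very large depth, so "depth < limit" is false:
  // a single unsigned compare covers both the upper bound and the too-small-offset case.
  printf("depth=%u in_range=%d\n", depth, depth < limit ? 1 : 0);
  return 0;
}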
2054     // If the type is enough to determine that the thing is not an array,

2079   if (ReduceFieldZeroing || is_instance || is_boxed_value) {
2080     Node* value = can_see_stored_value(mem,phase);
2081     if (value != NULL && value->is_Con()) {
2082       assert(value->bottom_type()->higher_equal(_type),"sanity");
2083       return value->bottom_type();
2084     }
2085   }
2086 
2087   bool is_vect = (_type->isa_vect() != NULL);
2088   if (is_instance && !is_vect) {
2089     // If we have an instance type and our memory input is the
 2090     // program's initial memory state, there is no matching store,
2091     // so just return a zero of the appropriate type -
2092     // except if it is vectorized - then we have no zero constant.
2093     Node *mem = in(MemNode::Memory);
2094     if (mem->is_Parm() && mem->in(0)->is_Start()) {
2095       assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2096       return Type::get_zero_type(_type->basic_type());
2097     }
2098   }
2099 
2100   Node* alloc = is_new_object_mark_load(phase);
2101   if (alloc != NULL) {
2102     return TypeX::make(markWord::prototype().value());










2103   }
2104 
2105   return _type;
2106 }
2107 
2108 //------------------------------match_edge-------------------------------------
2109 // Do we Match on this edge index or not?  Match only the address.
2110 uint LoadNode::match_edge(uint idx) const {
2111   return idx == MemNode::Address;
2112 }
2113 
2114 //--------------------------LoadBNode::Ideal--------------------------------------
2115 //
2116 //  If the previous store is to the same address as this load,
2117 //  and the value stored was larger than a byte, replace this load
2118 //  with the value stored truncated to a byte.  If no truncation is
2119 //  needed, the replacement is done in LoadNode::Identity().
2120 //
2121 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2122   Node* mem = in(MemNode::Memory);

2233   return LoadNode::Ideal(phase, can_reshape);
2234 }
2235 
2236 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2237   Node* mem = in(MemNode::Memory);
2238   Node* value = can_see_stored_value(mem,phase);
2239   if (value != NULL && value->is_Con() &&
2240       !value->bottom_type()->higher_equal(_type)) {
2241     // If the input to the store does not fit with the load's result type,
2242     // it must be truncated. We can't delay until Ideal call since
2243     // a singleton Value is needed for split_thru_phi optimization.
2244     int con = value->get_int();
2245     return TypeInt::make((con << 16) >> 16);
2246   }
2247   return LoadNode::Value(phase);
2248 }
2249 
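
The constant folding above narrows a stored constant to the 16-bit signed range with (con << 16) >> 16. The same truncation in standalone form (illustrative, not C2 code):

#include <cstdint>

// Keep only the low 16 bits of `con` and sign-extend them back to int,
// which is what (con << 16) >> 16 computes with arithmetic right shifts.
int truncate_to_short(int con) {
  return (int)(int16_t)(con & 0xFFFF);
}
// truncate_to_short(0x1ABCD) == -21555, i.e. 0xFFFFABCD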
2250 //=============================================================================
2251 //----------------------------LoadKlassNode::make------------------------------
2252 // Polymorphic factory method:
2253 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {

2254   // sanity check the alias category against the created node type
2255   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
2256   assert(adr_type != NULL, "expecting TypeKlassPtr");
2257 #ifdef _LP64
2258   if (adr_type->is_ptr_to_narrowklass()) {
2259     assert(UseCompressedClassPointers, "no compressed klasses");
2260     Node* load_klass = gvn.transform(new LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2261     return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2262   }
2263 #endif
2264   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2265   return new LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
2266 }
2267 
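
On 64-bit with compressed class pointers, the factory above loads a 32-bit narrow klass (LoadNKlassNode) and decodes it (DecodeNKlassNode). A simplified standalone model of narrow-pointer decoding (illustrative only; the real base and shift come from the VM's compressed class pointer configuration and are assumptions here):

#include <cstdint>

// Simplified model: a narrow 32-bit reference expands to a full pointer as
// base + (narrow << shift). The concrete encoding parameters are assumed.
uintptr_t decode_narrow(uint32_t narrow, uintptr_t encoding_base, unsigned shift) {
  return encoding_base + ((uintptr_t)narrow << shift);
}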
2268 //------------------------------Value------------------------------------------
2269 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
2270   return klass_value_common(phase);
2271 }
2272 
2273 // In most cases, LoadKlassNode does not have the control input set. If the control

2280   // Either input is TOP ==> the result is TOP
2281   const Type *t1 = phase->type( in(MemNode::Memory) );
2282   if (t1 == Type::TOP)  return Type::TOP;
2283   Node *adr = in(MemNode::Address);
2284   const Type *t2 = phase->type( adr );
2285   if (t2 == Type::TOP)  return Type::TOP;
2286   const TypePtr *tp = t2->is_ptr();
2287   if (TypePtr::above_centerline(tp->ptr()) ||
2288       tp->ptr() == TypePtr::Null)  return Type::TOP;
2289 
2290   // Return a more precise klass, if possible
2291   const TypeInstPtr *tinst = tp->isa_instptr();
2292   if (tinst != NULL) {
2293     ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
2294     int offset = tinst->offset();
2295     if (ik == phase->C->env()->Class_klass()
2296         && (offset == java_lang_Class::klass_offset() ||
2297             offset == java_lang_Class::array_klass_offset())) {
2298       // We are loading a special hidden field from a Class mirror object,
2299       // the field which points to the VM's Klass metaobject.
2300       ciType* t = tinst->java_mirror_type();

2301       // java_mirror_type returns non-null for compile-time Class constants.
2302       if (t != NULL) {
2303         // constant oop => constant klass
2304         if (offset == java_lang_Class::array_klass_offset()) {
2305           if (t->is_void()) {
2306             // We cannot create a void array.  Since void is a primitive type return null
2307             // klass.  Users of this result need to do a null check on the returned klass.
2308             return TypePtr::NULL_PTR;
2309           }
2310           return TypeKlassPtr::make(ciArrayKlass::make(t));
2311         }
2312         if (!t->is_klass()) {
2313           // a primitive Class (e.g., int.class) has NULL for a klass field
2314           return TypePtr::NULL_PTR;
2315         }
2316         // (Folds up the 1st indirection in aClassConstant.getModifiers().)
2317         return TypeKlassPtr::make(t->as_klass());
2318       }
2319       // non-constant mirror, so we can't tell what's going on
2320     }
2321     if( !ik->is_loaded() )
2322       return _type;             // Bail out if not loaded
2323     if (offset == oopDesc::klass_offset_in_bytes()) {
2324       if (tinst->klass_is_exact()) {
2325         return TypeKlassPtr::make(ik);
2326       }
2327       // See if we can become precise: no subklasses and no interface
2328       // (Note:  We need to support verified interfaces.)
2329       if (!ik->is_interface() && !ik->has_subklass()) {
2330         // Add a dependence; if any subclass added we need to recompile
2331         if (!ik->is_final()) {
2332           // %%% should use stronger assert_unique_concrete_subtype instead
2333           phase->C->dependencies()->assert_leaf_type(ik);
2334         }
2335         // Return precise klass
2336         return TypeKlassPtr::make(ik);
2337       }
2338 
2339       // Return root of possible klass
2340       return TypeKlassPtr::make(TypePtr::NotNull, ik, 0/*offset*/);
2341     }
2342   }
2343 
2344   // Check for loading klass from an array
2345   const TypeAryPtr *tary = tp->isa_aryptr();
2346   if( tary != NULL ) {
2347     ciKlass *tary_klass = tary->klass();
2348     if (tary_klass != NULL   // can be NULL when at BOTTOM or TOP
2349         && tary->offset() == oopDesc::klass_offset_in_bytes()) {
2350       if (tary->klass_is_exact()) {
2351         return TypeKlassPtr::make(tary_klass);
2352       }
2353       ciArrayKlass *ak = tary->klass()->as_array_klass();
2354       // If the klass is an object array, we defer the question to the
2355       // array component klass.
2356       if( ak->is_obj_array_klass() ) {
2357         assert( ak->is_loaded(), "" );
2358         ciKlass *base_k = ak->as_obj_array_klass()->base_element_klass();
2359         if( base_k->is_loaded() && base_k->is_instance_klass() ) {
2360           ciInstanceKlass* ik = base_k->as_instance_klass();
2361           // See if we can become precise: no subklasses and no interface
2362           if (!ik->is_interface() && !ik->has_subklass()) {


2363             // Add a dependence; if any subclass added we need to recompile
2364             if (!ik->is_final()) {
2365               phase->C->dependencies()->assert_leaf_type(ik);
2366             }
2367             // Return precise array klass
2368             return TypeKlassPtr::make(ak);
2369           }
2370         }
2371         return TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
2372       } else {                  // Found a type-array?
2373         assert( ak->is_type_array_klass(), "" );
2374         return TypeKlassPtr::make(ak); // These are always precise
2375       }
2376     }
2377   }
2378 
2379   // Check for loading klass from an array klass
2380   const TypeKlassPtr *tkls = tp->isa_klassptr();
2381   if (tkls != NULL && !StressReflectiveCode) {
2382     ciKlass* klass = tkls->klass();
2383     if( !klass->is_loaded() )
2384       return _type;             // Bail out if not loaded


2385     if( klass->is_obj_array_klass() &&
2386         tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2387       ciKlass* elem = klass->as_obj_array_klass()->element_klass();
2388       // // Always returning precise element type is incorrect,
2389       // // e.g., element type could be object and array may contain strings
2390       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2391 
2392       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2393       // according to the element type's subclassing.
2394       return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);




2395     }
2396     if( klass->is_instance_klass() && tkls->klass_is_exact() &&
2397         tkls->offset() == in_bytes(Klass::super_offset())) {
2398       ciKlass* sup = klass->as_instance_klass()->super();
2399       // The field is Klass::_super.  Return its (constant) value.
2400       // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
2401       return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
2402     }
2403   }
2404 
2405   // Bailout case
2406   return LoadNode::Value(phase);
2407 }
2408 
2409 //------------------------------Identity---------------------------------------
2410 // To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
2411 // Also feed through the klass in Allocate(...klass...)._klass.
2412 Node* LoadKlassNode::Identity(PhaseGVN* phase) {
2413   return klass_identity_common(phase);
2414 }

2582 //=============================================================================
2583 //---------------------------StoreNode::make-----------------------------------
2584 // Polymorphic factory method:
2585 StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
2586   assert((mo == unordered || mo == release), "unexpected");
2587   Compile* C = gvn.C;
2588   assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
2589          ctl != NULL, "raw memory operations should have control edge");
2590 
2591   switch (bt) {
2592   case T_BOOLEAN: val = gvn.transform(new AndINode(val, gvn.intcon(0x1))); // Fall through to T_BYTE case
2593   case T_BYTE:    return new StoreBNode(ctl, mem, adr, adr_type, val, mo);
2594   case T_INT:     return new StoreINode(ctl, mem, adr, adr_type, val, mo);
2595   case T_CHAR:
2596   case T_SHORT:   return new StoreCNode(ctl, mem, adr, adr_type, val, mo);
2597   case T_LONG:    return new StoreLNode(ctl, mem, adr, adr_type, val, mo);
2598   case T_FLOAT:   return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
2599   case T_DOUBLE:  return new StoreDNode(ctl, mem, adr, adr_type, val, mo);
2600   case T_METADATA:
2601   case T_ADDRESS:

2602   case T_OBJECT:
2603 #ifdef _LP64
2604     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2605       val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop()));
2606       return new StoreNNode(ctl, mem, adr, adr_type, val, mo);
2607     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
2608                (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
2609                 adr->bottom_type()->isa_rawptr())) {
2610       val = gvn.transform(new EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
2611       return new StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
2612     }
2613 #endif
2614     {
2615       return new StorePNode(ctl, mem, adr, adr_type, val, mo);
2616     }
2617   default:
2618     ShouldNotReachHere();
2619     return (StoreNode*)NULL;
2620   }
2621 }
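
Two details of the switch above are easy to miss: T_BOOLEAN values are first masked with AndI(val, 0x1) and then fall through to the byte store, and T_CHAR/T_SHORT share the same 16-bit store node (StoreC). A standalone sketch of those width and normalization choices (plain C++, not C2 code):

#include <cstdint>

// Booleans are normalized to 0/1 and stored as one byte.
void store_boolean(int8_t* slot, int val) {
  *slot = (int8_t)(val & 0x1);
}

// char and short are both stored through the same 16-bit store.
void store_char_or_short(int16_t* slot, int val) {
  *slot = (int16_t)val;
}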

2643 
2644   // Since they are not commoned, do not hash them:
2645   return NO_HASH;
2646 }
2647 
2648 //------------------------------Ideal------------------------------------------
 2649 // Change back-to-back Store(Store(m, p, y), p, x) to Store(m, p, x).
2650 // When a store immediately follows a relevant allocation/initialization,
2651 // try to capture it into the initialization, or hoist it above.
2652 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2653   Node* p = MemNode::Ideal_common(phase, can_reshape);
2654   if (p)  return (p == NodeSentinel) ? NULL : p;
2655 
2656   Node* mem     = in(MemNode::Memory);
2657   Node* address = in(MemNode::Address);
2658   Node* value   = in(MemNode::ValueIn);
2659   // Back-to-back stores to same address?  Fold em up.  Generally
2660   // unsafe if I have intervening uses...  Also disallowed for StoreCM
2661   // since they must follow each StoreP operation.  Redundant StoreCMs
2662   // are eliminated just before matching in final_graph_reshape.
2663   {
2664     Node* st = mem;
2665     // If Store 'st' has more than one use, we cannot fold 'st' away.
2666     // For example, 'st' might be the final state at a conditional
2667     // return.  Or, 'st' might be used by some node which is live at
2668     // the same time 'st' is live, which might be unschedulable.  So,
2669     // require exactly ONE user until such time as we clone 'mem' for
2670     // each of 'mem's uses (thus making the exactly-1-user-rule hold
2671     // true).
2672     while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
2673       // Looking at a dead closed cycle of memory?
2674       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
2675       assert(Opcode() == st->Opcode() ||
2676              st->Opcode() == Op_StoreVector ||
2677              Opcode() == Op_StoreVector ||
2678              st->Opcode() == Op_StoreVectorScatter ||
2679              Opcode() == Op_StoreVectorScatter ||
2680              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
2681              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
2682              (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy

2683              (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
2684              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
2685 
2686       if (st->in(MemNode::Address)->eqv_uncast(address) &&
2687           st->as_Store()->memory_size() <= this->memory_size()) {
2688         Node* use = st->raw_out(0);
2689         if (phase->is_IterGVN()) {
2690           phase->is_IterGVN()->rehash_node_delayed(use);
2691         }
2692         // It's OK to do this in the parser, since DU info is always accurate,
2693         // and the parser always refers to nodes via SafePointNode maps.
2694         use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
2695         return this;
2696       }
2697       st = st->in(MemNode::Memory);
2698     }
2699   }
2700 
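
The loop above lets a store bypass an earlier store to the same (or a covering) address when the earlier store's only use is the later store's memory input, i.e. the earlier write is dead. A minimal C++ analogue of the shape being folded (illustrative only):

struct Point { int x; };

// The first assignment's only observer is the second assignment's memory state,
// so the later store's memory edge can be rewired past it; after folding,
// only the last write to p->x remains.
void set_twice(Point* p) {
  p->x = 1;   // earlier StoreI, exactly one use
  p->x = 2;   // later StoreI to the same address, at least as wide
}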
2701 
2702   // Capture an unaliased, unconditional, simple store into an initializer.

2759   // Load then Store?  Then the Store is useless
2760   if (val->is_Load() &&
2761       val->in(MemNode::Address)->eqv_uncast(adr) &&
2762       val->in(MemNode::Memory )->eqv_uncast(mem) &&
2763       val->as_Load()->store_Opcode() == Opcode()) {
2764     result = mem;
2765   }
2766 
2767   // Two stores in a row of the same value?
2768   if (result == this &&
2769       mem->is_Store() &&
2770       mem->in(MemNode::Address)->eqv_uncast(adr) &&
2771       mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
2772       mem->Opcode() == Opcode()) {
2773     result = mem;
2774   }
2775 
2776   // Store of zero anywhere into a freshly-allocated object?
2777   // Then the store is useless.
2778   // (It must already have been captured by the InitializeNode.)
2779   if (result == this &&
2780       ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
2781     // a newly allocated object is already all-zeroes everywhere
2782     if (mem->is_Proj() && mem->in(0)->is_Allocate()) {


2783       result = mem;
2784     }
2785 
2786     if (result == this) {
2787       // the store may also apply to zero-bits in an earlier object
2788       Node* prev_mem = find_previous_store(phase);
2789       // Steps (a), (b):  Walk past independent stores to find an exact match.
2790       if (prev_mem != NULL) {
2791         Node* prev_val = can_see_stored_value(prev_mem, phase);
2792         if (prev_val != NULL && prev_val == val) {
2793           // prev_val and val might differ by a cast; it would be good
2794           // to keep the more informative of the two.
2795           result = mem;
2796         }
2797       }
2798     }
2799   }
2800 
2801   PhaseIterGVN* igvn = phase->is_IterGVN();
2802   if (result != this && igvn != NULL) {
2803     MemBarNode* trailing = trailing_membar();
2804     if (trailing != NULL) {
2805 #ifdef ASSERT
2806       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();

2951 Node* StoreCMNode::Identity(PhaseGVN* phase) {
2952   // No need to card mark when storing a null ptr
2953   Node* my_store = in(MemNode::OopStore);
2954   if (my_store->is_Store()) {
2955     const Type *t1 = phase->type( my_store->in(MemNode::ValueIn) );
2956     if( t1 == TypePtr::NULL_PTR ) {
2957       return in(MemNode::Memory);
2958     }
2959   }
2960   return this;
2961 }
2962 
2963 //=============================================================================
2964 //------------------------------Ideal---------------------------------------
2965 Node *StoreCMNode::Ideal(PhaseGVN *phase, bool can_reshape){
2966   Node* progress = StoreNode::Ideal(phase, can_reshape);
2967   if (progress != NULL) return progress;
2968 
2969   Node* my_store = in(MemNode::OopStore);
2970   if (my_store->is_MergeMem()) {
2971     Node* mem = my_store->as_MergeMem()->memory_at(oop_alias_idx());
2972     set_req_X(MemNode::OopStore, mem, phase);
2973     return this;




2974   }
2975 
2976   return NULL;
2977 }
2978 
2979 //------------------------------Value-----------------------------------------
2980 const Type* StoreCMNode::Value(PhaseGVN* phase) const {
2981   // Either input is TOP ==> the result is TOP (checked in StoreNode::Value).
2982   // If extra input is TOP ==> the result is TOP
2983   const Type* t = phase->type(in(MemNode::OopStore));
2984   if (t == Type::TOP) {
2985     return Type::TOP;
2986   }
2987   return StoreNode::Value(phase);
2988 }
2989 
2990 
2991 //=============================================================================
2992 //----------------------------------SCMemProjNode------------------------------
2993 const Type* SCMemProjNode::Value(PhaseGVN* phase) const

3112 // Clearing a short array is faster with stores
3113 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3114   // Already know this is a large node, do not try to ideal it
3115   if (!IdealizeClearArrayNode || _is_large) return NULL;
3116 
3117   const int unit = BytesPerLong;
3118   const TypeX* t = phase->type(in(2))->isa_intptr_t();
3119   if (!t)  return NULL;
3120   if (!t->is_con())  return NULL;
3121   intptr_t raw_count = t->get_con();
3122   intptr_t size = raw_count;
3123   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
3124   // Clearing nothing uses the Identity call.
3125   // Negative clears are possible on dead ClearArrays
3126   // (see jck test stmt114.stmt11402.val).
3127   if (size <= 0 || size % unit != 0)  return NULL;
3128   intptr_t count = size / unit;
3129   // Length too long; communicate this to matchers and assemblers.
 3130   // Assemblers are responsible for producing fast hardware clears for it.
3131   if (size > InitArrayShortSize) {
3132     return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
3133   } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
3134     return NULL;
3135   }
3136   Node *mem = in(1);
3137   if( phase->type(mem)==Type::TOP ) return NULL;
3138   Node *adr = in(3);
3139   const Type* at = phase->type(adr);
3140   if( at==Type::TOP ) return NULL;
3141   const TypePtr* atp = at->isa_ptr();
3142   // adjust atp to be the correct array element address type
3143   if (atp == NULL)  atp = TypePtr::BOTTOM;
3144   else              atp = atp->add_offset(Type::OffsetBot);
3145   // Get base for derived pointer purposes
3146   if( adr->Opcode() != Op_AddP ) Unimplemented();
3147   Node *base = adr->in(1);
3148 
3149   Node *zero = phase->makecon(TypeLong::ZERO);
3150   Node *off  = phase->MakeConX(BytesPerLong);
3151   mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
3152   count--;
3153   while( count-- ) {
3154     mem = phase->transform(mem);
3155     adr = phase->transform(new AddPNode(base,adr,off));
3156     mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
3157   }
3158   return mem;
3159 }
3160 
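
For a short, constant-length clear the transform above replaces the ClearArray with a chain of 8-byte zero stores, one StoreL per double-word. A standalone sketch of the count computation and the equivalent loop (plain C++, not C2 code):

#include <cstdint>

// Clear size_in_bytes bytes (a positive multiple of BytesPerLong == 8) using one
// 8-byte zero store per double-word, mirroring the StoreL chain built above.
void clear_small_array(int64_t* base, intptr_t size_in_bytes) {
  intptr_t count = size_in_bytes / 8;   // size <= 0 or size % unit != 0 bails out above
  for (intptr_t i = 0; i < count; i++) {
    base[i] = 0;
  }
}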
3161 //----------------------------step_through----------------------------------
3162 // Return allocation input memory edge if it is different instance
3163 // or itself if it is the one we are looking for.
3164 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
3165   Node* n = *np;
3166   assert(n->is_ClearArray(), "sanity");
3167   intptr_t offset;
3168   AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
3169   // This method is called only before Allocate nodes are expanded
3170   // during macro nodes expansion. Before that ClearArray nodes are
3171   // only generated in PhaseMacroExpand::generate_arraycopy() (before
3172   // Allocate nodes are expanded) which follows allocations.
3173   assert(alloc != NULL, "should have allocation");
3174   if (alloc->_idx == instance_id) {
3175     // Can not bypass initialization of the instance we are looking for.
3176     return false;
3177   }
3178   // Otherwise skip it.
3179   InitializeNode* init = alloc->initialization();
3180   if (init != NULL)
3181     *np = init->in(TypeFunc::Memory);
3182   else
3183     *np = alloc->in(TypeFunc::Memory);
3184   return true;
3185 }
3186 
3187 //----------------------------clear_memory-------------------------------------
3188 // Generate code to initialize object storage to zero.
3189 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,


3190                                    intptr_t start_offset,
3191                                    Node* end_offset,
3192                                    PhaseGVN* phase) {
3193   intptr_t offset = start_offset;
3194 
3195   int unit = BytesPerLong;
3196   if ((offset % unit) != 0) {
3197     Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
3198     adr = phase->transform(adr);
3199     const TypePtr* atp = TypeRawPtr::BOTTOM;
3200     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);






3201     mem = phase->transform(mem);
3202     offset += BytesPerInt;
3203   }
3204   assert((offset % unit) == 0, "");
3205 
3206   // Initialize the remaining stuff, if any, with a ClearArray.
3207   return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
3208 }
3209 
3210 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,

3211                                    Node* start_offset,
3212                                    Node* end_offset,
3213                                    PhaseGVN* phase) {
3214   if (start_offset == end_offset) {
3215     // nothing to do
3216     return mem;
3217   }
3218 
3219   int unit = BytesPerLong;
3220   Node* zbase = start_offset;
3221   Node* zend  = end_offset;
3222 
3223   // Scale to the unit required by the CPU:
3224   if (!Matcher::init_array_count_is_in_bytes) {
3225     Node* shift = phase->intcon(exact_log2(unit));
3226     zbase = phase->transform(new URShiftXNode(zbase, shift) );
3227     zend  = phase->transform(new URShiftXNode(zend,  shift) );
3228   }
3229 
3230   // Bulk clear double-words
3231   Node* zsize = phase->transform(new SubXNode(zend, zbase) );
3232   Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
3233   mem = new ClearArrayNode(ctl, mem, zsize, adr, false);



3234   return phase->transform(mem);
3235 }
3236 
3237 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,


3238                                    intptr_t start_offset,
3239                                    intptr_t end_offset,
3240                                    PhaseGVN* phase) {
3241   if (start_offset == end_offset) {
3242     // nothing to do
3243     return mem;
3244   }
3245 
3246   assert((end_offset % BytesPerInt) == 0, "odd end offset");
3247   intptr_t done_offset = end_offset;
3248   if ((done_offset % BytesPerLong) != 0) {
3249     done_offset -= BytesPerInt;
3250   }
3251   if (done_offset > start_offset) {
3252     mem = clear_memory(ctl, mem, dest,
3253                        start_offset, phase->MakeConX(done_offset), phase);
3254   }
3255   if (done_offset < end_offset) { // emit the final 32-bit store
3256     Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
3257     adr = phase->transform(adr);
3258     const TypePtr* atp = TypeRawPtr::BOTTOM;
3259     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);






3260     mem = phase->transform(mem);
3261     done_offset += BytesPerInt;
3262   }
3263   assert(done_offset == end_offset, "");
3264   return mem;
3265 }
3266 
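
The overload above splits the byte range [start_offset, end_offset) into an 8-byte-aligned bulk clear plus, when end_offset is only 4-byte aligned, one trailing 32-bit store. A standalone sketch of that split (plain C++; memset stands in for the ClearArray and StoreI nodes):

#include <cstdint>
#include <cstring>

// Zero [start_offset, end_offset) within an object. end_offset must be 4-byte
// aligned (asserted above); a trailing 4-byte word is peeled off when the range
// does not end on an 8-byte boundary.
void clear_range(char* obj, intptr_t start_offset, intptr_t end_offset) {
  intptr_t done_offset = end_offset;
  if (done_offset % 8 != 0) {
    done_offset -= 4;                                               // leave the last 32-bit word
  }
  if (done_offset > start_offset) {
    std::memset(obj + start_offset, 0, done_offset - start_offset); // bulk double-word clear
  }
  if (done_offset < end_offset) {
    std::memset(obj + done_offset, 0, 4);                           // the final 32-bit store
  }
}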
3267 //=============================================================================
3268 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
3269   : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
3270     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
3271 #ifdef ASSERT
3272   , _pair_idx(0)
3273 #endif
3274 {
3275   init_class_id(Class_MemBar);
3276   Node* top = C->top();
3277   init_req(TypeFunc::I_O,top);
3278   init_req(TypeFunc::FramePtr,top);
3279   init_req(TypeFunc::ReturnAdr,top);

3385       PhaseIterGVN* igvn = phase->is_IterGVN();
3386       remove(igvn);
3387       // Must return either the original node (now dead) or a new node
3388       // (Do not return a top here, since that would break the uniqueness of top.)
3389       return new ConINode(TypeInt::ZERO);
3390     }
3391   }
3392   return progress ? this : NULL;
3393 }
3394 
3395 //------------------------------Value------------------------------------------
3396 const Type* MemBarNode::Value(PhaseGVN* phase) const {
3397   if( !in(0) ) return Type::TOP;
3398   if( phase->type(in(0)) == Type::TOP )
3399     return Type::TOP;
3400   return TypeTuple::MEMBAR;
3401 }
3402 
3403 //------------------------------match------------------------------------------
3404 // Construct projections for memory.
3405 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
3406   switch (proj->_con) {
3407   case TypeFunc::Control:
3408   case TypeFunc::Memory:
3409     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
3410   }
3411   ShouldNotReachHere();
3412   return NULL;
3413 }
3414 
3415 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3416   trailing->_kind = TrailingStore;
3417   leading->_kind = LeadingStore;
3418 #ifdef ASSERT
3419   trailing->_pair_idx = leading->_idx;
3420   leading->_pair_idx = leading->_idx;
3421 #endif
3422 }
3423 
3424 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3425   trailing->_kind = TrailingLoadStore;

3692   return (req() > RawStores);
3693 }
3694 
3695 void InitializeNode::set_complete(PhaseGVN* phase) {
3696   assert(!is_complete(), "caller responsibility");
3697   _is_complete = Complete;
3698 
3699   // After this node is complete, it contains a bunch of
3700   // raw-memory initializations.  There is no need for
3701   // it to have anything to do with non-raw memory effects.
3702   // Therefore, tell all non-raw users to re-optimize themselves,
3703   // after skipping the memory effects of this initialization.
3704   PhaseIterGVN* igvn = phase->is_IterGVN();
3705   if (igvn)  igvn->add_users_to_worklist(this);
3706 }
3707 
3708 // convenience function
3709 // return false if the init contains any stores already
3710 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
3711   InitializeNode* init = initialization();
3712   if (init == NULL || init->is_complete())  return false;


3713   init->remove_extra_zeroes();
3714   // for now, if this allocation has already collected any inits, bail:
3715   if (init->is_non_zero())  return false;
3716   init->set_complete(phase);
3717   return true;
3718 }
3719 
3720 void InitializeNode::remove_extra_zeroes() {
3721   if (req() == RawStores)  return;
3722   Node* zmem = zero_memory();
3723   uint fill = RawStores;
3724   for (uint i = fill; i < req(); i++) {
3725     Node* n = in(i);
3726     if (n->is_top() || n == zmem)  continue;  // skip
3727     if (fill < i)  set_req(fill, n);          // compact
3728     ++fill;
3729   }
3730   // delete any empty spaces created:
3731   while (fill < req()) {
3732     del_req(fill);

3870             // store node that we'd like to capture. We need to check
3871             // the uses of the MergeMemNode.
3872             mems.push(n);
3873           }
3874         } else if (n->is_Mem()) {
3875           Node* other_adr = n->in(MemNode::Address);
3876           if (other_adr == adr) {
3877             failed = true;
3878             break;
3879           } else {
3880             const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
3881             if (other_t_adr != NULL) {
3882               int other_alias_idx = phase->C->get_alias_index(other_t_adr);
3883               if (other_alias_idx == alias_idx) {
3884                 // A load from the same memory slice as the store right
3885                 // after the InitializeNode. We check the control of the
3886                 // object/array that is loaded from. If it's the same as
3887                 // the store control then we cannot capture the store.
3888                 assert(!n->is_Store(), "2 stores to same slice on same control?");
3889                 Node* base = other_adr;






3890                 assert(base->is_AddP(), "should be addp but is %s", base->Name());
3891                 base = base->in(AddPNode::Base);
3892                 if (base != NULL) {
3893                   base = base->uncast();
3894                   if (base->is_Proj() && base->in(0) == alloc) {
3895                     failed = true;
3896                     break;
3897                   }
3898                 }
3899               }
3900             }
3901           }
3902         } else {
3903           failed = true;
3904           break;
3905         }
3906       }
3907     }
3908   }
3909   if (failed) {

4454         //   z's_done      12  16  16  16    12  16    12
4455         //   z's_needed    12  16  16  16    16  16    16
4456         //   zsize          0   0   0   0     4   0     4
4457         if (next_full_store < 0) {
4458           // Conservative tack:  Zero to end of current word.
4459           zeroes_needed = align_up(zeroes_needed, BytesPerInt);
4460         } else {
4461           // Zero to beginning of next fully initialized word.
4462           // Or, don't zero at all, if we are already in that word.
4463           assert(next_full_store >= zeroes_needed, "must go forward");
4464           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
4465           zeroes_needed = next_full_store;
4466         }
4467       }
4468 
4469       if (zeroes_needed > zeroes_done) {
4470         intptr_t zsize = zeroes_needed - zeroes_done;
4471         // Do some incremental zeroing on rawmem, in parallel with inits.
4472         zeroes_done = align_down(zeroes_done, BytesPerInt);
4473         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,


4474                                               zeroes_done, zeroes_needed,
4475                                               phase);
4476         zeroes_done = zeroes_needed;
4477         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
4478           do_zeroing = false;   // leave the hole, next time
4479       }
4480     }
4481 
4482     // Collect the store and move on:
4483     phase->replace_input_of(st, MemNode::Memory, inits);
4484     inits = st;                 // put it on the linearized chain
4485     set_req(i, zmem);           // unhook from previous position
4486 
4487     if (zeroes_done == st_off)
4488       zeroes_done = next_init_off;
4489 
4490     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
4491 
4492     #ifdef ASSERT
4493     // Various order invariants.  Weaker than stores_are_sane because

4513   remove_extra_zeroes();        // clear out all the zmems left over
4514   add_req(inits);
4515 
4516   if (!(UseTLAB && ZeroTLAB)) {
4517     // If anything remains to be zeroed, zero it all now.
4518     zeroes_done = align_down(zeroes_done, BytesPerInt);
4519     // if it is the last unused 4 bytes of an instance, forget about it
4520     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
4521     if (zeroes_done + BytesPerLong >= size_limit) {
4522       AllocateNode* alloc = allocation();
4523       assert(alloc != NULL, "must be present");
4524       if (alloc != NULL && alloc->Opcode() == Op_Allocate) {
4525         Node* klass_node = alloc->in(AllocateNode::KlassNode);
4526         ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
4527         if (zeroes_done == k->layout_helper())
4528           zeroes_done = size_limit;
4529       }
4530     }
4531     if (zeroes_done < size_limit) {
4532       rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,


4533                                             zeroes_done, size_in_bytes, phase);
4534     }
4535   }
4536 
4537   set_complete(phase);
4538   return rawmem;
4539 }
4540 
4541 
4542 #ifdef ASSERT
4543 bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
4544   if (is_complete())
4545     return true;                // stores could be anything at this point
4546   assert(allocation() != NULL, "must be present");
4547   intptr_t last_off = allocation()->minimum_header_size();
4548   for (uint i = InitializeNode::RawStores; i < req(); i++) {
4549     Node* st = in(i);
4550     intptr_t st_off = get_store_offset(st, phase);
4551     if (st_off < 0)  continue;  // ignore dead garbage
4552     if (last_off > st_off) {

  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciFlatArrayKlass.hpp"
  27 #include "classfile/javaClasses.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "gc/shared/barrierSet.hpp"
  31 #include "gc/shared/c2/barrierSetC2.hpp"
  32 #include "gc/shared/tlab_globals.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/cfgnode.hpp"
  39 #include "opto/regalloc.hpp"
  40 #include "opto/compile.hpp"
  41 #include "opto/connode.hpp"
  42 #include "opto/convertnode.hpp"
  43 #include "opto/inlinetypenode.hpp"
  44 #include "opto/loopnode.hpp"
  45 #include "opto/machnode.hpp"
  46 #include "opto/matcher.hpp"
  47 #include "opto/memnode.hpp"
  48 #include "opto/mulnode.hpp"
  49 #include "opto/narrowptrnode.hpp"
  50 #include "opto/phaseX.hpp"
  51 #include "opto/regmask.hpp"
  52 #include "opto/rootnode.hpp"
  53 #include "opto/vectornode.hpp"
  54 #include "utilities/align.hpp"
  55 #include "utilities/copy.hpp"
  56 #include "utilities/macros.hpp"
  57 #include "utilities/powerOfTwo.hpp"
  58 #include "utilities/vmError.hpp"
  59 
  60 // Portions of code courtesy of Clifford Click
  61 
  62 // Optimization - Graph Style
  63 

 226       // clone the Phi with our address type
 227       result = mphi->split_out_instance(t_adr, igvn);
 228     } else {
 229       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
 230     }
 231   }
 232   return result;
 233 }
 234 
 235 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
 236   uint alias_idx = phase->C->get_alias_index(tp);
 237   Node *mem = mmem;
 238 #ifdef ASSERT
 239   {
 240     // Check that current type is consistent with the alias index used during graph construction
 241     assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
 242     bool consistent =  adr_check == NULL || adr_check->empty() ||
 243                        phase->C->must_alias(adr_check, alias_idx );
 244     // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
 245     if( !consistent && adr_check != NULL && !adr_check->empty() &&
 246         tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
 247         adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
 248         ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
 249           adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
 250           adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
 251       // don't assert if it is dead code.
 252       consistent = true;
 253     }
 254     if( !consistent ) {
 255       st->print("alias_idx==%d, adr_check==", alias_idx);
 256       if( adr_check == NULL ) {
 257         st->print("NULL");
 258       } else {
 259         adr_check->dump();
 260       }
 261       st->cr();
 262       print_alias_types();
 263       assert(consistent, "adr_check must match alias idx");
 264     }
 265   }
 266 #endif

 874          "use LoadKlassNode instead");
 875   assert(!(adr_type->isa_aryptr() &&
 876            adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
 877          "use LoadRangeNode instead");
 878   // Check control edge of raw loads
 879   assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
 880           // oop will be recorded in oop map if load crosses safepoint
 881           rt->isa_oopptr() || is_immutable_value(adr),
 882           "raw memory operations should have control edge");
 883   LoadNode* load = NULL;
 884   switch (bt) {
 885   case T_BOOLEAN: load = new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 886   case T_BYTE:    load = new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 887   case T_INT:     load = new LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 888   case T_CHAR:    load = new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 889   case T_SHORT:   load = new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 890   case T_LONG:    load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency); break;
 891   case T_FLOAT:   load = new LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
 892   case T_DOUBLE:  load = new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
 893   case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency); break;
 894   case T_PRIMITIVE_OBJECT:
 895   case T_OBJECT:
 896 #ifdef _LP64
 897     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
 898       load = new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency);
 899     } else
 900 #endif
 901     {
 902       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
 903       load = new LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
 904     }
 905     break;
 906   default:
 907     ShouldNotReachHere();
 908     break;
 909   }
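  // A minimal usage sketch (argument values are illustrative, not from the original
  // source): an ordinary int field read would be created roughly as
  //   LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT, T_INT, MemNode::unordered);
  // which the switch above turns into a LoadINode, while sub-word types pick the
  // sign- or zero-extending variants (LoadB/LoadUB/LoadS/LoadUS).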
 910   assert(load != NULL, "LoadNode should have been created");
 911   if (unaligned) {
 912     load->set_unaligned_access();
 913   }
 914   if (mismatched) {

1002 
1003     LoadNode* ld = clone()->as_Load();
1004     Node* addp = in(MemNode::Address)->clone();
1005     if (ac->as_ArrayCopy()->is_clonebasic()) {
1006       assert(ld_alloc != NULL, "need an alloc");
1007       assert(addp->is_AddP(), "address must be addp");
1008       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1009       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1010       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1011       addp->set_req(AddPNode::Base, src);
1012       addp->set_req(AddPNode::Address, src);
1013     } else {
1014       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1015              ac->as_ArrayCopy()->is_copyof_validated() ||
1016              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1017       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1018       addp->set_req(AddPNode::Base, src);
1019       addp->set_req(AddPNode::Address, src);
1020 
1021       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1022       BasicType ary_elem = ary_t->klass()->as_array_klass()->element_type()->basic_type();
1023       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1024       uint shift  = exact_log2(type2aelembytes(ary_elem));
1025       if (ary_t->klass()->is_flat_array_klass()) {
1026         ciFlatArrayKlass* vak = ary_t->klass()->as_flat_array_klass();
1027         shift = vak->log2_element_size();
1028       }
1029 
1030       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1031 #ifdef _LP64
1032       diff = phase->transform(new ConvI2LNode(diff));
1033 #endif
1034       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1035 
1036       Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1037       addp->set_req(AddPNode::Offset, offset);
1038     }
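    // Roughly what the offset adjustment above computes (numbers are illustrative):
    // for an arraycopy with SrcPos == 2 and DestPos == 5 over an int array (shift == 2),
    // a load from dest at offset header + 7*4 is redirected to src at
    //   header + 7*4 + ((2 - 5) << 2) == header + 4*4,
    // i.e. the value that was copied into dest[7] is re-read from src[4].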
1039     addp = phase->transform(addp);
1040 #ifdef ASSERT
1041     const TypePtr* adr_type = phase->type(addp)->is_ptr();
1042     ld->_adr_type = adr_type;
1043 #endif
1044     ld->set_req(MemNode::Address, addp);
1045     ld->set_req(0, ctl);
1046     ld->set_req(MemNode::Memory, mem);
1047     // load depends on the tests that validate the arraycopy
1048     ld->_control_dependency = UnknownControl;

1129         // Same base, same offset.
1130         // Possible improvement for arrays: check index value instead of absolute offset.
1131 
1132         // At this point we have proven something like this setup:
1133         //   B = << base >>
1134         //   L =  LoadQ(AddP(Check/CastPP(B), #Off))
1135         //   S = StoreQ(AddP(             B , #Off), V)
1136         // (Actually, we haven't yet proven the Q's are the same.)
1137         // In other words, we are loading from a casted version of
1138         // the same pointer-and-offset that we stored to.
1139         // The casted version may carry a dependency, and that dependency is respected.
1140         // Thus, we are able to replace L by V.
1141       }
1142       // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1143       if (store_Opcode() != st->Opcode()) {
1144         return NULL;
1145       }
1146       // LoadVector/StoreVector needs additional check to ensure the types match.
1147       if (st->is_StoreVector()) {
1148         const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
1149         const TypeVect* out_vt = is_Load() ? as_LoadVector()->vect_type() : as_StoreVector()->vect_type();
1150         if (in_vt != out_vt) {
1151           return NULL;
1152         }
1153       }
1154       return st->in(MemNode::ValueIn);
1155     }
1156 
1157     // A load from a freshly-created object always returns zero.
1158     // (This can happen after LoadNode::Ideal resets the load's memory input
1159     // to find_captured_store, which returned InitializeNode::zero_memory.)
1160     if (st->is_Proj() && st->in(0)->is_Allocate() &&
1161         (st->in(0) == ld_alloc) &&
1162         (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1163       // return a zero value for the load's basic type
1164       // (This is one of the few places where a generic PhaseTransform
1165       // can create new nodes.  Think of it as lazily manifesting
1166       // virtually pre-existing constants.)
1167       assert(memory_type() != T_PRIMITIVE_OBJECT, "should not be used for inline types");
1168       Node* default_value = ld_alloc->in(AllocateNode::DefaultValue);
1169       if (default_value != NULL) {
1170         return default_value;
1171       }
1172       assert(ld_alloc->in(AllocateNode::RawDefaultValue) == NULL, "raw default value must also be null");
1173       if (memory_type() != T_VOID) {
1174         if (ReduceBulkZeroing || find_array_copy_clone(phase, ld_alloc, in(MemNode::Memory)) == NULL) {
1175           // If ReduceBulkZeroing is disabled, we must also make sure the allocation is not
1176           // initialized by an ArrayCopyNode clone. If it is, we cannot assume zero because the
1177           // initialization is performed by the ArrayCopyNode instead.
1178           return phase->zerocon(memory_type());
1179         }
1180       } else {
1181         // TODO: materialize all-zero vector constant
1182         assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1183       }
1184     }
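    // In source terms this covers patterns like the following (sketch only):
    //   Foo f = new Foo();
    //   int x = f.field;   // no intervening store, so the load folds to 0
    // (or to the allocation's default value for inline-type fields, per the
    // DefaultValue check above).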
1185 
1186     // A load from an initialization barrier can match a captured store.
1187     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1188       InitializeNode* init = st->in(0)->as_Initialize();
1189       AllocateNode* alloc = init->allocation();
1190       if ((alloc != NULL) && (alloc == ld_alloc)) {
1191         // examine a captured store value
1192         st = init->find_captured_store(ld_off, memory_size(), phase);

1220 //----------------------is_instance_field_load_with_local_phi------------------
1221 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1222   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1223       in(Address)->is_AddP() ) {
1224     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1225     // Only instances and boxed values.
1226     if( t_oop != NULL &&
1227         (t_oop->is_ptr_to_boxed_value() ||
1228          t_oop->is_known_instance_field()) &&
1229         t_oop->offset() != Type::OffsetBot &&
1230         t_oop->offset() != Type::OffsetTop) {
1231       return true;
1232     }
1233   }
1234   return false;
1235 }
1236 
1237 //------------------------------Identity---------------------------------------
1238 // Loads are identity if previous store is to same address
1239 Node* LoadNode::Identity(PhaseGVN* phase) {
1240   // Loading from an InlineTypePtr? The InlineTypePtr has the values of
1241   // all fields as input. Look for the field with matching offset.
1242   Node* addr = in(Address);
1243   intptr_t offset;
1244   Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset);
1245   if (base != NULL && base->is_InlineTypePtr() && offset > oopDesc::klass_offset_in_bytes()) {
1246     Node* value = base->as_InlineTypePtr()->field_value_by_offset((int)offset, true);
1247     if (value->is_InlineType()) {
1248       // Non-flattened inline type field
1249       InlineTypeNode* vt = value->as_InlineType();
1250       if (vt->is_allocated(phase)) {
1251         value = vt->get_oop();
1252       } else {
1253         // Not yet allocated, bail out
1254         value = NULL;
1255       }
1256     }
1257     if (value != NULL) {
1258       if (Opcode() == Op_LoadN) {
1259         // Encode oop value if we are loading a narrow oop
1260         assert(!phase->type(value)->isa_narrowoop(), "should already be decoded");
1261         value = phase->transform(new EncodePNode(value, bottom_type()));
1262       }
1263       return value;
1264     }
1265   }
1266 
1267   // If the previous store-maker is the right kind of Store, and the store is
1268   // to the same address, then we are equal to the value stored.
1269   Node* mem = in(Memory);
1270   Node* value = can_see_stored_value(mem, phase);
1271   if( value ) {
1272     // byte, short & char stores truncate naturally.
1273     // A load has to load the truncated value which requires
1274     // some sort of masking operation and that requires an
1275     // Ideal call instead of an Identity call.
1276     if (memory_size() < BytesPerInt) {
1277       // If the input to the store does not fit with the load's result type,
1278       // it must be truncated via an Ideal call.
1279       if (!phase->type(value)->higher_equal(phase->type(this)))
1280         return this;
1281     }
1282     // (This works even when value is a Con, but LoadNode::Value
1283     // usually runs first, producing the singleton type of the Con.)
1284     return value;
1285   }
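  // Example of the truncation caveat above (illustrative constant): after a
  // StoreB of 0x1FF, a LoadB from the same address must observe the sign-extended
  // byte value -1, not 0x1FF, so the replacement is deferred to LoadBNode::Ideal,
  // which inserts the masking shifts.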
1286 

1949       }
1950     }
1951 
1952     // Don't do this for integer types. There is only potential profit if
1953     // the element type t is lower than _type; that is, for int types, if _type is
1954     // more restrictive than t.  This only happens here if one is short and the other
1955     // char (both 16 bits), and in those cases we've made an intentional decision
1956     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
1957     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
1958     //
1959     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
1960     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
1961     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
1962     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
1963     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
1964     // In fact, that could have been the original type of p1, and p1 could have
1965     // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
1966     // expression (LShiftL quux 3) independently optimized to the constant 8.
1967     if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
1968         && (_type->isa_vect() == NULL)
1969         && t->isa_inlinetype() == NULL
1970         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
1971       // t might actually be lower than _type, if _type is a unique
1972       // concrete subclass of abstract class t.
1973       if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
1974         const Type* jt = t->join_speculative(_type);
1975         // In any case, do not allow the join, per se, to empty out the type.
1976         if (jt->empty() && !t->empty()) {
1977           // This can happen if an interface-typed array narrows to a class type.
1978           jt = _type;
1979         }
1980 #ifdef ASSERT
1981         if (phase->C->eliminate_boxing() && adr->is_AddP()) {
1982           // The pointers in the autobox arrays are always non-null
1983           Node* base = adr->in(AddPNode::Base);
1984           if ((base != NULL) && base->is_DecodeN()) {
1985             // Get LoadN node which loads IntegerCache.cache field
1986             base = base->in(1);
1987           }
1988           if ((base != NULL) && base->is_Con()) {
1989             const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
1990             if ((base_type != NULL) && base_type->is_autobox_cache()) {
1991               // It could be narrow oop
1992               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
1993             }
1994           }
1995         }
1996 #endif
1997         return jt;
1998       }
1999     }
2000   } else if (tp->base() == Type::InstPtr) {
2001     assert( off != Type::OffsetBot ||
2002             // arrays can be cast to Objects
2003             tp->is_oopptr()->klass()->is_java_lang_Object() ||
2004             tp->is_oopptr()->klass() == ciEnv::current()->Class_klass() ||
2005             // unsafe field access may not have a constant offset
2006             C->has_unsafe_access(),
2007             "Field accesses must be precise" );
2008     // For oop loads, we expect the _type to be precise.
2009 

2010     const TypeInstPtr* tinst = tp->is_instptr();
2011     BasicType bt = memory_type();
2012 
2013     // Optimize loads from constant fields.
2014     ciObject* const_oop = tinst->const_oop();
2015     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) {
2016       ciType* mirror_type = const_oop->as_instance()->java_mirror_type();
2017       if (mirror_type != NULL) {
2018         const Type* const_oop = NULL;
2019         ciInlineKlass* vk = mirror_type->is_inlinetype() ? mirror_type->as_inline_klass() : NULL;
2020         // Fold default value loads
2021         if (vk != NULL && off == vk->default_value_offset()) {
2022           const_oop = TypeInstPtr::make(vk->default_instance());
2023         }
2024         // Fold class mirror loads
2025         if (off == java_lang_Class::primary_mirror_offset()) {
2026           const_oop = (vk == NULL) ? TypePtr::NULL_PTR : TypeInstPtr::make(vk->ref_instance());
2027         } else if (off == java_lang_Class::secondary_mirror_offset()) {
2028           const_oop = (vk == NULL) ? TypePtr::NULL_PTR : TypeInstPtr::make(vk->val_instance());
2029         }
2030         if (const_oop != NULL) {
2031           return (bt == T_NARROWOOP) ? const_oop->make_narrowoop() : const_oop;
2032         }
2033       }
2034       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt);
2035       if (con_type != NULL) {
2036         return con_type;
2037       }
2038     }
2039   } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
2040     assert( off != Type::OffsetBot ||
2041             // arrays can be cast to Objects
2042             tp->is_klassptr()->klass() == NULL ||
2043             tp->is_klassptr()->klass()->is_java_lang_Object() ||
2044             // also allow array-loading from the primary supertype
2045             // array during subtype checks
2046             Opcode() == Op_LoadKlass,
2047             "Field accesses must be precise" );
2048     // For klass/static loads, we expect the _type to be precise
2049   } else if (tp->base() == Type::RawPtr && !StressReflectiveCode) {
2050     if (adr->is_Load() && off == 0) {
2051       /* With the mirror reached through an indirection in the Klass*,
2052        * the VM now uses two loads: LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
2053        * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
2054        *
2055        * So check the type and klass of the node before the LoadP.
2056        */
2057       Node* adr2 = adr->in(MemNode::Address);
2058       const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
2059       if (tkls != NULL) {
2060         ciKlass* klass = tkls->klass();
2061         if (klass != NULL && klass->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
2062           assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2063           assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2064           return TypeInstPtr::make(klass->java_mirror());
2065         }
2066       }
2067     } else {
2068       // Check for a load of the default value offset from the InlineKlassFixedBlock:
2069       // LoadI(LoadP(inline_klass, adr_inlineklass_fixed_block_offset), default_value_offset_offset)
2070       intptr_t offset = 0;
2071       Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2072       if (base != NULL && base->is_Load() && offset == in_bytes(InlineKlass::default_value_offset_offset())) {
2073         const TypeKlassPtr* tkls = phase->type(base->in(MemNode::Address))->isa_klassptr();
2074         if (tkls != NULL && tkls->is_loaded() && tkls->klass_is_exact() && tkls->isa_inlinetype() &&
2075             tkls->offset() == in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())) {
2076           assert(base->Opcode() == Op_LoadP, "must load an oop from klass");
2077           assert(Opcode() == Op_LoadI, "must load an int from fixed block");
2078           return TypeInt::make(tkls->klass()->as_inline_klass()->default_value_offset());
2079         }
2080       }
2081     }
2082   }
2083 
2084   const TypeKlassPtr *tkls = tp->isa_klassptr();
2085   if (tkls != NULL && !StressReflectiveCode) {
2086     ciKlass* klass = tkls->klass();
2087     if (tkls->is_loaded() && tkls->klass_is_exact()) {
2088       // We are loading a field from a Klass metaobject whose identity
2089       // is known at compile time (the type is "exact" or "precise").
2090       // Check for fields we know are maintained as constants by the VM.
2091       if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2092         // The field is Klass::_super_check_offset.  Return its (constant) value.
2093         // (Folds up type checking code.)
2094         assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2095         return TypeInt::make(klass->super_check_offset());
2096       }
2097       // Compute index into primary_supers array
2098       juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2099       // Check for overflowing; use unsigned compare to handle the negative case.
2100       if( depth < ciKlass::primary_super_limit() ) {
2101         // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2102         // (Folds up type checking code.)
2103         assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2104         ciKlass *ss = klass->super_of_depth(depth);
2105         return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
2106       }
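      // Sketch of the arithmetic above (offsets are illustrative): a load at
      // primary_supers_offset + 2 * sizeof(Klass*) yields depth == 2 and folds to
      // klass->super_of_depth(2) as a constant; an offset below primary_supers_offset
      // wraps to a huge unsigned depth and simply fails the bound check.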
2107       const Type* aift = load_array_final_field(tkls, klass);
2108       if (aift != NULL)  return aift;
2109     }
2110 
2111     // We can still check if we are loading from the primary_supers array at a
2112     // shallow enough depth.  Even though the klass is not exact, entries less
2113     // than or equal to its super depth are correct.
2114     if (tkls->is_loaded()) {
2115       ciType *inner = klass;
2116       while( inner->is_obj_array_klass() )
2117         inner = inner->as_obj_array_klass()->base_element_type();
2118       if( inner->is_instance_klass() &&
2119           !inner->as_instance_klass()->flags().is_interface() ) {
2120         // Compute index into primary_supers array
2121         juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2122         // Check for overflowing; use unsigned compare to handle the negative case.
2123         if( depth < ciKlass::primary_super_limit() &&
2124             depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
2125           // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2126           // (Folds up type checking code.)
2127           assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2128           ciKlass *ss = klass->super_of_depth(depth);
2129           return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
2130         }
2131       }
2132     }
2133 
2134     // If the type is enough to determine that the thing is not an array,

2159   if (ReduceFieldZeroing || is_instance || is_boxed_value) {
2160     Node* value = can_see_stored_value(mem,phase);
2161     if (value != NULL && value->is_Con()) {
2162       assert(value->bottom_type()->higher_equal(_type),"sanity");
2163       return value->bottom_type();
2164     }
2165   }
2166 
2167   bool is_vect = (_type->isa_vect() != NULL);
2168   if (is_instance && !is_vect) {
2169     // If we have an instance type and our memory input is the
2170     // program's initial memory state, there is no matching store,
2171     // so just return a zero of the appropriate type -
2172     // except if it is vectorized - then we have no zero constant.
2173     Node *mem = in(MemNode::Memory);
2174     if (mem->is_Parm() && mem->in(0)->is_Start()) {
2175       assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2176       return Type::get_zero_type(_type->basic_type());
2177     }
2178   }

2179   Node* alloc = is_new_object_mark_load(phase);
2180   if (alloc != NULL) {
2181     if (EnableValhalla) {
2182       // The mark word may contain property bits (inline, flat, null-free)
2183       Node* klass_node = alloc->in(AllocateNode::KlassNode);
2184       const TypeKlassPtr* tkls = phase->type(klass_node)->is_klassptr();
2185       ciKlass* klass = tkls->klass();
2186       if (klass != NULL && klass->is_loaded() && tkls->klass_is_exact()) {
2187         return TypeX::make(klass->prototype_header().value());
2188       }
2189     } else {
2190       return TypeX::make(markWord::prototype().value());
2191     }
2192   }
2193 
2194   return _type;
2195 }
2196 
2197 //------------------------------match_edge-------------------------------------
2198 // Do we Match on this edge index or not?  Match only the address.
2199 uint LoadNode::match_edge(uint idx) const {
2200   return idx == MemNode::Address;
2201 }
2202 
2203 //--------------------------LoadBNode::Ideal--------------------------------------
2204 //
2205 //  If the previous store is to the same address as this load,
2206 //  and the value stored was larger than a byte, replace this load
2207 //  with the value stored truncated to a byte.  If no truncation is
2208 //  needed, the replacement is done in LoadNode::Identity().
2209 //
2210 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2211   Node* mem = in(MemNode::Memory);

2322   return LoadNode::Ideal(phase, can_reshape);
2323 }
2324 
2325 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2326   Node* mem = in(MemNode::Memory);
2327   Node* value = can_see_stored_value(mem,phase);
2328   if (value != NULL && value->is_Con() &&
2329       !value->bottom_type()->higher_equal(_type)) {
2330     // If the input to the store does not fit with the load's result type,
2331     // it must be truncated. We can't delay until Ideal call since
2332     // a singleton Value is needed for split_thru_phi optimization.
2333     int con = value->get_int();
2334     return TypeInt::make((con << 16) >> 16);
2335   }
2336   return LoadNode::Value(phase);
2337 }
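// Worked example for the short truncation above (constant chosen for illustration):
// if the stored constant is 0x1ABCD, the load folds to (0x1ABCD << 16) >> 16 ==
// 0xFFFFABCD == -21555, exactly what a 16-bit signed load would have produced.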
2338 
2339 //=============================================================================
2340 //----------------------------LoadKlassNode::make------------------------------
2341 // Polymorphic factory method:
2342 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
2343                           const TypeKlassPtr* tk) {
2344   // sanity check the alias category against the created node type
2345   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
2346   assert(adr_type != NULL, "expecting TypeKlassPtr");
2347 #ifdef _LP64
2348   if (adr_type->is_ptr_to_narrowklass()) {
2349     assert(UseCompressedClassPointers, "no compressed klasses");
2350     Node* load_klass = gvn.transform(new LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2351     return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2352   }
2353 #endif
2354   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2355   return new LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
2356 }
2357 
2358 //------------------------------Value------------------------------------------
2359 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
2360   return klass_value_common(phase);
2361 }
2362 
2363 // In most cases, LoadKlassNode does not have the control input set. If the control

2370   // Either input is TOP ==> the result is TOP
2371   const Type *t1 = phase->type( in(MemNode::Memory) );
2372   if (t1 == Type::TOP)  return Type::TOP;
2373   Node *adr = in(MemNode::Address);
2374   const Type *t2 = phase->type( adr );
2375   if (t2 == Type::TOP)  return Type::TOP;
2376   const TypePtr *tp = t2->is_ptr();
2377   if (TypePtr::above_centerline(tp->ptr()) ||
2378       tp->ptr() == TypePtr::Null)  return Type::TOP;
2379 
2380   // Return a more precise klass, if possible
2381   const TypeInstPtr *tinst = tp->isa_instptr();
2382   if (tinst != NULL) {
2383     ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
2384     int offset = tinst->offset();
2385     if (ik == phase->C->env()->Class_klass()
2386         && (offset == java_lang_Class::klass_offset() ||
2387             offset == java_lang_Class::array_klass_offset())) {
2388       // We are loading a special hidden field from a Class mirror object,
2389       // the field which points to the VM's Klass metaobject.
2390       bool null_free = false;
2391       ciType* t = tinst->java_mirror_type(&null_free);
2392       // java_mirror_type returns non-null for compile-time Class constants.
2393       if (t != NULL) {
2394         // constant oop => constant klass
2395         if (offset == java_lang_Class::array_klass_offset()) {
2396           if (t->is_void()) {
2397             // We cannot create a void array.  Since void is a primitive type, return a null
2398             // klass.  Users of this result need to do a null check on the returned klass.
2399             return TypePtr::NULL_PTR;
2400           }
2401           return TypeKlassPtr::make(ciArrayKlass::make(t, null_free));
2402         }
2403         if (!t->is_klass()) {
2404           // a primitive Class (e.g., int.class) has NULL for a klass field
2405           return TypePtr::NULL_PTR;
2406         }
2407         // (Folds up the 1st indirection in aClassConstant.getModifiers().)
2408         return TypeKlassPtr::make(t->as_klass());
2409       }
2410       // non-constant mirror, so we can't tell what's going on
2411     }
2412     if( !ik->is_loaded() )
2413       return _type;             // Bail out if not loaded
2414     if (offset == oopDesc::klass_offset_in_bytes()) {
2415       if (tinst->klass_is_exact()) {
2416         return TypeKlassPtr::make(ik);
2417       }
2418       // See if we can become precise: no subklasses and no interface
2419       // (Note:  We need to support verified interfaces.)
2420       if (!ik->is_interface() && !ik->has_subklass()) {
2421         // Add a dependence; if any subclass added we need to recompile
2422         if (!ik->is_final()) {
2423           // %%% should use stronger assert_unique_concrete_subtype instead
2424           phase->C->dependencies()->assert_leaf_type(ik);
2425         }
2426         // Return precise klass
2427         return TypeKlassPtr::make(ik);
2428       }
2429 
2430       // Return root of possible klass
2431       return TypeInstKlassPtr::make(TypePtr::NotNull, ik, Type::Offset(0), tinst->flatten_array());
2432     }
2433   }
2434 
2435   // Check for loading klass from an array
2436   const TypeAryPtr *tary = tp->isa_aryptr();
2437   if (tary != NULL) {
2438     ciKlass *tary_klass = tary->klass();
2439     if (tary_klass != NULL   // can be NULL when at BOTTOM or TOP
2440         && tary->offset() == oopDesc::klass_offset_in_bytes()) {
2441       if (tary->klass_is_exact()) {
2442         return TypeKlassPtr::make(tary_klass);
2443       }
2444       ciArrayKlass* ak = tary_klass->as_array_klass();
2445       // If the klass is an object array, we defer the question to the
2446       // array component klass.
2447       if (ak->is_obj_array_klass()) {
2448         assert(ak->is_loaded(), "");
2449         ciKlass *base_k = ak->as_obj_array_klass()->base_element_klass();
2450         if (base_k->is_loaded() && base_k->is_instance_klass()) {
2451           ciInstanceKlass *ik = base_k->as_instance_klass();
2452           // See if we can become precise: no subklasses and no interface
2453           // Do not fold klass loads from [LMyValue. The runtime type might be [QMyValue due to [QMyValue <: [LMyValue
2454           // and the klass for [QMyValue is not equal to the klass for [LMyValue.
2455           if (!ik->is_interface() && !ik->has_subklass() && (!ik->is_inlinetype() || ak->is_elem_null_free())) {
2456             // Add a dependence; if any subclass added we need to recompile
2457             if (!ik->is_final()) {
2458               phase->C->dependencies()->assert_leaf_type(ik);
2459             }
2460             // Return precise array klass
2461             return TypeKlassPtr::make(ak);
2462           }
2463         }
2464         return TypeAryKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0), tary->is_not_flat(), tary->is_not_null_free(), tary->is_null_free());
2465       } else if (ak->is_type_array_klass()) {

2466         return TypeKlassPtr::make(ak); // These are always precise
2467       }
2468     }
2469   }
2470 
2471   // Check for loading klass from an array klass
2472   const TypeKlassPtr *tkls = tp->isa_klassptr();
2473   if (tkls != NULL && !StressReflectiveCode) {
2474     if (!tkls->is_loaded()) {

2475       return _type;             // Bail out if not loaded
2476     }
2477     ciKlass* klass = tkls->klass();
2478     if( klass->is_obj_array_klass() &&
2479         tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2480       ciKlass* elem = klass->as_obj_array_klass()->element_klass();
2481       // // Always returning precise element type is incorrect,
2482       // // e.g., element type could be object and array may contain strings
2483       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2484 
2485       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2486       // according to the element type's subclassing.
2487       return TypeKlassPtr::make(tkls->ptr(), elem, Type::Offset(0));
2488     } else if (klass->is_flat_array_klass() &&
2489                tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2490       ciKlass* elem = klass->as_flat_array_klass()->element_klass();
2491       return TypeInstKlassPtr::make(tkls->ptr(), elem, Type::Offset(0), /* flatten_array= */ true);
2492     }
2493     if( klass->is_instance_klass() && tkls->klass_is_exact() &&
2494         tkls->offset() == in_bytes(Klass::super_offset())) {
2495       ciKlass* sup = klass->as_instance_klass()->super();
2496       // The field is Klass::_super.  Return its (constant) value.
2497       // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
2498       return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
2499     }
2500   }
2501 
2502   // Bailout case
2503   return LoadNode::Value(phase);
2504 }
2505 
2506 //------------------------------Identity---------------------------------------
2507 // To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
2508 // Also feed through the klass in Allocate(...klass...)._klass.
2509 Node* LoadKlassNode::Identity(PhaseGVN* phase) {
2510   return klass_identity_common(phase);
2511 }

2679 //=============================================================================
2680 //---------------------------StoreNode::make-----------------------------------
2681 // Polymorphic factory method:
2682 StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
2683   assert((mo == unordered || mo == release), "unexpected");
2684   Compile* C = gvn.C;
2685   assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
2686          ctl != NULL, "raw memory operations should have control edge");
2687 
2688   switch (bt) {
2689   case T_BOOLEAN: val = gvn.transform(new AndINode(val, gvn.intcon(0x1))); // Fall through to T_BYTE case
2690   case T_BYTE:    return new StoreBNode(ctl, mem, adr, adr_type, val, mo);
2691   case T_INT:     return new StoreINode(ctl, mem, adr, adr_type, val, mo);
2692   case T_CHAR:
2693   case T_SHORT:   return new StoreCNode(ctl, mem, adr, adr_type, val, mo);
2694   case T_LONG:    return new StoreLNode(ctl, mem, adr, adr_type, val, mo);
2695   case T_FLOAT:   return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
2696   case T_DOUBLE:  return new StoreDNode(ctl, mem, adr, adr_type, val, mo);
2697   case T_METADATA:
2698   case T_ADDRESS:
2699   case T_PRIMITIVE_OBJECT:
2700   case T_OBJECT:
2701 #ifdef _LP64
2702     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2703       val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop()));
2704       return new StoreNNode(ctl, mem, adr, adr_type, val, mo);
2705     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
2706                (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
2707                 adr->bottom_type()->isa_rawptr())) {
2708       val = gvn.transform(new EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
2709       return new StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
2710     }
2711 #endif
2712     {
2713       return new StorePNode(ctl, mem, adr, adr_type, val, mo);
2714     }
2715   default:
2716     ShouldNotReachHere();
2717     return (StoreNode*)NULL;
2718   }
2719 }
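// A minimal usage sketch (argument names are illustrative): a boolean field write
//   StoreNode::make(gvn, ctl, mem, adr, adr_type, flag_val, T_BOOLEAN, MemNode::unordered)
// first masks flag_val with 0x1 (the T_BOOLEAN case falls through to T_BYTE above),
// so only the low bit ever reaches memory.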

2741 
2742   // Since they are not commoned, do not hash them:
2743   return NO_HASH;
2744 }
2745 
2746 //------------------------------Ideal------------------------------------------
2747 // Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x).
2748 // When a store immediately follows a relevant allocation/initialization,
2749 // try to capture it into the initialization, or hoist it above.
2750 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2751   Node* p = MemNode::Ideal_common(phase, can_reshape);
2752   if (p)  return (p == NodeSentinel) ? NULL : p;
2753 
2754   Node* mem     = in(MemNode::Memory);
2755   Node* address = in(MemNode::Address);
2756   Node* value   = in(MemNode::ValueIn);
2757   // Back-to-back stores to same address?  Fold em up.  Generally
2758   // unsafe if I have intervening uses...  Also disallowed for StoreCM
2759   // since they must follow each StoreP operation.  Redundant StoreCMs
2760   // are eliminated just before matching in final_graph_reshape.
2761   if (phase->C->get_adr_type(phase->C->get_alias_index(adr_type())) != TypeAryPtr::INLINES) {
2762     Node* st = mem;
2763     // If Store 'st' has more than one use, we cannot fold 'st' away.
2764     // For example, 'st' might be the final state at a conditional
2765     // return.  Or, 'st' might be used by some node which is live at
2766     // the same time 'st' is live, which might be unschedulable.  So,
2767     // require exactly ONE user until such time as we clone 'mem' for
2768     // each of 'mem's uses (thus making the exactly-1-user-rule hold
2769     // true).
2770     while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
2771       // Looking at a dead closed cycle of memory?
2772       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
2773       assert(Opcode() == st->Opcode() ||
2774              st->Opcode() == Op_StoreVector ||
2775              Opcode() == Op_StoreVector ||
2776              st->Opcode() == Op_StoreVectorScatter ||
2777              Opcode() == Op_StoreVectorScatter ||
2778              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
2779              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
2780              (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
2781              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) ||
2782              (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
2783              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
2784 
2785       if (st->in(MemNode::Address)->eqv_uncast(address) &&
2786           st->as_Store()->memory_size() <= this->memory_size()) {
2787         Node* use = st->raw_out(0);
2788         if (phase->is_IterGVN()) {
2789           phase->is_IterGVN()->rehash_node_delayed(use);
2790         }
2791         // It's OK to do this in the parser, since DU info is always accurate,
2792         // and the parser always refers to nodes via SafePointNode maps.
2793         use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
2794         return this;
2795       }
2796       st = st->in(MemNode::Memory);
2797     }
2798   }
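  // Roughly, the loop above rewrites the memory graph (sketch only)
  //   mem -> StoreQ(adr, x) -> this StoreQ(adr, y)
  // into
  //   mem -> this StoreQ(adr, y)
  // by making the intermediate store's single user consume its input memory
  // directly; the bypassed store then goes dead and is reclaimed by IGVN.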
2799 
2800 
2801   // Capture an unaliased, unconditional, simple store into an initializer.

2858   // Load then Store?  Then the Store is useless
2859   if (val->is_Load() &&
2860       val->in(MemNode::Address)->eqv_uncast(adr) &&
2861       val->in(MemNode::Memory )->eqv_uncast(mem) &&
2862       val->as_Load()->store_Opcode() == Opcode()) {
2863     result = mem;
2864   }
2865 
2866   // Two stores in a row of the same value?
2867   if (result == this &&
2868       mem->is_Store() &&
2869       mem->in(MemNode::Address)->eqv_uncast(adr) &&
2870       mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
2871       mem->Opcode() == Opcode()) {
2872     result = mem;
2873   }
2874 
2875   // Store of zero anywhere into a freshly-allocated object?
2876   // Then the store is useless.
2877   // (It must already have been captured by the InitializeNode.)
2878   if (result == this && ReduceFieldZeroing) {

2879     // a newly allocated object is already all-zeroes everywhere
2880     if (mem->is_Proj() && mem->in(0)->is_Allocate() &&
2881         (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::DefaultValue) == val)) {
2882       assert(!phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::DefaultValue) == NULL, "storing null to inline type array is forbidden");
2883       result = mem;
2884     }
2885 
2886     if (result == this && phase->type(val)->is_zero_type()) {
2887       // the store may also apply to zero-bits in an earlier object
2888       Node* prev_mem = find_previous_store(phase);
2889       // Steps (a), (b):  Walk past independent stores to find an exact match.
2890       if (prev_mem != NULL) {
2891         Node* prev_val = can_see_stored_value(prev_mem, phase);
2892         if (prev_val != NULL && prev_val == val) {
2893           // prev_val and val might differ by a cast; it would be good
2894           // to keep the more informative of the two.
2895           result = mem;
2896         }
2897       }
2898     }
2899   }
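  // Source-level intuition for the checks above (sketch only): in
  //   Foo f = new Foo(); f.x = 0;
  // the explicit zero store is redundant because the InitializeNode of the fresh
  // allocation already guarantees zeroed (or default-valued) memory.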
2900 
2901   PhaseIterGVN* igvn = phase->is_IterGVN();
2902   if (result != this && igvn != NULL) {
2903     MemBarNode* trailing = trailing_membar();
2904     if (trailing != NULL) {
2905 #ifdef ASSERT
2906       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();

3051 Node* StoreCMNode::Identity(PhaseGVN* phase) {
3052   // No need to card mark when storing a null ptr
3053   Node* my_store = in(MemNode::OopStore);
3054   if (my_store->is_Store()) {
3055     const Type *t1 = phase->type( my_store->in(MemNode::ValueIn) );
3056     if( t1 == TypePtr::NULL_PTR ) {
3057       return in(MemNode::Memory);
3058     }
3059   }
3060   return this;
3061 }
3062 
3063 //=============================================================================
3064 //------------------------------Ideal---------------------------------------
3065 Node *StoreCMNode::Ideal(PhaseGVN *phase, bool can_reshape){
3066   Node* progress = StoreNode::Ideal(phase, can_reshape);
3067   if (progress != NULL) return progress;
3068 
3069   Node* my_store = in(MemNode::OopStore);
3070   if (my_store->is_MergeMem()) {
3071     if (oop_alias_idx() != phase->C->get_alias_index(TypeAryPtr::INLINES) ||
3072         phase->C->flattened_accesses_share_alias()) {
3073       // The alias that was recorded is no longer accurate enough.
3074       Node* mem = my_store->as_MergeMem()->memory_at(oop_alias_idx());
3075       set_req_X(MemNode::OopStore, mem, phase);
3076       return this;
3077     }
3078   }
3079 
3080   return NULL;
3081 }
3082 
3083 //------------------------------Value-----------------------------------------
3084 const Type* StoreCMNode::Value(PhaseGVN* phase) const {
3085   // Either input is TOP ==> the result is TOP (checked in StoreNode::Value).
3086   // If extra input is TOP ==> the result is TOP
3087   const Type* t = phase->type(in(MemNode::OopStore));
3088   if (t == Type::TOP) {
3089     return Type::TOP;
3090   }
3091   return StoreNode::Value(phase);
3092 }
3093 
3094 
3095 //=============================================================================
3096 //----------------------------------SCMemProjNode------------------------------
3097 const Type* SCMemProjNode::Value(PhaseGVN* phase) const

3216 // Clearing a short array is faster with stores
3217 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3218   // Already know this is a large node, do not try to ideal it
3219   if (!IdealizeClearArrayNode || _is_large) return NULL;
3220 
3221   const int unit = BytesPerLong;
3222   const TypeX* t = phase->type(in(2))->isa_intptr_t();
3223   if (!t)  return NULL;
3224   if (!t->is_con())  return NULL;
3225   intptr_t raw_count = t->get_con();
3226   intptr_t size = raw_count;
3227   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
3228   // Clearing nothing uses the Identity call.
3229   // Negative clears are possible on dead ClearArrays
3230   // (see jck test stmt114.stmt11402.val).
3231   if (size <= 0 || size % unit != 0)  return NULL;
3232   intptr_t count = size / unit;
3233   // Length too long; communicate this to matchers and assemblers.
3234   // Assemblers are responsible for producing fast hardware clears for it.
3235   if (size > InitArrayShortSize) {
3236     return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true);
3237   } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
3238     return NULL;
3239   }
3240   Node *mem = in(1);
3241   if( phase->type(mem)==Type::TOP ) return NULL;
3242   Node *adr = in(3);
3243   const Type* at = phase->type(adr);
3244   if( at==Type::TOP ) return NULL;
3245   const TypePtr* atp = at->isa_ptr();
3246   // adjust atp to be the correct array element address type
3247   if (atp == NULL)  atp = TypePtr::BOTTOM;
3248   else              atp = atp->add_offset(Type::OffsetBot);
3249   // Get base for derived pointer purposes
3250   if( adr->Opcode() != Op_AddP ) Unimplemented();
3251   Node *base = adr->in(1);
3252 
3253   Node *val = in(4);
3254   Node *off  = phase->MakeConX(BytesPerLong);
3255   mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
3256   count--;
3257   while( count-- ) {
3258     mem = phase->transform(mem);
3259     adr = phase->transform(new AddPNode(base,adr,off));
3260     mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
3261   }
3262   return mem;
3263 }
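// Illustrative expansion (sizes are made up): a constant clear of 24 bytes
// (count == 3) that is below the large-array threshold becomes a chain of three
// StoreL nodes of the fill value at dest + 0, dest + 8 and dest + 16 instead of a
// ClearArray, which then matches to plain stores rather than a clearing loop.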
3264 
3265 //----------------------------step_through----------------------------------
3266 // Advance *np to the allocation's input memory edge if the ClearArray initializes a
3267 // different instance; return false if it initializes the instance we are looking for.
3268 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
3269   Node* n = *np;
3270   assert(n->is_ClearArray(), "sanity");
3271   intptr_t offset;
3272   AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
3273   // This method is called only before Allocate nodes are expanded
3274   // during macro nodes expansion. Before that ClearArray nodes are
3275   // only generated in PhaseMacroExpand::generate_arraycopy() (before
3276   // Allocate nodes are expanded) which follows allocations.
3277   assert(alloc != NULL, "should have allocation");
3278   if (alloc->_idx == instance_id) {
3279     // Can not bypass initialization of the instance we are looking for.
3280     return false;
3281   }
3282   // Otherwise skip it.
3283   InitializeNode* init = alloc->initialization();
3284   if (init != NULL)
3285     *np = init->in(TypeFunc::Memory);
3286   else
3287     *np = alloc->in(TypeFunc::Memory);
3288   return true;
3289 }
3290 
3291 //----------------------------clear_memory-------------------------------------
3292 // Generate code to initialize object storage to zero.
3293 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
3294                                    Node* val,
3295                                    Node* raw_val,
3296                                    intptr_t start_offset,
3297                                    Node* end_offset,
3298                                    PhaseGVN* phase) {
3299   intptr_t offset = start_offset;
3300 
3301   int unit = BytesPerLong;
3302   if ((offset % unit) != 0) {
3303     Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
3304     adr = phase->transform(adr);
3305     const TypePtr* atp = TypeRawPtr::BOTTOM;
3306     if (val != NULL) {
3307       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
3308       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
3309     } else {
3310       assert(raw_val == NULL, "raw_val must be null when val is null");
3311       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
3312     }
3313     mem = phase->transform(mem);
3314     offset += BytesPerInt;
3315   }
3316   assert((offset % unit) == 0, "");
3317 
3318   // Initialize the remaining stuff, if any, with a ClearArray.
3319   return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase);
3320 }
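// For example (offsets are illustrative): with start_offset == 12 the code above
// emits a single 4-byte store at offset 12 to reach the next 8-byte boundary and
// then hands the [16, end_offset) range to the bulk overload below.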
3321 
3322 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
3323                                    Node* raw_val,
3324                                    Node* start_offset,
3325                                    Node* end_offset,
3326                                    PhaseGVN* phase) {
3327   if (start_offset == end_offset) {
3328     // nothing to do
3329     return mem;
3330   }
3331 
3332   int unit = BytesPerLong;
3333   Node* zbase = start_offset;
3334   Node* zend  = end_offset;
3335 
3336   // Scale to the unit required by the CPU:
3337   if (!Matcher::init_array_count_is_in_bytes) {
3338     Node* shift = phase->intcon(exact_log2(unit));
3339     zbase = phase->transform(new URShiftXNode(zbase, shift) );
3340     zend  = phase->transform(new URShiftXNode(zend,  shift) );
3341   }
3342 
3343   // Bulk clear double-words
3344   Node* zsize = phase->transform(new SubXNode(zend, zbase) );
3345   Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
3346   if (raw_val == NULL) {
3347     raw_val = phase->MakeConX(0);
3348   }
3349   mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false);
3350   return phase->transform(mem);
3351 }
3352 
3353 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
3354                                    Node* val,
3355                                    Node* raw_val,
3356                                    intptr_t start_offset,
3357                                    intptr_t end_offset,
3358                                    PhaseGVN* phase) {
3359   if (start_offset == end_offset) {
3360     // nothing to do
3361     return mem;
3362   }
3363 
3364   assert((end_offset % BytesPerInt) == 0, "odd end offset");
3365   intptr_t done_offset = end_offset;
3366   if ((done_offset % BytesPerLong) != 0) {
3367     done_offset -= BytesPerInt;
3368   }
3369   if (done_offset > start_offset) {
3370     mem = clear_memory(ctl, mem, dest, val, raw_val,
3371                        start_offset, phase->MakeConX(done_offset), phase);
3372   }
3373   if (done_offset < end_offset) { // emit the final 32-bit store
3374     Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
3375     adr = phase->transform(adr);
3376     const TypePtr* atp = TypeRawPtr::BOTTOM;
3377     if (val != NULL) {
3378       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
3379       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
3380     } else {
3381       assert(raw_val == NULL, "raw_val must be null when val is null");
3382       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
3383     }
3384     mem = phase->transform(mem);
3385     done_offset += BytesPerInt;
3386   }
3387   assert(done_offset == end_offset, "");
3388   return mem;
3389 }
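// A matching tail example (offsets are illustrative): with start_offset == 0 and
// end_offset == 20, done_offset rounds down to 16, the [0, 16) range is bulk
// cleared, and the final 4-byte store above covers bytes [16, 20).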
3390 
3391 //=============================================================================
3392 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
3393   : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
3394     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
3395 #ifdef ASSERT
3396   , _pair_idx(0)
3397 #endif
3398 {
3399   init_class_id(Class_MemBar);
3400   Node* top = C->top();
3401   init_req(TypeFunc::I_O,top);
3402   init_req(TypeFunc::FramePtr,top);
3403   init_req(TypeFunc::ReturnAdr,top);

3509       PhaseIterGVN* igvn = phase->is_IterGVN();
3510       remove(igvn);
3511       // Must return either the original node (now dead) or a new node
3512       // (Do not return a top here, since that would break the uniqueness of top.)
3513       return new ConINode(TypeInt::ZERO);
3514     }
3515   }
3516   return progress ? this : NULL;
3517 }
3518 
3519 //------------------------------Value------------------------------------------
3520 const Type* MemBarNode::Value(PhaseGVN* phase) const {
3521   if( !in(0) ) return Type::TOP;
3522   if( phase->type(in(0)) == Type::TOP )
3523     return Type::TOP;
3524   return TypeTuple::MEMBAR;
3525 }
3526 
3527 //------------------------------match------------------------------------------
3528 // Construct projections for memory.
3529 Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) {
3530   switch (proj->_con) {
3531   case TypeFunc::Control:
3532   case TypeFunc::Memory:
3533     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
3534   }
3535   ShouldNotReachHere();
3536   return NULL;
3537 }
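// [Editor's note, not in the original source] Memory barriers produce no
// machine values of their own, so both the Control and Memory projections are
// matched to MachProj nodes with an empty register mask; any other projection
// constant is unexpected here.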
3538 
3539 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3540   trailing->_kind = TrailingStore;
3541   leading->_kind = LeadingStore;
3542 #ifdef ASSERT
3543   trailing->_pair_idx = leading->_idx;
3544   leading->_pair_idx = leading->_idx;
3545 #endif
3546 }
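// [Editor's note, not in the original source] set_store_pair() ties a leading
// barrier to its trailing counterpart around a store; in debug builds both
// members of the pair record the leading barrier's _idx in _pair_idx so the
// pairing can be cross-checked later.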
3547 
3548 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3549   trailing->_kind = TrailingLoadStore;

3816   return (req() > RawStores);
3817 }
3818 
3819 void InitializeNode::set_complete(PhaseGVN* phase) {
3820   assert(!is_complete(), "caller responsibility");
3821   _is_complete = Complete;
3822 
3823   // After this node is complete, it contains a bunch of
3824   // raw-memory initializations.  There is no need for
3825   // it to have anything to do with non-raw memory effects.
3826   // Therefore, tell all non-raw users to re-optimize themselves,
3827   // after skipping the memory effects of this initialization.
3828   PhaseIterGVN* igvn = phase->is_IterGVN();
3829   if (igvn)  igvn->add_users_to_worklist(this);
3830 }
3831 
3832 // convenience function
3833 // return false if the init contains any stores already
3834 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
3835   InitializeNode* init = initialization();
3836   if (init == NULL || init->is_complete()) {
3837     return false;
3838   }
3839   init->remove_extra_zeroes();
3840   // for now, if this allocation has already collected any inits, bail:
3841   if (init->is_non_zero())  return false;
3842   init->set_complete(phase);
3843   return true;
3844 }
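// [Editor's note, not in the original source] Hedged usage sketch; the caller
// below is hypothetical and 'alloc'/'gvn' are illustrative names only:
//
//   AllocateNode* alloc = ...;            // allocation under construction
//   if (alloc->maybe_set_complete(gvn)) {
//     // Its InitializeNode is now marked complete: it keeps only raw-memory
//     // init effects, and (under IGVN) its non-raw users have been queued
//     // for re-optimization.
//   }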
3845 
3846 void InitializeNode::remove_extra_zeroes() {
3847   if (req() == RawStores)  return;
3848   Node* zmem = zero_memory();
3849   uint fill = RawStores;
3850   for (uint i = fill; i < req(); i++) {
3851     Node* n = in(i);
3852     if (n->is_top() || n == zmem)  continue;  // skip
3853     if (fill < i)  set_req(fill, n);          // compact
3854     ++fill;
3855   }
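// [Editor's note, not in the original source] Example of the compaction
// above: if the raw-store inputs are { zmem, S1, top, S2 }, the loop keeps
// only S1 and S2, sliding them down to the first two RawStores slots; the
// while loop that follows then deletes the now-unused trailing slots.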
3856   // delete any empty spaces created:
3857   while (fill < req()) {
3858     del_req(fill);

3996             // store node that we'd like to capture. We need to check
3997             // the uses of the MergeMemNode.
3998             mems.push(n);
3999           }
4000         } else if (n->is_Mem()) {
4001           Node* other_adr = n->in(MemNode::Address);
4002           if (other_adr == adr) {
4003             failed = true;
4004             break;
4005           } else {
4006             const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
4007             if (other_t_adr != NULL) {
4008               int other_alias_idx = phase->C->get_alias_index(other_t_adr);
4009               if (other_alias_idx == alias_idx) {
4010                 // A load from the same memory slice as the store right
4011                 // after the InitializeNode. We check the control of the
4012                 // object/array that is loaded from. If it's the same as
4013                 // the store control then we cannot capture the store.
4014                 assert(!n->is_Store(), "2 stores to same slice on same control?");
4015                 Node* base = other_adr;
4016                 if (base->is_Phi()) {
4017                   // In rare cases, base may be a PhiNode that reads the
4018                   // same memory slice between the InitializeNode and this store.
4019                   failed = true;
4020                   break;
4021                 }
4022                 assert(base->is_AddP(), "should be addp but is %s", base->Name());
4023                 base = base->in(AddPNode::Base);
4024                 if (base != NULL) {
4025                   base = base->uncast();
4026                   if (base->is_Proj() && base->in(0) == alloc) {
4027                     failed = true;
4028                     break;
4029                   }
4030                 }
4031               }
4032             }
4033           }
4034         } else {
4035           failed = true;
4036           break;
4037         }
4038       }
4039     }
4040   }
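// [Editor's note, not in the original source] Summary of the visible part of
// this walk: capture is given up ('failed' is set) when another memory node
// uses the exact same address, when a load from the same alias slice turns
// out to be based on this allocation (a Phi base is treated just as
// conservatively), or when a user of an unexpected kind is encountered.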
4041   if (failed) {

4586         //   z's_done      12  16  16  16    12  16    12
4587         //   z's_needed    12  16  16  16    16  16    16
4588         //   zsize          0   0   0   0     4   0     4
4589         if (next_full_store < 0) {
4590           // Conservative tack:  Zero to end of current word.
4591           zeroes_needed = align_up(zeroes_needed, BytesPerInt);
4592         } else {
4593           // Zero to beginning of next fully initialized word.
4594           // Or, don't zero at all, if we are already in that word.
4595           assert(next_full_store >= zeroes_needed, "must go forward");
4596           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
4597           zeroes_needed = next_full_store;
4598         }
4599       }
4600 
4601       if (zeroes_needed > zeroes_done) {
4602         intptr_t zsize = zeroes_needed - zeroes_done;
4603         // Do some incremental zeroing on rawmem, in parallel with inits.
4604         zeroes_done = align_down(zeroes_done, BytesPerInt);
4605         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
4606                                               allocation()->in(AllocateNode::DefaultValue),
4607                                               allocation()->in(AllocateNode::RawDefaultValue),
4608                                               zeroes_done, zeroes_needed,
4609                                               phase);
4610         zeroes_done = zeroes_needed;
4611         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
4612           do_zeroing = false;   // leave the hole, next time
4613       }
4614     }
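// [Editor's note, not in the original source] The incremental zeroing above
// pre-fills the gap [zeroes_done, zeroes_needed) with the allocation's
// default value (or plain zero) before the store being collected.  After the
// third gap larger than InitArrayShortSize (tracked by big_init_gaps), this
// incremental zeroing is switched off and any remaining range is left to the
// bulk clear near the end of this method.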
4615 
4616     // Collect the store and move on:
4617     phase->replace_input_of(st, MemNode::Memory, inits);
4618     inits = st;                 // put it on the linearized chain
4619     set_req(i, zmem);           // unhook from previous position
4620 
4621     if (zeroes_done == st_off)
4622       zeroes_done = next_init_off;
4623 
4624     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
4625 
4626     #ifdef ASSERT
4627     // Various order invariants.  Weaker than stores_are_sane because

4647   remove_extra_zeroes();        // clear out all the zmems left over
4648   add_req(inits);
4649 
4650   if (!(UseTLAB && ZeroTLAB)) {
4651     // If anything remains to be zeroed, zero it all now.
4652     zeroes_done = align_down(zeroes_done, BytesPerInt);
4653     // if it is the last unused 4 bytes of an instance, forget about it
4654     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
4655     if (zeroes_done + BytesPerLong >= size_limit) {
4656       AllocateNode* alloc = allocation();
4657       assert(alloc != NULL, "must be present");
4658       if (alloc != NULL && alloc->Opcode() == Op_Allocate) {
4659         Node* klass_node = alloc->in(AllocateNode::KlassNode);
4660         ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
4661         if (zeroes_done == k->layout_helper())
4662           zeroes_done = size_limit;
4663       }
4664     }
4665     if (zeroes_done < size_limit) {
4666       rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
4667                                             allocation()->in(AllocateNode::DefaultValue),
4668                                             allocation()->in(AllocateNode::RawDefaultValue),
4669                                             zeroes_done, size_in_bytes, phase);
4670     }
4671   }
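// [Editor's note, not in the original source] When TLABs are pre-zeroed
// (UseTLAB && ZeroTLAB) the freshly allocated memory is already zero, so no
// trailing clear is emitted.  Otherwise the tail [zeroes_done, size_in_bytes)
// is cleared, except that the comparison against the instance's
// layout_helper() lets the last unused 4 bytes of an Op_Allocate instance
// stay untouched, as the preceding comment notes.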
4672 
4673   set_complete(phase);
4674   return rawmem;
4675 }
4676 
4677 
4678 #ifdef ASSERT
4679 bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
4680   if (is_complete())
4681     return true;                // stores could be anything at this point
4682   assert(allocation() != NULL, "must be present");
4683   intptr_t last_off = allocation()->minimum_header_size();
4684   for (uint i = InitializeNode::RawStores; i < req(); i++) {
4685     Node* st = in(i);
4686     intptr_t st_off = get_store_offset(st, phase);
4687     if (st_off < 0)  continue;  // ignore dead garbage
4688     if (last_off > st_off) {