src/hotspot/share/opto/memnode.cpp

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"

  26 #include "classfile/javaClasses.hpp"

  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "gc/shared/tlab_globals.hpp"
  31 #include "memory/allocation.inline.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "oops/objArrayKlass.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/arraycopynode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/regalloc.hpp"
  38 #include "opto/compile.hpp"
  39 #include "opto/connode.hpp"
  40 #include "opto/convertnode.hpp"

  41 #include "opto/loopnode.hpp"
  42 #include "opto/machnode.hpp"
  43 #include "opto/matcher.hpp"
  44 #include "opto/memnode.hpp"
  45 #include "opto/mulnode.hpp"
  46 #include "opto/narrowptrnode.hpp"
  47 #include "opto/phaseX.hpp"
  48 #include "opto/regmask.hpp"
  49 #include "opto/rootnode.hpp"
  50 #include "opto/vectornode.hpp"
  51 #include "utilities/align.hpp"
  52 #include "utilities/copy.hpp"
  53 #include "utilities/macros.hpp"
  54 #include "utilities/powerOfTwo.hpp"
  55 #include "utilities/vmError.hpp"
  56 
  57 // Portions of code courtesy of Clifford Click
  58 
  59 // Optimization - Graph Style
  60 

 223       // clone the Phi with our address type
 224       result = mphi->split_out_instance(t_adr, igvn);
 225     } else {
 226       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
 227     }
 228   }
 229   return result;
 230 }
 231 
 232 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
 233   uint alias_idx = phase->C->get_alias_index(tp);
 234   Node *mem = mmem;
 235 #ifdef ASSERT
 236   {
 237     // Check that current type is consistent with the alias index used during graph construction
 238     assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
 239     bool consistent =  adr_check == NULL || adr_check->empty() ||
 240                        phase->C->must_alias(adr_check, alias_idx );
 241     // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
 242     if( !consistent && adr_check != NULL && !adr_check->empty() &&
 243                tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
 244         adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
 245         ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
 246           adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
 247           adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
 248       // don't assert if it is dead code.
 249       consistent = true;
 250     }
 251     if( !consistent ) {
 252       st->print("alias_idx==%d, adr_check==", alias_idx);
 253       if( adr_check == NULL ) {
 254         st->print("NULL");
 255       } else {
 256         adr_check->dump();
 257       }
 258       st->cr();
 259       print_alias_types();
 260       assert(consistent, "adr_check must match alias idx");
 261     }
 262   }
 263 #endif

 871          "use LoadKlassNode instead");
 872   assert(!(adr_type->isa_aryptr() &&
 873            adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
 874          "use LoadRangeNode instead");
 875   // Check control edge of raw loads
 876   assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
 877           // oop will be recorded in oop map if load crosses safepoint
 878           rt->isa_oopptr() || is_immutable_value(adr),
 879           "raw memory operations should have control edge");
 880   LoadNode* load = NULL;
 881   switch (bt) {
 882   case T_BOOLEAN: load = new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 883   case T_BYTE:    load = new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 884   case T_INT:     load = new LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 885   case T_CHAR:    load = new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 886   case T_SHORT:   load = new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 887   case T_LONG:    load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency); break;
 888   case T_FLOAT:   load = new LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
 889   case T_DOUBLE:  load = new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
 890   case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency); break;

 891   case T_OBJECT:
 892 #ifdef _LP64
 893     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
 894       load = new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency);
 895     } else
 896 #endif
 897     {
 898       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
 899       load = new LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
 900     }
 901     break;
 902   default:
 903     ShouldNotReachHere();
 904     break;
 905   }
 906   assert(load != NULL, "LoadNode should have been created");
 907   if (unaligned) {
 908     load->set_unaligned_access();
 909   }
 910   if (mismatched) {

 998 
 999     LoadNode* ld = clone()->as_Load();
1000     Node* addp = in(MemNode::Address)->clone();
1001     if (ac->as_ArrayCopy()->is_clonebasic()) {
1002       assert(ld_alloc != NULL, "need an alloc");
1003       assert(addp->is_AddP(), "address must be addp");
1004       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1005       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1006       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1007       addp->set_req(AddPNode::Base, src);
1008       addp->set_req(AddPNode::Address, src);
1009     } else {
1010       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1011              ac->as_ArrayCopy()->is_copyof_validated() ||
1012              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1013       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1014       addp->set_req(AddPNode::Base, src);
1015       addp->set_req(AddPNode::Address, src);
1016 
1017       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1018       BasicType ary_elem  = ary_t->klass()->as_array_klass()->element_type()->basic_type();
1019       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1020       uint shift  = exact_log2(type2aelembytes(ary_elem));




1021 
1022       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1023 #ifdef _LP64
1024       diff = phase->transform(new ConvI2LNode(diff));
1025 #endif
1026       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1027 
1028       Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1029       addp->set_req(AddPNode::Offset, offset);
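      // Worked example (illustrative values): for an int[] copy with
      // SrcPos == 2 and DestPos == 5, shift == 2 and diff folds to
      // (2 - 5) << 2 == -12, so a load that addressed dest[i] at
      // offset header + i*4 is rebased to the matching source element
      // src[i - 3], whose value ended up in dest[i].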
1030     }
1031     addp = phase->transform(addp);
1032 #ifdef ASSERT
1033     const TypePtr* adr_type = phase->type(addp)->is_ptr();
1034     ld->_adr_type = adr_type;
1035 #endif
1036     ld->set_req(MemNode::Address, addp);
1037     ld->set_req(0, ctl);
1038     ld->set_req(MemNode::Memory, mem);
1039     // load depends on the tests that validate the arraycopy
1040     ld->_control_dependency = UnknownControl;

1120         // Same base, same offset.
1121         // Possible improvement for arrays: check index value instead of absolute offset.
1122 
1123         // At this point we have proven something like this setup:
1124         //   B = << base >>
1125         //   L =  LoadQ(AddP(Check/CastPP(B), #Off))
1126         //   S = StoreQ(AddP(             B , #Off), V)
1127         // (Actually, we haven't yet proven the Q's are the same.)
1128         // In other words, we are loading from a casted version of
1129         // the same pointer-and-offset that we stored to.
1130         // Casted version may carry a dependency and it is respected.
1131         // Thus, we are able to replace L by V.
1132       }
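      // Concrete illustration: after "obj.f = v; ...; x = obj.f;" the load L
      // and the store S share the same base object (possibly through a
      // CastPP/CheckCastPP introduced by a null or type check) and the same
      // field offset, so once the opcode check below confirms matching Q's,
      // x can be replaced by v.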
1133       // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1134       if (store_Opcode() != st->Opcode()) {
1135         return NULL;
1136       }
1137       // LoadVector/StoreVector needs additional check to ensure the types match.
1138       if (store_Opcode() == Op_StoreVector) {
1139         const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
1140         const TypeVect* out_vt = as_LoadVector()->vect_type();
1141         if (in_vt != out_vt) {
1142           return NULL;
1143         }
1144       }
1145       return st->in(MemNode::ValueIn);
1146     }
1147 
1148     // A load from a freshly-created object always returns zero.
1149     // (This can happen after LoadNode::Ideal resets the load's memory input
1150     // to find_captured_store, which returned InitializeNode::zero_memory.)
1151     if (st->is_Proj() && st->in(0)->is_Allocate() &&
1152         (st->in(0) == ld_alloc) &&
1153         (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1154       // return a zero value for the load's basic type
1155       // (This is one of the few places where a generic PhaseTransform
1156       // can create new nodes.  Think of it as lazily manifesting
 1157       // virtually pre-existing constants.)
1158       if (memory_type() != T_VOID) {
1159         if (ReduceBulkZeroing || find_array_copy_clone(phase, ld_alloc, in(MemNode::Memory)) == NULL) {
1160           // If ReduceBulkZeroing is disabled, we need to check if the allocation does not belong to an
1161           // ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
1162           // by the ArrayCopyNode.
1163           return phase->zerocon(memory_type());
1164         }
1165       } else {
1166         // TODO: materialize all-zero vector constant
1167         assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1168       }
1169     }
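    // Illustrative case: in "int[] a = new int[n]; int x = a[3];" with no
    // intervening store to a[3], the load's memory chain reaches the
    // allocation's zero memory, so x folds to the constant 0 above, unless
    // the object is actually initialized by an ArrayCopy clone as noted.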
1170 
1171     // A load from an initialization barrier can match a captured store.
1172     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1173       InitializeNode* init = st->in(0)->as_Initialize();
1174       AllocateNode* alloc = init->allocation();
1175       if ((alloc != NULL) && (alloc == ld_alloc)) {
1176         // examine a captured store value
1177         st = init->find_captured_store(ld_off, memory_size(), phase);

1205 //----------------------is_instance_field_load_with_local_phi------------------
1206 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1207   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1208       in(Address)->is_AddP() ) {
1209     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1210     // Only instances and boxed values.
1211     if( t_oop != NULL &&
1212         (t_oop->is_ptr_to_boxed_value() ||
1213          t_oop->is_known_instance_field()) &&
1214         t_oop->offset() != Type::OffsetBot &&
1215         t_oop->offset() != Type::OffsetTop) {
1216       return true;
1217     }
1218   }
1219   return false;
1220 }
1221 
1222 //------------------------------Identity---------------------------------------
1223 // Loads are identity if previous store is to same address
 1224 Node* LoadNode::Identity(PhaseGVN* phase) {
1225   // If the previous store-maker is the right kind of Store, and the store is
1226   // to the same address, then we are equal to the value stored.
1227   Node* mem = in(Memory);
1228   Node* value = can_see_stored_value(mem, phase);
1229   if( value ) {
1230     // byte, short & char stores truncate naturally.
1231     // A load has to load the truncated value which requires
1232     // some sort of masking operation and that requires an
1233     // Ideal call instead of an Identity call.
1234     if (memory_size() < BytesPerInt) {
1235       // If the input to the store does not fit with the load's result type,
1236       // it must be truncated via an Ideal call.
1237       if (!phase->type(value)->higher_equal(phase->type(this)))
1238         return this;
1239     }
1240     // (This works even when value is a Con, but LoadNode::Value
1241     // usually runs first, producing the singleton type of the Con.)
1242     return value;
1243   }
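  // Example of the bail-out above: storing the int value 0x1234 into a byte
  // field and loading it back must produce (byte)0x1234 == 0x34, so the raw
  // stored value cannot be returned here; the narrow loads' Ideal() routines
  // instead insert the sign-extending shifts (LoadB/LoadS) or the mask
  // (LoadUB/LoadUS).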
1244 

1914       }
1915     }
1916 
1917     // Don't do this for integer types. There is only potential profit if
1918     // the element type t is lower than _type; that is, for int types, if _type is
1919     // more restrictive than t.  This only happens here if one is short and the other
1920     // char (both 16 bits), and in those cases we've made an intentional decision
1921     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
1922     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
1923     //
1924     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
1925     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
1926     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
1927     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
1928     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
1929     // In fact, that could have been the original type of p1, and p1 could have
1930     // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
1931     // expression (LShiftL quux 3) independently optimized to the constant 8.
1932     if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
1933         && (_type->isa_vect() == NULL)

1934         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
1935       // t might actually be lower than _type, if _type is a unique
1936       // concrete subclass of abstract class t.
1937       if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
1938         const Type* jt = t->join_speculative(_type);
1939         // In any case, do not allow the join, per se, to empty out the type.
1940         if (jt->empty() && !t->empty()) {
 1941           // This can happen if an interface-typed array narrows to a class type.
1942           jt = _type;
1943         }
1944 #ifdef ASSERT
1945         if (phase->C->eliminate_boxing() && adr->is_AddP()) {
1946           // The pointers in the autobox arrays are always non-null
1947           Node* base = adr->in(AddPNode::Base);
1948           if ((base != NULL) && base->is_DecodeN()) {
1949             // Get LoadN node which loads IntegerCache.cache field
1950             base = base->in(1);
1951           }
1952           if ((base != NULL) && base->is_Con()) {
1953             const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
1954             if ((base_type != NULL) && base_type->is_autobox_cache()) {
1955               // It could be narrow oop
1956               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
1957             }
1958           }
1959         }
1960 #endif
1961         return jt;
1962       }
1963     }
1964   } else if (tp->base() == Type::InstPtr) {
1965     assert( off != Type::OffsetBot ||
1966             // arrays can be cast to Objects
1967             tp->is_oopptr()->klass()->is_java_lang_Object() ||

1968             // unsafe field access may not have a constant offset
1969             C->has_unsafe_access(),
1970             "Field accesses must be precise" );
1971     // For oop loads, we expect the _type to be precise.
1972 
1973     // Optimize loads from constant fields.
1974     const TypeInstPtr* tinst = tp->is_instptr();



1975     ciObject* const_oop = tinst->const_oop();
1976     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) {
 1977       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());
1978       if (con_type != NULL) {
1979         return con_type;
1980       }
1981     }
1982   } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
1983     assert( off != Type::OffsetBot ||
1984             // arrays can be cast to Objects

1985             tp->is_klassptr()->klass()->is_java_lang_Object() ||
1986             // also allow array-loading from the primary supertype
1987             // array during subtype checks
1988             Opcode() == Op_LoadKlass,
1989             "Field accesses must be precise" );
1990     // For klass/static loads, we expect the _type to be precise
1991   } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
 1992     /* With mirrors being an indirection in the Klass*
1993      * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
1994      * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
1995      *
1996      * So check the type and klass of the node before the LoadP.
1997      */
1998     Node* adr2 = adr->in(MemNode::Address);
1999     const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
2000     if (tkls != NULL && !StressReflectiveCode) {
2001       ciKlass* klass = tkls->klass();
2002       if (klass->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
2003         assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2004         assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
 2005         return TypeInstPtr::make(klass->java_mirror());
2006       }
2007     }
2008   }
2009 
2010   const TypeKlassPtr *tkls = tp->isa_klassptr();
2011   if (tkls != NULL && !StressReflectiveCode) {
2012     ciKlass* klass = tkls->klass();
2013     if (klass->is_loaded() && tkls->klass_is_exact()) {
2014       // We are loading a field from a Klass metaobject whose identity
2015       // is known at compile time (the type is "exact" or "precise").
2016       // Check for fields we know are maintained as constants by the VM.
2017       if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2018         // The field is Klass::_super_check_offset.  Return its (constant) value.
2019         // (Folds up type checking code.)
2020         assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2021         return TypeInt::make(klass->super_check_offset());
2022       }
2023       // Compute index into primary_supers array
2024       juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2025       // Check for overflowing; use unsigned compare to handle the negative case.
2026       if( depth < ciKlass::primary_super_limit() ) {
2027         // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2028         // (Folds up type checking code.)
2029         assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2030         ciKlass *ss = klass->super_of_depth(depth);
2031         return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
2032       }
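      // Worked example: an offset of primary_supers_offset() plus
      // 2 * sizeof(Klass*) gives depth == 2, so the load folds to the
      // constant Klass stored in _primary_supers[2] (or to the null pointer
      // if this klass sits above that depth), letting the generated subtype
      // check constant-fold.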
2033       const Type* aift = load_array_final_field(tkls, klass);
2034       if (aift != NULL)  return aift;
2035     }
2036 
2037     // We can still check if we are loading from the primary_supers array at a
2038     // shallow enough depth.  Even though the klass is not exact, entries less
2039     // than or equal to its super depth are correct.
2040     if (klass->is_loaded() ) {
2041       ciType *inner = klass;
2042       while( inner->is_obj_array_klass() )
2043         inner = inner->as_obj_array_klass()->base_element_type();
2044       if( inner->is_instance_klass() &&
2045           !inner->as_instance_klass()->flags().is_interface() ) {
2046         // Compute index into primary_supers array
2047         juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2048         // Check for overflowing; use unsigned compare to handle the negative case.
2049         if( depth < ciKlass::primary_super_limit() &&
2050             depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
2051           // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2052           // (Folds up type checking code.)
2053           assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2054           ciKlass *ss = klass->super_of_depth(depth);
2055           return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
2056         }
2057       }
2058     }
2059 
2060     // If the type is enough to determine that the thing is not an array,

2085   if (ReduceFieldZeroing || is_instance || is_boxed_value) {
2086     Node* value = can_see_stored_value(mem,phase);
2087     if (value != NULL && value->is_Con()) {
2088       assert(value->bottom_type()->higher_equal(_type),"sanity");
2089       return value->bottom_type();
2090     }
2091   }
2092 
2093   bool is_vect = (_type->isa_vect() != NULL);
2094   if (is_instance && !is_vect) {
2095     // If we have an instance type and our memory input is the
 2096     // program's initial memory state, there is no matching store,
2097     // so just return a zero of the appropriate type -
2098     // except if it is vectorized - then we have no zero constant.
2099     Node *mem = in(MemNode::Memory);
2100     if (mem->is_Parm() && mem->in(0)->is_Start()) {
2101       assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2102       return Type::get_zero_type(_type->basic_type());
2103     }
2104   }
2105 
2106   Node* alloc = is_new_object_mark_load(phase);
2107   if (alloc != NULL) {
 2108     return TypeX::make(markWord::prototype().value());
2109   }
2110 
2111   return _type;
2112 }
2113 
2114 //------------------------------match_edge-------------------------------------
2115 // Do we Match on this edge index or not?  Match only the address.
2116 uint LoadNode::match_edge(uint idx) const {
2117   return idx == MemNode::Address;
2118 }
2119 
2120 //--------------------------LoadBNode::Ideal--------------------------------------
2121 //
2122 //  If the previous store is to the same address as this load,
2123 //  and the value stored was larger than a byte, replace this load
2124 //  with the value stored truncated to a byte.  If no truncation is
2125 //  needed, the replacement is done in LoadNode::Identity().
2126 //
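//  For instance, if the previous store wrote the int value 511 (0x1FF) into
//  the byte, the replacement computes (511 << 24) >> 24 == -1, which is
//  exactly what a fresh LoadB of that byte would observe.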
2127 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2128   Node* mem = in(MemNode::Memory);

2239   return LoadNode::Ideal(phase, can_reshape);
2240 }
2241 
2242 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2243   Node* mem = in(MemNode::Memory);
2244   Node* value = can_see_stored_value(mem,phase);
2245   if (value != NULL && value->is_Con() &&
2246       !value->bottom_type()->higher_equal(_type)) {
2247     // If the input to the store does not fit with the load's result type,
2248     // it must be truncated. We can't delay until Ideal call since
2249     // a singleton Value is needed for split_thru_phi optimization.
2250     int con = value->get_int();
2251     return TypeInt::make((con << 16) >> 16);
2252   }
2253   return LoadNode::Value(phase);
2254 }
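// For example, a stored constant 0x12345 folds here to
// (0x12345 << 16) >> 16 == 0x2345, and 0xFFFF folds to -1, matching what a
// real signed 16-bit load of the stored bits would produce.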
2255 
2256 //=============================================================================
2257 //----------------------------LoadKlassNode::make------------------------------
2258 // Polymorphic factory method:
2259 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {

2260   // sanity check the alias category against the created node type
2261   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
2262   assert(adr_type != NULL, "expecting TypeKlassPtr");
2263 #ifdef _LP64
2264   if (adr_type->is_ptr_to_narrowklass()) {
2265     assert(UseCompressedClassPointers, "no compressed klasses");
2266     Node* load_klass = gvn.transform(new LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2267     return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2268   }
2269 #endif
2270   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2271   return new LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
2272 }
2273 
2274 //------------------------------Value------------------------------------------
2275 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
2276   return klass_value_common(phase);
2277 }
2278 
2279 // In most cases, LoadKlassNode does not have the control input set. If the control

2286   // Either input is TOP ==> the result is TOP
2287   const Type *t1 = phase->type( in(MemNode::Memory) );
2288   if (t1 == Type::TOP)  return Type::TOP;
2289   Node *adr = in(MemNode::Address);
2290   const Type *t2 = phase->type( adr );
2291   if (t2 == Type::TOP)  return Type::TOP;
2292   const TypePtr *tp = t2->is_ptr();
2293   if (TypePtr::above_centerline(tp->ptr()) ||
2294       tp->ptr() == TypePtr::Null)  return Type::TOP;
2295 
2296   // Return a more precise klass, if possible
2297   const TypeInstPtr *tinst = tp->isa_instptr();
2298   if (tinst != NULL) {
2299     ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
2300     int offset = tinst->offset();
2301     if (ik == phase->C->env()->Class_klass()
2302         && (offset == java_lang_Class::klass_offset() ||
2303             offset == java_lang_Class::array_klass_offset())) {
2304       // We are loading a special hidden field from a Class mirror object,
2305       // the field which points to the VM's Klass metaobject.
2306       ciType* t = tinst->java_mirror_type();

2307       // java_mirror_type returns non-null for compile-time Class constants.
2308       if (t != NULL) {
2309         // constant oop => constant klass
2310         if (offset == java_lang_Class::array_klass_offset()) {
2311           if (t->is_void()) {
2312             // We cannot create a void array.  Since void is a primitive type return null
2313             // klass.  Users of this result need to do a null check on the returned klass.
2314             return TypePtr::NULL_PTR;
2315           }
2316           return TypeKlassPtr::make(ciArrayKlass::make(t));
2317         }
2318         if (!t->is_klass()) {
2319           // a primitive Class (e.g., int.class) has NULL for a klass field
2320           return TypePtr::NULL_PTR;
2321         }
2322         // (Folds up the 1st indirection in aClassConstant.getModifiers().)
2323         return TypeKlassPtr::make(t->as_klass());
2324       }
2325       // non-constant mirror, so we can't tell what's going on
2326     }
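    // Examples of the folding above: a constant String.class mirror with
    // offset == klass_offset() folds to the Klass* of java.lang.String; a
    // primitive mirror such as int.class folds to NULL_PTR at that offset;
    // and at array_klass_offset(), int.class folds to the int[] klass while
    // void.class folds to NULL_PTR because no void array exists.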
2327     if( !ik->is_loaded() )
2328       return _type;             // Bail out if not loaded
2329     if (offset == oopDesc::klass_offset_in_bytes()) {
2330       if (tinst->klass_is_exact()) {
2331         return TypeKlassPtr::make(ik);
2332       }
2333       // See if we can become precise: no subklasses and no interface
2334       // (Note:  We need to support verified interfaces.)
2335       if (!ik->is_interface() && !ik->has_subklass()) {
2336         // Add a dependence; if any subclass added we need to recompile
2337         if (!ik->is_final()) {
2338           // %%% should use stronger assert_unique_concrete_subtype instead
2339           phase->C->dependencies()->assert_leaf_type(ik);
2340         }
2341         // Return precise klass
2342         return TypeKlassPtr::make(ik);
2343       }
2344 
2345       // Return root of possible klass
2346       return TypeKlassPtr::make(TypePtr::NotNull, ik, 0/*offset*/);
2347     }
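    // Net effect: a klass-header load from a reference whose type is exact,
    // or whose class is a non-interface with no subclasses (guarded by a
    // leaf-type dependency when it is not final), folds to a constant Klass,
    // which in turn lets checks such as "x.getClass() == Foo.class"
    // constant-fold.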
2348   }
2349 
2350   // Check for loading klass from an array
2351   const TypeAryPtr *tary = tp->isa_aryptr();
2352   if( tary != NULL ) {
2353     ciKlass *tary_klass = tary->klass();
2354     if (tary_klass != NULL   // can be NULL when at BOTTOM or TOP
2355         && tary->offset() == oopDesc::klass_offset_in_bytes()) {
2356       if (tary->klass_is_exact()) {
2357         return TypeKlassPtr::make(tary_klass);
2358       }
2359       ciArrayKlass *ak = tary->klass()->as_array_klass();
2360       // If the klass is an object array, we defer the question to the
2361       // array component klass.
2362       if( ak->is_obj_array_klass() ) {
2363         assert( ak->is_loaded(), "" );
2364         ciKlass *base_k = ak->as_obj_array_klass()->base_element_klass();
2365         if( base_k->is_loaded() && base_k->is_instance_klass() ) {
2366           ciInstanceKlass* ik = base_k->as_instance_klass();
2367           // See if we can become precise: no subklasses and no interface
2368           if (!ik->is_interface() && !ik->has_subklass()) {


2369             // Add a dependence; if any subclass added we need to recompile
2370             if (!ik->is_final()) {
2371               phase->C->dependencies()->assert_leaf_type(ik);
2372             }
2373             // Return precise array klass
2374             return TypeKlassPtr::make(ak);
2375           }
2376         }
2377         return TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
2378       } else {                  // Found a type-array?
2379         assert( ak->is_type_array_klass(), "" );
2380         return TypeKlassPtr::make(ak); // These are always precise
2381       }
2382     }
2383   }
2384 
2385   // Check for loading klass from an array klass
2386   const TypeKlassPtr *tkls = tp->isa_klassptr();
2387   if (tkls != NULL && !StressReflectiveCode) {
2388     ciKlass* klass = tkls->klass();
2389     if( !klass->is_loaded() )
2390       return _type;             // Bail out if not loaded


2391     if( klass->is_obj_array_klass() &&
2392         tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2393       ciKlass* elem = klass->as_obj_array_klass()->element_klass();
2394       // // Always returning precise element type is incorrect,
2395       // // e.g., element type could be object and array may contain strings
2396       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2397 
2398       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2399       // according to the element type's subclassing.
2400       return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);




2401     }
2402     if( klass->is_instance_klass() && tkls->klass_is_exact() &&
2403         tkls->offset() == in_bytes(Klass::super_offset())) {
2404       ciKlass* sup = klass->as_instance_klass()->super();
2405       // The field is Klass::_super.  Return its (constant) value.
2406       // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
2407       return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
2408     }
2409   }
2410 
2411   // Bailout case
2412   return LoadNode::Value(phase);
2413 }
2414 
2415 //------------------------------Identity---------------------------------------
2416 // To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
2417 // Also feed through the klass in Allocate(...klass...)._klass.
2418 Node* LoadKlassNode::Identity(PhaseGVN* phase) {
2419   return klass_identity_common(phase);
2420 }

2588 //=============================================================================
2589 //---------------------------StoreNode::make-----------------------------------
2590 // Polymorphic factory method:
2591 StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
2592   assert((mo == unordered || mo == release), "unexpected");
2593   Compile* C = gvn.C;
2594   assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
2595          ctl != NULL, "raw memory operations should have control edge");
2596 
2597   switch (bt) {
2598   case T_BOOLEAN: val = gvn.transform(new AndINode(val, gvn.intcon(0x1))); // Fall through to T_BYTE case
2599   case T_BYTE:    return new StoreBNode(ctl, mem, adr, adr_type, val, mo);
2600   case T_INT:     return new StoreINode(ctl, mem, adr, adr_type, val, mo);
2601   case T_CHAR:
2602   case T_SHORT:   return new StoreCNode(ctl, mem, adr, adr_type, val, mo);
2603   case T_LONG:    return new StoreLNode(ctl, mem, adr, adr_type, val, mo);
2604   case T_FLOAT:   return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
2605   case T_DOUBLE:  return new StoreDNode(ctl, mem, adr, adr_type, val, mo);
2606   case T_METADATA:
2607   case T_ADDRESS:

2608   case T_OBJECT:
2609 #ifdef _LP64
2610     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2611       val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop()));
2612       return new StoreNNode(ctl, mem, adr, adr_type, val, mo);
2613     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
2614                (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
2615                 adr->bottom_type()->isa_rawptr())) {
2616       val = gvn.transform(new EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
2617       return new StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
2618     }
2619 #endif
2620     {
2621       return new StorePNode(ctl, mem, adr, adr_type, val, mo);
2622     }
2623   default:
2624     ShouldNotReachHere();
2625     return (StoreNode*)NULL;
2626   }
2627 }
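// Caller-side sketch (illustrative; gvn, ctl, mem, adr, adr_type and val
// stand for whatever the caller already has in hand):
//
//   Node* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val,
//                              T_INT, MemNode::unordered);
//   st = gvn.transform(st);
//
// For T_INT this produces a StoreINode; for T_OBJECT on LP64 with compressed
// oops the value is first wrapped in an EncodeP and a StoreNNode is returned,
// as handled in the switch above.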

2649 
2650   // Since they are not commoned, do not hash them:
2651   return NO_HASH;
2652 }
2653 
2654 //------------------------------Ideal------------------------------------------
2655 // Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x).
2656 // When a store immediately follows a relevant allocation/initialization,
2657 // try to capture it into the initialization, or hoist it above.
2658 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2659   Node* p = MemNode::Ideal_common(phase, can_reshape);
2660   if (p)  return (p == NodeSentinel) ? NULL : p;
2661 
2662   Node* mem     = in(MemNode::Memory);
2663   Node* address = in(MemNode::Address);
2664   Node* value   = in(MemNode::ValueIn);
2665   // Back-to-back stores to same address?  Fold em up.  Generally
2666   // unsafe if I have intervening uses...  Also disallowed for StoreCM
2667   // since they must follow each StoreP operation.  Redundant StoreCMs
2668   // are eliminated just before matching in final_graph_reshape.
2669   {
2670     Node* st = mem;
2671     // If Store 'st' has more than one use, we cannot fold 'st' away.
2672     // For example, 'st' might be the final state at a conditional
2673     // return.  Or, 'st' might be used by some node which is live at
2674     // the same time 'st' is live, which might be unschedulable.  So,
2675     // require exactly ONE user until such time as we clone 'mem' for
2676     // each of 'mem's uses (thus making the exactly-1-user-rule hold
2677     // true).
2678     while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
2679       // Looking at a dead closed cycle of memory?
2680       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
2681       assert(Opcode() == st->Opcode() ||
2682              st->Opcode() == Op_StoreVector ||
2683              Opcode() == Op_StoreVector ||
2684              st->Opcode() == Op_StoreVectorScatter ||
2685              Opcode() == Op_StoreVectorScatter ||
2686              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
2687              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
2688              (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy

2689              (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
2690              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
2691 
2692       if (st->in(MemNode::Address)->eqv_uncast(address) &&
2693           st->as_Store()->memory_size() <= this->memory_size()) {
2694         Node* use = st->raw_out(0);
2695         if (phase->is_IterGVN()) {
2696           phase->is_IterGVN()->rehash_node_delayed(use);
2697         }
2698         // It's OK to do this in the parser, since DU info is always accurate,
2699         // and the parser always refers to nodes via SafePointNode maps.
2700         use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
2701         return this;
2702       }
2703       st = st->in(MemNode::Memory);
2704     }
2705   }
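  // Example of the fold above: for back-to-back field writes "p.x = 1;
  // p.x = 2;" the second StoreI sees the first as its memory input; if the
  // first store has exactly one use it is bypassed, its single user is
  // rewired to the first store's memory input, and the dead store is left
  // for IGVN to clean up.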
2706 
2707 
2708   // Capture an unaliased, unconditional, simple store into an initializer.

2765   // Load then Store?  Then the Store is useless
2766   if (val->is_Load() &&
2767       val->in(MemNode::Address)->eqv_uncast(adr) &&
2768       val->in(MemNode::Memory )->eqv_uncast(mem) &&
2769       val->as_Load()->store_Opcode() == Opcode()) {
2770     result = mem;
2771   }
2772 
2773   // Two stores in a row of the same value?
2774   if (result == this &&
2775       mem->is_Store() &&
2776       mem->in(MemNode::Address)->eqv_uncast(adr) &&
2777       mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
2778       mem->Opcode() == Opcode()) {
2779     result = mem;
2780   }
2781 
2782   // Store of zero anywhere into a freshly-allocated object?
2783   // Then the store is useless.
2784   // (It must already have been captured by the InitializeNode.)
2785   if (result == this &&
2786       ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
2787     // a newly allocated object is already all-zeroes everywhere
2788     if (mem->is_Proj() && mem->in(0)->is_Allocate()) {


2789       result = mem;
2790     }
2791 
2792     if (result == this) {
2793       // the store may also apply to zero-bits in an earlier object
2794       Node* prev_mem = find_previous_store(phase);
2795       // Steps (a), (b):  Walk past independent stores to find an exact match.
2796       if (prev_mem != NULL) {
2797         Node* prev_val = can_see_stored_value(prev_mem, phase);
2798         if (prev_val != NULL && prev_val == val) {
2799           // prev_val and val might differ by a cast; it would be good
2800           // to keep the more informative of the two.
2801           result = mem;
2802         }
2803       }
2804     }
2805   }
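  // Concrete cases covered above: "p.x = p.x" (a store fed by a load of the
  // same address and memory) is useless; so is the second store in
  // "p.x = v; p.x = v"; and with ReduceFieldZeroing, "p.f = 0" right after
  // "p = new P()" is useless because the fresh object is already zeroed, or
  // because an earlier store is found to have written the same zero value.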
2806 
2807   PhaseIterGVN* igvn = phase->is_IterGVN();
2808   if (result != this && igvn != NULL) {
2809     MemBarNode* trailing = trailing_membar();
2810     if (trailing != NULL) {
2811 #ifdef ASSERT
2812       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();

2957 Node* StoreCMNode::Identity(PhaseGVN* phase) {
2958   // No need to card mark when storing a null ptr
2959   Node* my_store = in(MemNode::OopStore);
2960   if (my_store->is_Store()) {
2961     const Type *t1 = phase->type( my_store->in(MemNode::ValueIn) );
2962     if( t1 == TypePtr::NULL_PTR ) {
2963       return in(MemNode::Memory);
2964     }
2965   }
2966   return this;
2967 }
2968 
2969 //=============================================================================
2970 //------------------------------Ideal---------------------------------------
2971 Node *StoreCMNode::Ideal(PhaseGVN *phase, bool can_reshape){
2972   Node* progress = StoreNode::Ideal(phase, can_reshape);
2973   if (progress != NULL) return progress;
2974 
2975   Node* my_store = in(MemNode::OopStore);
2976   if (my_store->is_MergeMem()) {
2977     Node* mem = my_store->as_MergeMem()->memory_at(oop_alias_idx());
2978     set_req_X(MemNode::OopStore, mem, phase);
2979     return this;




2980   }
2981 
2982   return NULL;
2983 }
2984 
2985 //------------------------------Value-----------------------------------------
2986 const Type* StoreCMNode::Value(PhaseGVN* phase) const {
2987   // Either input is TOP ==> the result is TOP (checked in StoreNode::Value).
2988   // If extra input is TOP ==> the result is TOP
2989   const Type* t = phase->type(in(MemNode::OopStore));
2990   if (t == Type::TOP) {
2991     return Type::TOP;
2992   }
2993   return StoreNode::Value(phase);
2994 }
2995 
2996 
2997 //=============================================================================
2998 //----------------------------------SCMemProjNode------------------------------
2999 const Type* SCMemProjNode::Value(PhaseGVN* phase) const

3118 // Clearing a short array is faster with stores
3119 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3120   // Already know this is a large node, do not try to ideal it
3121   if (!IdealizeClearArrayNode || _is_large) return NULL;
3122 
3123   const int unit = BytesPerLong;
3124   const TypeX* t = phase->type(in(2))->isa_intptr_t();
3125   if (!t)  return NULL;
3126   if (!t->is_con())  return NULL;
3127   intptr_t raw_count = t->get_con();
3128   intptr_t size = raw_count;
3129   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
3130   // Clearing nothing uses the Identity call.
3131   // Negative clears are possible on dead ClearArrays
3132   // (see jck test stmt114.stmt11402.val).
3133   if (size <= 0 || size % unit != 0)  return NULL;
3134   intptr_t count = size / unit;
3135   // Length too long; communicate this to matchers and assemblers.
 3136   // Assemblers are responsible for producing fast hardware clears for it.
3137   if (size > InitArrayShortSize) {
3138     return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
3139   } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
3140     return NULL;
3141   }
3142   Node *mem = in(1);
3143   if( phase->type(mem)==Type::TOP ) return NULL;
3144   Node *adr = in(3);
3145   const Type* at = phase->type(adr);
3146   if( at==Type::TOP ) return NULL;
3147   const TypePtr* atp = at->isa_ptr();
3148   // adjust atp to be the correct array element address type
3149   if (atp == NULL)  atp = TypePtr::BOTTOM;
3150   else              atp = atp->add_offset(Type::OffsetBot);
3151   // Get base for derived pointer purposes
3152   if( adr->Opcode() != Op_AddP ) Unimplemented();
3153   Node *base = adr->in(1);
3154 
3155   Node *zero = phase->makecon(TypeLong::ZERO);
3156   Node *off  = phase->MakeConX(BytesPerLong);
3157   mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
3158   count--;
3159   while( count-- ) {
3160     mem = phase->transform(mem);
3161     adr = phase->transform(new AddPNode(base,adr,off));
3162     mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
3163   }
3164   return mem;
3165 }
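// Worked example: a constant clear of 24 bytes (count == 3 longwords) expands
// into three zero StoreL nodes at adr, adr+8 and adr+16, chained through
// memory; anything above InitArrayShortSize instead stays a single "large"
// ClearArrayNode so the backend can emit a fast bulk clear.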
3166 
3167 //----------------------------step_through----------------------------------
 3168 // Step *np past this ClearArray to its allocation's input memory edge if it
 3169 // initializes a different instance; return false if it is the one we are looking for.
3170 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
3171   Node* n = *np;
3172   assert(n->is_ClearArray(), "sanity");
3173   intptr_t offset;
3174   AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
3175   // This method is called only before Allocate nodes are expanded
3176   // during macro nodes expansion. Before that ClearArray nodes are
3177   // only generated in PhaseMacroExpand::generate_arraycopy() (before
3178   // Allocate nodes are expanded) which follows allocations.
3179   assert(alloc != NULL, "should have allocation");
3180   if (alloc->_idx == instance_id) {
3181     // Can not bypass initialization of the instance we are looking for.
3182     return false;
3183   }
3184   // Otherwise skip it.
3185   InitializeNode* init = alloc->initialization();
3186   if (init != NULL)
3187     *np = init->in(TypeFunc::Memory);
3188   else
3189     *np = alloc->in(TypeFunc::Memory);
3190   return true;
3191 }
3192 
3193 //----------------------------clear_memory-------------------------------------
3194 // Generate code to initialize object storage to zero.
3195 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,


3196                                    intptr_t start_offset,
3197                                    Node* end_offset,
3198                                    PhaseGVN* phase) {
3199   intptr_t offset = start_offset;
3200 
3201   int unit = BytesPerLong;
3202   if ((offset % unit) != 0) {
3203     Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
3204     adr = phase->transform(adr);
3205     const TypePtr* atp = TypeRawPtr::BOTTOM;
 3206     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
3207     mem = phase->transform(mem);
3208     offset += BytesPerInt;
3209   }
3210   assert((offset % unit) == 0, "");
3211 
3212   // Initialize the remaining stuff, if any, with a ClearArray.
3213   return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
3214 }
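// Worked example: with start_offset == 12 (int-aligned but not long-aligned)
// the code above emits one zero StoreI at offset 12, bumps the offset to 16,
// and hands the now 8-byte-aligned range [16, end_offset) to the node-offset
// variant of clear_memory below.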
3215 
3216 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,

3217                                    Node* start_offset,
3218                                    Node* end_offset,
3219                                    PhaseGVN* phase) {
3220   if (start_offset == end_offset) {
3221     // nothing to do
3222     return mem;
3223   }
3224 
3225   int unit = BytesPerLong;
3226   Node* zbase = start_offset;
3227   Node* zend  = end_offset;
3228 
3229   // Scale to the unit required by the CPU:
3230   if (!Matcher::init_array_count_is_in_bytes) {
3231     Node* shift = phase->intcon(exact_log2(unit));
3232     zbase = phase->transform(new URShiftXNode(zbase, shift) );
3233     zend  = phase->transform(new URShiftXNode(zend,  shift) );
3234   }
3235 
3236   // Bulk clear double-words
3237   Node* zsize = phase->transform(new SubXNode(zend, zbase) );
3238   Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
3239   mem = new ClearArrayNode(ctl, mem, zsize, adr, false);



3240   return phase->transform(mem);
3241 }
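// Worked example: when the matcher counts in longwords rather than bytes,
// both offsets are shifted right by log2(BytesPerLong) == 3, so the byte
// range [16, 48) becomes the longword range [2, 6) and zsize folds to 4
// doublewords for the ClearArrayNode built above.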
3242 
3243 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,


3244                                    intptr_t start_offset,
3245                                    intptr_t end_offset,
3246                                    PhaseGVN* phase) {
3247   if (start_offset == end_offset) {
3248     // nothing to do
3249     return mem;
3250   }
3251 
3252   assert((end_offset % BytesPerInt) == 0, "odd end offset");
3253   intptr_t done_offset = end_offset;
3254   if ((done_offset % BytesPerLong) != 0) {
3255     done_offset -= BytesPerInt;
3256   }
3257   if (done_offset > start_offset) {
3258     mem = clear_memory(ctl, mem, dest,
3259                        start_offset, phase->MakeConX(done_offset), phase);
3260   }
3261   if (done_offset < end_offset) { // emit the final 32-bit store
3262     Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
3263     adr = phase->transform(adr);
3264     const TypePtr* atp = TypeRawPtr::BOTTOM;
 3265     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
3266     mem = phase->transform(mem);
3267     done_offset += BytesPerInt;
3268   }
3269   assert(done_offset == end_offset, "");
3270   return mem;
3271 }
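// Worked example: clearing [16, 28) first bulk-clears the long-aligned part
// [16, 24) and then emits a single zero StoreI at offset 24 for the trailing
// 4 bytes, since 28 is int-aligned but not long-aligned.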
3272 
3273 //=============================================================================
3274 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
3275   : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
3276     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
3277 #ifdef ASSERT
3278   , _pair_idx(0)
3279 #endif
3280 {
3281   init_class_id(Class_MemBar);
3282   Node* top = C->top();
3283   init_req(TypeFunc::I_O,top);
3284   init_req(TypeFunc::FramePtr,top);
3285   init_req(TypeFunc::ReturnAdr,top);

3390       PhaseIterGVN* igvn = phase->is_IterGVN();
3391       remove(igvn);
3392       // Must return either the original node (now dead) or a new node
3393       // (Do not return a top here, since that would break the uniqueness of top.)
3394       return new ConINode(TypeInt::ZERO);
3395     }
3396   }
3397   return progress ? this : NULL;
3398 }
3399 
3400 //------------------------------Value------------------------------------------
3401 const Type* MemBarNode::Value(PhaseGVN* phase) const {
3402   if( !in(0) ) return Type::TOP;
3403   if( phase->type(in(0)) == Type::TOP )
3404     return Type::TOP;
3405   return TypeTuple::MEMBAR;
3406 }
3407 
3408 //------------------------------match------------------------------------------
3409 // Construct projections for memory.
3410 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
3411   switch (proj->_con) {
3412   case TypeFunc::Control:
3413   case TypeFunc::Memory:
3414     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
3415   }
3416   ShouldNotReachHere();
3417   return NULL;
3418 }
3419 
3420 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3421   trailing->_kind = TrailingStore;
3422   leading->_kind = LeadingStore;
3423 #ifdef ASSERT
3424   trailing->_pair_idx = leading->_idx;
3425   leading->_pair_idx = leading->_idx;
3426 #endif
3427 }
3428 
3429 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3430   trailing->_kind = TrailingLoadStore;

3697   return (req() > RawStores);
3698 }
3699 
3700 void InitializeNode::set_complete(PhaseGVN* phase) {
3701   assert(!is_complete(), "caller responsibility");
3702   _is_complete = Complete;
3703 
3704   // After this node is complete, it contains a bunch of
3705   // raw-memory initializations.  There is no need for
3706   // it to have anything to do with non-raw memory effects.
3707   // Therefore, tell all non-raw users to re-optimize themselves,
3708   // after skipping the memory effects of this initialization.
3709   PhaseIterGVN* igvn = phase->is_IterGVN();
3710   if (igvn)  igvn->add_users_to_worklist(this);
3711 }
3712 
3713 // convenience function
3714 // return false if the init contains any stores already
3715 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
3716   InitializeNode* init = initialization();
3717   if (init == NULL || init->is_complete())  return false;


3718   init->remove_extra_zeroes();
3719   // for now, if this allocation has already collected any inits, bail:
3720   if (init->is_non_zero())  return false;
3721   init->set_complete(phase);
3722   return true;
3723 }
3724 
3725 void InitializeNode::remove_extra_zeroes() {
3726   if (req() == RawStores)  return;
3727   Node* zmem = zero_memory();
3728   uint fill = RawStores;
3729   for (uint i = fill; i < req(); i++) {
3730     Node* n = in(i);
3731     if (n->is_top() || n == zmem)  continue;  // skip
3732     if (fill < i)  set_req(fill, n);          // compact
3733     ++fill;
3734   }
3735   // delete any empty spaces created:
3736   while (fill < req()) {
3737     del_req(fill);

3875             // store node that we'd like to capture. We need to check
3876             // the uses of the MergeMemNode.
3877             mems.push(n);
3878           }
3879         } else if (n->is_Mem()) {
3880           Node* other_adr = n->in(MemNode::Address);
3881           if (other_adr == adr) {
3882             failed = true;
3883             break;
3884           } else {
3885             const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
3886             if (other_t_adr != NULL) {
3887               int other_alias_idx = phase->C->get_alias_index(other_t_adr);
3888               if (other_alias_idx == alias_idx) {
3889                 // A load from the same memory slice as the store right
3890                 // after the InitializeNode. We check the control of the
3891                 // object/array that is loaded from. If it's the same as
3892                 // the store control then we cannot capture the store.
3893                 assert(!n->is_Store(), "2 stores to same slice on same control?");
3894                 Node* base = other_adr;






3895                 assert(base->is_AddP(), "should be addp but is %s", base->Name());
3896                 base = base->in(AddPNode::Base);
3897                 if (base != NULL) {
3898                   base = base->uncast();
3899                   if (base->is_Proj() && base->in(0) == alloc) {
3900                     failed = true;
3901                     break;
3902                   }
3903                 }
3904               }
3905             }
3906           }
3907         } else {
3908           failed = true;
3909           break;
3910         }
3911       }
3912     }
3913   }
3914   if (failed) {

4459         //   z's_done      12  16  16  16    12  16    12
4460         //   z's_needed    12  16  16  16    16  16    16
4461         //   zsize          0   0   0   0     4   0     4
4462         if (next_full_store < 0) {
4463           // Conservative tack:  Zero to end of current word.
4464           zeroes_needed = align_up(zeroes_needed, BytesPerInt);
4465         } else {
4466           // Zero to beginning of next fully initialized word.
4467           // Or, don't zero at all, if we are already in that word.
4468           assert(next_full_store >= zeroes_needed, "must go forward");
4469           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
4470           zeroes_needed = next_full_store;
4471         }
4472       }
4473 
4474       if (zeroes_needed > zeroes_done) {
4475         intptr_t zsize = zeroes_needed - zeroes_done;
4476         // Do some incremental zeroing on rawmem, in parallel with inits.
4477         zeroes_done = align_down(zeroes_done, BytesPerInt);
4478         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,


4479                                               zeroes_done, zeroes_needed,
4480                                               phase);
4481         zeroes_done = zeroes_needed;
4482         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
4483           do_zeroing = false;   // leave the hole, next time
4484       }
4485     }
4486 
4487     // Collect the store and move on:
4488     phase->replace_input_of(st, MemNode::Memory, inits);
4489     inits = st;                 // put it on the linearized chain
4490     set_req(i, zmem);           // unhook from previous position
4491 
4492     if (zeroes_done == st_off)
4493       zeroes_done = next_init_off;
4494 
4495     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
4496 
4497     #ifdef ASSERT
4498     // Various order invariants.  Weaker than stores_are_sane because

4518   remove_extra_zeroes();        // clear out all the zmems left over
4519   add_req(inits);
4520 
4521   if (!(UseTLAB && ZeroTLAB)) {
4522     // If anything remains to be zeroed, zero it all now.
4523     zeroes_done = align_down(zeroes_done, BytesPerInt);
4524     // if it is the last unused 4 bytes of an instance, forget about it
4525     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
4526     if (zeroes_done + BytesPerLong >= size_limit) {
4527       AllocateNode* alloc = allocation();
4528       assert(alloc != NULL, "must be present");
4529       if (alloc != NULL && alloc->Opcode() == Op_Allocate) {
4530         Node* klass_node = alloc->in(AllocateNode::KlassNode);
4531         ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
4532         if (zeroes_done == k->layout_helper())
4533           zeroes_done = size_limit;
4534       }
4535     }
4536     if (zeroes_done < size_limit) {
4537       rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,


4538                                             zeroes_done, size_in_bytes, phase);
4539     }
4540   }
4541 
4542   set_complete(phase);
4543   return rawmem;
4544 }
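A minimal standalone sketch of the word-alignment bookkeeping used in the zeroing loop above; the helper names and the fixed 4-byte unit stand in for HotSpot's align_down/align_up on BytesPerInt and are assumptions made only for this illustration.

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-ins for align_down/align_up on 4-byte (BytesPerInt) units.
    static int64_t align_down4(int64_t off) { return off & ~int64_t(3); }
    static int64_t align_up4(int64_t off)   { return (off + 3) & ~int64_t(3); }

    int main() {
      int64_t zeroes_done   = 10;  // bytes already known to be zero (not word aligned)
      int64_t zeroes_needed = 13;  // bytes that must be zero before the next store
      // Round the gap outward to whole words, as the incremental zeroing above does.
      int64_t lo = align_down4(zeroes_done);   // 8
      int64_t hi = align_up4(zeroes_needed);   // 16
      assert(lo == 8 && hi == 16);
      zeroes_done = hi;            // after the clear, everything below hi is zero
      assert(zeroes_done >= zeroes_needed);
      return 0;
    }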
4545 
4546 
4547 #ifdef ASSERT
4548 bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
4549   if (is_complete())
4550     return true;                // stores could be anything at this point
4551   assert(allocation() != NULL, "must be present");
4552   intptr_t last_off = allocation()->minimum_header_size();
4553   for (uint i = InitializeNode::RawStores; i < req(); i++) {
4554     Node* st = in(i);
4555     intptr_t st_off = get_store_offset(st, phase);
4556     if (st_off < 0)  continue;  // ignore dead garbage
4557     if (last_off > st_off) {

  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciFlatArrayKlass.hpp"
  27 #include "classfile/javaClasses.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "gc/shared/barrierSet.hpp"
  31 #include "gc/shared/c2/barrierSetC2.hpp"
  32 #include "gc/shared/tlab_globals.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/cfgnode.hpp"
  39 #include "opto/regalloc.hpp"
  40 #include "opto/compile.hpp"
  41 #include "opto/connode.hpp"
  42 #include "opto/convertnode.hpp"
  43 #include "opto/inlinetypenode.hpp"
  44 #include "opto/loopnode.hpp"
  45 #include "opto/machnode.hpp"
  46 #include "opto/matcher.hpp"
  47 #include "opto/memnode.hpp"
  48 #include "opto/mulnode.hpp"
  49 #include "opto/narrowptrnode.hpp"
  50 #include "opto/phaseX.hpp"
  51 #include "opto/regmask.hpp"
  52 #include "opto/rootnode.hpp"
  53 #include "opto/vectornode.hpp"
  54 #include "utilities/align.hpp"
  55 #include "utilities/copy.hpp"
  56 #include "utilities/macros.hpp"
  57 #include "utilities/powerOfTwo.hpp"
  58 #include "utilities/vmError.hpp"
  59 
  60 // Portions of code courtesy of Clifford Click
  61 
  62 // Optimization - Graph Style
  63 

 226       // clone the Phi with our address type
 227       result = mphi->split_out_instance(t_adr, igvn);
 228     } else {
 229       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
 230     }
 231   }
 232   return result;
 233 }
 234 
 235 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
 236   uint alias_idx = phase->C->get_alias_index(tp);
 237   Node *mem = mmem;
 238 #ifdef ASSERT
 239   {
 240     // Check that current type is consistent with the alias index used during graph construction
 241     assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
 242     bool consistent =  adr_check == NULL || adr_check->empty() ||
 243                        phase->C->must_alias(adr_check, alias_idx );
 244     // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
 245     if( !consistent && adr_check != NULL && !adr_check->empty() &&
 246         tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
 247         adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
 248         ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
 249           adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
 250           adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
 251       // don't assert if it is dead code.
 252       consistent = true;
 253     }
 254     if( !consistent ) {
 255       st->print("alias_idx==%d, adr_check==", alias_idx);
 256       if( adr_check == NULL ) {
 257         st->print("NULL");
 258       } else {
 259         adr_check->dump();
 260       }
 261       st->cr();
 262       print_alias_types();
 263       assert(consistent, "adr_check must match alias idx");
 264     }
 265   }
 266 #endif

 874          "use LoadKlassNode instead");
 875   assert(!(adr_type->isa_aryptr() &&
 876            adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
 877          "use LoadRangeNode instead");
 878   // Check control edge of raw loads
 879   assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
 880           // oop will be recorded in oop map if load crosses safepoint
 881           rt->isa_oopptr() || is_immutable_value(adr),
 882           "raw memory operations should have control edge");
 883   LoadNode* load = NULL;
 884   switch (bt) {
 885   case T_BOOLEAN: load = new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 886   case T_BYTE:    load = new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 887   case T_INT:     load = new LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 888   case T_CHAR:    load = new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 889   case T_SHORT:   load = new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 890   case T_LONG:    load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency); break;
 891   case T_FLOAT:   load = new LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
 892   case T_DOUBLE:  load = new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
 893   case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency); break;
 894   case T_INLINE_TYPE:
 895   case T_OBJECT:
 896 #ifdef _LP64
 897     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
 898       load = new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency);
 899     } else
 900 #endif
 901     {
 902       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should not be a narrow oop or narrow klass here");
 903       load = new LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
 904     }
 905     break;
 906   default:
 907     ShouldNotReachHere();
 908     break;
 909   }
 910   assert(load != NULL, "LoadNode should have been created");
 911   if (unaligned) {
 912     load->set_unaligned_access();
 913   }
 914   if (mismatched) {

1002 
1003     LoadNode* ld = clone()->as_Load();
1004     Node* addp = in(MemNode::Address)->clone();
1005     if (ac->as_ArrayCopy()->is_clonebasic()) {
1006       assert(ld_alloc != NULL, "need an alloc");
1007       assert(addp->is_AddP(), "address must be addp");
1008       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1009       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1010       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1011       addp->set_req(AddPNode::Base, src);
1012       addp->set_req(AddPNode::Address, src);
1013     } else {
1014       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1015              ac->as_ArrayCopy()->is_copyof_validated() ||
1016              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1017       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1018       addp->set_req(AddPNode::Base, src);
1019       addp->set_req(AddPNode::Address, src);
1020 
1021       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1022       BasicType ary_elem = ary_t->klass()->as_array_klass()->element_type()->basic_type();
1023       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1024       uint shift  = exact_log2(type2aelembytes(ary_elem));
1025       if (ary_t->klass()->is_flat_array_klass()) {
1026         ciFlatArrayKlass* vak = ary_t->klass()->as_flat_array_klass();
1027         shift = vak->log2_element_size();
1028       }
1029 
1030       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1031 #ifdef _LP64
1032       diff = phase->transform(new ConvI2LNode(diff));
1033 #endif
1034       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1035 
1036       Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1037       addp->set_req(AddPNode::Offset, offset);
1038     }
1039     addp = phase->transform(addp);
1040 #ifdef ASSERT
1041     const TypePtr* adr_type = phase->type(addp)->is_ptr();
1042     ld->_adr_type = adr_type;
1043 #endif
1044     ld->set_req(MemNode::Address, addp);
1045     ld->set_req(0, ctl);
1046     ld->set_req(MemNode::Memory, mem);
1047     // load depends on the tests that validate the arraycopy
1048     ld->_control_dependency = UnknownControl;
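A small self-contained illustration of the offset rebasing performed above when the load is redirected from the arraycopy destination back to its source; the 16-byte header and 4-byte elements are assumptions made only for this example.

    #include <cassert>
    #include <cstdint>

    int main() {
      const int shift = 2;                  // log2 of an assumed 4-byte element size
      int64_t src_pos = 3, dst_pos = 7;     // arraycopy(src, 3, dst, 7, len)
      int64_t dst_off = 16 + 7 * 4;         // offset of dst[7], assuming a 16-byte header
      // Rebase exactly as above: add (SrcPos - DestPos) scaled by the element size.
      int64_t src_off = dst_off + ((src_pos - dst_pos) << shift);
      assert(src_off == 16 + 3 * 4);        // lands on src[3]
      return 0;
    }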

1128         // Same base, same offset.
1129         // Possible improvement for arrays: check index value instead of absolute offset.
1130 
1131         // At this point we have proven something like this setup:
1132         //   B = << base >>
1133         //   L =  LoadQ(AddP(Check/CastPP(B), #Off))
1134         //   S = StoreQ(AddP(             B , #Off), V)
1135         // (Actually, we haven't yet proven the Q's are the same.)
1136         // In other words, we are loading from a casted version of
1137         // the same pointer-and-offset that we stored to.
1138         // Casted version may carry a dependency and it is respected.
1139         // Thus, we are able to replace L by V.
1140       }
1141       // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1142       if (store_Opcode() != st->Opcode()) {
1143         return NULL;
1144       }
1145       // LoadVector/StoreVector needs additional check to ensure the types match.
1146       if (store_Opcode() == Op_StoreVector) {
1147         const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
1148         const TypeVect* out_vt = is_Load() ? as_LoadVector()->vect_type() : as_StoreVector()->vect_type();
1149         if (in_vt != out_vt) {
1150           return NULL;
1151         }
1152       }
1153       return st->in(MemNode::ValueIn);
1154     }
1155 
1156     // A load from a freshly-created object always returns zero.
1157     // (This can happen after LoadNode::Ideal resets the load's memory input
1158     // to find_captured_store, which returned InitializeNode::zero_memory.)
1159     if (st->is_Proj() && st->in(0)->is_Allocate() &&
1160         (st->in(0) == ld_alloc) &&
1161         (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1162       // return a zero value for the load's basic type
1163       // (This is one of the few places where a generic PhaseTransform
1164       // can create new nodes.  Think of it as lazily manifesting
1165       // virtually pre-existing constants.)
1166       assert(memory_type() != T_INLINE_TYPE, "should not be used for inline types");
1167       Node* default_value = ld_alloc->in(AllocateNode::DefaultValue);
1168       if (default_value != NULL) {
1169         return default_value;
1170       }
1171       assert(ld_alloc->in(AllocateNode::RawDefaultValue) == NULL, "raw default value must be null when there is no default value");
1172       if (memory_type() != T_VOID) {
1173         if (ReduceBulkZeroing || find_array_copy_clone(phase, ld_alloc, in(MemNode::Memory)) == NULL) {
1174           // If ReduceBulkZeroing is disabled, we must check that the allocation does not belong to
1175           // an ArrayCopyNode clone. If it does, we cannot assume the memory is zero since the
1176           // initialization is done by the ArrayCopyNode.
1177           return phase->zerocon(memory_type());
1178         }
1179       } else {
1180         // TODO: materialize all-zero vector constant
1181         assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1182       }
1183     }
1184 
1185     // A load from an initialization barrier can match a captured store.
1186     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1187       InitializeNode* init = st->in(0)->as_Initialize();
1188       AllocateNode* alloc = init->allocation();
1189       if ((alloc != NULL) && (alloc == ld_alloc)) {
1190         // examine a captured store value
1191         st = init->find_captured_store(ld_off, memory_size(), phase);

1219 //----------------------is_instance_field_load_with_local_phi------------------
1220 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1221   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1222       in(Address)->is_AddP() ) {
1223     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1224     // Only instances and boxed values.
1225     if( t_oop != NULL &&
1226         (t_oop->is_ptr_to_boxed_value() ||
1227          t_oop->is_known_instance_field()) &&
1228         t_oop->offset() != Type::OffsetBot &&
1229         t_oop->offset() != Type::OffsetTop) {
1230       return true;
1231     }
1232   }
1233   return false;
1234 }
1235 
1236 //------------------------------Identity---------------------------------------
1237 // Loads are identity if previous store is to same address
1238 Node* LoadNode::Identity(PhaseGVN* phase) {
1239   // Loading from an InlineTypePtr? The InlineTypePtr has the values of
1240   // all fields as input. Look for the field with matching offset.
1241   Node* addr = in(Address);
1242   intptr_t offset;
1243   Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset);
1244   if (base != NULL && base->is_InlineTypePtr() && offset > oopDesc::klass_offset_in_bytes()) {
1245     Node* value = base->as_InlineTypePtr()->field_value_by_offset((int)offset, true);
1246     if (value->is_InlineType()) {
1247       // Non-flattened inline type field
1248       InlineTypeNode* vt = value->as_InlineType();
1249       if (vt->is_allocated(phase)) {
1250         value = vt->get_oop();
1251       } else {
1252         // Not yet allocated, bail out
1253         value = NULL;
1254       }
1255     }
1256     if (value != NULL) {
1257       if (Opcode() == Op_LoadN) {
1258         // Encode oop value if we are loading a narrow oop
1259         assert(!phase->type(value)->isa_narrowoop(), "should already be decoded");
1260         value = phase->transform(new EncodePNode(value, bottom_type()));
1261       }
1262       return value;
1263     }
1264   }
1265 
1266   // If the previous store-maker is the right kind of Store, and the store is
1267   // to the same address, then we are equal to the value stored.
1268   Node* mem = in(Memory);
1269   Node* value = can_see_stored_value(mem, phase);
1270   if( value ) {
1271     // byte, short & char stores truncate naturally.
1272     // A load has to load the truncated value which requires
1273     // some sort of masking operation and that requires an
1274     // Ideal call instead of an Identity call.
1275     if (memory_size() < BytesPerInt) {
1276       // If the input to the store does not fit with the load's result type,
1277       // it must be truncated via an Ideal call.
1278       if (!phase->type(value)->higher_equal(phase->type(this)))
1279         return this;
1280     }
1281     // (This works even when value is a Con, but LoadNode::Value
1282     // usually runs first, producing the singleton type of the Con.)
1283     return value;
1284   }
1285 
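A toy model, not the C2 machinery, of the identity rule above: when the nearest store writes the same address, the load is simply the stored value.

    #include <cassert>

    struct Store { int* addr; int val; };   // a simplified "previous store-maker"

    static int forward_or_load(const Store& last, int* addr) {
      return (last.addr == addr) ? last.val : *addr;   // forward when addresses match
    }

    int main() {
      int cell = 0;
      Store st{&cell, 42};
      cell = st.val;                        // the store itself
      assert(forward_or_load(st, &cell) == 42);
      return 0;
    }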

1955       }
1956     }
1957 
1958     // Don't do this for integer types. There is only potential profit if
1959     // the element type t is lower than _type; that is, for int types, if _type is
1960     // more restrictive than t.  This only happens here if one is short and the other
1961     // char (both 16 bits), and in those cases we've made an intentional decision
1962     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
1963     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
1964     //
1965     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
1966     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
1967     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
1968     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
1969     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
1970     // In fact, that could have been the original type of p1, and p1 could have
1971     // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
1972     // expression (LShiftL quux 3) independently optimized to the constant 8.
1973     if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
1974         && (_type->isa_vect() == NULL)
1975         && t->isa_inlinetype() == NULL
1976         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
1977       // t might actually be lower than _type, if _type is a unique
1978       // concrete subclass of abstract class t.
1979       if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
1980         const Type* jt = t->join_speculative(_type);
1981         // In any case, do not allow the join, per se, to empty out the type.
1982         if (jt->empty() && !t->empty()) {
1983           // This can happen if an interface-typed array narrows to a class type.
1984           jt = _type;
1985         }
1986 #ifdef ASSERT
1987         if (phase->C->eliminate_boxing() && adr->is_AddP()) {
1988           // The pointers in the autobox arrays are always non-null
1989           Node* base = adr->in(AddPNode::Base);
1990           if ((base != NULL) && base->is_DecodeN()) {
1991             // Get LoadN node which loads IntegerCache.cache field
1992             base = base->in(1);
1993           }
1994           if ((base != NULL) && base->is_Con()) {
1995             const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
1996             if ((base_type != NULL) && base_type->is_autobox_cache()) {
1997               // It could be narrow oop
1998               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
1999             }
2000           }
2001         }
2002 #endif
2003         return jt;
2004       }
2005     }
2006   } else if (tp->base() == Type::InstPtr) {
2007     assert( off != Type::OffsetBot ||
2008             // arrays can be cast to Objects
2009             tp->is_oopptr()->klass()->is_java_lang_Object() ||
2010             tp->is_oopptr()->klass() == ciEnv::current()->Class_klass() ||
2011             // unsafe field access may not have a constant offset
2012             C->has_unsafe_access(),
2013             "Field accesses must be precise" );
2014     // For oop loads, we expect the _type to be precise.
2015 

2016     const TypeInstPtr* tinst = tp->is_instptr();
2017     BasicType bt = memory_type();
2018 
2019     // Optimize loads from constant fields.
2020     ciObject* const_oop = tinst->const_oop();
2021     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) {
2022       ciType* mirror_type = const_oop->as_instance()->java_mirror_type();
2023       if (mirror_type != NULL) {
2024         const Type* const_oop = NULL;
2025         ciInlineKlass* vk = mirror_type->is_inlinetype() ? mirror_type->as_inline_klass() : NULL;
2026         // Fold default value loads
2027         if (vk != NULL && off == vk->default_value_offset()) {
2028           const_oop = TypeInstPtr::make(vk->default_instance());
2029         }
2030         // Fold class mirror loads
2031         if (off == java_lang_Class::primary_mirror_offset()) {
2032           const_oop = (vk == NULL) ? TypePtr::NULL_PTR : TypeInstPtr::make(vk->ref_instance());
2033         } else if (off == java_lang_Class::secondary_mirror_offset()) {
2034           const_oop = (vk == NULL) ? TypePtr::NULL_PTR : TypeInstPtr::make(vk->val_instance());
2035         }
2036         if (const_oop != NULL) {
2037           return (bt == T_NARROWOOP) ? const_oop->make_narrowoop() : const_oop;
2038         }
2039       }
2040       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt);
2041       if (con_type != NULL) {
2042         return con_type;
2043       }
2044     }
2045   } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
2046     assert( off != Type::OffsetBot ||
2047             // arrays can be cast to Objects
2048             tp->is_klassptr()->klass() == NULL ||
2049             tp->is_klassptr()->klass()->is_java_lang_Object() ||
2050             // also allow array-loading from the primary supertype
2051             // array during subtype checks
2052             Opcode() == Op_LoadKlass,
2053             "Field accesses must be precise" );
2054     // For klass/static loads, we expect the _type to be precise
2055   } else if (tp->base() == Type::RawPtr && !StressReflectiveCode) {
2056     if (adr->is_Load() && off == 0) {
2057       /* With mirrors being accessed through an indirection in the Klass*,
2058        * the VM now uses two loads: LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset)).
2059        * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
2060        *
2061        * So check the type and klass of the node before the LoadP.
2062        */
2063       Node* adr2 = adr->in(MemNode::Address);
2064       const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
2065       if (tkls != NULL) {
2066         ciKlass* klass = tkls->klass();
2067         if (klass != NULL && klass->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
2068           assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2069           assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2070           return TypeInstPtr::make(klass->java_mirror());
2071         }
2072       }
2073     } else {
2074       // Check for a load of the default value offset from the InlineKlassFixedBlock:
2075       // LoadI(LoadP(inline_klass, adr_inlineklass_fixed_block_offset), default_value_offset_offset)
2076       intptr_t offset = 0;
2077       Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2078       if (base != NULL && base->is_Load() && offset == in_bytes(InlineKlass::default_value_offset_offset())) {
2079         const TypeKlassPtr* tkls = phase->type(base->in(MemNode::Address))->isa_klassptr();
2080         if (tkls != NULL && tkls->is_loaded() && tkls->klass_is_exact() && tkls->isa_inlinetype() &&
2081             tkls->offset() == in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())) {
2082           assert(base->Opcode() == Op_LoadP, "must load an oop from klass");
2083           assert(Opcode() == Op_LoadI, "must load an int from fixed block");
2084           return TypeInt::make(tkls->klass()->as_inline_klass()->default_value_offset());
2085         }
2086       }
2087     }
2088   }
2089 
2090   const TypeKlassPtr *tkls = tp->isa_klassptr();
2091   if (tkls != NULL && !StressReflectiveCode) {
2092     ciKlass* klass = tkls->klass();
2093     if (tkls->is_loaded() && tkls->klass_is_exact()) {
2094       // We are loading a field from a Klass metaobject whose identity
2095       // is known at compile time (the type is "exact" or "precise").
2096       // Check for fields we know are maintained as constants by the VM.
2097       if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2098         // The field is Klass::_super_check_offset.  Return its (constant) value.
2099         // (Folds up type checking code.)
2100         assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2101         return TypeInt::make(klass->super_check_offset());
2102       }
2103       // Compute index into primary_supers array
2104       juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2105       // Check for overflowing; use unsigned compare to handle the negative case.
2106       if( depth < ciKlass::primary_super_limit() ) {
2107         // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2108         // (Folds up type checking code.)
2109         assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2110         ciKlass *ss = klass->super_of_depth(depth);
2111         return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
2112       }
2113       const Type* aift = load_array_final_field(tkls, klass);
2114       if (aift != NULL)  return aift;
2115     }
2116 
2117     // We can still check if we are loading from the primary_supers array at a
2118     // shallow enough depth.  Even though the klass is not exact, entries less
2119     // than or equal to its super depth are correct.
2120     if (tkls->is_loaded()) {
2121       ciType *inner = klass;
2122       while( inner->is_obj_array_klass() )
2123         inner = inner->as_obj_array_klass()->base_element_type();
2124       if( inner->is_instance_klass() &&
2125           !inner->as_instance_klass()->flags().is_interface() ) {
2126         // Compute index into primary_supers array
2127         juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2128         // Check for overflowing; use unsigned compare to handle the negative case.
2129         if( depth < ciKlass::primary_super_limit() &&
2130             depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
2131           // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2132           // (Folds up type checking code.)
2133           assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2134           ciKlass *ss = klass->super_of_depth(depth);
2135           return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
2136         }
2137       }
2138     }
2139 
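The "unsigned compare handles the negative case" idiom above, spelled out on its own; the limit of 8 is only an assumed value for the example.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t primary_super_limit = 8;   // assumed limit, not the VM constant
      // An offset below the _primary_supers array gives a negative depth; the cast
      // to an unsigned type turns it into a huge value, so one compare rejects it.
      int64_t delta = -16;
      uint32_t depth = (uint32_t)(delta / (int64_t)sizeof(void*));
      assert(!(depth < primary_super_limit));
      // A valid offset inside the array yields a small depth that passes the check.
      depth = (uint32_t)((3 * (int64_t)sizeof(void*)) / (int64_t)sizeof(void*));
      assert(depth < primary_super_limit);
      return 0;
    }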
2140     // If the type is enough to determine that the thing is not an array,

2165   if (ReduceFieldZeroing || is_instance || is_boxed_value) {
2166     Node* value = can_see_stored_value(mem,phase);
2167     if (value != NULL && value->is_Con()) {
2168       assert(value->bottom_type()->higher_equal(_type),"sanity");
2169       return value->bottom_type();
2170     }
2171   }
2172 
2173   bool is_vect = (_type->isa_vect() != NULL);
2174   if (is_instance && !is_vect) {
2175     // If we have an instance type and our memory input is the
2176     // program's initial memory state, there is no matching store,
2177     // so just return a zero of the appropriate type -
2178     // except if it is vectorized - then we have no zero constant.
2179     Node *mem = in(MemNode::Memory);
2180     if (mem->is_Parm() && mem->in(0)->is_Start()) {
2181       assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2182       return Type::get_zero_type(_type->basic_type());
2183     }
2184   }

2185   Node* alloc = is_new_object_mark_load(phase);
2186   if (alloc != NULL) {
2187     if (EnableValhalla) {
2188       // The mark word may contain property bits (inline, flat, null-free)
2189       Node* klass_node = alloc->in(AllocateNode::KlassNode);
2190       const TypeKlassPtr* tkls = phase->type(klass_node)->is_klassptr();
2191       ciKlass* klass = tkls->klass();
2192       if (klass != NULL && klass->is_loaded() && tkls->klass_is_exact()) {
2193         return TypeX::make(klass->prototype_header().value());
2194       }
2195     } else {
2196       return TypeX::make(markWord::prototype().value());
2197     }
2198   }
2199 
2200   return _type;
2201 }
2202 
2203 //------------------------------match_edge-------------------------------------
2204 // Do we Match on this edge index or not?  Match only the address.
2205 uint LoadNode::match_edge(uint idx) const {
2206   return idx == MemNode::Address;
2207 }
2208 
2209 //--------------------------LoadBNode::Ideal--------------------------------------
2210 //
2211 //  If the previous store is to the same address as this load,
2212 //  and the value stored was larger than a byte, replace this load
2213 //  with the value stored truncated to a byte.  If no truncation is
2214 //  needed, the replacement is done in LoadNode::Identity().
2215 //
2216 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2217   Node* mem = in(MemNode::Memory);

2328   return LoadNode::Ideal(phase, can_reshape);
2329 }
2330 
2331 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2332   Node* mem = in(MemNode::Memory);
2333   Node* value = can_see_stored_value(mem,phase);
2334   if (value != NULL && value->is_Con() &&
2335       !value->bottom_type()->higher_equal(_type)) {
2336     // If the input to the store does not fit with the load's result type,
2337     // it must be truncated. We can't delay until Ideal call since
2338     // a singleton Value is needed for split_thru_phi optimization.
2339     int con = value->get_int();
2340     return TypeInt::make((con << 16) >> 16);
2341   }
2342   return LoadNode::Value(phase);
2343 }
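The short truncation used above, written out as plain arithmetic; the cast form is equivalent to the (con << 16) >> 16 in the code.

    #include <cassert>
    #include <cstdint>

    static int32_t truncate_to_short(int32_t con) {
      return (int32_t)(int16_t)con;         // keep the low 16 bits, sign-extended
    }

    int main() {
      assert(truncate_to_short(0x12345) == 0x2345);    // positive short survives
      assert(truncate_to_short(0x18000) == -32768);    // high bit set -> negative short
      return 0;
    }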
2344 
2345 //=============================================================================
2346 //----------------------------LoadKlassNode::make------------------------------
2347 // Polymorphic factory method:
2348 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
2349                           const TypeKlassPtr* tk) {
2350   // sanity check the alias category against the created node type
2351   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
2352   assert(adr_type != NULL, "expecting TypeKlassPtr");
2353 #ifdef _LP64
2354   if (adr_type->is_ptr_to_narrowklass()) {
2355     assert(UseCompressedClassPointers, "no compressed klasses");
2356     Node* load_klass = gvn.transform(new LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2357     return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2358   }
2359 #endif
2360   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should not be a narrow klass or narrow oop here");
2361   return new LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
2362 }
2363 
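A conceptual round-trip of a compressed (narrow) klass pointer, the kind of value the LoadNKlass/DecodeNKlass pair above produces; the base and shift values here are made up for the example.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t base = 0x800000000ULL;   // assumed encoding base
      const int shift = 3;                    // assumed encoding shift
      uint64_t full = 0x800001230ULL;         // a klass address in the encodable range
      uint32_t narrow = (uint32_t)((full - base) >> shift);   // encode (store side)
      assert(base + ((uint64_t)narrow << shift) == full);     // decode round-trips
      return 0;
    }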
2364 //------------------------------Value------------------------------------------
2365 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
2366   return klass_value_common(phase);
2367 }
2368 
2369 // In most cases, LoadKlassNode does not have the control input set. If the control

2376   // Either input is TOP ==> the result is TOP
2377   const Type *t1 = phase->type( in(MemNode::Memory) );
2378   if (t1 == Type::TOP)  return Type::TOP;
2379   Node *adr = in(MemNode::Address);
2380   const Type *t2 = phase->type( adr );
2381   if (t2 == Type::TOP)  return Type::TOP;
2382   const TypePtr *tp = t2->is_ptr();
2383   if (TypePtr::above_centerline(tp->ptr()) ||
2384       tp->ptr() == TypePtr::Null)  return Type::TOP;
2385 
2386   // Return a more precise klass, if possible
2387   const TypeInstPtr *tinst = tp->isa_instptr();
2388   if (tinst != NULL) {
2389     ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
2390     int offset = tinst->offset();
2391     if (ik == phase->C->env()->Class_klass()
2392         && (offset == java_lang_Class::klass_offset() ||
2393             offset == java_lang_Class::array_klass_offset())) {
2394       // We are loading a special hidden field from a Class mirror object,
2395       // the field which points to the VM's Klass metaobject.
2396       bool null_free = false;
2397       ciType* t = tinst->java_mirror_type(&null_free);
2398       // java_mirror_type returns non-null for compile-time Class constants.
2399       if (t != NULL) {
2400         // constant oop => constant klass
2401         if (offset == java_lang_Class::array_klass_offset()) {
2402           if (t->is_void()) {
2403             // We cannot create a void array.  Since void is a primitive type return null
2404             // klass.  Users of this result need to do a null check on the returned klass.
2405             return TypePtr::NULL_PTR;
2406           }
2407           return TypeKlassPtr::make(ciArrayKlass::make(t, null_free));
2408         }
2409         if (!t->is_klass()) {
2410           // a primitive Class (e.g., int.class) has NULL for a klass field
2411           return TypePtr::NULL_PTR;
2412         }
2413         // (Folds up the 1st indirection in aClassConstant.getModifiers().)
2414         return TypeKlassPtr::make(t->as_klass());
2415       }
2416       // non-constant mirror, so we can't tell what's going on
2417     }
2418     if( !ik->is_loaded() )
2419       return _type;             // Bail out if not loaded
2420     if (offset == oopDesc::klass_offset_in_bytes()) {
2421       if (tinst->klass_is_exact()) {
2422         return TypeKlassPtr::make(ik);
2423       }
2424       // See if we can become precise: no subklasses and no interface
2425       // (Note:  We need to support verified interfaces.)
2426       if (!ik->is_interface() && !ik->has_subklass()) {
2427         // Add a dependence; if any subclass added we need to recompile
2428         if (!ik->is_final()) {
2429           // %%% should use stronger assert_unique_concrete_subtype instead
2430           phase->C->dependencies()->assert_leaf_type(ik);
2431         }
2432         // Return precise klass
2433         return TypeKlassPtr::make(ik);
2434       }
2435 
2436       // Return root of possible klass
2437       return TypeInstKlassPtr::make(TypePtr::NotNull, ik, Type::Offset(0), tinst->flatten_array());
2438     }
2439   }
2440 
2441   // Check for loading klass from an array
2442   const TypeAryPtr *tary = tp->isa_aryptr();
2443   if (tary != NULL) {
2444     ciKlass *tary_klass = tary->klass();
2445     if (tary_klass != NULL   // can be NULL when at BOTTOM or TOP
2446         && tary->offset() == oopDesc::klass_offset_in_bytes()) {
2447       if (tary->klass_is_exact()) {
2448         return TypeKlassPtr::make(tary_klass);
2449       }
2450       ciArrayKlass* ak = tary_klass->as_array_klass();
2451       // If the klass is an object array, we defer the question to the
2452       // array component klass.
2453       if (ak->is_obj_array_klass()) {
2454         assert(ak->is_loaded(), "");
2455         ciKlass *base_k = ak->as_obj_array_klass()->base_element_klass();
2456         if (base_k->is_loaded() && base_k->is_instance_klass()) {
2457           ciInstanceKlass *ik = base_k->as_instance_klass();
2458           // See if we can become precise: no subklasses and no interface
2459           // Do not fold klass loads from [LMyValue. The runtime type might be [QMyValue due to [QMyValue <: [LMyValue
2460           // and the klass for [QMyValue is not equal to the klass for [LMyValue.
2461           if (!ik->is_interface() && !ik->has_subklass() && (!ik->is_inlinetype() || ak->is_elem_null_free())) {
2462             // Add a dependence; if any subclass added we need to recompile
2463             if (!ik->is_final()) {
2464               phase->C->dependencies()->assert_leaf_type(ik);
2465             }
2466             // Return precise array klass
2467             return TypeKlassPtr::make(ak);
2468           }
2469         }
2470         return TypeAryKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0), tary->is_not_flat(), tary->is_not_null_free(), tary->is_null_free());
2471       } else if (ak->is_type_array_klass()) {

2472         return TypeKlassPtr::make(ak); // These are always precise
2473       }
2474     }
2475   }
2476 
2477   // Check for loading klass from an array klass
2478   const TypeKlassPtr *tkls = tp->isa_klassptr();
2479   if (tkls != NULL && !StressReflectiveCode) {
2480     if (!tkls->is_loaded()) {

2481       return _type;             // Bail out if not loaded
2482     }
2483     ciKlass* klass = tkls->klass();
2484     if( klass->is_obj_array_klass() &&
2485         tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2486       ciKlass* elem = klass->as_obj_array_klass()->element_klass();
2487       // // Always returning precise element type is incorrect,
2488       // // e.g., element type could be object and array may contain strings
2489       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2490 
2491       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2492       // according to the element type's subclassing.
2493       return TypeKlassPtr::make(tkls->ptr(), elem, Type::Offset(0));
2494     } else if (klass->is_flat_array_klass() &&
2495                tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2496       ciKlass* elem = klass->as_flat_array_klass()->element_klass();
2497       return TypeInstKlassPtr::make(tkls->ptr(), elem, Type::Offset(0), /* flatten_array= */ true);
2498     }
2499     if( klass->is_instance_klass() && tkls->klass_is_exact() &&
2500         tkls->offset() == in_bytes(Klass::super_offset())) {
2501       ciKlass* sup = klass->as_instance_klass()->super();
2502       // The field is Klass::_super.  Return its (constant) value.
2503       // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
2504       return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
2505     }
2506   }
2507 
2508   // Bailout case
2509   return LoadNode::Value(phase);
2510 }
2511 
2512 //------------------------------Identity---------------------------------------
2513 // To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
2514 // Also feed through the klass in Allocate(...klass...)._klass.
2515 Node* LoadKlassNode::Identity(PhaseGVN* phase) {
2516   return klass_identity_common(phase);
2517 }

2685 //=============================================================================
2686 //---------------------------StoreNode::make-----------------------------------
2687 // Polymorphic factory method:
2688 StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
2689   assert((mo == unordered || mo == release), "unexpected");
2690   Compile* C = gvn.C;
2691   assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
2692          ctl != NULL, "raw memory operations should have control edge");
2693 
2694   switch (bt) {
2695   case T_BOOLEAN: val = gvn.transform(new AndINode(val, gvn.intcon(0x1))); // Fall through to T_BYTE case
2696   case T_BYTE:    return new StoreBNode(ctl, mem, adr, adr_type, val, mo);
2697   case T_INT:     return new StoreINode(ctl, mem, adr, adr_type, val, mo);
2698   case T_CHAR:
2699   case T_SHORT:   return new StoreCNode(ctl, mem, adr, adr_type, val, mo);
2700   case T_LONG:    return new StoreLNode(ctl, mem, adr, adr_type, val, mo);
2701   case T_FLOAT:   return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
2702   case T_DOUBLE:  return new StoreDNode(ctl, mem, adr, adr_type, val, mo);
2703   case T_METADATA:
2704   case T_ADDRESS:
2705   case T_INLINE_TYPE:
2706   case T_OBJECT:
2707 #ifdef _LP64
2708     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2709       val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop()));
2710       return new StoreNNode(ctl, mem, adr, adr_type, val, mo);
2711     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
2712                (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
2713                 adr->bottom_type()->isa_rawptr())) {
2714       val = gvn.transform(new EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
2715       return new StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
2716     }
2717 #endif
2718     {
2719       return new StorePNode(ctl, mem, adr, adr_type, val, mo);
2720     }
2721   default:
2722     ShouldNotReachHere();
2723     return (StoreNode*)NULL;
2724   }
2725 }
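The boolean normalization in the T_BOOLEAN case above, reduced to plain integer arithmetic.

    #include <cassert>
    #include <cstdint>

    static int8_t store_boolean(int32_t val) {
      return (int8_t)(val & 0x1);           // AndI with 1, then the byte store
    }

    int main() {
      assert(store_boolean(0) == 0);
      assert(store_boolean(1) == 1);
      assert(store_boolean(0x100) == 0);    // only the low bit is kept
      assert(store_boolean(3) == 1);
      return 0;
    }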

2747 
2748   // Since they are not commoned, do not hash them:
2749   return NO_HASH;
2750 }
2751 
2752 //------------------------------Ideal------------------------------------------
2753 // Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x).
2754 // When a store immediately follows a relevant allocation/initialization,
2755 // try to capture it into the initialization, or hoist it above.
2756 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2757   Node* p = MemNode::Ideal_common(phase, can_reshape);
2758   if (p)  return (p == NodeSentinel) ? NULL : p;
2759 
2760   Node* mem     = in(MemNode::Memory);
2761   Node* address = in(MemNode::Address);
2762   Node* value   = in(MemNode::ValueIn);
2763   // Back-to-back stores to same address?  Fold em up.  Generally
2764   // unsafe if I have intervening uses...  Also disallowed for StoreCM
2765   // since they must follow each StoreP operation.  Redundant StoreCMs
2766   // are eliminated just before matching in final_graph_reshape.
2767   if (phase->C->get_adr_type(phase->C->get_alias_index(adr_type())) != TypeAryPtr::INLINES) {
2768     Node* st = mem;
2769     // If Store 'st' has more than one use, we cannot fold 'st' away.
2770     // For example, 'st' might be the final state at a conditional
2771     // return.  Or, 'st' might be used by some node which is live at
2772     // the same time 'st' is live, which might be unschedulable.  So,
2773     // require exactly ONE user until such time as we clone 'mem' for
2774     // each of 'mem's uses (thus making the exactly-1-user-rule hold
2775     // true).
2776     while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
2777       // Looking at a dead closed cycle of memory?
2778       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
2779       assert(Opcode() == st->Opcode() ||
2780              st->Opcode() == Op_StoreVector ||
2781              Opcode() == Op_StoreVector ||
2782              st->Opcode() == Op_StoreVectorScatter ||
2783              Opcode() == Op_StoreVectorScatter ||
2784              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
2785              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
2786              (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
2787              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) ||
2788              (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
2789              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
2790 
2791       if (st->in(MemNode::Address)->eqv_uncast(address) &&
2792           st->as_Store()->memory_size() <= this->memory_size()) {
2793         Node* use = st->raw_out(0);
2794         if (phase->is_IterGVN()) {
2795           phase->is_IterGVN()->rehash_node_delayed(use);
2796         }
2797         // It's OK to do this in the parser, since DU info is always accurate,
2798         // and the parser always refers to nodes via SafePointNode maps.
2799         use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
2800         return this;
2801       }
2802       st = st->in(MemNode::Memory);
2803     }
2804   }
2805 
2806 
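A toy version of the folding above: in a chain of stores to one location with no other readers of the intermediate states, only the last store to a slot is observable.

    #include <cassert>
    #include <vector>

    struct St { int slot; int val; };       // simplified store record

    static int final_value(const std::vector<St>& chain, int slot, int init) {
      int v = init;
      for (const St& s : chain) {
        if (s.slot == slot) v = s.val;      // a later store overwrites an earlier one
      }
      return v;
    }

    int main() {
      std::vector<St> chain = {{0, 1}, {0, 2}, {1, 9}, {0, 3}};
      assert(final_value(chain, 0, 0) == 3);   // the earlier stores to slot 0 were dead
      assert(final_value(chain, 1, 0) == 9);
      return 0;
    }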
2807   // Capture an unaliased, unconditional, simple store into an initializer.

2864   // Load then Store?  Then the Store is useless
2865   if (val->is_Load() &&
2866       val->in(MemNode::Address)->eqv_uncast(adr) &&
2867       val->in(MemNode::Memory )->eqv_uncast(mem) &&
2868       val->as_Load()->store_Opcode() == Opcode()) {
2869     result = mem;
2870   }
2871 
2872   // Two stores in a row of the same value?
2873   if (result == this &&
2874       mem->is_Store() &&
2875       mem->in(MemNode::Address)->eqv_uncast(adr) &&
2876       mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
2877       mem->Opcode() == Opcode()) {
2878     result = mem;
2879   }
2880 
2881   // Store of zero anywhere into a freshly-allocated object?
2882   // Then the store is useless.
2883   // (It must already have been captured by the InitializeNode.)
2884   if (result == this && ReduceFieldZeroing) {

2885     // a newly allocated object is already all-zeroes everywhere
2886     if (mem->is_Proj() && mem->in(0)->is_Allocate() &&
2887         (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::DefaultValue) == val)) {
2888       assert(!phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::DefaultValue) == NULL, "storing null to inline type array is forbidden");
2889       result = mem;
2890     }
2891 
2892     if (result == this && phase->type(val)->is_zero_type()) {
2893       // the store may also apply to zero-bits in an earlier object
2894       Node* prev_mem = find_previous_store(phase);
2895       // Steps (a), (b):  Walk past independent stores to find an exact match.
2896       if (prev_mem != NULL) {
2897         Node* prev_val = can_see_stored_value(prev_mem, phase);
2898         if (prev_val != NULL && prev_val == val) {
2899           // prev_val and val might differ by a cast; it would be good
2900           // to keep the more informative of the two.
2901           result = mem;
2902         }
2903       }
2904     }
2905   }
2906 
2907   PhaseIterGVN* igvn = phase->is_IterGVN();
2908   if (result != this && igvn != NULL) {
2909     MemBarNode* trailing = trailing_membar();
2910     if (trailing != NULL) {
2911 #ifdef ASSERT
2912       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();

3057 Node* StoreCMNode::Identity(PhaseGVN* phase) {
3058   // No need to card mark when storing a null ptr
3059   Node* my_store = in(MemNode::OopStore);
3060   if (my_store->is_Store()) {
3061     const Type *t1 = phase->type( my_store->in(MemNode::ValueIn) );
3062     if( t1 == TypePtr::NULL_PTR ) {
3063       return in(MemNode::Memory);
3064     }
3065   }
3066   return this;
3067 }
3068 
3069 //=============================================================================
3070 //------------------------------Ideal---------------------------------------
3071 Node *StoreCMNode::Ideal(PhaseGVN *phase, bool can_reshape){
3072   Node* progress = StoreNode::Ideal(phase, can_reshape);
3073   if (progress != NULL) return progress;
3074 
3075   Node* my_store = in(MemNode::OopStore);
3076   if (my_store->is_MergeMem()) {
3077     if (oop_alias_idx() != phase->C->get_alias_index(TypeAryPtr::INLINES) ||
3078         phase->C->flattened_accesses_share_alias()) {
3079       // The alias that was recorded is no longer accurate enough.
3080       Node* mem = my_store->as_MergeMem()->memory_at(oop_alias_idx());
3081       set_req_X(MemNode::OopStore, mem, phase);
3082       return this;
3083     }
3084   }
3085 
3086   return NULL;
3087 }
3088 
3089 //------------------------------Value-----------------------------------------
3090 const Type* StoreCMNode::Value(PhaseGVN* phase) const {
3091   // Either input is TOP ==> the result is TOP (checked in StoreNode::Value).
3092   // If extra input is TOP ==> the result is TOP
3093   const Type* t = phase->type(in(MemNode::OopStore));
3094   if (t == Type::TOP) {
3095     return Type::TOP;
3096   }
3097   return StoreNode::Value(phase);
3098 }
3099 
3100 
3101 //=============================================================================
3102 //----------------------------------SCMemProjNode------------------------------
3103 const Type* SCMemProjNode::Value(PhaseGVN* phase) const

3222 // Clearing a short array is faster with stores
3223 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3224   // Already know this is a large node, do not try to ideal it
3225   if (!IdealizeClearArrayNode || _is_large) return NULL;
3226 
3227   const int unit = BytesPerLong;
3228   const TypeX* t = phase->type(in(2))->isa_intptr_t();
3229   if (!t)  return NULL;
3230   if (!t->is_con())  return NULL;
3231   intptr_t raw_count = t->get_con();
3232   intptr_t size = raw_count;
3233   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
3234   // Clearing nothing uses the Identity call.
3235   // Negative clears are possible on dead ClearArrays
3236   // (see jck test stmt114.stmt11402.val).
3237   if (size <= 0 || size % unit != 0)  return NULL;
3238   intptr_t count = size / unit;
3239   // Length too long; communicate this to matchers and assemblers.
3240   // Assemblers are responsible for producing fast hardware clears for it.
3241   if (size > InitArrayShortSize) {
3242     return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true);
3243   } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
3244     return NULL;
3245   }
3246   Node *mem = in(1);
3247   if( phase->type(mem)==Type::TOP ) return NULL;
3248   Node *adr = in(3);
3249   const Type* at = phase->type(adr);
3250   if( at==Type::TOP ) return NULL;
3251   const TypePtr* atp = at->isa_ptr();
3252   // adjust atp to be the correct array element address type
3253   if (atp == NULL)  atp = TypePtr::BOTTOM;
3254   else              atp = atp->add_offset(Type::OffsetBot);
3255   // Get base for derived pointer purposes
3256   if( adr->Opcode() != Op_AddP ) Unimplemented();
3257   Node *base = adr->in(1);
3258 
3259   Node *val = in(4);
3260   Node *off  = phase->MakeConX(BytesPerLong);
3261   mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
3262   count--;
3263   while( count-- ) {
3264     mem = phase->transform(mem);
3265     adr = phase->transform(new AddPNode(base,adr,off));
3266     mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
3267   }
3268   return mem;
3269 }
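A standalone sketch of expanding a short constant-length clear into one 8-byte store per double-word, which is the shape of code the Ideal() transformation above emits.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      alignas(8) unsigned char buf[24];
      std::memset(buf, 0xAB, sizeof(buf));  // pretend the memory starts out dirty
      const intptr_t size = 24;             // constant byte count, a multiple of 8
      for (intptr_t off = 0; off < size; off += 8) {
        uint64_t zero = 0;
        std::memcpy(buf + off, &zero, 8);   // stands in for one StoreL of zero
      }
      for (unsigned char b : buf) assert(b == 0);
      return 0;
    }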
3270 
3271 //----------------------------step_through----------------------------------
3272 // Step *np past this ClearArray to its allocation's input memory edge if it
3273 // initializes a different instance; return false if it is the one we are looking for.
3274 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
3275   Node* n = *np;
3276   assert(n->is_ClearArray(), "sanity");
3277   intptr_t offset;
3278   AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
3279   // This method is called only before Allocate nodes are expanded
3280   // during macro nodes expansion. Before that ClearArray nodes are
3281   // only generated in PhaseMacroExpand::generate_arraycopy() (before
3282   // Allocate nodes are expanded) which follows allocations.
3283   assert(alloc != NULL, "should have allocation");
3284   if (alloc->_idx == instance_id) {
3285     // Can not bypass initialization of the instance we are looking for.
3286     return false;
3287   }
3288   // Otherwise skip it.
3289   InitializeNode* init = alloc->initialization();
3290   if (init != NULL)
3291     *np = init->in(TypeFunc::Memory);
3292   else
3293     *np = alloc->in(TypeFunc::Memory);
3294   return true;
3295 }
3296 
3297 //----------------------------clear_memory-------------------------------------
3298 // Generate code to initialize object storage to zero.
3299 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
3300                                    Node* val,
3301                                    Node* raw_val,
3302                                    intptr_t start_offset,
3303                                    Node* end_offset,
3304                                    PhaseGVN* phase) {
3305   intptr_t offset = start_offset;
3306 
3307   int unit = BytesPerLong;
3308   if ((offset % unit) != 0) {
3309     Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
3310     adr = phase->transform(adr);
3311     const TypePtr* atp = TypeRawPtr::BOTTOM;
3312     if (val != NULL) {
3313       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
3314       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
3315     } else {
3316       assert(raw_val == NULL, "raw_val must be null when val is null");
3317       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
3318     }
3319     mem = phase->transform(mem);
3320     offset += BytesPerInt;
3321   }
3322   assert((offset % unit) == 0, "");
3323 
3324   // Initialize the remaining stuff, if any, with a ClearArray.
3325   return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase);
3326 }
3327 
3328 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
3329                                    Node* raw_val,
3330                                    Node* start_offset,
3331                                    Node* end_offset,
3332                                    PhaseGVN* phase) {
3333   if (start_offset == end_offset) {
3334     // nothing to do
3335     return mem;
3336   }
3337 
3338   int unit = BytesPerLong;
3339   Node* zbase = start_offset;
3340   Node* zend  = end_offset;
3341 
3342   // Scale to the unit required by the CPU:
3343   if (!Matcher::init_array_count_is_in_bytes) {
3344     Node* shift = phase->intcon(exact_log2(unit));
3345     zbase = phase->transform(new URShiftXNode(zbase, shift) );
3346     zend  = phase->transform(new URShiftXNode(zend,  shift) );
3347   }
3348 
3349   // Bulk clear double-words
3350   Node* zsize = phase->transform(new SubXNode(zend, zbase) );
3351   Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
3352   if (raw_val == NULL) {
3353     raw_val = phase->MakeConX(0);
3354   }
3355   mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false);
3356   return phase->transform(mem);
3357 }
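The byte-to-double-word scaling above, on its own: when the matcher counts in double-words rather than bytes, byte offsets are shifted right by log2(BytesPerLong) before taking the difference.

    #include <cassert>
    #include <cstdint>

    int main() {
      const int unit_shift = 3;                     // exact_log2(8)
      intptr_t zbase_bytes = 16, zend_bytes = 80;
      intptr_t zbase = zbase_bytes >> unit_shift;   // 2 double-words in
      intptr_t zend  = zend_bytes  >> unit_shift;   // 10 double-words in
      assert(zend - zbase == 8);                    // 64 bytes == 8 double-words to clear
      return 0;
    }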
3358 
3359 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
3360                                    Node* val,
3361                                    Node* raw_val,
3362                                    intptr_t start_offset,
3363                                    intptr_t end_offset,
3364                                    PhaseGVN* phase) {
3365   if (start_offset == end_offset) {
3366     // nothing to do
3367     return mem;
3368   }
3369 
3370   assert((end_offset % BytesPerInt) == 0, "odd end offset");
3371   intptr_t done_offset = end_offset;
3372   if ((done_offset % BytesPerLong) != 0) {
3373     done_offset -= BytesPerInt;
3374   }
3375   if (done_offset > start_offset) {
3376     mem = clear_memory(ctl, mem, dest, val, raw_val,
3377                        start_offset, phase->MakeConX(done_offset), phase);
3378   }
3379   if (done_offset < end_offset) { // emit the final 32-bit store
3380     Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
3381     adr = phase->transform(adr);
3382     const TypePtr* atp = TypeRawPtr::BOTTOM;
3383     if (val != NULL) {
3384       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
3385       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
3386     } else {
3387       assert(raw_val == NULL, "raw_val must be null when val is null");
3388       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
3389     }
3390     mem = phase->transform(mem);
3391     done_offset += BytesPerInt;
3392   }
3393   assert(done_offset == end_offset, "");
3394   return mem;
3395 }
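The range split computed above, checked in isolation: clear [start, done) in 8-byte units, then emit one trailing 4-byte store when the end is 4-byte but not 8-byte aligned.

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t BytesPerInt = 4, BytesPerLong = 8;
      intptr_t start = 16, end = 44;                // 28 bytes to clear
      assert(end % BytesPerInt == 0);               // end is always int-aligned
      intptr_t done = end;
      if (done % BytesPerLong != 0) done -= BytesPerInt;
      assert(done == 40);                           // bulk-cleared range is [16, 40)
      assert(end - done == BytesPerInt);            // plus one final 32-bit store
      return 0;
    }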
3396 
3397 //=============================================================================
3398 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
3399   : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
3400     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
3401 #ifdef ASSERT
3402   , _pair_idx(0)
3403 #endif
3404 {
3405   init_class_id(Class_MemBar);
3406   Node* top = C->top();
3407   init_req(TypeFunc::I_O,top);
3408   init_req(TypeFunc::FramePtr,top);
3409   init_req(TypeFunc::ReturnAdr,top);

3514       PhaseIterGVN* igvn = phase->is_IterGVN();
3515       remove(igvn);
3516       // Must return either the original node (now dead) or a new node
3517       // (Do not return a top here, since that would break the uniqueness of top.)
3518       return new ConINode(TypeInt::ZERO);
3519     }
3520   }
3521   return progress ? this : NULL;
3522 }
3523 
3524 //------------------------------Value------------------------------------------
3525 const Type* MemBarNode::Value(PhaseGVN* phase) const {
3526   if( !in(0) ) return Type::TOP;
3527   if( phase->type(in(0)) == Type::TOP )
3528     return Type::TOP;
3529   return TypeTuple::MEMBAR;
3530 }
3531 
3532 //------------------------------match------------------------------------------
3533 // Construct projections for control and memory.
3534 Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) {
3535   switch (proj->_con) {
3536   case TypeFunc::Control:
3537   case TypeFunc::Memory:
3538     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
3539   }
3540   ShouldNotReachHere();
3541   return NULL;
3542 }
3543 
3544 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3545   trailing->_kind = TrailingStore;
3546   leading->_kind = LeadingStore;
3547 #ifdef ASSERT
3548   trailing->_pair_idx = leading->_idx;
3549   leading->_pair_idx = leading->_idx;
3550 #endif
3551 }
3552 
3553 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3554   trailing->_kind = TrailingLoadStore;

3821   return (req() > RawStores);
3822 }
3823 
3824 void InitializeNode::set_complete(PhaseGVN* phase) {
3825   assert(!is_complete(), "caller responsibility");
3826   _is_complete = Complete;
3827 
3828   // After this node is complete, it contains a bunch of
3829   // raw-memory initializations.  There is no need for
3830   // it to have anything to do with non-raw memory effects.
3831   // Therefore, tell all non-raw users to re-optimize themselves,
3832   // after skipping the memory effects of this initialization.
3833   PhaseIterGVN* igvn = phase->is_IterGVN();
3834   if (igvn)  igvn->add_users_to_worklist(this);
3835 }
3836 
3837 // Convenience function.
3838 // Returns false if the init is missing, already complete, or already records stores.
3839 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
3840   InitializeNode* init = initialization();
3841   if (init == NULL || init->is_complete()) {
3842     return false;
3843   }
3844   init->remove_extra_zeroes();
3845   // for now, if this allocation has already collected any inits, bail:
3846   if (init->is_non_zero())  return false;
3847   init->set_complete(phase);
3848   return true;
3849 }
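// Editor's sketch (hypothetical caller, illustrative names only): a phase that
// has proven a fresh allocation needs no explicit field stores could do
//
//   if (alloc->maybe_set_complete(phase)) {
//     // The InitializeNode now stands for pure default-initialization and
//     // its non-raw memory users have been queued for re-optimization.
//   }
//
// The call returns false if the allocation has no InitializeNode, if it is
// already complete, or if it has already collected non-zero init stores.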
3850 
3851 void InitializeNode::remove_extra_zeroes() {
3852   if (req() == RawStores)  return;
3853   Node* zmem = zero_memory();
3854   uint fill = RawStores;
3855   for (uint i = fill; i < req(); i++) {
3856     Node* n = in(i);
3857     if (n->is_top() || n == zmem)  continue;  // skip
3858     if (fill < i)  set_req(fill, n);          // compact
3859     ++fill;
3860   }
3861   // delete any empty spaces created:
3862   while (fill < req()) {
3863     del_req(fill);

4001             // store node that we'd like to capture. We need to check
4002             // the uses of the MergeMemNode.
4003             mems.push(n);
4004           }
4005         } else if (n->is_Mem()) {
4006           Node* other_adr = n->in(MemNode::Address);
4007           if (other_adr == adr) {
4008             failed = true;
4009             break;
4010           } else {
4011             const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
4012             if (other_t_adr != NULL) {
4013               int other_alias_idx = phase->C->get_alias_index(other_t_adr);
4014               if (other_alias_idx == alias_idx) {
4015                 // A load from the same memory slice as the store right
4016                 // after the InitializeNode. We check the control of the
4017                 // object/array that is loaded from. If it's the same as
4018                 // the store control then we cannot capture the store.
4019                 assert(!n->is_Store(), "2 stores to same slice on same control?");
4020                 Node* base = other_adr;
4021                 if (base->is_Phi()) {
4022                   // In rare cases base may be a PhiNode that reads the
4023                   // same memory slice between the InitializeNode and the store.
4024                   failed = true;
4025                   break;
4026                 }
4027                 assert(base->is_AddP(), "should be addp but is %s", base->Name());
4028                 base = base->in(AddPNode::Base);
4029                 if (base != NULL) {
4030                   base = base->uncast();
4031                   if (base->is_Proj() && base->in(0) == alloc) {
4032                     failed = true;
4033                     break;
4034                   }
4035                 }
4036               }
4037             }
4038           }
4039         } else {
4040           failed = true;
4041           break;
4042         }
4043       }
4044     }
4045   }
4046   if (failed) {

4591         //   z's_done      12  16  16  16    12  16    12
4592         //   z's_needed    12  16  16  16    16  16    16
4593         //   zsize          0   0   0   0     4   0     4
4594         if (next_full_store < 0) {
4595           // Conservative tack:  Zero to end of current word.
4596           zeroes_needed = align_up(zeroes_needed, BytesPerInt);
4597         } else {
4598           // Zero to beginning of next fully initialized word.
4599           // Or, don't zero at all, if we are already in that word.
4600           assert(next_full_store >= zeroes_needed, "must go forward");
4601           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
4602           zeroes_needed = next_full_store;
4603         }
4604       }
4605 
4606       if (zeroes_needed > zeroes_done) {
4607         intptr_t zsize = zeroes_needed - zeroes_done;
4608         // Do some incremental zeroing on rawmem, in parallel with inits.
4609         zeroes_done = align_down(zeroes_done, BytesPerInt);
4610         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
4611                                               allocation()->in(AllocateNode::DefaultValue),
4612                                               allocation()->in(AllocateNode::RawDefaultValue),
4613                                               zeroes_done, zeroes_needed,
4614                                               phase);
4615         zeroes_done = zeroes_needed;
4616         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
4617           do_zeroing = false;   // leave the hole, next time
4618       }
4619     }
4620 
4621     // Collect the store and move on:
4622     phase->replace_input_of(st, MemNode::Memory, inits);
4623     inits = st;                 // put it on the linearized chain
4624     set_req(i, zmem);           // unhook from previous position
4625 
4626     if (zeroes_done == st_off)
4627       zeroes_done = next_init_off;
4628 
4629     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
4630 
4631     #ifdef ASSERT
4632     // Various order invariants.  Weaker than stores_are_sane because

4652   remove_extra_zeroes();        // clear out all the zmems left over
4653   add_req(inits);
4654 
4655   if (!(UseTLAB && ZeroTLAB)) {
4656     // If anything remains to be zeroed, zero it all now.
4657     zeroes_done = align_down(zeroes_done, BytesPerInt);
4658     // if it is the last unused 4 bytes of an instance, forget about it
4659     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
4660     if (zeroes_done + BytesPerLong >= size_limit) {
4661       AllocateNode* alloc = allocation();
4662       assert(alloc != NULL, "must be present");
4663       if (alloc != NULL && alloc->Opcode() == Op_Allocate) {
4664         Node* klass_node = alloc->in(AllocateNode::KlassNode);
4665         ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
4666         if (zeroes_done == k->layout_helper())
4667           zeroes_done = size_limit;
4668       }
4669     }
4670     if (zeroes_done < size_limit) {
4671       rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
4672                                             allocation()->in(AllocateNode::DefaultValue),
4673                                             allocation()->in(AllocateNode::RawDefaultValue),
4674                                             zeroes_done, size_in_bytes, phase);
4675     }
4676   }
4677 
4678   set_complete(phase);
4679   return rawmem;
4680 }
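// Editor's note (worked example, not part of the original source): for a
// constant allocation size of 24 bytes whose captured stores only cover bytes
// [0, 16), the tail above emits a single clear_memory() over [16, 24); when
// UseTLAB && ZeroTLAB the TLAB is pre-zeroed, so no trailing clear is emitted
// at all.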
4681 
4682 
4683 #ifdef ASSERT
4684 bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
4685   if (is_complete())
4686     return true;                // stores could be anything at this point
4687   assert(allocation() != NULL, "must be present");
4688   intptr_t last_off = allocation()->minimum_header_size();
4689   for (uint i = InitializeNode::RawStores; i < req(); i++) {
4690     Node* st = in(i);
4691     intptr_t st_off = get_store_offset(st, phase);
4692     if (st_off < 0)  continue;  // ignore dead garbage
4693     if (last_off > st_off) {