src/hotspot/share/opto/memnode.cpp

   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 

  26 #include "classfile/javaClasses.hpp"

  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "gc/shared/tlab_globals.hpp"
  31 #include "memory/allocation.inline.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "oops/objArrayKlass.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/arraycopynode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/regalloc.hpp"
  38 #include "opto/compile.hpp"
  39 #include "opto/connode.hpp"
  40 #include "opto/convertnode.hpp"

  41 #include "opto/loopnode.hpp"
  42 #include "opto/machnode.hpp"
  43 #include "opto/matcher.hpp"
  44 #include "opto/memnode.hpp"
  45 #include "opto/mempointer.hpp"
  46 #include "opto/mulnode.hpp"
  47 #include "opto/narrowptrnode.hpp"
  48 #include "opto/phaseX.hpp"
  49 #include "opto/regmask.hpp"
  50 #include "opto/rootnode.hpp"
  51 #include "opto/traceMergeStoresTag.hpp"
  52 #include "opto/vectornode.hpp"
  53 #include "utilities/align.hpp"
  54 #include "utilities/copy.hpp"
  55 #include "utilities/macros.hpp"
  56 #include "utilities/powerOfTwo.hpp"
  57 #include "utilities/vmError.hpp"
  58 
  59 // Portions of code courtesy of Clifford Click
  60 

 123       st->print(", idx=Bot;");
 124     else if (atp->index() == Compile::AliasIdxTop)
 125       st->print(", idx=Top;");
 126     else if (atp->index() == Compile::AliasIdxRaw)
 127       st->print(", idx=Raw;");
 128     else {
 129       ciField* field = atp->field();
 130       if (field) {
 131         st->print(", name=");
 132         field->print_name_on(st);
 133       }
 134       st->print(", idx=%d;", atp->index());
 135     }
 136   }
 137 }
 138 
 139 extern void print_alias_types();
 140 
 141 #endif
 142 
 143 Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) {
 144   assert((t_oop != nullptr), "sanity");
 145   bool is_instance = t_oop->is_known_instance_field();
 146   bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() &&
 147                              (load != nullptr) && load->is_Load() &&
 148                              (phase->is_IterGVN() != nullptr);
 149   if (!(is_instance || is_boxed_value_load))
 150     return mchain;  // don't try to optimize non-instance types
 151   uint instance_id = t_oop->instance_id();
 152   Node *start_mem = phase->C->start()->proj_out_or_null(TypeFunc::Memory);
 153   Node *prev = nullptr;
 154   Node *result = mchain;
 155   while (prev != result) {
 156     prev = result;
 157     if (result == start_mem)
 158       break;  // hit one of our sentinels
 159     // skip over a call which does not affect this memory slice
 160     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
 161       Node *proj_in = result->in(0);
 162       if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
 163         break;  // hit one of our sentinels

 164       } else if (proj_in->is_Call()) {
 165         // ArrayCopyNodes processed here as well
 166         CallNode *call = proj_in->as_Call();
 167         if (!call->may_modify(t_oop, phase)) { // returns false for instances
 168           result = call->in(TypeFunc::Memory);
 169         }
 170       } else if (proj_in->is_Initialize()) {
 171         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
 172         // Stop if this is the initialization for the object instance which
 173         // contains this memory slice, otherwise skip over it.
 174         if ((alloc == nullptr) || (alloc->_idx == instance_id)) {
 175           break;
 176         }
 177         if (is_instance) {
 178           result = proj_in->in(TypeFunc::Memory);
 179         } else if (is_boxed_value_load) {
 180           Node* klass = alloc->in(AllocateNode::KlassNode);
 181           const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
 182           if (tklass->klass_is_exact() && !tklass->exact_klass()->equals(t_oop->is_instptr()->exact_klass())) {
 183             result = proj_in->in(TypeFunc::Memory); // not related allocation
 184           }
 185         }
 186       } else if (proj_in->is_MemBar()) {
 187         ArrayCopyNode* ac = nullptr;
 188         if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase, ac)) {
 189           break;
 190         }
 191         result = proj_in->in(TypeFunc::Memory);
 192       } else if (proj_in->is_top()) {
 193         break; // dead code
 194       } else {
 195         assert(false, "unexpected projection");
 196       }
 197     } else if (result->is_ClearArray()) {
 198       if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {
 199         // Can not bypass initialization of the instance
 200         // we are looking for.
 201         break;
 202       }
 203       // Otherwise skip it (the call updated 'result' value).

 216   bool is_instance = t_oop->is_known_instance_field();
 217   PhaseIterGVN *igvn = phase->is_IterGVN();
 218   if (is_instance && igvn != nullptr && result->is_Phi()) {
 219     PhiNode *mphi = result->as_Phi();
 220     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
 221     const TypePtr *t = mphi->adr_type();
 222     bool do_split = false;
 223     // In the following cases, Load memory input can be further optimized based on
 224     // its precise address type
 225     if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ) {
 226       do_split = true;
 227     } else if (t->isa_oopptr() && !t->is_oopptr()->is_known_instance()) {
 228       const TypeOopPtr* mem_t =
 229         t->is_oopptr()->cast_to_exactness(true)
 230         ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
 231         ->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
 232       if (t_oop->isa_aryptr()) {
 233         mem_t = mem_t->is_aryptr()
 234                      ->cast_to_stable(t_oop->is_aryptr()->is_stable())
 235                      ->cast_to_size(t_oop->is_aryptr()->size())
 236                      ->with_offset(t_oop->is_aryptr()->offset())
 237                      ->is_aryptr();
 238       }
 239       do_split = mem_t == t_oop;
 240     }
 241     if (do_split) {
 242       // clone the Phi with our address type
 243       result = mphi->split_out_instance(t_adr, igvn);
 244     } else {
 245       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
 246     }
 247   }
 248   return result;
 249 }
 250 
 251 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
 252   uint alias_idx = phase->C->get_alias_index(tp);
 253   Node *mem = mmem;
 254 #ifdef ASSERT
 255   {
 256     // Check that current type is consistent with the alias index used during graph construction
 257     assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
 258     bool consistent =  adr_check == nullptr || adr_check->empty() ||
 259                        phase->C->must_alias(adr_check, alias_idx );
 260     // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
 261     if( !consistent && adr_check != nullptr && !adr_check->empty() &&
 262                tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
 263         adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
 264         ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
 265           adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
 266           adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
 267       // don't assert if it is dead code.
 268       consistent = true;
 269     }
 270     if( !consistent ) {
 271       st->print("alias_idx==%d, adr_check==", alias_idx);
 272       if( adr_check == nullptr ) {
 273         st->print("null");
 274       } else {
 275         adr_check->dump();
 276       }
 277       st->cr();
 278       print_alias_types();
 279       assert(consistent, "adr_check must match alias idx");
 280     }
 281   }
 282 #endif

1002     Node* ld = gvn.transform(load);
1003     return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
1004   }
1005 
1006   return load;
1007 }
1008 
1009 //------------------------------hash-------------------------------------------
1010 uint LoadNode::hash() const {
1011   // unroll addition of interesting fields
1012   return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
1013 }
1014 
1015 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
1016   if ((atp != nullptr) && (atp->index() >= Compile::AliasIdxRaw)) {
1017     bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
1018     bool is_stable_ary = FoldStableValues &&
1019                          (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
1020                          tp->isa_aryptr()->is_stable();
1021 
1022     return (eliminate_boxing && non_volatile) || is_stable_ary;
1023   }
1024 
1025   return false;
1026 }
1027 
1028 LoadNode* LoadNode::pin_array_access_node() const {
1029   const TypePtr* adr_type = this->adr_type();
1030   if (adr_type != nullptr && adr_type->isa_aryptr()) {
1031     return clone_pinned();
1032   }
1033   return nullptr;
1034 }
1035 
1036 // Is the value loaded previously stored by an arraycopy? If so return
1037 // a load node that reads from the source array so we may be able to
1038 // optimize out the ArrayCopy node later.
1039 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
1040   Node* ld_adr = in(MemNode::Address);
1041   intptr_t ld_off = 0;
1042   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);

1059       assert(ld_alloc != nullptr, "need an alloc");
1060       assert(addp->is_AddP(), "address must be addp");
1061       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1062       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1063       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1064       addp->set_req(AddPNode::Base, src);
1065       addp->set_req(AddPNode::Address, src);
1066     } else {
1067       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1068              ac->as_ArrayCopy()->is_copyof_validated() ||
1069              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1070       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1071       addp->set_req(AddPNode::Base, src);
1072       addp->set_req(AddPNode::Address, src);
1073 
1074       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1075       BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
1076       if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1077 
1078       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1079       uint shift  = exact_log2(type2aelembytes(ary_elem));
1080 
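           // Rebase the load from the destination array onto the source array: the byte
           // offset must move by (SrcPos - DestPos) scaled by the element size, i.e.
           // shifted left by log2(element bytes). E.g. for an int[] copy with SrcPos=2
           // and DestPos=0, the offset grows by (2 - 0) << 2 = 8 bytes.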
1081       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1082 #ifdef _LP64
1083       diff = phase->transform(new ConvI2LNode(diff));
1084 #endif
1085       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1086 
1087       Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1088       addp->set_req(AddPNode::Offset, offset);
1089     }
1090     addp = phase->transform(addp);
1091 #ifdef ASSERT
1092     const TypePtr* adr_type = phase->type(addp)->is_ptr();
1093     ld->_adr_type = adr_type;
1094 #endif
1095     ld->set_req(MemNode::Address, addp);
1096     ld->set_req(0, ctl);
1097     ld->set_req(MemNode::Memory, mem);
1098     return ld;
1099   }
1100   return nullptr;
1101 }
1102 
1103 
1104 //---------------------------can_see_stored_value------------------------------
1105 // This routine exists to make sure this set of tests is done the same
1106 // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
1107 // will change the graph shape in a way which makes memory alive twice at the
1108 // same time (uses the Oracle model of aliasing), then some
1109 // LoadXNode::Identity will fold things back to the equivalence-class model
1110 // of aliasing.
1111 Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
1112   Node* ld_adr = in(MemNode::Address);
1113   intptr_t ld_off = 0;
1114   Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
1115   Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
1116   const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
1117   Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr;
1118   // This is more general than load from boxing objects.
1119   if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
1120     uint alias_idx = atp->index();
1121     Node* result = nullptr;
1122     Node* current = st;
1123     // Skip through chains of MemBarNodes checking the MergeMems for
1124     // new states for the slice of this load.  Stop once any other
1125     // kind of node is encountered.  Loads from final memory can skip
1126     // through any kind of MemBar but normal loads shouldn't skip
 1127     // through MemBarAcquire since that could allow them to move out of
1128     // a synchronized region. It is not safe to step over MemBarCPUOrder,
1129     // because alias info above them may be inaccurate (e.g., due to
1130     // mixed/mismatched unsafe accesses).
1131     bool is_final_mem = !atp->is_rewritable();
1132     while (current->is_Proj()) {
1133       int opc = current->in(0)->Opcode();
1134       if ((is_final_mem && (opc == Op_MemBarAcquire ||

1178         // Same base, same offset.
1179         // Possible improvement for arrays: check index value instead of absolute offset.
1180 
1181         // At this point we have proven something like this setup:
1182         //   B = << base >>
1183         //   L =  LoadQ(AddP(Check/CastPP(B), #Off))
1184         //   S = StoreQ(AddP(             B , #Off), V)
1185         // (Actually, we haven't yet proven the Q's are the same.)
1186         // In other words, we are loading from a casted version of
1187         // the same pointer-and-offset that we stored to.
1188         // Casted version may carry a dependency and it is respected.
1189         // Thus, we are able to replace L by V.
1190       }
1191       // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1192       if (store_Opcode() != st->Opcode()) {
1193         return nullptr;
1194       }
1195       // LoadVector/StoreVector needs additional check to ensure the types match.
1196       if (st->is_StoreVector()) {
1197         const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
1198         const TypeVect* out_vt = as_LoadVector()->vect_type();
1199         if (in_vt != out_vt) {
1200           return nullptr;
1201         }
1202       }
1203       return st->in(MemNode::ValueIn);
1204     }
1205 
1206     // A load from a freshly-created object always returns zero.
1207     // (This can happen after LoadNode::Ideal resets the load's memory input
1208     // to find_captured_store, which returned InitializeNode::zero_memory.)
1209     if (st->is_Proj() && st->in(0)->is_Allocate() &&
1210         (st->in(0) == ld_alloc) &&
1211         (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1212       // return a zero value for the load's basic type
1213       // (This is one of the few places where a generic PhaseTransform
1214       // can create new nodes.  Think of it as lazily manifesting
1215       // virtually pre-existing constants.)
1216       if (memory_type() != T_VOID) {
1217         if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
 1218           // If ReduceBulkZeroing is disabled, we need to check that the allocation does not belong to an
 1219           // ArrayCopyNode clone. If it does, we cannot assume zero since the initialization is done
 1220           // by the ArrayCopyNode.
1221           return phase->zerocon(memory_type());
1222         }
1223       } else {
1224         // TODO: materialize all-zero vector constant
1225         assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1226       }
1227     }
1228 
1229     // A load from an initialization barrier can match a captured store.
1230     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1231       InitializeNode* init = st->in(0)->as_Initialize();
1232       AllocateNode* alloc = init->allocation();
1233       if ((alloc != nullptr) && (alloc == ld_alloc)) {
1234         // examine a captured store value
1235         st = init->find_captured_store(ld_off, memory_size(), phase);

1856   bool addr_mark = ((phase->type(address)->isa_oopptr() || phase->type(address)->isa_narrowoop()) &&
1857          phase->type(address)->is_ptr()->offset() == oopDesc::mark_offset_in_bytes());
1858 
1859   // Skip up past a SafePoint control.  Cannot do this for Stores because
1860   // pointer stores & cardmarks must stay on the same side of a SafePoint.
1861   if( ctrl != nullptr && ctrl->Opcode() == Op_SafePoint &&
1862       phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw  &&
1863       !addr_mark &&
1864       (depends_only_on_test() || has_unknown_control_dependency())) {
1865     ctrl = ctrl->in(0);
1866     set_req(MemNode::Control,ctrl);
1867     progress = true;
1868   }
1869 
1870   intptr_t ignore = 0;
1871   Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
1872   if (base != nullptr
1873       && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
1874     // Check for useless control edge in some common special cases
1875     if (in(MemNode::Control) != nullptr

1876         && can_remove_control()
1877         && phase->type(base)->higher_equal(TypePtr::NOTNULL)
1878         && all_controls_dominate(base, phase->C->start())) {
1879       // A method-invariant, non-null address (constant or 'this' argument).
1880       set_req(MemNode::Control, nullptr);
1881       progress = true;
1882     }
1883   }
1884 
1885   Node* mem = in(MemNode::Memory);
1886   const TypePtr *addr_t = phase->type(address)->isa_ptr();
1887 
1888   if (can_reshape && (addr_t != nullptr)) {
1889     // try to optimize our memory input
1890     Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase);
1891     if (opt_mem != mem) {
1892       set_req_X(MemNode::Memory, opt_mem, phase);
1893       if (phase->type( opt_mem ) == Type::TOP) return nullptr;
1894       return this;
1895     }

1952   // fold up, do so.
1953   Node* prev_mem = find_previous_store(phase);
1954   if (prev_mem != nullptr) {
1955     Node* value = can_see_arraycopy_value(prev_mem, phase);
1956     if (value != nullptr) {
1957       return value;
1958     }
1959   }
1960   // Steps (a), (b):  Walk past independent stores to find an exact match.
1961   if (prev_mem != nullptr && prev_mem != in(MemNode::Memory)) {
1962     // (c) See if we can fold up on the spot, but don't fold up here.
1963     // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
1964     // just return a prior value, which is done by Identity calls.
1965     if (can_see_stored_value(prev_mem, phase)) {
1966       // Make ready for step (d):
1967       set_req_X(MemNode::Memory, prev_mem, phase);
1968       return this;
1969     }
1970   }
1971 
1972   return progress ? this : nullptr;
1973 }
1974 
1975 // Helper to recognize certain Klass fields which are invariant across
1976 // some group of array types (e.g., int[] or all T[] where T < Object).
1977 const Type*
1978 LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
1979                                  ciKlass* klass) const {
1980   assert(!UseCompactObjectHeaders || tkls->offset() != in_bytes(Klass::prototype_header_offset()),
1981          "must not happen");
1982   if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
1983     // The field is Klass::_access_flags.  Return its (constant) value.
1984     // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
1985     assert(Opcode() == Op_LoadUS, "must load an unsigned short from _access_flags");
1986     return TypeInt::make(klass->access_flags());
1987   }
1988   if (tkls->offset() == in_bytes(Klass::misc_flags_offset())) {
1989     // The field is Klass::_misc_flags.  Return its (constant) value.
1990     // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
1991     assert(Opcode() == Op_LoadUB, "must load an unsigned byte from _misc_flags");
1992     return TypeInt::make(klass->misc_flags());

2054       }
2055     }
2056 
2057     // Don't do this for integer types. There is only potential profit if
2058     // the element type t is lower than _type; that is, for int types, if _type is
2059     // more restrictive than t.  This only happens here if one is short and the other
2060     // char (both 16 bits), and in those cases we've made an intentional decision
2061     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
2062     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
2063     //
2064     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
2065     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
2066     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
2067     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
2068     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
2069     // In fact, that could have been the original type of p1, and p1 could have
2070     // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
2071     // expression (LShiftL quux 3) independently optimized to the constant 8.
2072     if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
2073         && (_type->isa_vect() == nullptr)

2074         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
2075       // t might actually be lower than _type, if _type is a unique
2076       // concrete subclass of abstract class t.
2077       if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
2078         const Type* jt = t->join_speculative(_type);
2079         // In any case, do not allow the join, per se, to empty out the type.
2080         if (jt->empty() && !t->empty()) {
 2081           // This can happen if an interface-typed array narrows to a class type.
2082           jt = _type;
2083         }
2084 #ifdef ASSERT
2085         if (phase->C->eliminate_boxing() && adr->is_AddP()) {
2086           // The pointers in the autobox arrays are always non-null
2087           Node* base = adr->in(AddPNode::Base);
2088           if ((base != nullptr) && base->is_DecodeN()) {
2089             // Get LoadN node which loads IntegerCache.cache field
2090             base = base->in(1);
2091           }
2092           if ((base != nullptr) && base->is_Con()) {
2093             const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
2094             if ((base_type != nullptr) && base_type->is_autobox_cache()) {
2095               // It could be narrow oop
2096               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
2097             }
2098           }
2099         }
2100 #endif
2101         return jt;
2102       }
2103     }
2104   } else if (tp->base() == Type::InstPtr) {
2105     assert( off != Type::OffsetBot ||
2106             // arrays can be cast to Objects
2107             !tp->isa_instptr() ||
2108             tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
2109             // unsafe field access may not have a constant offset
2110             C->has_unsafe_access(),
2111             "Field accesses must be precise" );
2112     // For oop loads, we expect the _type to be precise.
2113 
2114     // Optimize loads from constant fields.
2115     const TypeInstPtr* tinst = tp->is_instptr();
2116     ciObject* const_oop = tinst->const_oop();
2117     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
2118       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());
2119       if (con_type != nullptr) {
2120         return con_type;
2121       }
2122     }
2123   } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
2124     assert(off != Type::OffsetBot ||
2125             !tp->isa_instklassptr() ||
2126            // arrays can be cast to Objects
2127            tp->isa_instklassptr()->instance_klass()->is_java_lang_Object() ||
2128            // also allow array-loading from the primary supertype
2129            // array during subtype checks
2130            Opcode() == Op_LoadKlass,
2131            "Field accesses must be precise");
2132     // For klass/static loads, we expect the _type to be precise
2133   } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
 2134     /* With mirrors being an indirection in the Klass*
2135      * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
2136      * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
2137      *
2138      * So check the type and klass of the node before the LoadP.

2145         assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2146         assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2147         return TypeInstPtr::make(klass->java_mirror());
2148       }
2149     }
2150   }
2151 
2152   const TypeKlassPtr *tkls = tp->isa_klassptr();
2153   if (tkls != nullptr) {
2154     if (tkls->is_loaded() && tkls->klass_is_exact()) {
2155       ciKlass* klass = tkls->exact_klass();
2156       // We are loading a field from a Klass metaobject whose identity
2157       // is known at compile time (the type is "exact" or "precise").
2158       // Check for fields we know are maintained as constants by the VM.
2159       if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2160         // The field is Klass::_super_check_offset.  Return its (constant) value.
2161         // (Folds up type checking code.)
2162         assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2163         return TypeInt::make(klass->super_check_offset());
2164       }
2165       if (UseCompactObjectHeaders) {
2166         if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
2167           // The field is Klass::_prototype_header. Return its (constant) value.
2168           assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
2169           return TypeX::make(klass->prototype_header());
2170         }
2171       }
2172       // Compute index into primary_supers array
2173       juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2174       // Check for overflowing; use unsigned compare to handle the negative case.
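           // (An offset below primary_supers_offset() makes the difference negative; as an
           // unsigned value it becomes very large and fails the range check below.)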
2175       if( depth < ciKlass::primary_super_limit() ) {
2176         // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2177         // (Folds up type checking code.)
2178         assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2179         ciKlass *ss = klass->super_of_depth(depth);
2180         return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR;
2181       }
2182       const Type* aift = load_array_final_field(tkls, klass);
2183       if (aift != nullptr)  return aift;
2184     }
2185 

2221         !tkls->is_instklassptr()->might_be_an_array() // not the supertype of all T[] (java.lang.Object) or has an interface that is not Serializable or Cloneable
2222     ) {
2223       assert(Opcode() == Op_LoadI, "must load an int from _layout_helper");
2224       jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
2225       // The key property of this type is that it folds up tests
2226       // for array-ness, since it proves that the layout_helper is positive.
2227       // Thus, a generic value like the basic object layout helper works fine.
2228       return TypeInt::make(min_size, max_jint, Type::WidenMin);
2229     }
2230   }
2231 
2232   bool is_vect = (_type->isa_vect() != nullptr);
2233   if (is_instance && !is_vect) {
2234     // If we have an instance type and our memory input is the
 2235     // program's initial memory state, there is no matching store,
2236     // so just return a zero of the appropriate type -
2237     // except if it is vectorized - then we have no zero constant.
2238     Node *mem = in(MemNode::Memory);
2239     if (mem->is_Parm() && mem->in(0)->is_Start()) {
2240       assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2241       return Type::get_zero_type(_type->basic_type());
2242     }
2243   }
2244 
2245   if (!UseCompactObjectHeaders) {
2246     Node* alloc = is_new_object_mark_load();
2247     if (alloc != nullptr) {
2248       return TypeX::make(markWord::prototype().value());
2249     }
2250   }
2251 
2252   return _type;
2253 }
2254 
2255 //------------------------------match_edge-------------------------------------
2256 // Do we Match on this edge index or not?  Match only the address.
2257 uint LoadNode::match_edge(uint idx) const {
2258   return idx == MemNode::Address;
2259 }
2260 
2261 //--------------------------LoadBNode::Ideal--------------------------------------
2262 //
2263 //  If the previous store is to the same address as this load,
2264 //  and the value stored was larger than a byte, replace this load
2265 //  with the value stored truncated to a byte.  If no truncation is
2266 //  needed, the replacement is done in LoadNode::Identity().
2267 //
2268 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {

2377     }
2378   }
2379   // Identity call will handle the case where truncation is not needed.
2380   return LoadNode::Ideal(phase, can_reshape);
2381 }
2382 
2383 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2384   Node* mem = in(MemNode::Memory);
2385   Node* value = can_see_stored_value(mem,phase);
2386   if (value != nullptr && value->is_Con() &&
2387       !value->bottom_type()->higher_equal(_type)) {
2388     // If the input to the store does not fit with the load's result type,
2389     // it must be truncated. We can't delay until Ideal call since
2390     // a singleton Value is needed for split_thru_phi optimization.
2391     int con = value->get_int();
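         // The shift pair sign-extends the low 16 bits of the stored constant,
         // matching the truncation a signed short load would perform.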
2392     return TypeInt::make((con << 16) >> 16);
2393   }
2394   return LoadNode::Value(phase);
2395 }
2396 
2397 //=============================================================================
2398 //----------------------------LoadKlassNode::make------------------------------
2399 // Polymorphic factory method:
2400 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
2401   // sanity check the alias category against the created node type
2402   const TypePtr* adr_type = adr->bottom_type()->isa_ptr();
2403   assert(adr_type != nullptr, "expecting TypeKlassPtr");
2404 #ifdef _LP64
2405   if (adr_type->is_ptr_to_narrowklass()) {
2406     assert(UseCompressedClassPointers, "no compressed klasses");
2407     Node* load_klass = gvn.transform(new LoadNKlassNode(mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2408     return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2409   }
2410 #endif
2411   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2412   return new LoadKlassNode(mem, adr, at, tk, MemNode::unordered);
2413 }
2414 
2415 //------------------------------Value------------------------------------------
2416 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
2417   return klass_value_common(phase);
2418 }
2419 
2420 const Type* LoadNode::klass_value_common(PhaseGVN* phase) const {
2421   // Either input is TOP ==> the result is TOP
2422   const Type *t1 = phase->type( in(MemNode::Memory) );
2423   if (t1 == Type::TOP)  return Type::TOP;
2424   Node *adr = in(MemNode::Address);
2425   const Type *t2 = phase->type( adr );
2426   if (t2 == Type::TOP)  return Type::TOP;
2427   const TypePtr *tp = t2->is_ptr();
2428   if (TypePtr::above_centerline(tp->ptr()) ||
2429       tp->ptr() == TypePtr::Null)  return Type::TOP;
2430 
2431   // Return a more precise klass, if possible
2432   const TypeInstPtr *tinst = tp->isa_instptr();
2433   if (tinst != nullptr) {
2434     ciInstanceKlass* ik = tinst->instance_klass();
2435     int offset = tinst->offset();
2436     if (ik == phase->C->env()->Class_klass()
2437         && (offset == java_lang_Class::klass_offset() ||
2438             offset == java_lang_Class::array_klass_offset())) {
2439       // We are loading a special hidden field from a Class mirror object,
2440       // the field which points to the VM's Klass metaobject.
2441       ciType* t = tinst->java_mirror_type();

2442       // java_mirror_type returns non-null for compile-time Class constants.
2443       if (t != nullptr) {
2444         // constant oop => constant klass
2445         if (offset == java_lang_Class::array_klass_offset()) {
2446           if (t->is_void()) {
2447             // We cannot create a void array.  Since void is a primitive type return null
2448             // klass.  Users of this result need to do a null check on the returned klass.
2449             return TypePtr::NULL_PTR;
2450           }
2451           return TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
2452         }
2453         if (!t->is_klass()) {
2454           // a primitive Class (e.g., int.class) has null for a klass field
2455           return TypePtr::NULL_PTR;
2456         }
2457         // Fold up the load of the hidden field
2458         return TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
2459       }
2460       // non-constant mirror, so we can't tell what's going on
2461     }
2462     if (!tinst->is_loaded())
2463       return _type;             // Bail out if not loaded
2464     if (offset == oopDesc::klass_offset_in_bytes()) {
2465       return tinst->as_klass_type(true);
2466     }
2467   }
2468 
2469   // Check for loading klass from an array
2470   const TypeAryPtr *tary = tp->isa_aryptr();
2471   if (tary != nullptr &&
2472       tary->offset() == oopDesc::klass_offset_in_bytes()) {
2473     return tary->as_klass_type(true);
2474   }
2475 
2476   // Check for loading klass from an array klass
2477   const TypeKlassPtr *tkls = tp->isa_klassptr();
2478   if (tkls != nullptr && !StressReflectiveCode) {
2479     if (!tkls->is_loaded())
2480      return _type;             // Bail out if not loaded
2481     if (tkls->isa_aryklassptr() && tkls->is_aryklassptr()->elem()->isa_klassptr() &&
2482         tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2483       // // Always returning precise element type is incorrect,
2484       // // e.g., element type could be object and array may contain strings
2485       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2486 
2487       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2488       // according to the element type's subclassing.
2489       return tkls->is_aryklassptr()->elem()->isa_klassptr()->cast_to_exactness(tkls->klass_is_exact());
2490     }
2491     if (tkls->isa_instklassptr() != nullptr && tkls->klass_is_exact() &&

2566           if (tkls->offset() == mirror_field) {
2567             return adr2->in(AddPNode::Base);
2568           }
2569         }
2570       }
2571     }
2572   }
2573 
2574   return this;
2575 }
2576 
2577 LoadNode* LoadNode::clone_pinned() const {
2578   LoadNode* ld = clone()->as_Load();
2579   ld->_control_dependency = UnknownControl;
2580   return ld;
2581 }
2582 
2583 
2584 //------------------------------Value------------------------------------------
2585 const Type* LoadNKlassNode::Value(PhaseGVN* phase) const {
2586   const Type *t = klass_value_common(phase);
2587   if (t == Type::TOP)
2588     return t;
2589 
2590   return t->make_narrowklass();
2591 }
2592 
2593 //------------------------------Identity---------------------------------------
2594 // To clean up reflective code, simplify k.java_mirror.as_klass to narrow k.
2595 // Also feed through the klass in Allocate(...klass...)._klass.
2596 Node* LoadNKlassNode::Identity(PhaseGVN* phase) {
2597   Node *x = klass_identity_common(phase);
2598 
2599   const Type *t = phase->type( x );
2600   if( t == Type::TOP ) return x;
2601   if( t->isa_narrowklass()) return x;
2602   assert (!t->isa_narrowoop(), "no narrow oop here");
2603 
2604   return phase->transform(new EncodePKlassNode(x, t->make_narrowklass()));
2605 }
2606 

3366   }
3367   ss.print_cr("[TraceMergeStores]: with");
3368   merged_input_value->dump("\n", false, &ss);
3369   merged_store->dump("\n", false, &ss);
3370   tty->print("%s", ss.as_string());
3371 }
3372 #endif
3373 
3374 //------------------------------Ideal------------------------------------------
 3375 // Change back-to-back Store(Store(m, p, y), p, x) to Store(m, p, x).
3376 // When a store immediately follows a relevant allocation/initialization,
3377 // try to capture it into the initialization, or hoist it above.
3378 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3379   Node* p = MemNode::Ideal_common(phase, can_reshape);
3380   if (p)  return (p == NodeSentinel) ? nullptr : p;
3381 
3382   Node* mem     = in(MemNode::Memory);
3383   Node* address = in(MemNode::Address);
3384   Node* value   = in(MemNode::ValueIn);
3385   // Back-to-back stores to same address?  Fold em up.  Generally
3386   // unsafe if I have intervening uses.
3387   {
3388     Node* st = mem;
3389     // If Store 'st' has more than one use, we cannot fold 'st' away.
3390     // For example, 'st' might be the final state at a conditional
3391     // return.  Or, 'st' might be used by some node which is live at
3392     // the same time 'st' is live, which might be unschedulable.  So,
3393     // require exactly ONE user until such time as we clone 'mem' for
3394     // each of 'mem's uses (thus making the exactly-1-user-rule hold
3395     // true).
3396     while (st->is_Store() && st->outcnt() == 1) {
3397       // Looking at a dead closed cycle of memory?
3398       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
3399       assert(Opcode() == st->Opcode() ||
3400              st->Opcode() == Op_StoreVector ||
3401              Opcode() == Op_StoreVector ||
3402              st->Opcode() == Op_StoreVectorScatter ||
3403              Opcode() == Op_StoreVectorScatter ||
3404              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
3405              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
3406              (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
3407              (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
3408              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
3409 
3410       if (st->in(MemNode::Address)->eqv_uncast(address) &&
3411           st->as_Store()->memory_size() <= this->memory_size()) {
3412         Node* use = st->raw_out(0);
3413         if (phase->is_IterGVN()) {
3414           phase->is_IterGVN()->rehash_node_delayed(use);
3415         }
3416         // It's OK to do this in the parser, since DU info is always accurate,
3417         // and the parser always refers to nodes via SafePointNode maps.
3418         use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
3419         return this;
3420       }
3421       st = st->in(MemNode::Memory);
3422     }
3423   }
3424 
3425 
3426   // Capture an unaliased, unconditional, simple store into an initializer.

3524       const StoreVectorNode* store_vector = as_StoreVector();
3525       const StoreVectorNode* mem_vector = mem->as_StoreVector();
3526       const Node* store_indices = store_vector->indices();
3527       const Node* mem_indices = mem_vector->indices();
3528       const Node* store_mask = store_vector->mask();
3529       const Node* mem_mask = mem_vector->mask();
3530       // Ensure types, indices, and masks match
3531       if (store_vector->vect_type() == mem_vector->vect_type() &&
3532           ((store_indices == nullptr) == (mem_indices == nullptr) &&
3533            (store_indices == nullptr || store_indices->eqv_uncast(mem_indices))) &&
3534           ((store_mask == nullptr) == (mem_mask == nullptr) &&
3535            (store_mask == nullptr || store_mask->eqv_uncast(mem_mask)))) {
3536         result = mem;
3537       }
3538     }
3539   }
3540 
3541   // Store of zero anywhere into a freshly-allocated object?
3542   // Then the store is useless.
3543   // (It must already have been captured by the InitializeNode.)
3544   if (result == this &&
3545       ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
3546     // a newly allocated object is already all-zeroes everywhere
3547     if (mem->is_Proj() && mem->in(0)->is_Allocate()) {

3548       result = mem;
3549     }
3550 
3551     if (result == this) {
3552       // the store may also apply to zero-bits in an earlier object
3553       Node* prev_mem = find_previous_store(phase);
3554       // Steps (a), (b):  Walk past independent stores to find an exact match.
3555       if (prev_mem != nullptr) {
3556         Node* prev_val = can_see_stored_value(prev_mem, phase);
3557         if (prev_val != nullptr && prev_val == val) {
3558           // prev_val and val might differ by a cast; it would be good
3559           // to keep the more informative of the two.
3560           result = mem;
3561         }
3562       }
3563     }
3564   }
3565 
3566   PhaseIterGVN* igvn = phase->is_IterGVN();
3567   if (result != this && igvn != nullptr) {
3568     MemBarNode* trailing = trailing_membar();
3569     if (trailing != nullptr) {
3570 #ifdef ASSERT
3571       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();

4035 // Clearing a short array is faster with stores
4036 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
4037   // Already know this is a large node, do not try to ideal it
4038   if (_is_large) return nullptr;
4039 
4040   const int unit = BytesPerLong;
4041   const TypeX* t = phase->type(in(2))->isa_intptr_t();
4042   if (!t)  return nullptr;
4043   if (!t->is_con())  return nullptr;
4044   intptr_t raw_count = t->get_con();
4045   intptr_t size = raw_count;
4046   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
4047   // Clearing nothing uses the Identity call.
4048   // Negative clears are possible on dead ClearArrays
4049   // (see jck test stmt114.stmt11402.val).
4050   if (size <= 0 || size % unit != 0)  return nullptr;
4051   intptr_t count = size / unit;
4052   // Length too long; communicate this to matchers and assemblers.
 4053   // Assemblers are responsible for producing fast hardware clears for it.
4054   if (size > InitArrayShortSize) {
4055     return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
4056   } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
4057     return nullptr;
4058   }
4059   if (!IdealizeClearArrayNode) return nullptr;
4060   Node *mem = in(1);
4061   if( phase->type(mem)==Type::TOP ) return nullptr;
4062   Node *adr = in(3);
4063   const Type* at = phase->type(adr);
4064   if( at==Type::TOP ) return nullptr;
4065   const TypePtr* atp = at->isa_ptr();
4066   // adjust atp to be the correct array element address type
4067   if (atp == nullptr)  atp = TypePtr::BOTTOM;
4068   else              atp = atp->add_offset(Type::OffsetBot);
4069   // Get base for derived pointer purposes
4070   if( adr->Opcode() != Op_AddP ) Unimplemented();
4071   Node *base = adr->in(1);
4072 
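       // Expand the small constant-length clear into 'count' 8-byte zero stores,
       // each one BytesPerLong past the previous and chained through the prior
       // store's memory output.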
4073   Node *zero = phase->makecon(TypeLong::ZERO);
4074   Node *off  = phase->MakeConX(BytesPerLong);
4075   mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
4076   count--;
4077   while( count-- ) {
4078     mem = phase->transform(mem);
4079     adr = phase->transform(new AddPNode(base,adr,off));
4080     mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
4081   }
4082   return mem;
4083 }
4084 
4085 //----------------------------step_through----------------------------------
 4086 // Return the allocation's input memory edge if it is a different instance
4087 // or itself if it is the one we are looking for.
4088 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseValues* phase) {
4089   Node* n = *np;
4090   assert(n->is_ClearArray(), "sanity");
4091   intptr_t offset;
4092   AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
4093   // This method is called only before Allocate nodes are expanded
 4094   // during macro node expansion. Before that, ClearArray nodes are
4095   // only generated in PhaseMacroExpand::generate_arraycopy() (before
4096   // Allocate nodes are expanded) which follows allocations.
4097   assert(alloc != nullptr, "should have allocation");
4098   if (alloc->_idx == instance_id) {
4099     // Can not bypass initialization of the instance we are looking for.
4100     return false;
4101   }
4102   // Otherwise skip it.
4103   InitializeNode* init = alloc->initialization();
4104   if (init != nullptr)
4105     *np = init->in(TypeFunc::Memory);
4106   else
4107     *np = alloc->in(TypeFunc::Memory);
4108   return true;
4109 }
4110 
4111 //----------------------------clear_memory-------------------------------------
4112 // Generate code to initialize object storage to zero.
4113 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4114                                    intptr_t start_offset,
4115                                    Node* end_offset,
4116                                    PhaseGVN* phase) {
4117   intptr_t offset = start_offset;
4118 
4119   int unit = BytesPerLong;
4120   if ((offset % unit) != 0) {
4121     Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
4122     adr = phase->transform(adr);
4123     const TypePtr* atp = TypeRawPtr::BOTTOM;
4124     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4125     mem = phase->transform(mem);
4126     offset += BytesPerInt;
4127   }
4128   assert((offset % unit) == 0, "");
4129 
4130   // Initialize the remaining stuff, if any, with a ClearArray.
4131   return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
4132 }
4133 
4134 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,

4135                                    Node* start_offset,
4136                                    Node* end_offset,
4137                                    PhaseGVN* phase) {
4138   if (start_offset == end_offset) {
4139     // nothing to do
4140     return mem;
4141   }
4142 
4143   int unit = BytesPerLong;
4144   Node* zbase = start_offset;
4145   Node* zend  = end_offset;
4146 
4147   // Scale to the unit required by the CPU:
4148   if (!Matcher::init_array_count_is_in_bytes) {
4149     Node* shift = phase->intcon(exact_log2(unit));
4150     zbase = phase->transform(new URShiftXNode(zbase, shift) );
4151     zend  = phase->transform(new URShiftXNode(zend,  shift) );
4152   }
4153 
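       // zsize is the extent of [start_offset, end_offset) in the unit the matcher
       // expects (bytes or 8-byte words), and adr is the derived start address
       // dest + start_offset.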
4154   // Bulk clear double-words
4155   Node* zsize = phase->transform(new SubXNode(zend, zbase) );
4156   Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
4157   mem = new ClearArrayNode(ctl, mem, zsize, adr, false);
4158   return phase->transform(mem);
4159 }
4160 
4161 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4162                                    intptr_t start_offset,
4163                                    intptr_t end_offset,
4164                                    PhaseGVN* phase) {
4165   if (start_offset == end_offset) {
4166     // nothing to do
4167     return mem;
4168   }
4169 
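       // Clear [start_offset, end_offset) by delegating [start_offset, done_offset) to
       // the offset-node variant, then emitting one trailing 32-bit zero store if
       // end_offset is 4-byte but not 8-byte aligned.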
4170   assert((end_offset % BytesPerInt) == 0, "odd end offset");
4171   intptr_t done_offset = end_offset;
4172   if ((done_offset % BytesPerLong) != 0) {
4173     done_offset -= BytesPerInt;
4174   }
4175   if (done_offset > start_offset) {
4176     mem = clear_memory(ctl, mem, dest,
4177                        start_offset, phase->MakeConX(done_offset), phase);
4178   }
4179   if (done_offset < end_offset) { // emit the final 32-bit store
4180     Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
4181     adr = phase->transform(adr);
4182     const TypePtr* atp = TypeRawPtr::BOTTOM;
4183     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4184     mem = phase->transform(mem);
4185     done_offset += BytesPerInt;
4186   }
4187   assert(done_offset == end_offset, "");
4188   return mem;
4189 }
4190 
4191 //=============================================================================
4192 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
4193   : MultiNode(TypeFunc::Parms + (precedent == nullptr? 0: 1)),
4194     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
4195 #ifdef ASSERT
4196   , _pair_idx(0)
4197 #endif
4198 {
4199   init_class_id(Class_MemBar);
4200   Node* top = C->top();
4201   init_req(TypeFunc::I_O,top);
4202   init_req(TypeFunc::FramePtr,top);
4203   init_req(TypeFunc::ReturnAdr,top);

4309       PhaseIterGVN* igvn = phase->is_IterGVN();
4310       remove(igvn);
4311       // Must return either the original node (now dead) or a new node
4312       // (Do not return a top here, since that would break the uniqueness of top.)
4313       return new ConINode(TypeInt::ZERO);
4314     }
4315   }
4316   return progress ? this : nullptr;
4317 }
4318 
4319 //------------------------------Value------------------------------------------
4320 const Type* MemBarNode::Value(PhaseGVN* phase) const {
4321   if( !in(0) ) return Type::TOP;
4322   if( phase->type(in(0)) == Type::TOP )
4323     return Type::TOP;
4324   return TypeTuple::MEMBAR;
4325 }
4326 
4327 //------------------------------match------------------------------------------
4328 // Construct projections for memory.
4329 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
4330   switch (proj->_con) {
4331   case TypeFunc::Control:
4332   case TypeFunc::Memory:
4333     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
4334   }
4335   ShouldNotReachHere();
4336   return nullptr;
4337 }
4338 
4339 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4340   trailing->_kind = TrailingStore;
4341   leading->_kind = LeadingStore;
4342 #ifdef ASSERT
4343   trailing->_pair_idx = leading->_idx;
4344   leading->_pair_idx = leading->_idx;
4345 #endif
4346 }
4347 
4348 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4349   trailing->_kind = TrailingLoadStore;

4596   return (req() > RawStores);
4597 }
4598 
4599 void InitializeNode::set_complete(PhaseGVN* phase) {
4600   assert(!is_complete(), "caller responsibility");
4601   _is_complete = Complete;
4602 
4603   // After this node is complete, it contains a bunch of
4604   // raw-memory initializations.  There is no need for
4605   // it to have anything to do with non-raw memory effects.
4606   // Therefore, tell all non-raw users to re-optimize themselves,
4607   // after skipping the memory effects of this initialization.
4608   PhaseIterGVN* igvn = phase->is_IterGVN();
4609   if (igvn)  igvn->add_users_to_worklist(this);
4610 }
4611 
4612 // convenience function
4613 // return false if the init contains any stores already
4614 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
4615   InitializeNode* init = initialization();
4616   if (init == nullptr || init->is_complete())  return false;
4617   init->remove_extra_zeroes();
4618   // for now, if this allocation has already collected any inits, bail:
4619   if (init->is_non_zero())  return false;
4620   init->set_complete(phase);
4621   return true;
4622 }
4623 
4624 void InitializeNode::remove_extra_zeroes() {
4625   if (req() == RawStores)  return;
4626   Node* zmem = zero_memory();
4627   uint fill = RawStores;
4628   for (uint i = fill; i < req(); i++) {
4629     Node* n = in(i);
4630     if (n->is_top() || n == zmem)  continue;  // skip
4631     if (fill < i)  set_req(fill, n);          // compact
4632     ++fill;
4633   }
4634   // delete any empty spaces created:
4635   while (fill < req()) {
4636     del_req(fill);

4780             // store node that we'd like to capture. We need to check
4781             // the uses of the MergeMemNode.
4782             mems.push(n);
4783           }
4784         } else if (n->is_Mem()) {
4785           Node* other_adr = n->in(MemNode::Address);
4786           if (other_adr == adr) {
4787             failed = true;
4788             break;
4789           } else {
4790             const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
4791             if (other_t_adr != nullptr) {
4792               int other_alias_idx = phase->C->get_alias_index(other_t_adr);
4793               if (other_alias_idx == alias_idx) {
4794                 // A load from the same memory slice as the store right
4795                 // after the InitializeNode. We check the control of the
4796                 // object/array that is loaded from. If it's the same as
4797                 // the store control then we cannot capture the store.
4798                 assert(!n->is_Store(), "2 stores to same slice on same control?");
4799                 Node* base = other_adr;
4800                 assert(base->is_AddP(), "should be addp but is %s", base->Name());
4801                 base = base->in(AddPNode::Base);
4802                 if (base != nullptr) {
4803                   base = base->uncast();
4804                   if (base->is_Proj() && base->in(0) == alloc) {
4805                     failed = true;
4806                     break;
4807                   }
4808                 }
4809               }
4810             }
4811           }
4812         } else {
4813           failed = true;
4814           break;
4815         }
4816       }
4817     }
4818   }
4819   if (failed) {

5366         //   z's_done      12  16  16  16    12  16    12
5367         //   z's_needed    12  16  16  16    16  16    16
5368         //   zsize          0   0   0   0     4   0     4
5369         if (next_full_store < 0) {
5370           // Conservative tack:  Zero to end of current word.
5371           zeroes_needed = align_up(zeroes_needed, BytesPerInt);
5372         } else {
5373           // Zero to beginning of next fully initialized word.
5374           // Or, don't zero at all, if we are already in that word.
5375           assert(next_full_store >= zeroes_needed, "must go forward");
5376           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
5377           zeroes_needed = next_full_store;
5378         }
5379       }
5380 
5381       if (zeroes_needed > zeroes_done) {
5382         intptr_t zsize = zeroes_needed - zeroes_done;
5383         // Do some incremental zeroing on rawmem, in parallel with inits.
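             // For example, if zeroes_done == 14 and zeroes_needed == 24, zeroes_done is first
             // aligned down to 12 and a clear of raw-memory bytes [12, 24) is emitted ahead of
             // the captured initializing stores.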
5384         zeroes_done = align_down(zeroes_done, BytesPerInt);
5385         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,


5386                                               zeroes_done, zeroes_needed,
5387                                               phase);
5388         zeroes_done = zeroes_needed;
5389         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
5390           do_zeroing = false;   // leave the hole, next time
5391       }
5392     }
5393 
5394     // Collect the store and move on:
5395     phase->replace_input_of(st, MemNode::Memory, inits);
5396     inits = st;                 // put it on the linearized chain
5397     set_req(i, zmem);           // unhook from previous position
5398 
5399     if (zeroes_done == st_off)
5400       zeroes_done = next_init_off;
5401 
5402     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
5403 
5404     #ifdef ASSERT
5405     // Various order invariants.  Weaker than stores_are_sane because

5425   remove_extra_zeroes();        // clear out all the zmems left over
5426   add_req(inits);
5427 
5428   if (!(UseTLAB && ZeroTLAB)) {
5429     // If anything remains to be zeroed, zero it all now.
5430     zeroes_done = align_down(zeroes_done, BytesPerInt);
5431     // if it is the last unused 4 bytes of an instance, forget about it
5432     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
5433     if (zeroes_done + BytesPerLong >= size_limit) {
5434       AllocateNode* alloc = allocation();
5435       assert(alloc != nullptr, "must be present");
5436       if (alloc != nullptr && alloc->Opcode() == Op_Allocate) {
5437         Node* klass_node = alloc->in(AllocateNode::KlassNode);
5438         ciKlass* k = phase->type(klass_node)->is_instklassptr()->instance_klass();
5439         if (zeroes_done == k->layout_helper())
5440           zeroes_done = size_limit;
5441       }
5442     }
5443     if (zeroes_done < size_limit) {
5444       rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,


5445                                             zeroes_done, size_in_bytes, phase);
5446     }
5447   }
5448 
5449   set_complete(phase);
5450   return rawmem;
5451 }
5452 
5453 
5454 #ifdef ASSERT
5455 bool InitializeNode::stores_are_sane(PhaseValues* phase) {
5456   if (is_complete())
5457     return true;                // stores could be anything at this point
5458   assert(allocation() != nullptr, "must be present");
5459   intptr_t last_off = allocation()->minimum_header_size();
5460   for (uint i = InitializeNode::RawStores; i < req(); i++) {
5461     Node* st = in(i);
5462     intptr_t st_off = get_store_offset(st, phase);
5463     if (st_off < 0)  continue;  // ignore dead garbage
5464     if (last_off > st_off) {

   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "ci/ciFlatArrayKlass.hpp"
  27 #include "classfile/javaClasses.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "gc/shared/barrierSet.hpp"
  31 #include "gc/shared/c2/barrierSetC2.hpp"
  32 #include "gc/shared/tlab_globals.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/cfgnode.hpp"
  39 #include "opto/regalloc.hpp"
  40 #include "opto/compile.hpp"
  41 #include "opto/connode.hpp"
  42 #include "opto/convertnode.hpp"
  43 #include "opto/inlinetypenode.hpp"
  44 #include "opto/loopnode.hpp"
  45 #include "opto/machnode.hpp"
  46 #include "opto/matcher.hpp"
  47 #include "opto/memnode.hpp"
  48 #include "opto/mempointer.hpp"
  49 #include "opto/mulnode.hpp"
  50 #include "opto/narrowptrnode.hpp"
  51 #include "opto/phaseX.hpp"
  52 #include "opto/regmask.hpp"
  53 #include "opto/rootnode.hpp"
  54 #include "opto/traceMergeStoresTag.hpp"
  55 #include "opto/vectornode.hpp"
  56 #include "utilities/align.hpp"
  57 #include "utilities/copy.hpp"
  58 #include "utilities/macros.hpp"
  59 #include "utilities/powerOfTwo.hpp"
  60 #include "utilities/vmError.hpp"
  61 
  62 // Portions of code courtesy of Clifford Click
  63 

 126       st->print(", idx=Bot;");
 127     else if (atp->index() == Compile::AliasIdxTop)
 128       st->print(", idx=Top;");
 129     else if (atp->index() == Compile::AliasIdxRaw)
 130       st->print(", idx=Raw;");
 131     else {
 132       ciField* field = atp->field();
 133       if (field) {
 134         st->print(", name=");
 135         field->print_name_on(st);
 136       }
 137       st->print(", idx=%d;", atp->index());
 138     }
 139   }
 140 }
 141 
 142 extern void print_alias_types();
 143 
 144 #endif
 145 
 146 // Find the memory output corresponding to the fall-through path of a call
 147 static Node* find_call_fallthrough_mem_output(CallNode* call) {
 148   ResourceMark rm;
 149   CallProjections* projs = call->extract_projections(false, false);
 150   Node* res = projs->fallthrough_memproj;
 151   assert(res != nullptr, "must have a fallthrough mem output");
 152   return res;
 153 }
 154 
 155 // Try to find a better memory input for a load from a strict final field
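     // Returns a better memory input (the fallthrough memory output of the producing call, or the
     // start memory for any other parameter), or nullptr if no better input is known. If the base
     // is a local allocation, or the receiver while compiling a constructor, 'base_local' is set
     // to that projection and nullptr is returned so the caller can reason about the local object.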
 156 static Node* try_optimize_strict_final_load_memory(PhaseGVN* phase, Node* adr, ProjNode*& base_local) {
 157   intptr_t offset = 0;
 158   Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
 159   if (base == nullptr) {
 160     return nullptr;
 161   }
 162 
 163   Node* base_uncasted = base->uncast();
 164   if (base_uncasted->is_Proj()) {
 165     MultiNode* multi = base_uncasted->in(0)->as_Multi();
 166     if (multi->is_Allocate()) {
 167       base_local = base_uncasted->as_Proj();
 168       return nullptr;
 169     } else if (multi->is_Call()) {
 170       // The oop is returned from a call; the memory can be the fallthrough output of the call
 171       return find_call_fallthrough_mem_output(multi->as_Call());
 172     } else if (multi->is_Start()) {
 173       // The oop is a parameter
 174       if (phase->C->method()->is_object_constructor() && base_uncasted->as_Proj()->_con == TypeFunc::Parms) {
 175         // The receiver of a constructor is similar to the result of an AllocateNode
 176         base_local = base_uncasted->as_Proj();
 177         return nullptr;
 178       } else {
 179         // Use the start memory otherwise
 180         return multi->proj_out(TypeFunc::Memory);
 181       }
 182     }
 183   }
 184 
 185   return nullptr;
 186 }
 187 
 188 // Whether a call can modify a strict final field, given that the object is allocated inside the
 189 // current compilation unit, or is the first parameter when the compilation root is a constructor.
 190 // This is equivalent to asking whether 'call' is a constructor invocation and the class declaring
 191 // the target method is a subclass of the class declaring 'field'.
 192 static bool call_can_modify_local_object(ciField* field, CallNode* call) {
 193   if (!call->is_CallJava()) {
 194     return false;
 195   }
 196 
 197   ciMethod* target = call->as_CallJava()->method();
 198   if (target == nullptr || !target->is_object_constructor()) {
 199     return false;
 200   }
 201 
 202   // If the class declaring the constructor is a subclass of the one declaring 'field', then
 203   // the field is set inside the constructor (or one of its super constructors); otherwise, the
 204   // field must be set before the constructor invocation. E.g. a field Super.x will be set during
 205   // the execution of Sub::<init>, while a field Sub.y must be set before Super::<init> is invoked.
 206   // We can try to be more heroic and decide if the receiver of the constructor invocation is the
 207   // object from which we are loading from. This, however, may be problematic as deciding if 2
 208   // nodes are definitely different may not be trivial, especially if the graph is not canonical.
 209   // As a result, it is made more conservative for now.
 210   assert(call->req() > TypeFunc::Parms, "constructor must have at least 1 argument");
 211   return target->holder()->is_subclass_of(field->holder());
 212 }
 213 
 214 Node* MemNode::optimize_simple_memory_chain(Node* mchain, const TypeOopPtr* t_oop, Node* load, PhaseGVN* phase) {
 215   assert(t_oop != nullptr, "sanity");
 216   bool is_instance = t_oop->is_known_instance_field();
 217 
 218   ciField* field = phase->C->alias_type(t_oop)->field();
 219   bool is_strict_final_load = false;
 220 
 221   // After macro expansion, an allocation may become a call; changing the memory input to the
 222   // memory output of that call would then be illegal. As a result, disallow this transformation
 223   // after macro expansion.
 224   if (phase->is_IterGVN() && phase->C->allow_macro_nodes() && load != nullptr && load->is_Load() && !load->as_Load()->is_mismatched_access()) {
 225     if (EnableValhalla) {
 226       if (field != nullptr && (field->holder()->is_inlinetype() || field->holder()->is_abstract_value_klass())) {
 227         is_strict_final_load = true;
 228       }
 229 #ifdef ASSERT
 230       if (t_oop->is_inlinetypeptr() && t_oop->inline_klass()->contains_field_offset(t_oop->offset())) {
 231         assert(is_strict_final_load, "sanity check for basic cases");
 232       }
 233 #endif
 234     } else {
 235       is_strict_final_load = field != nullptr && t_oop->is_ptr_to_boxed_value();
 236     }
 237   }
 238 
 239   if (!is_instance && !is_strict_final_load) {
 240     return mchain;
 241   }
 242 
 243   Node* result = mchain;
 244   ProjNode* base_local = nullptr;
 245 
 246   if (is_strict_final_load) {
 247     Node* adr = load->in(MemNode::Address);
 248     assert(phase->type(adr) == t_oop, "inconsistent type");
 249     Node* tmp = try_optimize_strict_final_load_memory(phase, adr, base_local);
 250     if (tmp != nullptr) {
 251       result = tmp;
 252     }
 253   }
 254 
 255   uint instance_id = t_oop->instance_id();
 256   Node* start_mem = phase->C->start()->proj_out_or_null(TypeFunc::Memory);
 257   Node* prev = nullptr;
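       // Walk the memory chain upwards from 'result', skipping projections of calls, initializations,
       // membars, and ClearArrays that provably cannot affect this memory slice, and stop at the start
       // memory, at the allocation/initialization of the instance being loaded from, or at any node
       // that cannot be bypassed.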

 258   while (prev != result) {
 259     prev = result;
 260     if (result == start_mem) {
 261       // start_mem is the earliest memory possible
 262       break;
 263     }
 264 
 265     // skip over a call which does not affect this memory slice
 266     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
 267       Node* proj_in = result->in(0);
 268       if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
 269         // This is the allocation that creates the object from which we are loading
 270         break;
 271       } else if (proj_in->is_Call()) {
 272         // ArrayCopyNodes processed here as well
 273         CallNode* call = proj_in->as_Call();
 274         if (!call->may_modify(t_oop, phase)) {
 275           result = call->in(TypeFunc::Memory);
 276         } else if (is_strict_final_load && base_local != nullptr && !call_can_modify_local_object(field, call)) {
 277           result = call->in(TypeFunc::Memory);
 278         }
 279       } else if (proj_in->is_Initialize()) {
 280         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
 281         // Stop if this is the initialization for the object instance which
 282         // contains this memory slice, otherwise skip over it.
 283         if ((alloc == nullptr) || (alloc->_idx == instance_id)) {
 284           break;
 285         }
 286         if (is_instance) {
 287           result = proj_in->in(TypeFunc::Memory);
 288         } else if (is_strict_final_load) {
 289           Node* klass = alloc->in(AllocateNode::KlassNode);
 290           const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
 291           if (tklass->klass_is_exact() && !tklass->exact_klass()->equals(t_oop->is_instptr()->exact_klass())) {
 292             // Allocation of another type, must be another object
 293             result = proj_in->in(TypeFunc::Memory);
 294           } else if (base_local != nullptr && (base_local->is_Parm() || base_local->in(0) != alloc)) {
 295             // Allocation of another object
 296             result = proj_in->in(TypeFunc::Memory);
 297           }
 298         }
 299       } else if (proj_in->is_MemBar()) {
 300         ArrayCopyNode* ac = nullptr;
 301         if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase, ac)) {
 302           break;
 303         }
 304         result = proj_in->in(TypeFunc::Memory);
 305       } else if (proj_in->is_top()) {
 306         break; // dead code
 307       } else {
 308         assert(false, "unexpected projection");
 309       }
 310     } else if (result->is_ClearArray()) {
 311       if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {
 312         // Can not bypass initialization of the instance
 313         // we are looking for.
 314         break;
 315       }
 316       // Otherwise skip it (the call updated 'result' value).

 329   bool is_instance = t_oop->is_known_instance_field();
 330   PhaseIterGVN *igvn = phase->is_IterGVN();
 331   if (is_instance && igvn != nullptr && result->is_Phi()) {
 332     PhiNode *mphi = result->as_Phi();
 333     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
 334     const TypePtr *t = mphi->adr_type();
 335     bool do_split = false;
 336     // In the following cases, Load memory input can be further optimized based on
 337     // its precise address type
 338     if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ) {
 339       do_split = true;
 340     } else if (t->isa_oopptr() && !t->is_oopptr()->is_known_instance()) {
 341       const TypeOopPtr* mem_t =
 342         t->is_oopptr()->cast_to_exactness(true)
 343         ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
 344         ->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
 345       if (t_oop->isa_aryptr()) {
 346         mem_t = mem_t->is_aryptr()
 347                      ->cast_to_stable(t_oop->is_aryptr()->is_stable())
 348                      ->cast_to_size(t_oop->is_aryptr()->size())
 349                      ->cast_to_not_flat(t_oop->is_aryptr()->is_not_flat())
 350                      ->cast_to_not_null_free(t_oop->is_aryptr()->is_not_null_free())
 351                      ->with_offset(t_oop->is_aryptr()->offset())
 352                      ->is_aryptr();
 353       }
 354       do_split = mem_t == t_oop;
 355     }
 356     if (do_split) {
 357       // clone the Phi with our address type
 358       result = mphi->split_out_instance(t_adr, igvn);
 359     } else {
 360       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
 361     }
 362   }
 363   return result;
 364 }
 365 
 366 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
 367   uint alias_idx = phase->C->get_alias_index(tp);
 368   Node *mem = mmem;
 369 #ifdef ASSERT
 370   {
 371     // Check that current type is consistent with the alias index used during graph construction
 372     assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
 373     bool consistent =  adr_check == nullptr || adr_check->empty() ||
 374                        phase->C->must_alias(adr_check, alias_idx );
 375     // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
 376     if( !consistent && adr_check != nullptr && !adr_check->empty() &&
 377         tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
 378         adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
 379         ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
 380           adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
 381           adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
 382       // don't assert if it is dead code.
 383       consistent = true;
 384     }
 385     if( !consistent ) {
 386       st->print("alias_idx==%d, adr_check==", alias_idx);
 387       if( adr_check == nullptr ) {
 388         st->print("null");
 389       } else {
 390         adr_check->dump();
 391       }
 392       st->cr();
 393       print_alias_types();
 394       assert(consistent, "adr_check must match alias idx");
 395     }
 396   }
 397 #endif

1117     Node* ld = gvn.transform(load);
1118     return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
1119   }
1120 
1121   return load;
1122 }
1123 
1124 //------------------------------hash-------------------------------------------
1125 uint LoadNode::hash() const {
1126   // unroll addition of interesting fields
1127   return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
1128 }
1129 
1130 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
1131   if ((atp != nullptr) && (atp->index() >= Compile::AliasIdxRaw)) {
1132     bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
1133     bool is_stable_ary = FoldStableValues &&
1134                          (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
1135                          tp->isa_aryptr()->is_stable();
1136 
1137     return (eliminate_boxing && non_volatile) || is_stable_ary || tp->is_inlinetypeptr();
1138   }
1139 
1140   return false;
1141 }
1142 
1143 LoadNode* LoadNode::pin_array_access_node() const {
1144   const TypePtr* adr_type = this->adr_type();
1145   if (adr_type != nullptr && adr_type->isa_aryptr()) {
1146     return clone_pinned();
1147   }
1148   return nullptr;
1149 }
1150 
1151 // Is the value loaded previously stored by an arraycopy? If so return
1152 // a load node that reads from the source array so we may be able to
1153 // optimize out the ArrayCopy node later.
1154 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
1155   Node* ld_adr = in(MemNode::Address);
1156   intptr_t ld_off = 0;
1157   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);

1174       assert(ld_alloc != nullptr, "need an alloc");
1175       assert(addp->is_AddP(), "address must be addp");
1176       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1177       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1178       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1179       addp->set_req(AddPNode::Base, src);
1180       addp->set_req(AddPNode::Address, src);
1181     } else {
1182       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1183              ac->as_ArrayCopy()->is_copyof_validated() ||
1184              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1185       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1186       addp->set_req(AddPNode::Base, src);
1187       addp->set_req(AddPNode::Address, src);
1188 
1189       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1190       BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
1191       if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1192 
1193       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1194       uint shift  = ary_t->is_flat() ? ary_t->flat_log_elem_size() : exact_log2(type2aelembytes(ary_elem));
1195 
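           // Rebase the offset from the destination array to the source array by shifting the
           // index by (SrcPos - DestPos) elements. E.g. with SrcPos == 2 and DestPos == 5, a load
           // of destination element 7 becomes a load of source element 4.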
1196       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1197 #ifdef _LP64
1198       diff = phase->transform(new ConvI2LNode(diff));
1199 #endif
1200       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1201 
1202       Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1203       addp->set_req(AddPNode::Offset, offset);
1204     }
1205     addp = phase->transform(addp);
1206 #ifdef ASSERT
1207     const TypePtr* adr_type = phase->type(addp)->is_ptr();
1208     ld->_adr_type = adr_type;
1209 #endif
1210     ld->set_req(MemNode::Address, addp);
1211     ld->set_req(0, ctl);
1212     ld->set_req(MemNode::Memory, mem);
1213     return ld;
1214   }
1215   return nullptr;
1216 }
1217 
1218 static Node* see_through_inline_type(PhaseValues* phase, const MemNode* load, Node* base, int offset) {
1219   if (!load->is_mismatched_access() && base != nullptr && base->is_InlineType() && offset > oopDesc::klass_offset_in_bytes()) {
1220     InlineTypeNode* vt = base->as_InlineType();
1221     Node* value = vt->field_value_by_offset(offset, true);
1222     assert(value != nullptr, "must see some value");
1223     return value;
1224   }
1225 
1226   return nullptr;
1227 }
1228 
1229 //---------------------------can_see_stored_value------------------------------
1230 // This routine exists to make sure this set of tests is done the same
1231 // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
1232 // will change the graph shape in a way which makes memory alive twice at the
1233 // same time (uses the Oracle model of aliasing), then some
1234 // LoadXNode::Identity will fold things back to the equivalence-class model
1235 // of aliasing.
1236 Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
1237   Node* ld_adr = in(MemNode::Address);
1238   intptr_t ld_off = 0;
1239   Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
1240   // Try to see through an InlineTypeNode
1241   // LoadN is special because the input is not compressed
1242   if (Opcode() != Op_LoadN) {
1243     Node* value = see_through_inline_type(phase, this, ld_base, ld_off);
1244     if (value != nullptr) {
1245       return value;
1246     }
1247   }
1248 
1249   Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
1250   const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
1251   Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr;
1252   // This is more general than load from boxing objects.
1253   if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
1254     uint alias_idx = atp->index();
1255     Node* result = nullptr;
1256     Node* current = st;
1257     // Skip through chains of MemBarNodes checking the MergeMems for
1258     // new states for the slice of this load.  Stop once any other
1259     // kind of node is encountered.  Loads from final memory can skip
1260     // through any kind of MemBar but normal loads shouldn't skip
1261     // through MemBarAcquire since that could allow them to move out of
1262     // a synchronized region. It is not safe to step over MemBarCPUOrder,
1263     // because alias info above them may be inaccurate (e.g., due to
1264     // mixed/mismatched unsafe accesses).
1265     bool is_final_mem = !atp->is_rewritable();
1266     while (current->is_Proj()) {
1267       int opc = current->in(0)->Opcode();
1268       if ((is_final_mem && (opc == Op_MemBarAcquire ||

1312         // Same base, same offset.
1313         // Possible improvement for arrays: check index value instead of absolute offset.
1314 
1315         // At this point we have proven something like this setup:
1316         //   B = << base >>
1317         //   L =  LoadQ(AddP(Check/CastPP(B), #Off))
1318         //   S = StoreQ(AddP(             B , #Off), V)
1319         // (Actually, we haven't yet proven the Q's are the same.)
1320         // In other words, we are loading from a casted version of
1321         // the same pointer-and-offset that we stored to.
1322         // Casted version may carry a dependency and it is respected.
1323         // Thus, we are able to replace L by V.
1324       }
1325       // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1326       if (store_Opcode() != st->Opcode()) {
1327         return nullptr;
1328       }
1329       // LoadVector/StoreVector needs additional check to ensure the types match.
1330       if (st->is_StoreVector()) {
1331         const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
1332         const TypeVect* out_vt = is_Load() ? as_LoadVector()->vect_type() : as_StoreVector()->vect_type();
1333         if (in_vt != out_vt) {
1334           return nullptr;
1335         }
1336       }
1337       return st->in(MemNode::ValueIn);
1338     }
1339 
1340     // A load from a freshly-created object always returns zero.
1341     // (This can happen after LoadNode::Ideal resets the load's memory input
1342     // to find_captured_store, which returned InitializeNode::zero_memory.)
1343     if (st->is_Proj() && st->in(0)->is_Allocate() &&
1344         (st->in(0) == ld_alloc) &&
1345         (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1346       // return a zero value for the load's basic type
1347       // (This is one of the few places where a generic PhaseTransform
1348       // can create new nodes.  Think of it as lazily manifesting
1349       // virtually pre-existing constants.)
1350       Node* init_value = ld_alloc->in(AllocateNode::InitValue);
1351       if (init_value != nullptr) {
1352         // TODO 8350865 Is this correct for non-all-zero init values? Don't we need field_value_by_offset?
1353         return init_value;
1354       }
1355       assert(ld_alloc->in(AllocateNode::RawInitValue) == nullptr, "raw init value must be null if no init value is set");
1356       if (memory_type() != T_VOID) {
1357         if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
1358           // If ReduceBulkZeroing is disabled, we need to check that the allocation does not belong to an
1359           // ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
1360           // by the ArrayCopyNode.
1361           return phase->zerocon(memory_type());
1362         }
1363       } else {
1364         // TODO: materialize all-zero vector constant
1365         assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1366       }
1367     }
1368 
1369     // A load from an initialization barrier can match a captured store.
1370     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1371       InitializeNode* init = st->in(0)->as_Initialize();
1372       AllocateNode* alloc = init->allocation();
1373       if ((alloc != nullptr) && (alloc == ld_alloc)) {
1374         // examine a captured store value
1375         st = init->find_captured_store(ld_off, memory_size(), phase);

1996   bool addr_mark = ((phase->type(address)->isa_oopptr() || phase->type(address)->isa_narrowoop()) &&
1997          phase->type(address)->is_ptr()->offset() == oopDesc::mark_offset_in_bytes());
1998 
1999   // Skip up past a SafePoint control.  Cannot do this for Stores because
2000   // pointer stores & cardmarks must stay on the same side of a SafePoint.
2001   if( ctrl != nullptr && ctrl->Opcode() == Op_SafePoint &&
2002       phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw  &&
2003       !addr_mark &&
2004       (depends_only_on_test() || has_unknown_control_dependency())) {
2005     ctrl = ctrl->in(0);
2006     set_req(MemNode::Control,ctrl);
2007     progress = true;
2008   }
2009 
2010   intptr_t ignore = 0;
2011   Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
2012   if (base != nullptr
2013       && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
2014     // Check for useless control edge in some common special cases
2015     if (in(MemNode::Control) != nullptr
2016         && !(phase->type(address)->is_inlinetypeptr() && is_mismatched_access())
2017         && can_remove_control()
2018         && phase->type(base)->higher_equal(TypePtr::NOTNULL)
2019         && all_controls_dominate(base, phase->C->start())) {
2020       // A method-invariant, non-null address (constant or 'this' argument).
2021       set_req(MemNode::Control, nullptr);
2022       progress = true;
2023     }
2024   }
2025 
2026   Node* mem = in(MemNode::Memory);
2027   const TypePtr *addr_t = phase->type(address)->isa_ptr();
2028 
2029   if (can_reshape && (addr_t != nullptr)) {
2030     // try to optimize our memory input
2031     Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase);
2032     if (opt_mem != mem) {
2033       set_req_X(MemNode::Memory, opt_mem, phase);
2034       if (phase->type( opt_mem ) == Type::TOP) return nullptr;
2035       return this;
2036     }

2093   // fold up, do so.
2094   Node* prev_mem = find_previous_store(phase);
2095   if (prev_mem != nullptr) {
2096     Node* value = can_see_arraycopy_value(prev_mem, phase);
2097     if (value != nullptr) {
2098       return value;
2099     }
2100   }
2101   // Steps (a), (b):  Walk past independent stores to find an exact match.
2102   if (prev_mem != nullptr && prev_mem != in(MemNode::Memory)) {
2103     // (c) See if we can fold up on the spot, but don't fold up here.
2104     // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
2105     // just return a prior value, which is done by Identity calls.
2106     if (can_see_stored_value(prev_mem, phase)) {
2107       // Make ready for step (d):
2108       set_req_X(MemNode::Memory, prev_mem, phase);
2109       return this;
2110     }
2111   }
2112 
2113   if (progress) {
2114     return this;
2115   }
2116 
2117   if (!can_reshape) {
2118     phase->record_for_igvn(this);
2119   }
2120   return nullptr;
2121 }
2122 
2123 // Helper to recognize certain Klass fields which are invariant across
2124 // some group of array types (e.g., int[] or all T[] where T < Object).
2125 const Type*
2126 LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
2127                                  ciKlass* klass) const {
2128   assert(!UseCompactObjectHeaders || tkls->offset() != in_bytes(Klass::prototype_header_offset()),
2129          "must not happen");
2130   if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
2131     // The field is Klass::_access_flags.  Return its (constant) value.
2132     // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
2133     assert(Opcode() == Op_LoadUS, "must load an unsigned short from _access_flags");
2134     return TypeInt::make(klass->access_flags());
2135   }
2136   if (tkls->offset() == in_bytes(Klass::misc_flags_offset())) {
2137     // The field is Klass::_misc_flags.  Return its (constant) value.
2138     // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
2139     assert(Opcode() == Op_LoadUB, "must load an unsigned byte from _misc_flags");
2140     return TypeInt::make(klass->misc_flags());

2202       }
2203     }
2204 
2205     // Don't do this for integer types. There is only potential profit if
2206     // the element type t is lower than _type; that is, for int types, if _type is
2207     // more restrictive than t.  This only happens here if one is short and the other
2208     // char (both 16 bits), and in those cases we've made an intentional decision
2209     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
2210     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
2211     //
2212     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
2213     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
2214     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
2215     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
2216     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
2217     // In fact, that could have been the original type of p1, and p1 could have
2218     // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
2219     // expression (LShiftL quux 3) independently optimized to the constant 8.
2220     if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
2221         && (_type->isa_vect() == nullptr)
2222         && !ary->is_flat()
2223         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
2224       // t might actually be lower than _type, if _type is a unique
2225       // concrete subclass of abstract class t.
2226       if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
2227         const Type* jt = t->join_speculative(_type);
2228         // In any case, do not allow the join, per se, to empty out the type.
2229         if (jt->empty() && !t->empty()) {
2230           // This can happen if an interface-typed array narrows to a class type.
2231           jt = _type;
2232         }
2233 #ifdef ASSERT
2234         if (phase->C->eliminate_boxing() && adr->is_AddP()) {
2235           // The pointers in the autobox arrays are always non-null
2236           Node* base = adr->in(AddPNode::Base);
2237           if ((base != nullptr) && base->is_DecodeN()) {
2238             // Get LoadN node which loads IntegerCache.cache field
2239             base = base->in(1);
2240           }
2241           if ((base != nullptr) && base->is_Con()) {
2242             const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
2243             if ((base_type != nullptr) && base_type->is_autobox_cache()) {
2244               // It could be narrow oop
2245               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
2246             }
2247           }
2248         }
2249 #endif
2250         return jt;
2251       }
2252     }
2253   } else if (tp->base() == Type::InstPtr) {
2254     assert( off != Type::OffsetBot ||
2255             // arrays can be cast to Objects
2256             !tp->isa_instptr() ||
2257             tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
2258             // Default value load
2259             tp->is_instptr()->instance_klass() == ciEnv::current()->Class_klass() ||
2260             // unsafe field access may not have a constant offset
2261             C->has_unsafe_access(),
2262             "Field accesses must be precise" );
2263     // For oop loads, we expect the _type to be precise.
2264 

2265     const TypeInstPtr* tinst = tp->is_instptr();
2266     BasicType bt = memory_type();
2267 
2268     // Optimize loads from constant fields.
2269     ciObject* const_oop = tinst->const_oop();
2270     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
2271       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt);
2272       if (con_type != nullptr) {
2273         return con_type;
2274       }
2275     }
2276   } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
2277     assert(off != Type::OffsetBot ||
2278             !tp->isa_instklassptr() ||
2279            // arrays can be cast to Objects
2280            tp->isa_instklassptr()->instance_klass()->is_java_lang_Object() ||
2281            // also allow array-loading from the primary supertype
2282            // array during subtype checks
2283            Opcode() == Op_LoadKlass,
2284            "Field accesses must be precise");
2285     // For klass/static loads, we expect the _type to be precise
2286   } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
2287     /* With mirrors being reached via an indirection in the Klass*
2288      * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
2289      * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
2290      *
2291      * So check the type and klass of the node before the LoadP.

2298         assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2299         assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2300         return TypeInstPtr::make(klass->java_mirror());
2301       }
2302     }
2303   }
2304 
2305   const TypeKlassPtr *tkls = tp->isa_klassptr();
2306   if (tkls != nullptr) {
2307     if (tkls->is_loaded() && tkls->klass_is_exact()) {
2308       ciKlass* klass = tkls->exact_klass();
2309       // We are loading a field from a Klass metaobject whose identity
2310       // is known at compile time (the type is "exact" or "precise").
2311       // Check for fields we know are maintained as constants by the VM.
2312       if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2313         // The field is Klass::_super_check_offset.  Return its (constant) value.
2314         // (Folds up type checking code.)
2315         assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2316         return TypeInt::make(klass->super_check_offset());
2317       }
2318       if (UseCompactObjectHeaders) { // TODO: Should EnableValhalla also take this path ?
2319         if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
2320           // The field is Klass::_prototype_header. Return its (constant) value.
2321           assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
2322           return TypeX::make(klass->prototype_header());
2323         }
2324       }
2325       // Compute index into primary_supers array
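           // E.g. an offset of primary_supers_offset() + 2 * sizeof(Klass*) selects the super
           // type at depth 2.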
2326       juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2327       // Check for overflowing; use unsigned compare to handle the negative case.
2328       if( depth < ciKlass::primary_super_limit() ) {
2329         // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2330         // (Folds up type checking code.)
2331         assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2332         ciKlass *ss = klass->super_of_depth(depth);
2333         return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR;
2334       }
2335       const Type* aift = load_array_final_field(tkls, klass);
2336       if (aift != nullptr)  return aift;
2337     }
2338 

2374         !tkls->is_instklassptr()->might_be_an_array() // not the supertype of all T[] (java.lang.Object) or has an interface that is not Serializable or Cloneable
2375     ) {
2376       assert(Opcode() == Op_LoadI, "must load an int from _layout_helper");
2377       jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
2378       // The key property of this type is that it folds up tests
2379       // for array-ness, since it proves that the layout_helper is positive.
2380       // Thus, a generic value like the basic object layout helper works fine.
2381       return TypeInt::make(min_size, max_jint, Type::WidenMin);
2382     }
2383   }
2384 
2385   bool is_vect = (_type->isa_vect() != nullptr);
2386   if (is_instance && !is_vect) {
2387     // If we have an instance type and our memory input is the
2388     // program's initial memory state, there is no matching store,
2389     // so just return a zero of the appropriate type -
2390     // except if it is vectorized - then we have no zero constant.
2391     Node *mem = in(MemNode::Memory);
2392     if (mem->is_Parm() && mem->in(0)->is_Start()) {
2393       assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2394       // TODO 8350865 This is needed for flat array accesses, somehow the memory of the loads bypasses the intrinsic
2395       // Run TestArrays.test6 in Scenario4, we need more tests for this. TestBasicFunctionality::test20 also needs this.
2396       if (tp->isa_aryptr() && tp->is_aryptr()->is_flat() && !UseFieldFlattening) {
2397         return _type;
2398       }
2399       return Type::get_zero_type(_type->basic_type());
2400     }
2401   }

2402   if (!UseCompactObjectHeaders) {
2403     Node* alloc = is_new_object_mark_load();
2404     if (alloc != nullptr) {
2405       if (EnableValhalla) {
2406         // The mark word may contain property bits (inline, flat, null-free)
2407         Node* klass_node = alloc->in(AllocateNode::KlassNode);
2408         const TypeKlassPtr* tkls = phase->type(klass_node)->isa_klassptr();
2409         if (tkls != nullptr && tkls->is_loaded() && tkls->klass_is_exact()) {
2410           return TypeX::make(tkls->exact_klass()->prototype_header());
2411         }
2412       } else {
2413         return TypeX::make(markWord::prototype().value());
2414       }
2415     }
2416   }
2417 
2418   return _type;
2419 }
2420 
2421 //------------------------------match_edge-------------------------------------
2422 // Do we Match on this edge index or not?  Match only the address.
2423 uint LoadNode::match_edge(uint idx) const {
2424   return idx == MemNode::Address;
2425 }
2426 
2427 //--------------------------LoadBNode::Ideal--------------------------------------
2428 //
2429 //  If the previous store is to the same address as this load,
2430 //  and the value stored was larger than a byte, replace this load
2431 //  with the value stored truncated to a byte.  If no truncation is
2432 //  needed, the replacement is done in LoadNode::Identity().
2433 //
2434 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {

2543     }
2544   }
2545   // Identity call will handle the case where truncation is not needed.
2546   return LoadNode::Ideal(phase, can_reshape);
2547 }
2548 
2549 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2550   Node* mem = in(MemNode::Memory);
2551   Node* value = can_see_stored_value(mem,phase);
2552   if (value != nullptr && value->is_Con() &&
2553       !value->bottom_type()->higher_equal(_type)) {
2554     // If the input to the store does not fit with the load's result type,
2555     // it must be truncated. We can't delay until Ideal call since
2556     // a singleton Value is needed for split_thru_phi optimization.
2557     int con = value->get_int();
2558     return TypeInt::make((con << 16) >> 16);
2559   }
2560   return LoadNode::Value(phase);
2561 }
2562 
2563 Node* LoadNNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2564   // Loading from an InlineType: find the input and make an EncodeP
2565   Node* addr = in(Address);
2566   intptr_t offset;
2567   Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset);
2568   Node* value = see_through_inline_type(phase, this, base, offset);
2569   if (value != nullptr) {
2570     return new EncodePNode(value, type());
2571   }
2572 
2573   return LoadNode::Ideal(phase, can_reshape);
2574 }
2575 
2576 //=============================================================================
2577 //----------------------------LoadKlassNode::make------------------------------
2578 // Polymorphic factory method:
2579 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk, bool fold_for_arrays) {
2580   // sanity check the alias category against the created node type
2581   const TypePtr* adr_type = adr->bottom_type()->isa_ptr();
2582   assert(adr_type != nullptr, "expecting TypeKlassPtr");
2583 #ifdef _LP64
2584   if (adr_type->is_ptr_to_narrowklass()) {
2585     assert(UseCompressedClassPointers, "no compressed klasses");
2586     Node* load_klass = gvn.transform(new LoadNKlassNode(mem, adr, at, tk->make_narrowklass(), MemNode::unordered, fold_for_arrays));
2587     return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2588   }
2589 #endif
2590   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "narrow klass/oop addresses should have been handled above");
2591   return new LoadKlassNode(mem, adr, at, tk, MemNode::unordered, fold_for_arrays);
2592 }
2593 
2594 //------------------------------Value------------------------------------------
2595 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
2596   return klass_value_common(phase, _fold_for_arrays);
2597 }
2598 
2599 const Type* LoadNode::klass_value_common(PhaseGVN* phase, bool fold_for_arrays) const {
2600   // Either input is TOP ==> the result is TOP
2601   const Type *t1 = phase->type( in(MemNode::Memory) );
2602   if (t1 == Type::TOP)  return Type::TOP;
2603   Node *adr = in(MemNode::Address);
2604   const Type *t2 = phase->type( adr );
2605   if (t2 == Type::TOP)  return Type::TOP;
2606   const TypePtr *tp = t2->is_ptr();
2607   if (TypePtr::above_centerline(tp->ptr()) ||
2608       tp->ptr() == TypePtr::Null)  return Type::TOP;
2609 
2610   // Return a more precise klass, if possible
2611   const TypeInstPtr *tinst = tp->isa_instptr();
2612   if (tinst != nullptr) {
2613     ciInstanceKlass* ik = tinst->instance_klass();
2614     int offset = tinst->offset();
2615     if (ik == phase->C->env()->Class_klass()
2616         && (offset == java_lang_Class::klass_offset() ||
2617             offset == java_lang_Class::array_klass_offset())) {
2618       // We are loading a special hidden field from a Class mirror object,
2619       // the field which points to the VM's Klass metaobject.
2620       bool is_null_free_array = false;
2621       ciType* t = tinst->java_mirror_type(&is_null_free_array);
2622       // java_mirror_type returns non-null for compile-time Class constants.
2623       if (t != nullptr) {
2624         // constant oop => constant klass
2625         if (offset == java_lang_Class::array_klass_offset()) {
2626           if (t->is_void()) {
2627             // We cannot create a void array.  Since void is a primitive type, return a null
2628             // klass.  Users of this result need to do a null check on the returned klass.
2629             return TypePtr::NULL_PTR;
2630           }
2631           const TypeKlassPtr* tklass = TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
2632           if (is_null_free_array) {
2633             tklass = tklass->is_aryklassptr()->cast_to_null_free();
2634           }
2635           return tklass;
2636         }
2637         if (!t->is_klass()) {
2638           // a primitive Class (e.g., int.class) has null for a klass field
2639           return TypePtr::NULL_PTR;
2640         }
2641         // Fold up the load of the hidden field
2642         const TypeKlassPtr* tklass = TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
2643         if (is_null_free_array) {
2644           tklass = tklass->is_aryklassptr()->cast_to_null_free();
2645         }
2646         return tklass;
2647       }
2648       // non-constant mirror, so we can't tell what's going on
2649     }
2650     if (!tinst->is_loaded())
2651       return _type;             // Bail out if not loaded
2652     if (offset == oopDesc::klass_offset_in_bytes()) {
2653       return tinst->as_klass_type(true);
2654     }
2655   }
2656 
2657   // Check for loading klass from an array
2658   const TypeAryPtr* tary = tp->isa_aryptr();
2659   if (tary != nullptr && fold_for_arrays &&
2660       tary->offset() == oopDesc::klass_offset_in_bytes()) {
2661     return tary->as_klass_type(true);
2662   }
2663 
2664   // Check for loading klass from an array klass
2665   const TypeKlassPtr *tkls = tp->isa_klassptr();
2666   if (tkls != nullptr && !StressReflectiveCode) {
2667     if (!tkls->is_loaded())
2668      return _type;             // Bail out if not loaded
2669     if (tkls->isa_aryklassptr() && tkls->is_aryklassptr()->elem()->isa_klassptr() &&
2670         tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2671       // // Always returning precise element type is incorrect,
2672       // // e.g., element type could be object and array may contain strings
2673       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2674 
2675       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2676       // according to the element type's subclassing.
2677       return tkls->is_aryklassptr()->elem()->isa_klassptr()->cast_to_exactness(tkls->klass_is_exact());
2678     }
2679     if (tkls->isa_instklassptr() != nullptr && tkls->klass_is_exact() &&

2754           if (tkls->offset() == mirror_field) {
2755             return adr2->in(AddPNode::Base);
2756           }
2757         }
2758       }
2759     }
2760   }
2761 
2762   return this;
2763 }
2764 
2765 LoadNode* LoadNode::clone_pinned() const {
2766   LoadNode* ld = clone()->as_Load();
2767   ld->_control_dependency = UnknownControl;
2768   return ld;
2769 }
2770 
2771 
2772 //------------------------------Value------------------------------------------
2773 const Type* LoadNKlassNode::Value(PhaseGVN* phase) const {
2774   const Type *t = klass_value_common(phase, _fold_for_arrays);
2775   if (t == Type::TOP)
2776     return t;
2777 
2778   return t->make_narrowklass();
2779 }
2780 
2781 //------------------------------Identity---------------------------------------
2782 // To clean up reflective code, simplify k.java_mirror.as_klass to narrow k.
2783 // Also feed through the klass in Allocate(...klass...)._klass.
2784 Node* LoadNKlassNode::Identity(PhaseGVN* phase) {
2785   Node *x = klass_identity_common(phase);
2786 
2787   const Type *t = phase->type( x );
2788   if( t == Type::TOP ) return x;
2789   if( t->isa_narrowklass()) return x;
2790   assert (!t->isa_narrowoop(), "no narrow oop here");
2791 
2792   return phase->transform(new EncodePKlassNode(x, t->make_narrowklass()));
2793 }
2794 

3554   }
3555   ss.print_cr("[TraceMergeStores]: with");
3556   merged_input_value->dump("\n", false, &ss);
3557   merged_store->dump("\n", false, &ss);
3558   tty->print("%s", ss.as_string());
3559 }
3560 #endif
3561 
3562 //------------------------------Ideal------------------------------------------
3563 // Change back-to-back Store(Store(m, p, y), p, x) to Store(m, p, x).
3564 // When a store immediately follows a relevant allocation/initialization,
3565 // try to capture it into the initialization, or hoist it above.
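     // For example, StoreI(StoreI(m, p, y), p, x) folds to StoreI(m, p, x) when the inner store's
     // only use is this store, the addresses match, and the inner store is not wider than this one.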
3566 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3567   Node* p = MemNode::Ideal_common(phase, can_reshape);
3568   if (p)  return (p == NodeSentinel) ? nullptr : p;
3569 
3570   Node* mem     = in(MemNode::Memory);
3571   Node* address = in(MemNode::Address);
3572   Node* value   = in(MemNode::ValueIn);
3573   // Back-to-back stores to same address?  Fold em up.  Generally
3574   // unsafe if I have intervening uses...
3575   if (phase->C->get_adr_type(phase->C->get_alias_index(adr_type())) != TypeAryPtr::INLINES) {
3576     Node* st = mem;
3577     // If Store 'st' has more than one use, we cannot fold 'st' away.
3578     // For example, 'st' might be the final state at a conditional
3579     // return.  Or, 'st' might be used by some node which is live at
3580     // the same time 'st' is live, which might be unschedulable.  So,
3581     // require exactly ONE user until such time as we clone 'mem' for
3582     // each of 'mem's uses (thus making the exactly-1-user-rule hold
3583     // true).
3584     while (st->is_Store() && st->outcnt() == 1) {
3585       // Looking at a dead closed cycle of memory?
3586       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
3587       assert(Opcode() == st->Opcode() ||
3588              st->Opcode() == Op_StoreVector ||
3589              Opcode() == Op_StoreVector ||
3590              st->Opcode() == Op_StoreVectorScatter ||
3591              Opcode() == Op_StoreVectorScatter ||
3592              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
3593              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
3594              (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
3595              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) ||
3596              (st->adr_type()->isa_aryptr() && st->adr_type()->is_aryptr()->is_flat()) || // TODO 8343835
3597              (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
3598              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
3599 
3600       if (st->in(MemNode::Address)->eqv_uncast(address) &&
3601           st->as_Store()->memory_size() <= this->memory_size()) {
3602         Node* use = st->raw_out(0);
3603         if (phase->is_IterGVN()) {
3604           phase->is_IterGVN()->rehash_node_delayed(use);
3605         }
3606         // It's OK to do this in the parser, since DU info is always accurate,
3607         // and the parser always refers to nodes via SafePointNode maps.
3608         use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
3609         return this;
3610       }
3611       st = st->in(MemNode::Memory);
3612     }
3613   }
3614 
3615 
3616   // Capture an unaliased, unconditional, simple store into an initializer.

3714       const StoreVectorNode* store_vector = as_StoreVector();
3715       const StoreVectorNode* mem_vector = mem->as_StoreVector();
3716       const Node* store_indices = store_vector->indices();
3717       const Node* mem_indices = mem_vector->indices();
3718       const Node* store_mask = store_vector->mask();
3719       const Node* mem_mask = mem_vector->mask();
3720       // Ensure types, indices, and masks match
3721       if (store_vector->vect_type() == mem_vector->vect_type() &&
3722           ((store_indices == nullptr) == (mem_indices == nullptr) &&
3723            (store_indices == nullptr || store_indices->eqv_uncast(mem_indices))) &&
3724           ((store_mask == nullptr) == (mem_mask == nullptr) &&
3725            (store_mask == nullptr || store_mask->eqv_uncast(mem_mask)))) {
3726         result = mem;
3727       }
3728     }
3729   }
3730 
3731   // Store of zero anywhere into a freshly-allocated object?
3732   // Then the store is useless.
3733   // (It must already have been captured by the InitializeNode.)
3734   if (result == this && ReduceFieldZeroing) {

3735     // a newly allocated object is already all-zeroes everywhere
3736     if (mem->is_Proj() && mem->in(0)->is_Allocate() &&
3737         (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::InitValue) == val)) {
3738       result = mem;
3739     }
3740 
3741     if (result == this && phase->type(val)->is_zero_type()) {
3742       // the store may also apply to zero-bits in an earlier object
3743       Node* prev_mem = find_previous_store(phase);
3744       // Steps (a), (b):  Walk past independent stores to find an exact match.
3745       if (prev_mem != nullptr) {
3746         Node* prev_val = can_see_stored_value(prev_mem, phase);
3747         if (prev_val != nullptr && prev_val == val) {
3748           // prev_val and val might differ by a cast; it would be good
3749           // to keep the more informative of the two.
3750           result = mem;
3751         }
3752       }
3753     }
3754   }
3755 
3756   PhaseIterGVN* igvn = phase->is_IterGVN();
3757   if (result != this && igvn != nullptr) {
3758     MemBarNode* trailing = trailing_membar();
3759     if (trailing != nullptr) {
3760 #ifdef ASSERT
3761       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();

4225 // Clearing a short array is faster with stores
4226 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
4227   // Already know this is a large node, do not try to ideal it
4228   if (_is_large) return nullptr;
4229 
4230   const int unit = BytesPerLong;
4231   const TypeX* t = phase->type(in(2))->isa_intptr_t();
4232   if (!t)  return nullptr;
4233   if (!t->is_con())  return nullptr;
4234   intptr_t raw_count = t->get_con();
4235   intptr_t size = raw_count;
4236   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
4237   // Clearing nothing uses the Identity call.
4238   // Negative clears are possible on dead ClearArrays
4239   // (see jck test stmt114.stmt11402.val).
4240   if (size <= 0 || size % unit != 0)  return nullptr;
4241   intptr_t count = size / unit;
4242   // Length too long; communicate this to matchers and assemblers.
4243   // Assemblers are responsible for producing fast hardware clears for it.
4244   if (size > InitArrayShortSize) {
4245     return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true);
4246   } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
4247     return nullptr;
4248   }
4249   if (!IdealizeClearArrayNode) return nullptr;
4250   Node *mem = in(1);
4251   if (phase->type(mem) == Type::TOP)  return nullptr;
4252   Node *adr = in(3);
4253   const Type* at = phase->type(adr);
4254   if (at == Type::TOP)  return nullptr;
4255   const TypePtr* atp = at->isa_ptr();
4256   // adjust atp to be the correct array element address type
4257   if (atp == nullptr)  atp = TypePtr::BOTTOM;
4258   else              atp = atp->add_offset(Type::OffsetBot);
4259   // Get base for derived pointer purposes
4260   if (adr->Opcode() != Op_AddP)  Unimplemented();
4261   Node *base = adr->in(1);
4262 
4263   Node *val = in(4);
4264   Node *off  = phase->MakeConX(BytesPerLong);
4265   mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
4266   count--;
4267   while (count--) {
4268     mem = phase->transform(mem);
4269     adr = phase->transform(new AddPNode(base, adr, off));
4270     mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
4271   }
4272   return mem;
4273 }
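
// Illustrative sketch (not part of the VM sources): what the expansion above computes,
// modeled on a plain byte buffer. A small constant clear of 'size' bytes (positive, a
// multiple of 8, and no longer than the short-array threshold) becomes size / 8
// individual 8-byte stores. In the real node the stored value is in(4): usually zero,
// or a raw init pattern for flat arrays. Names are hypothetical.
static void sketch_clear_short_array(char* base, intptr_t size) {
  const int unit = 8;                           // BytesPerLong
  if (size <= 0 || size % unit != 0)  return;   // odd or negative sizes are left alone
  intptr_t count = size / unit;                 // number of 8-byte stores emitted
  for (intptr_t i = 0; i < count; i++) {
    for (int b = 0; b < unit; b++) {
      base[i * unit + b] = 0;                   // models one unordered StoreL of the init value
    }
  }
}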
4274 
4275 //----------------------------step_through----------------------------------
4276 // If this ClearArray covers a different instance, advance *np to its allocation's
4277 // input memory edge and return true; return false if it is the instance we are looking for.
4278 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseValues* phase) {
4279   Node* n = *np;
4280   assert(n->is_ClearArray(), "sanity");
4281   intptr_t offset;
4282   AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
4283   // This method is called only before Allocate nodes are expanded
4284   // during macro expansion. Up to that point, ClearArray nodes are
4285   // generated only in PhaseMacroExpand::generate_arraycopy(), which
4286   // follows allocations.
4287   assert(alloc != nullptr, "should have allocation");
4288   if (alloc->_idx == instance_id) {
4289     // Can not bypass initialization of the instance we are looking for.
4290     return false;
4291   }
4292   // Otherwise skip it.
4293   InitializeNode* init = alloc->initialization();
4294   if (init != nullptr)
4295     *np = init->in(TypeFunc::Memory);
4296   else
4297     *np = alloc->in(TypeFunc::Memory);
4298   return true;
4299 }
4300 
4301 //----------------------------clear_memory-------------------------------------
4302 // Generate code to initialize object storage to zero.
4303 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4304                                    Node* val,
4305                                    Node* raw_val,
4306                                    intptr_t start_offset,
4307                                    Node* end_offset,
4308                                    PhaseGVN* phase) {
4309   intptr_t offset = start_offset;
4310 
4311   int unit = BytesPerLong;
4312   if ((offset % unit) != 0) {
4313     Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
4314     adr = phase->transform(adr);
4315     const TypePtr* atp = TypeRawPtr::BOTTOM;
4316     if (val != nullptr) {
4317       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
4318       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
4319     } else {
4320       assert(raw_val == nullptr, "raw_val must be null when val is null");
4321       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4322     }
4323     mem = phase->transform(mem);
4324     offset += BytesPerInt;
4325   }
4326   assert((offset % unit) == 0, "");
4327 
4328   // Initialize the remaining stuff, if any, with a ClearArray.
4329   return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase);
4330 }
4331 
4332 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4333                                    Node* raw_val,
4334                                    Node* start_offset,
4335                                    Node* end_offset,
4336                                    PhaseGVN* phase) {
4337   if (start_offset == end_offset) {
4338     // nothing to do
4339     return mem;
4340   }
4341 
4342   int unit = BytesPerLong;
4343   Node* zbase = start_offset;
4344   Node* zend  = end_offset;
4345 
4346   // Scale to the unit required by the CPU:
4347   if (!Matcher::init_array_count_is_in_bytes) {
4348     Node* shift = phase->intcon(exact_log2(unit));
4349     zbase = phase->transform(new URShiftXNode(zbase, shift) );
4350     zend  = phase->transform(new URShiftXNode(zend,  shift) );
4351   }
4352 
4353   // Bulk clear double-words
4354   Node* zsize = phase->transform(new SubXNode(zend, zbase) );
4355   Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
4356   if (raw_val == nullptr) {
4357     raw_val = phase->MakeConX(0);
4358   }
4359   mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false);
4360   return phase->transform(mem);
4361 }
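
// Illustrative sketch (not part of the VM sources): the length handed to the ClearArrayNode
// above. When Matcher::init_array_count_is_in_bytes is false, the matcher expects the count
// in double-words, so both offsets are shifted right by exact_log2(BytesPerLong) before the
// subtraction, exactly as the URShiftX nodes do. Offsets are assumed non-negative.
static intptr_t sketch_clear_array_count(intptr_t start_offset, intptr_t end_offset,
                                         bool count_is_in_bytes) {
  if (!count_is_in_bytes) {
    start_offset >>= 3;   // exact_log2(BytesPerLong) == 3
    end_offset   >>= 3;
  }
  return end_offset - start_offset;
}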
4362 
4363 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4364                                    Node* val,
4365                                    Node* raw_val,
4366                                    intptr_t start_offset,
4367                                    intptr_t end_offset,
4368                                    PhaseGVN* phase) {
4369   if (start_offset == end_offset) {
4370     // nothing to do
4371     return mem;
4372   }
4373 
4374   assert((end_offset % BytesPerInt) == 0, "odd end offset");
4375   intptr_t done_offset = end_offset;
4376   if ((done_offset % BytesPerLong) != 0) {
4377     done_offset -= BytesPerInt;
4378   }
4379   if (done_offset > start_offset) {
4380     mem = clear_memory(ctl, mem, dest, val, raw_val,
4381                        start_offset, phase->MakeConX(done_offset), phase);
4382   }
4383   if (done_offset < end_offset) { // emit the final 32-bit store
4384     Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
4385     adr = phase->transform(adr);
4386     const TypePtr* atp = TypeRawPtr::BOTTOM;
4387     if (val != nullptr) {
4388       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
4389       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
4390     } else {
4391       assert(raw_val == nullptr, "raw_val must be null when val is null");
4392       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4393     }
4394     mem = phase->transform(mem);
4395     done_offset += BytesPerInt;
4396   }
4397   assert(done_offset == end_offset, "");
4398   return mem;
4399 }
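
// Illustrative sketch (not part of the VM sources): the offset handling shared by the
// clear_memory() overloads above, written over a plain byte buffer. Both offsets are
// assumed to be multiples of BytesPerInt (4). A leading 32-bit store handles a start that
// is only 4-byte aligned, the 8-byte-aligned middle is cleared in bulk, and a trailing
// 32-bit store handles an end that is not 8-byte aligned. Names are hypothetical.
static void sketch_clear_range(char* dest, intptr_t start, intptr_t end) {
  if (start == end)  return;                        // nothing to do
  if (start % 8 != 0) {                             // leading partial double-word
    for (int b = 0; b < 4; b++)  dest[start + b] = 0;   // models the 32-bit store
    start += 4;
  }
  intptr_t done = end;
  if (done % 8 != 0)  done -= 4;                    // stop the bulk clear on a word boundary
  for (intptr_t off = start; off < done; off++) {
    dest[off] = 0;                                  // models the bulk ClearArrayNode
  }
  if (done < end) {
    for (int b = 0; b < 4; b++)  dest[done + b] = 0;    // trailing 32-bit store
  }
}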
4400 
4401 //=============================================================================
4402 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
4403   : MultiNode(TypeFunc::Parms + (precedent == nullptr? 0: 1)),
4404     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
4405 #ifdef ASSERT
4406   , _pair_idx(0)
4407 #endif
4408 {
4409   init_class_id(Class_MemBar);
4410   Node* top = C->top();
4411   init_req(TypeFunc::I_O,top);
4412   init_req(TypeFunc::FramePtr,top);
4413   init_req(TypeFunc::ReturnAdr,top);

4519       PhaseIterGVN* igvn = phase->is_IterGVN();
4520       remove(igvn);
4521       // Must return either the original node (now dead) or a new node
4522       // (Do not return a top here, since that would break the uniqueness of top.)
4523       return new ConINode(TypeInt::ZERO);
4524     }
4525   }
4526   return progress ? this : nullptr;
4527 }
4528 
4529 //------------------------------Value------------------------------------------
4530 const Type* MemBarNode::Value(PhaseGVN* phase) const {
4531   if (!in(0))  return Type::TOP;
4532   if (phase->type(in(0)) == Type::TOP)
4533     return Type::TOP;
4534   return TypeTuple::MEMBAR;
4535 }
4536 
4537 //------------------------------match------------------------------------------
4538 // Construct projections for memory.
4539 Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) {
4540   switch (proj->_con) {
4541   case TypeFunc::Control:
4542   case TypeFunc::Memory:
4543     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
4544   }
4545   ShouldNotReachHere();
4546   return nullptr;
4547 }
4548 
4549 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4550   trailing->_kind = TrailingStore;
4551   leading->_kind = LeadingStore;
4552 #ifdef ASSERT
4553   trailing->_pair_idx = leading->_idx;
4554   leading->_pair_idx = leading->_idx;
4555 #endif
4556 }
4557 
4558 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4559   trailing->_kind = TrailingLoadStore;

4806   return (req() > RawStores);
4807 }
4808 
4809 void InitializeNode::set_complete(PhaseGVN* phase) {
4810   assert(!is_complete(), "caller responsibility");
4811   _is_complete = Complete;
4812 
4813   // After this node is complete, it contains a bunch of
4814   // raw-memory initializations.  There is no need for
4815   // it to have anything to do with non-raw memory effects.
4816   // Therefore, tell all non-raw users to re-optimize themselves,
4817   // after skipping the memory effects of this initialization.
4818   PhaseIterGVN* igvn = phase->is_IterGVN();
4819   if (igvn)  igvn->add_users_to_worklist(this);
4820 }
4821 
4822 // convenience function
4823 // return false if the init contains any stores already
4824 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
4825   InitializeNode* init = initialization();
4826   if (init == nullptr || init->is_complete()) {
4827     return false;
4828   }
4829   init->remove_extra_zeroes();
4830   // for now, if this allocation has already collected any inits, bail:
4831   if (init->is_non_zero())  return false;
4832   init->set_complete(phase);
4833   return true;
4834 }
4835 
4836 void InitializeNode::remove_extra_zeroes() {
4837   if (req() == RawStores)  return;
4838   Node* zmem = zero_memory();
4839   uint fill = RawStores;
4840   for (uint i = fill; i < req(); i++) {
4841     Node* n = in(i);
4842     if (n->is_top() || n == zmem)  continue;  // skip
4843     if (fill < i)  set_req(fill, n);          // compact
4844     ++fill;
4845   }
4846   // delete any empty spaces created:
4847   while (fill < req()) {
4848     del_req(fill);

4992             // store node that we'd like to capture. We need to check
4993             // the uses of the MergeMemNode.
4994             mems.push(n);
4995           }
4996         } else if (n->is_Mem()) {
4997           Node* other_adr = n->in(MemNode::Address);
4998           if (other_adr == adr) {
4999             failed = true;
5000             break;
5001           } else {
5002             const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
5003             if (other_t_adr != nullptr) {
5004               int other_alias_idx = phase->C->get_alias_index(other_t_adr);
5005               if (other_alias_idx == alias_idx) {
5006                 // A load from the same memory slice as the store right
5007                 // after the InitializeNode. We check the control of the
5008                 // object/array that is loaded from. If it's the same as
5009                 // the store control then we cannot capture the store.
5010                 assert(!n->is_Store(), "2 stores to same slice on same control?");
5011                 Node* base = other_adr;
5012                 if (base->is_Phi()) {
5013                   // In rare cases, base may be a PhiNode that reads the
5014                   // same memory slice between the InitializeNode and the store.
5015                   failed = true;
5016                   break;
5017                 }
5018                 assert(base->is_AddP(), "should be addp but is %s", base->Name());
5019                 base = base->in(AddPNode::Base);
5020                 if (base != nullptr) {
5021                   base = base->uncast();
5022                   if (base->is_Proj() && base->in(0) == alloc) {
5023                     failed = true;
5024                     break;
5025                   }
5026                 }
5027               }
5028             }
5029           }
5030         } else {
5031           failed = true;
5032           break;
5033         }
5034       }
5035     }
5036   }
5037   if (failed) {

5584         //   z's_done      12  16  16  16    12  16    12
5585         //   z's_needed    12  16  16  16    16  16    16
5586         //   zsize          0   0   0   0     4   0     4
5587         if (next_full_store < 0) {
5588           // Conservative tack:  Zero to end of current word.
5589           zeroes_needed = align_up(zeroes_needed, BytesPerInt);
5590         } else {
5591           // Zero to beginning of next fully initialized word.
5592           // Or, don't zero at all, if we are already in that word.
5593           assert(next_full_store >= zeroes_needed, "must go forward");
5594           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
5595           zeroes_needed = next_full_store;
5596         }
5597       }
5598 
5599       if (zeroes_needed > zeroes_done) {
5600         intptr_t zsize = zeroes_needed - zeroes_done;
5601         // Do some incremental zeroing on rawmem, in parallel with inits.
5602         zeroes_done = align_down(zeroes_done, BytesPerInt);
5603         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5604                                               allocation()->in(AllocateNode::InitValue),
5605                                               allocation()->in(AllocateNode::RawInitValue),
5606                                               zeroes_done, zeroes_needed,
5607                                               phase);
5608         zeroes_done = zeroes_needed;
5609         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
5610           do_zeroing = false;   // leave the hole, next time
5611       }
5612     }
5613 
5614     // Collect the store and move on:
5615     phase->replace_input_of(st, MemNode::Memory, inits);
5616     inits = st;                 // put it on the linearized chain
5617     set_req(i, zmem);           // unhook from previous position
5618 
5619     if (zeroes_done == st_off)
5620       zeroes_done = next_init_off;
5621 
5622     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
5623 
5624     #ifdef ASSERT
5625     // Various order invariants.  Weaker than stores_are_sane because

5645   remove_extra_zeroes();        // clear out all the zmems left over
5646   add_req(inits);
5647 
5648   if (!(UseTLAB && ZeroTLAB)) {
5649     // If anything remains to be zeroed, zero it all now.
5650     zeroes_done = align_down(zeroes_done, BytesPerInt);
5651     // if it is the last unused 4 bytes of an instance, forget about it
5652     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
5653     if (zeroes_done + BytesPerLong >= size_limit) {
5654       AllocateNode* alloc = allocation();
5655       assert(alloc != nullptr, "must be present");
5656       if (alloc != nullptr && alloc->Opcode() == Op_Allocate) {
5657         Node* klass_node = alloc->in(AllocateNode::KlassNode);
5658         ciKlass* k = phase->type(klass_node)->is_instklassptr()->instance_klass();
5659         if (zeroes_done == k->layout_helper())
5660           zeroes_done = size_limit;
5661       }
5662     }
5663     if (zeroes_done < size_limit) {
5664       rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5665                                             allocation()->in(AllocateNode::InitValue),
5666                                             allocation()->in(AllocateNode::RawInitValue),
5667                                             zeroes_done, size_in_bytes, phase);
5668     }
5669   }
5670 
5671   set_complete(phase);
5672   return rawmem;
5673 }
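
// Illustrative sketch (not part of the VM sources): the tail-zeroing decision at the end
// of complete_stores(), as plain arithmetic. 'instance_layout_size' stands in for the
// instance klass' layout_helper(); when the zeroed prefix already reaches it and only the
// object's last (unused) 4 bytes would remain, no final clear is emitted. Names are
// hypothetical simplifications of the real checks.
static intptr_t sketch_tail_to_clear(intptr_t zeroes_done, intptr_t size_limit,
                                     intptr_t instance_layout_size) {
  zeroes_done &= ~(intptr_t)3;                 // align_down(zeroes_done, BytesPerInt)
  if (zeroes_done + 8 >= size_limit &&         // within one long word of the end
      zeroes_done == instance_layout_size) {   // everything real is already initialized
    return 0;                                  // forget about the trailing padding bytes
  }
  return (zeroes_done < size_limit) ? size_limit - zeroes_done : 0;
}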
5674 
5675 
5676 #ifdef ASSERT
5677 bool InitializeNode::stores_are_sane(PhaseValues* phase) {
5678   if (is_complete())
5679     return true;                // stores could be anything at this point
5680   assert(allocation() != nullptr, "must be present");
5681   intptr_t last_off = allocation()->minimum_header_size();
5682   for (uint i = InitializeNode::RawStores; i < req(); i++) {
5683     Node* st = in(i);
5684     intptr_t st_off = get_store_offset(st, phase);
5685     if (st_off < 0)  continue;  // ignore dead garbage
5686     if (last_off > st_off) {