src/hotspot/share/opto/memnode.cpp

   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "classfile/javaClasses.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "gc/shared/tlab_globals.hpp"
  31 #include "memory/allocation.inline.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "oops/objArrayKlass.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/arraycopynode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/connode.hpp"
  39 #include "opto/convertnode.hpp"
  40 #include "opto/loopnode.hpp"
  41 #include "opto/machnode.hpp"
  42 #include "opto/matcher.hpp"
  43 #include "opto/memnode.hpp"
  44 #include "opto/mempointer.hpp"
  45 #include "opto/mulnode.hpp"
  46 #include "opto/narrowptrnode.hpp"
  47 #include "opto/phaseX.hpp"
  48 #include "opto/regalloc.hpp"
  49 #include "opto/regmask.hpp"
  50 #include "opto/rootnode.hpp"
  51 #include "opto/traceMergeStoresTag.hpp"
  52 #include "opto/vectornode.hpp"
  53 #include "utilities/align.hpp"
  54 #include "utilities/copy.hpp"
  55 #include "utilities/macros.hpp"
  56 #include "utilities/powerOfTwo.hpp"
  57 #include "utilities/vmError.hpp"
  58 
  59 // Portions of code courtesy of Clifford Click

 123       st->print(", idx=Bot;");
 124     else if (atp->index() == Compile::AliasIdxTop)
 125       st->print(", idx=Top;");
 126     else if (atp->index() == Compile::AliasIdxRaw)
 127       st->print(", idx=Raw;");
 128     else {
 129       ciField* field = atp->field();
 130       if (field) {
 131         st->print(", name=");
 132         field->print_name_on(st);
 133       }
 134       st->print(", idx=%d;", atp->index());
 135     }
 136   }
 137 }
 138 
 139 extern void print_alias_types();
 140 
 141 #endif
 142 
 143 Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) {
 144   assert((t_oop != nullptr), "sanity");
 145   bool is_instance = t_oop->is_known_instance_field();
 146   bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() &&
 147                              (load != nullptr) && load->is_Load() &&
 148                              (phase->is_IterGVN() != nullptr);
 149   if (!(is_instance || is_boxed_value_load))
 150     return mchain;  // don't try to optimize non-instance types
 151   uint instance_id = t_oop->instance_id();
 152   Node *start_mem = phase->C->start()->proj_out_or_null(TypeFunc::Memory);
 153   Node *prev = nullptr;
 154   Node *result = mchain;
 155   while (prev != result) {
 156     prev = result;
 157     if (result == start_mem)
 158       break;  // hit one of our sentinels
 159     // skip over a call which does not affect this memory slice
 160     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
 161       Node *proj_in = result->in(0);
 162       if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
 163         break;  // hit one of our sentinels
 164       } else if (proj_in->is_Call()) {
 165         // ArrayCopyNodes processed here as well
 166         CallNode *call = proj_in->as_Call();
 167         if (!call->may_modify(t_oop, phase)) { // returns false for instances
 168           result = call->in(TypeFunc::Memory);
 169         }
 170       } else if (proj_in->is_Initialize()) {
 171         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
 172         // Stop if this is the initialization for the object instance which
 173         // contains this memory slice, otherwise skip over it.
 174         if ((alloc == nullptr) || (alloc->_idx == instance_id)) {
 175           break;
 176         }
 177         if (is_instance) {
 178           result = proj_in->in(TypeFunc::Memory);
 179         } else if (is_boxed_value_load) {
 180           Node* klass = alloc->in(AllocateNode::KlassNode);
 181           const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
 182           if (tklass->klass_is_exact() && !tklass->exact_klass()->equals(t_oop->is_instptr()->exact_klass())) {
 183             result = proj_in->in(TypeFunc::Memory); // not related allocation
 184           }
 185         }
 186       } else if (proj_in->is_MemBar()) {
 187         ArrayCopyNode* ac = nullptr;
 188         if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase, ac)) {
 189           break;
 190         }
 191         result = proj_in->in(TypeFunc::Memory);
 192       } else if (proj_in->is_top()) {
 193         break; // dead code
 194       } else {
 195         assert(false, "unexpected projection");
 196       }
 197     } else if (result->is_ClearArray()) {
 198       if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {
 199         // Cannot bypass the initialization of the instance
 200         // we are looking for.
 201         break;
 202       }
 203       // Otherwise skip it (the call updated 'result' value).

 216   bool is_instance = t_oop->is_known_instance_field();
 217   PhaseIterGVN *igvn = phase->is_IterGVN();
 218   if (is_instance && igvn != nullptr && result->is_Phi()) {
 219     PhiNode *mphi = result->as_Phi();
 220     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
 221     const TypePtr *t = mphi->adr_type();
 222     bool do_split = false;
 223     // In the following cases, the Load's memory input can be further optimized based on
 224     // its precise address type
 225     if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ) {
 226       do_split = true;
 227     } else if (t->isa_oopptr() && !t->is_oopptr()->is_known_instance()) {
 228       const TypeOopPtr* mem_t =
 229         t->is_oopptr()->cast_to_exactness(true)
 230         ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
 231         ->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
 232       if (t_oop->isa_aryptr()) {
 233         mem_t = mem_t->is_aryptr()
 234                      ->cast_to_stable(t_oop->is_aryptr()->is_stable())
 235                      ->cast_to_size(t_oop->is_aryptr()->size())
 236                      ->with_offset(t_oop->is_aryptr()->offset())
 237                      ->is_aryptr();
 238       }
 239       do_split = mem_t == t_oop;
 240     }
 241     if (do_split) {
 242       // clone the Phi with our address type
 243       result = mphi->split_out_instance(t_adr, igvn);
 244     } else {
 245       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
 246     }
 247   }
 248   return result;
 249 }
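        // Illustrative sketch (an assumed shape, not from this file): for a
        // load from known instance id 42, the walk above can shorten a chain
        //   Proj#Memory(Call that cannot modify iid 42)
        //     -> Proj#Memory(Initialize of some other allocation)
        //       -> Proj#Memory(Allocate iid 42)
        // down to the allocation's own memory projection, so the load sees its
        // defining store (or a zero from the fresh allocation) directly.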
 250 
 251 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
 252   uint alias_idx = phase->C->get_alias_index(tp);
 253   Node *mem = mmem;
 254 #ifdef ASSERT
 255   {
 256     // Check that current type is consistent with the alias index used during graph construction
 257     assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
 258     bool consistent =  adr_check == nullptr || adr_check->empty() ||
 259                        phase->C->must_alias(adr_check, alias_idx );
 260     // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
 261     if( !consistent && adr_check != nullptr && !adr_check->empty() &&
 262                tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
 263         adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
 264         ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
 265           adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
 266           adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
 267       // don't assert if it is dead code.
 268       consistent = true;
 269     }
 270     if( !consistent ) {
 271       st->print("alias_idx==%d, adr_check==", alias_idx);
 272       if( adr_check == nullptr ) {
 273         st->print("null");
 274       } else {
 275         adr_check->dump();
 276       }
 277       st->cr();
 278       print_alias_types();
 279       assert(consistent, "adr_check must match alias idx");
 280     }
 281   }
 282 #endif

1002     Node* ld = gvn.transform(load);
1003     return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
1004   }
1005 
1006   return load;
1007 }
1008 
1009 //------------------------------hash-------------------------------------------
1010 uint LoadNode::hash() const {
1011   // unroll addition of interesting fields
1012   return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
1013 }
1014 
1015 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
1016   if ((atp != nullptr) && (atp->index() >= Compile::AliasIdxRaw)) {
1017     bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
1018     bool is_stable_ary = FoldStableValues &&
1019                          (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
1020                          tp->isa_aryptr()->is_stable();
1021 
1022     return (eliminate_boxing && non_volatile) || is_stable_ary;
1023   }
1024 
1025   return false;
1026 }
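        // Hypothetical examples of the predicate above: a load of a
        // non-volatile field of a to-be-eliminated box (with eliminate_boxing
        // on), or a load from a @Stable array (with FoldStableValues on), may
        // skip membar chains when searching for the stored value; a volatile
        // field load never qualifies.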
1027 
1028 LoadNode* LoadNode::pin_array_access_node() const {
1029   const TypePtr* adr_type = this->adr_type();
1030   if (adr_type != nullptr && adr_type->isa_aryptr()) {
1031     return clone_pinned();
1032   }
1033   return nullptr;
1034 }
1035 
 1036 // Is the value loaded previously stored by an arraycopy? If so, return
1037 // a load node that reads from the source array so we may be able to
1038 // optimize out the ArrayCopy node later.
1039 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
1040   Node* ld_adr = in(MemNode::Address);
1041   intptr_t ld_off = 0;
1042   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);

1058     if (ac->as_ArrayCopy()->is_clonebasic()) {
1059       assert(ld_alloc != nullptr, "need an alloc");
1060       assert(addp->is_AddP(), "address must be addp");
1061       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1062       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1063       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1064       addp->set_req(AddPNode::Base, src);
1065       addp->set_req(AddPNode::Address, src);
1066     } else {
1067       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1068              ac->as_ArrayCopy()->is_copyof_validated() ||
1069              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1070       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1071       addp->set_req(AddPNode::Base, src);
1072       addp->set_req(AddPNode::Address, src);
1073 
1074       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
 1075       BasicType ary_elem = ary_t->elem()->array_element_basic_type();
1076       if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1077 
1078       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1079       uint shift  = exact_log2(type2aelembytes(ary_elem));
1080 
1081       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1082 #ifdef _LP64
1083       diff = phase->transform(new ConvI2LNode(diff));
1084 #endif
1085       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1086 
1087       Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1088       addp->set_req(AddPNode::Offset, offset);
1089     }
1090     addp = phase->transform(addp);
1091 #ifdef ASSERT
1092     const TypePtr* adr_type = phase->type(addp)->is_ptr();
1093     ld->_adr_type = adr_type;
1094 #endif
1095     ld->set_req(MemNode::Address, addp);
1096     ld->set_req(0, ctl);
1097     ld->set_req(MemNode::Memory, mem);
1098     return ld;
1099   }
1100   return nullptr;
1101 }
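        // Worked example (illustrative numbers): for an int[] arraycopy with
        // SrcPos = 2, DestPos = 5 and an assumed 16-byte array header,
        // shift = exact_log2(4) = 2 and
        //   diff = (2 - 5) << 2 = -12 bytes,
        // so a load at dest offset 16 + 5*4 = 36 is redirected to source
        // offset 36 - 12 = 24, i.e. src[2], the element that was copied there.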
1102 
1103 
1104 //---------------------------can_see_stored_value------------------------------
1105 // This routine exists to make sure this set of tests is done the same
1106 // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
1107 // will change the graph shape in a way which makes memory alive twice at the
1108 // same time (uses the Oracle model of aliasing), then some
1109 // LoadXNode::Identity will fold things back to the equivalence-class model
1110 // of aliasing.
1111 Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
1112   Node* ld_adr = in(MemNode::Address);
1113   intptr_t ld_off = 0;
1114   Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
1115   Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
1116   const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
1117   Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr;
1118   // This is more general than load from boxing objects.
1119   if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
1120     uint alias_idx = atp->index();
1121     Node* result = nullptr;
1122     Node* current = st;
1123     // Skip through chains of MemBarNodes checking the MergeMems for
1124     // new states for the slice of this load.  Stop once any other
1125     // kind of node is encountered.  Loads from final memory can skip
1126     // through any kind of MemBar but normal loads shouldn't skip
 1127     // through MemBarAcquire since that could allow them to move out of
1128     // a synchronized region. It is not safe to step over MemBarCPUOrder,
1129     // because alias info above them may be inaccurate (e.g., due to
1130     // mixed/mismatched unsafe accesses).
1131     bool is_final_mem = !atp->is_rewritable();
1132     while (current->is_Proj()) {
1133       int opc = current->in(0)->Opcode();
1134       if ((is_final_mem && (opc == Op_MemBarAcquire ||

1178         // Same base, same offset.
1179         // Possible improvement for arrays: check index value instead of absolute offset.
1180 
1181         // At this point we have proven something like this setup:
1182         //   B = << base >>
1183         //   L =  LoadQ(AddP(Check/CastPP(B), #Off))
1184         //   S = StoreQ(AddP(             B , #Off), V)
1185         // (Actually, we haven't yet proven the Q's are the same.)
1186         // In other words, we are loading from a casted version of
1187         // the same pointer-and-offset that we stored to.
1188         // Casted version may carry a dependency and it is respected.
1189         // Thus, we are able to replace L by V.
1190       }
1191       // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1192       if (store_Opcode() != st->Opcode()) {
1193         return nullptr;
1194       }
1195       // LoadVector/StoreVector needs additional check to ensure the types match.
1196       if (st->is_StoreVector()) {
1197         const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
1198         const TypeVect* out_vt = as_LoadVector()->vect_type();
1199         if (in_vt != out_vt) {
1200           return nullptr;
1201         }
1202       }
1203       return st->in(MemNode::ValueIn);
1204     }
1205 
1206     // A load from a freshly-created object always returns zero.
1207     // (This can happen after LoadNode::Ideal resets the load's memory input
1208     // to find_captured_store, which returned InitializeNode::zero_memory.)
1209     if (st->is_Proj() && st->in(0)->is_Allocate() &&
1210         (st->in(0) == ld_alloc) &&
1211         (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1212       // return a zero value for the load's basic type
1213       // (This is one of the few places where a generic PhaseTransform
1214       // can create new nodes.  Think of it as lazily manifesting
1215       // virtually pre-existing constants.)
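      // Illustrative case: in "int[] a = new int[n]; return a[0];" the load
      // of a[0] reaches the Allocate projection with no captured store at
      // that offset, so it folds to the zero constant of its basic type below.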
1216       if (value_basic_type() != T_VOID) {
1217         if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
 1218           // If ReduceBulkZeroing is disabled, we need to check that the allocation does not belong to an
 1219           // ArrayCopyNode clone; if it does, we cannot assume zero since the initialization is done
 1220           // by the ArrayCopyNode.
1221           return phase->zerocon(value_basic_type());
1222         }
1223       } else {
1224         // TODO: materialize all-zero vector constant
1225         assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1226       }
1227     }
1228 
1229     // A load from an initialization barrier can match a captured store.
1230     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1231       InitializeNode* init = st->in(0)->as_Initialize();
1232       AllocateNode* alloc = init->allocation();
1233       if ((alloc != nullptr) && (alloc == ld_alloc)) {
1234         // examine a captured store value
1235         st = init->find_captured_store(ld_off, memory_size(), phase);

1856   bool addr_mark = ((phase->type(address)->isa_oopptr() || phase->type(address)->isa_narrowoop()) &&
1857          phase->type(address)->is_ptr()->offset() == oopDesc::mark_offset_in_bytes());
1858 
1859   // Skip up past a SafePoint control.  Cannot do this for Stores because
1860   // pointer stores & cardmarks must stay on the same side of a SafePoint.
1861   if( ctrl != nullptr && ctrl->Opcode() == Op_SafePoint &&
1862       phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw  &&
1863       !addr_mark &&
1864       (depends_only_on_test() || has_unknown_control_dependency())) {
1865     ctrl = ctrl->in(0);
1866     set_req(MemNode::Control,ctrl);
1867     progress = true;
1868   }
1869 
1870   intptr_t ignore = 0;
1871   Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
1872   if (base != nullptr
1873       && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
1874     // Check for useless control edge in some common special cases
1875     if (in(MemNode::Control) != nullptr
1876         && can_remove_control()
1877         && phase->type(base)->higher_equal(TypePtr::NOTNULL)
1878         && all_controls_dominate(base, phase->C->start())) {
1879       // A method-invariant, non-null address (constant or 'this' argument).
1880       set_req(MemNode::Control, nullptr);
1881       progress = true;
1882     }
1883   }
1884 
1885   Node* mem = in(MemNode::Memory);
1886   const TypePtr *addr_t = phase->type(address)->isa_ptr();
1887 
1888   if (can_reshape && (addr_t != nullptr)) {
1889     // try to optimize our memory input
1890     Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase);
1891     if (opt_mem != mem) {
1892       set_req_X(MemNode::Memory, opt_mem, phase);
1893       if (phase->type( opt_mem ) == Type::TOP) return nullptr;
1894       return this;
1895     }

1952   // fold up, do so.
1953   Node* prev_mem = find_previous_store(phase);
1954   if (prev_mem != nullptr) {
1955     Node* value = can_see_arraycopy_value(prev_mem, phase);
1956     if (value != nullptr) {
1957       return value;
1958     }
1959   }
1960   // Steps (a), (b):  Walk past independent stores to find an exact match.
1961   if (prev_mem != nullptr && prev_mem != in(MemNode::Memory)) {
1962     // (c) See if we can fold up on the spot, but don't fold up here.
1963     // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
1964     // just return a prior value, which is done by Identity calls.
1965     if (can_see_stored_value(prev_mem, phase)) {
1966       // Make ready for step (d):
1967       set_req_X(MemNode::Memory, prev_mem, phase);
1968       return this;
1969     }
1970   }
1971 
1972   return progress ? this : nullptr;
1973 }
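        // To recap the steps referenced above: (a)/(b) walk past independent
        // stores via find_previous_store(), (c) checks whether the prior
        // value is visible to this load, and (d) rewires the memory input so
        // that a subsequent Identity call can complete the fold (with
        // LoadB/LoadS/LoadUS truncation where needed).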
1974 
1975 // Helper to recognize certain Klass fields which are invariant across
1976 // some group of array types (e.g., int[] or all T[] where T < Object).
1977 const Type*
1978 LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
1979                                  ciKlass* klass) const {
1980   assert(!UseCompactObjectHeaders || tkls->offset() != in_bytes(Klass::prototype_header_offset()),
1981          "must not happen");
1982   if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
1983     // The field is Klass::_access_flags.  Return its (constant) value.
1984     assert(Opcode() == Op_LoadUS, "must load an unsigned short from _access_flags");
1985     return TypeInt::make(klass->access_flags());
1986   }
1987   if (tkls->offset() == in_bytes(Klass::misc_flags_offset())) {
1988     // The field is Klass::_misc_flags.  Return its (constant) value.
1989     assert(Opcode() == Op_LoadUB, "must load an unsigned byte from _misc_flags");
1990     return TypeInt::make(klass->misc_flags());
1991   }
1992   if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {

2052       }
2053     }
2054 
2055     // Don't do this for integer types. There is only potential profit if
2056     // the element type t is lower than _type; that is, for int types, if _type is
2057     // more restrictive than t.  This only happens here if one is short and the other
2058     // char (both 16 bits), and in those cases we've made an intentional decision
2059     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
2060     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
2061     //
2062     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
2063     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
2064     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
2065     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
2066     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
2067     // In fact, that could have been the original type of p1, and p1 could have
2068     // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
2069     // expression (LShiftL quux 3) independently optimized to the constant 8.
2070     if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
2071         && (_type->isa_vect() == nullptr)
2072         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
2073       // t might actually be lower than _type, if _type is a unique
2074       // concrete subclass of abstract class t.
2075       if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
2076         const Type* jt = t->join_speculative(_type);
2077         // In any case, do not allow the join, per se, to empty out the type.
2078         if (jt->empty() && !t->empty()) {
 2079           // This can happen if an interface-typed array narrows to a class type.
2080           jt = _type;
2081         }
2082 #ifdef ASSERT
2083         if (phase->C->eliminate_boxing() && adr->is_AddP()) {
2084           // The pointers in the autobox arrays are always non-null
2085           Node* base = adr->in(AddPNode::Base);
2086           if ((base != nullptr) && base->is_DecodeN()) {
2087             // Get LoadN node which loads IntegerCache.cache field
2088             base = base->in(1);
2089           }
2090           if ((base != nullptr) && base->is_Con()) {
2091             const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
2092             if ((base_type != nullptr) && base_type->is_autobox_cache()) {
2093               // It could be narrow oop
2094               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
2095             }
2096           }
2097         }
2098 #endif
2099         return jt;
2100       }
2101     }
2102   } else if (tp->base() == Type::InstPtr) {
2103     assert( off != Type::OffsetBot ||
2104             // arrays can be cast to Objects
2105             !tp->isa_instptr() ||
2106             tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
2107             // unsafe field access may not have a constant offset
2108             C->has_unsafe_access(),
2109             "Field accesses must be precise" );
2110     // For oop loads, we expect the _type to be precise.
2111 
2112     // Optimize loads from constant fields.
2113     const TypeInstPtr* tinst = tp->is_instptr();
2114     ciObject* const_oop = tinst->const_oop();
2115     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
2116       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), value_basic_type());
2117       if (con_type != nullptr) {
2118         return con_type;
2119       }
2120     }
2121   } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
2122     assert(off != Type::OffsetBot ||
2123             !tp->isa_instklassptr() ||
2124            // arrays can be cast to Objects
2125            tp->isa_instklassptr()->instance_klass()->is_java_lang_Object() ||
2126            // also allow array-loading from the primary supertype
2127            // array during subtype checks
2128            Opcode() == Op_LoadKlass,
2129            "Field accesses must be precise");
2130     // For klass/static loads, we expect the _type to be precise
2131   } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
 2132     /* With mirrors being an indirection in the Klass*,
 2133      * the VM now uses two loads: LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset)).
 2134      * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
2135      *
2136      * So check the type and klass of the node before the LoadP.

2143         assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2144         assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2145         return TypeInstPtr::make(klass->java_mirror());
2146       }
2147     }
2148   }
2149 
2150   const TypeKlassPtr *tkls = tp->isa_klassptr();
2151   if (tkls != nullptr) {
2152     if (tkls->is_loaded() && tkls->klass_is_exact()) {
2153       ciKlass* klass = tkls->exact_klass();
2154       // We are loading a field from a Klass metaobject whose identity
2155       // is known at compile time (the type is "exact" or "precise").
2156       // Check for fields we know are maintained as constants by the VM.
2157       if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2158         // The field is Klass::_super_check_offset.  Return its (constant) value.
2159         // (Folds up type checking code.)
2160         assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2161         return TypeInt::make(klass->super_check_offset());
2162       }
2163       if (UseCompactObjectHeaders) {
2164         if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
2165           // The field is Klass::_prototype_header. Return its (constant) value.
2166           assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
2167           return TypeX::make(klass->prototype_header());
2168         }
2169       }
2170       // Compute index into primary_supers array
2171       juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
 2172       // Check for overflow; use an unsigned compare to handle the negative case.
2173       if( depth < ciKlass::primary_super_limit() ) {
2174         // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2175         // (Folds up type checking code.)
2176         assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2177         ciKlass *ss = klass->super_of_depth(depth);
2178         return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR;
2179       }
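          // Illustrative example: java.lang.Integer's primary super chain is
          // Object (depth 0), Number (depth 1), Integer (depth 2), so a
          // LoadKlass at the offset of _primary_supers[1] on Integer's Klass
          // folds to the constant klass for java.lang.Number.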
2180       const Type* aift = load_array_final_field(tkls, klass);
2181       if (aift != nullptr)  return aift;
2182     }
2183 

2219         !tkls->is_instklassptr()->might_be_an_array() // not the supertype of all T[] (java.lang.Object) or has an interface that is not Serializable or Cloneable
2220     ) {
2221       assert(Opcode() == Op_LoadI, "must load an int from _layout_helper");
2222       jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
2223       // The key property of this type is that it folds up tests
2224       // for array-ness, since it proves that the layout_helper is positive.
2225       // Thus, a generic value like the basic object layout helper works fine.
2226       return TypeInt::make(min_size, max_jint, Type::WidenMin);
2227     }
2228   }
2229 
2230   bool is_vect = (_type->isa_vect() != nullptr);
2231   if (is_instance && !is_vect) {
2232     // If we have an instance type and our memory input is the
 2233     // program's initial memory state, there is no matching store,
 2234     // so just return a zero of the appropriate type,
 2235     // unless it is vectorized, in which case we have no zero constant.
2236     Node *mem = in(MemNode::Memory);
2237     if (mem->is_Parm() && mem->in(0)->is_Start()) {
2238       assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2239       return Type::get_zero_type(_type->basic_type());
2240     }
2241   }
2242 
2243   if (!UseCompactObjectHeaders) {
2244     Node* alloc = is_new_object_mark_load();
2245     if (alloc != nullptr) {
2246       return TypeX::make(markWord::prototype().value());
2247     }
2248   }
2249 
2250   return _type;
2251 }
2252 
2253 //------------------------------match_edge-------------------------------------
2254 // Do we Match on this edge index or not?  Match only the address.
2255 uint LoadNode::match_edge(uint idx) const {
2256   return idx == MemNode::Address;
2257 }
2258 
2259 //--------------------------LoadBNode::Ideal--------------------------------------
2260 //
2261 //  If the previous store is to the same address as this load,
2262 //  and the value stored was larger than a byte, replace this load
2263 //  with the value stored truncated to a byte.  If no truncation is
2264 //  needed, the replacement is done in LoadNode::Identity().
2265 //
2266 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {

2375     }
2376   }
2377   // Identity call will handle the case where truncation is not needed.
2378   return LoadNode::Ideal(phase, can_reshape);
2379 }
2380 
2381 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2382   Node* mem = in(MemNode::Memory);
2383   Node* value = can_see_stored_value(mem,phase);
2384   if (value != nullptr && value->is_Con() &&
2385       !value->bottom_type()->higher_equal(_type)) {
2386     // If the input to the store does not fit with the load's result type,
 2387     // it must be truncated. We can't delay this until the Ideal call since
2388     // a singleton Value is needed for split_thru_phi optimization.
2389     int con = value->get_int();
2390     return TypeInt::make((con << 16) >> 16);
2391   }
2392   return LoadNode::Value(phase);
2393 }
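        // Worked example (illustrative constant): if the store wrote 0x1ABCD,
        // the LoadS folds to (0x1ABCD << 16) >> 16 = 0xFFFFABCD = -21555,
        // i.e. the low 16 bits sign-extended to an int.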
2394 
2395 //=============================================================================
2396 //----------------------------LoadKlassNode::make------------------------------
2397 // Polymorphic factory method:
2398 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
2399   // sanity check the alias category against the created node type
2400   const TypePtr* adr_type = adr->bottom_type()->isa_ptr();
2401   assert(adr_type != nullptr, "expecting TypeKlassPtr");
2402 #ifdef _LP64
2403   if (adr_type->is_ptr_to_narrowklass()) {
2404     assert(UseCompressedClassPointers, "no compressed klasses");
2405     Node* load_klass = gvn.transform(new LoadNKlassNode(mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2406     return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2407   }
2408 #endif
2409   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2410   return new LoadKlassNode(mem, adr, at, tk, MemNode::unordered);
2411 }
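        // Usage sketch: under UseCompressedClassPointers the factory returns
        // DecodeNKlass(LoadNKlass(...)); otherwise a plain LoadKlassNode. In
        // both cases callers receive a node whose type is a wide klass pointer.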
2412 
2413 //------------------------------Value------------------------------------------
2414 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {

2448           }
2449           return TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
2450         }
2451         if (!t->is_klass()) {
2452           // a primitive Class (e.g., int.class) has null for a klass field
2453           return TypePtr::NULL_PTR;
2454         }
2455         // Fold up the load of the hidden field
2456         return TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
2457       }
2458       // non-constant mirror, so we can't tell what's going on
2459     }
2460     if (!tinst->is_loaded())
2461       return _type;             // Bail out if not loaded
2462     if (offset == oopDesc::klass_offset_in_bytes()) {
2463       return tinst->as_klass_type(true);
2464     }
2465   }
2466 
2467   // Check for loading klass from an array
2468   const TypeAryPtr *tary = tp->isa_aryptr();
2469   if (tary != nullptr &&
2470       tary->offset() == oopDesc::klass_offset_in_bytes()) {
2471     return tary->as_klass_type(true);
2472   }
2473 
2474   // Check for loading klass from an array klass
2475   const TypeKlassPtr *tkls = tp->isa_klassptr();
2476   if (tkls != nullptr && !StressReflectiveCode) {
2477     if (!tkls->is_loaded())
2478      return _type;             // Bail out if not loaded
2479     if (tkls->isa_aryklassptr() && tkls->is_aryklassptr()->elem()->isa_klassptr() &&
2480         tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2481       // // Always returning precise element type is incorrect,
2482       // // e.g., element type could be object and array may contain strings
2483       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2484 
2485       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2486       // according to the element type's subclassing.
2487       return tkls->is_aryklassptr()->elem()->isa_klassptr()->cast_to_exactness(tkls->klass_is_exact());
2488     }

2539       return allocated_klass;
2540     }
2541   }
2542 
2543   // Simplify k.java_mirror.as_klass to plain k, where k is a Klass*.
2544   // See inline_native_Class_query for occurrences of these patterns.
2545   // Java Example:  x.getClass().isAssignableFrom(y)
2546   //
2547   // This improves reflective code, often making the Class
2548   // mirror go completely dead.  (Current exception:  Class
2549   // mirrors may appear in debug info, but we could clean them out by
2550   // introducing a new debug info operator for Klass.java_mirror).
2551 
2552   if (toop->isa_instptr() && toop->is_instptr()->instance_klass() == phase->C->env()->Class_klass()
2553       && offset == java_lang_Class::klass_offset()) {
2554     if (base->is_Load()) {
2555       Node* base2 = base->in(MemNode::Address);
2556       if (base2->is_Load()) { /* direct load of a load which is the OopHandle */
2557         Node* adr2 = base2->in(MemNode::Address);
2558         const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
2559         if (tkls != nullptr && !tkls->empty()
2560             && (tkls->isa_instklassptr() || tkls->isa_aryklassptr())
2561             && adr2->is_AddP()
2562            ) {
2563           int mirror_field = in_bytes(Klass::java_mirror_offset());
2564           if (tkls->offset() == mirror_field) {
2565             return adr2->in(AddPNode::Base);
2566           }
2567         }
2568       }
2569     }
2570   }
2571 
2572   return this;
2573 }
2574 
2575 LoadNode* LoadNode::clone_pinned() const {
2576   LoadNode* ld = clone()->as_Load();
2577   ld->_control_dependency = UnknownControl;
2578   return ld;
2579 }
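        // Note: the UnknownControl dependency means the clone no longer
        // depends only on its controlling test, so (for example) an array
        // load cloned via pin_array_access_node() cannot float above its
        // range check.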
2580 

3364   }
3365   ss.print_cr("[TraceMergeStores]: with");
3366   merged_input_value->dump("\n", false, &ss);
3367   merged_store->dump("\n", false, &ss);
3368   tty->print("%s", ss.as_string());
3369 }
3370 #endif
3371 
3372 //------------------------------Ideal------------------------------------------
 3373 // Change a back-to-back Store(Store(m, p, y), p, x) to Store(m, p, x).
3374 // When a store immediately follows a relevant allocation/initialization,
3375 // try to capture it into the initialization, or hoist it above.
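 // Illustrative example: in "a.f = 1; a.f = 2;" the first StoreI's only use
 // is the second store's memory input, so the second store is rewired to the
 // first one's memory and the dead first store goes away.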
3376 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3377   Node* p = MemNode::Ideal_common(phase, can_reshape);
3378   if (p)  return (p == NodeSentinel) ? nullptr : p;
3379 
3380   Node* mem     = in(MemNode::Memory);
3381   Node* address = in(MemNode::Address);
3382   Node* value   = in(MemNode::ValueIn);
3383   // Back-to-back stores to same address?  Fold em up.  Generally
3384   // unsafe if I have intervening uses.
3385   {
3386     Node* st = mem;
3387     // If Store 'st' has more than one use, we cannot fold 'st' away.
3388     // For example, 'st' might be the final state at a conditional
3389     // return.  Or, 'st' might be used by some node which is live at
3390     // the same time 'st' is live, which might be unschedulable.  So,
3391     // require exactly ONE user until such time as we clone 'mem' for
3392     // each of 'mem's uses (thus making the exactly-1-user-rule hold
3393     // true).
3394     while (st->is_Store() && st->outcnt() == 1) {
3395       // Looking at a dead closed cycle of memory?
3396       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
3397       assert(Opcode() == st->Opcode() ||
3398              st->Opcode() == Op_StoreVector ||
3399              Opcode() == Op_StoreVector ||
3400              st->Opcode() == Op_StoreVectorScatter ||
3401              Opcode() == Op_StoreVectorScatter ||
3402              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
3403              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
3404              (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
3405              (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
3406              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
3407 
3408       if (st->in(MemNode::Address)->eqv_uncast(address) &&
3409           st->as_Store()->memory_size() <= this->memory_size()) {
3410         Node* use = st->raw_out(0);
3411         if (phase->is_IterGVN()) {
3412           phase->is_IterGVN()->rehash_node_delayed(use);
3413         }
3414         // It's OK to do this in the parser, since DU info is always accurate,
3415         // and the parser always refers to nodes via SafePointNode maps.
3416         use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
3417         return this;
3418       }
3419       st = st->in(MemNode::Memory);
3420     }
3421   }
3422 
3423 
3424   // Capture an unaliased, unconditional, simple store into an initializer.

3522       const StoreVectorNode* store_vector = as_StoreVector();
3523       const StoreVectorNode* mem_vector = mem->as_StoreVector();
3524       const Node* store_indices = store_vector->indices();
3525       const Node* mem_indices = mem_vector->indices();
3526       const Node* store_mask = store_vector->mask();
3527       const Node* mem_mask = mem_vector->mask();
3528       // Ensure types, indices, and masks match
3529       if (store_vector->vect_type() == mem_vector->vect_type() &&
3530           ((store_indices == nullptr) == (mem_indices == nullptr) &&
3531            (store_indices == nullptr || store_indices->eqv_uncast(mem_indices))) &&
3532           ((store_mask == nullptr) == (mem_mask == nullptr) &&
3533            (store_mask == nullptr || store_mask->eqv_uncast(mem_mask)))) {
3534         result = mem;
3535       }
3536     }
3537   }
3538 
3539   // Store of zero anywhere into a freshly-allocated object?
3540   // Then the store is useless.
3541   // (It must already have been captured by the InitializeNode.)
3542   if (result == this &&
3543       ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
3544     // a newly allocated object is already all-zeroes everywhere
3545     if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
3546       result = mem;
3547     }
3548 
3549     if (result == this) {
3550       // the store may also apply to zero-bits in an earlier object
3551       Node* prev_mem = find_previous_store(phase);
3552       // Steps (a), (b):  Walk past independent stores to find an exact match.
3553       if (prev_mem != nullptr) {
3554         Node* prev_val = can_see_stored_value(prev_mem, phase);
3555         if (prev_val != nullptr && prev_val == val) {
3556           // prev_val and val might differ by a cast; it would be good
3557           // to keep the more informative of the two.
3558           result = mem;
3559         }
3560       }
3561     }
3562   }
3563 
3564   PhaseIterGVN* igvn = phase->is_IterGVN();
3565   if (result != this && igvn != nullptr) {
3566     MemBarNode* trailing = trailing_membar();
3567     if (trailing != nullptr) {
3568 #ifdef ASSERT
3569       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();

4033 // Clearing a short array is faster with stores
4034 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
4035   // Already know this is a large node, do not try to ideal it
4036   if (_is_large) return nullptr;
4037 
4038   const int unit = BytesPerLong;
4039   const TypeX* t = phase->type(in(2))->isa_intptr_t();
4040   if (!t)  return nullptr;
4041   if (!t->is_con())  return nullptr;
4042   intptr_t raw_count = t->get_con();
4043   intptr_t size = raw_count;
4044   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
4045   // Clearing nothing uses the Identity call.
4046   // Negative clears are possible on dead ClearArrays
4047   // (see jck test stmt114.stmt11402.val).
4048   if (size <= 0 || size % unit != 0)  return nullptr;
4049   intptr_t count = size / unit;
4050   // Length too long; communicate this to matchers and assemblers.
 4051   // Assemblers are responsible for producing fast hardware clears for it.
4052   if (size > InitArrayShortSize) {
4053     return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
4054   } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
4055     return nullptr;
4056   }
4057   if (!IdealizeClearArrayNode) return nullptr;
4058   Node *mem = in(1);
4059   if( phase->type(mem)==Type::TOP ) return nullptr;
4060   Node *adr = in(3);
4061   const Type* at = phase->type(adr);
4062   if( at==Type::TOP ) return nullptr;
4063   const TypePtr* atp = at->isa_ptr();
4064   // adjust atp to be the correct array element address type
4065   if (atp == nullptr)  atp = TypePtr::BOTTOM;
4066   else              atp = atp->add_offset(Type::OffsetBot);
4067   // Get base for derived pointer purposes
4068   if( adr->Opcode() != Op_AddP ) Unimplemented();
4069   Node *base = adr->in(1);
4070 
4071   Node *zero = phase->makecon(TypeLong::ZERO);
4072   Node *off  = phase->MakeConX(BytesPerLong);
4073   mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
4074   count--;
4075   while( count-- ) {
4076     mem = phase->transform(mem);
4077     adr = phase->transform(new AddPNode(base,adr,off));
4078     mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
4079   }
4080   return mem;
4081 }
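        // Worked example (illustrative): clearing 24 bytes (count == 3) with
        // IdealizeClearArrayNode expands to three zero StoreLNodes threaded
        // through memory at offsets adr, adr+8 and adr+16:
        //   mem' = StoreL(StoreL(StoreL(mem, adr, 0), adr+8, 0), adr+16, 0)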
4082 
4083 //----------------------------step_through----------------------------------
 4084 // Step *np through to the allocation's input memory edge if the ClearArray
 4085 // initializes a different instance; return false if it is the instance we are looking for.
4086 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseValues* phase) {
4087   Node* n = *np;
4088   assert(n->is_ClearArray(), "sanity");
4089   intptr_t offset;
4090   AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
4091   // This method is called only before Allocate nodes are expanded
4092   // during macro nodes expansion. Before that ClearArray nodes are
4093   // only generated in PhaseMacroExpand::generate_arraycopy() (before
4094   // Allocate nodes are expanded) which follows allocations.
4095   assert(alloc != nullptr, "should have allocation");
4096   if (alloc->_idx == instance_id) {
 4097     // Cannot bypass the initialization of the instance we are looking for.
4098     return false;
4099   }
4100   // Otherwise skip it.
4101   InitializeNode* init = alloc->initialization();
4102   if (init != nullptr)
4103     *np = init->in(TypeFunc::Memory);
4104   else
4105     *np = alloc->in(TypeFunc::Memory);
4106   return true;
4107 }
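        // Illustrative example: a chain walk for instance id 7 may step
        // through a ClearArray that initializes allocation #12 (a different
        // instance) to that allocation's input memory, but returns false at
        // a ClearArray for allocation #7 itself.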
4108 
4109 //----------------------------clear_memory-------------------------------------
4110 // Generate code to initialize object storage to zero.
4111 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4112                                    intptr_t start_offset,
4113                                    Node* end_offset,
4114                                    PhaseGVN* phase) {
4115   intptr_t offset = start_offset;
4116 
4117   int unit = BytesPerLong;
4118   if ((offset % unit) != 0) {
4119     Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
4120     adr = phase->transform(adr);
4121     const TypePtr* atp = TypeRawPtr::BOTTOM;
4122     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4123     mem = phase->transform(mem);
4124     offset += BytesPerInt;
4125   }
4126   assert((offset % unit) == 0, "");
4127 
4128   // Initialize the remaining stuff, if any, with a ClearArray.
4129   return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
4130 }
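        // Worked example (illustrative offsets): with start_offset = 12 the
        // 4 bytes [12, 16) are zeroed by the single 32-bit store above, and
        // the recursive call clears [16, end_offset) in 8-byte units.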
4131 
4132 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4133                                    Node* start_offset,
4134                                    Node* end_offset,
4135                                    PhaseGVN* phase) {
4136   if (start_offset == end_offset) {
4137     // nothing to do
4138     return mem;
4139   }
4140 
4141   int unit = BytesPerLong;
4142   Node* zbase = start_offset;
4143   Node* zend  = end_offset;
4144 
4145   // Scale to the unit required by the CPU:
4146   if (!Matcher::init_array_count_is_in_bytes) {
4147     Node* shift = phase->intcon(exact_log2(unit));
4148     zbase = phase->transform(new URShiftXNode(zbase, shift) );
4149     zend  = phase->transform(new URShiftXNode(zend,  shift) );
4150   }
4151 
4152   // Bulk clear double-words
4153   Node* zsize = phase->transform(new SubXNode(zend, zbase) );
4154   Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
4155   mem = new ClearArrayNode(ctl, mem, zsize, adr, false);
4156   return phase->transform(mem);
4157 }
4158 
4159 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4160                                    intptr_t start_offset,
4161                                    intptr_t end_offset,
4162                                    PhaseGVN* phase) {
4163   if (start_offset == end_offset) {
4164     // nothing to do
4165     return mem;
4166   }
4167 
4168   assert((end_offset % BytesPerInt) == 0, "odd end offset");
4169   intptr_t done_offset = end_offset;
4170   if ((done_offset % BytesPerLong) != 0) {
4171     done_offset -= BytesPerInt;
4172   }
4173   if (done_offset > start_offset) {
4174     mem = clear_memory(ctl, mem, dest,
4175                        start_offset, phase->MakeConX(done_offset), phase);
4176   }
4177   if (done_offset < end_offset) { // emit the final 32-bit store
4178     Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
4179     adr = phase->transform(adr);
4180     const TypePtr* atp = TypeRawPtr::BOTTOM;
4181     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4182     mem = phase->transform(mem);
4183     done_offset += BytesPerInt;
4184   }
4185   assert(done_offset == end_offset, "");
4186   return mem;
4187 }
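        // Worked example (illustrative offsets): for start_offset = 16 and
        // end_offset = 28, done_offset rounds down to 24; bytes [16, 24) are
        // cleared by the double-word path and the final 4 bytes [24, 28) by
        // the 32-bit store above.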
4188 
4189 //=============================================================================
4190 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
4191   : MultiNode(TypeFunc::Parms + (precedent == nullptr? 0: 1)),
4192     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
4193 #ifdef ASSERT
4194   , _pair_idx(0)
4195 #endif
4196 {
4197   init_class_id(Class_MemBar);
4198   Node* top = C->top();
4199   init_req(TypeFunc::I_O,top);
4200   init_req(TypeFunc::FramePtr,top);
4201   init_req(TypeFunc::ReturnAdr,top);

4307       PhaseIterGVN* igvn = phase->is_IterGVN();
4308       remove(igvn);
4309       // Must return either the original node (now dead) or a new node
4310       // (Do not return a top here, since that would break the uniqueness of top.)
4311       return new ConINode(TypeInt::ZERO);
4312     }
4313   }
4314   return progress ? this : nullptr;
4315 }
4316 
4317 //------------------------------Value------------------------------------------
4318 const Type* MemBarNode::Value(PhaseGVN* phase) const {
4319   if( !in(0) ) return Type::TOP;
4320   if( phase->type(in(0)) == Type::TOP )
4321     return Type::TOP;
4322   return TypeTuple::MEMBAR;
4323 }
4324 
4325 //------------------------------match------------------------------------------
4326 // Construct projections for memory.
4327 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
4328   switch (proj->_con) {
4329   case TypeFunc::Control:
4330   case TypeFunc::Memory:
4331     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
4332   }
4333   ShouldNotReachHere();
4334   return nullptr;
4335 }
4336 
4337 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4338   trailing->_kind = TrailingStore;
4339   leading->_kind = LeadingStore;
4340 #ifdef ASSERT
4341   trailing->_pair_idx = leading->_idx;
4342   leading->_pair_idx = leading->_idx;
4343 #endif
4344 }
4345 
4346 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4347   trailing->_kind = TrailingLoadStore;

4594   return (req() > RawStores);
4595 }
4596 
4597 void InitializeNode::set_complete(PhaseGVN* phase) {
4598   assert(!is_complete(), "caller responsibility");
4599   _is_complete = Complete;
4600 
4601   // After this node is complete, it contains a bunch of
4602   // raw-memory initializations.  There is no need for
4603   // it to have anything to do with non-raw memory effects.
4604   // Therefore, tell all non-raw users to re-optimize themselves,
4605   // after skipping the memory effects of this initialization.
4606   PhaseIterGVN* igvn = phase->is_IterGVN();
4607   if (igvn)  igvn->add_users_to_worklist(this);
4608 }
4609 
4610 // convenience function
4611 // return false if the init contains any stores already
4612 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
4613   InitializeNode* init = initialization();
4614   if (init == nullptr || init->is_complete())  return false;
4615   init->remove_extra_zeroes();
4616   // for now, if this allocation has already collected any inits, bail:
4617   if (init->is_non_zero())  return false;
4618   init->set_complete(phase);
4619   return true;
4620 }
4621 
4622 void InitializeNode::remove_extra_zeroes() {
4623   if (req() == RawStores)  return;
4624   Node* zmem = zero_memory();
4625   uint fill = RawStores;
4626   for (uint i = fill; i < req(); i++) {
4627     Node* n = in(i);
4628     if (n->is_top() || n == zmem)  continue;  // skip
4629     if (fill < i)  set_req(fill, n);          // compact
4630     ++fill;
4631   }
4632   // delete any empty spaces created:
4633   while (fill < req()) {
4634     del_req(fill);

4778             // store node that we'd like to capture. We need to check
4779             // the uses of the MergeMemNode.
4780             mems.push(n);
4781           }
4782         } else if (n->is_Mem()) {
4783           Node* other_adr = n->in(MemNode::Address);
4784           if (other_adr == adr) {
4785             failed = true;
4786             break;
4787           } else {
4788             const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
4789             if (other_t_adr != nullptr) {
4790               int other_alias_idx = phase->C->get_alias_index(other_t_adr);
4791               if (other_alias_idx == alias_idx) {
4792                 // A load from the same memory slice as the store right
4793                 // after the InitializeNode. We check the control of the
4794                 // object/array that is loaded from. If it's the same as
4795                 // the store control then we cannot capture the store.
4796                 assert(!n->is_Store(), "2 stores to same slice on same control?");
4797                 Node* base = other_adr;
4798                 assert(base->is_AddP(), "should be addp but is %s", base->Name());
4799                 base = base->in(AddPNode::Base);
4800                 if (base != nullptr) {
4801                   base = base->uncast();
4802                   if (base->is_Proj() && base->in(0) == alloc) {
4803                     failed = true;
4804                     break;
4805                   }
4806                 }
4807               }
4808             }
4809           }
4810         } else {
4811           failed = true;
4812           break;
4813         }
4814       }
4815     }
4816   }
4817   if (failed) {

5364         //   z's_done      12  16  16  16    12  16    12
5365         //   z's_needed    12  16  16  16    16  16    16
5366         //   zsize          0   0   0   0     4   0     4
5367         if (next_full_store < 0) {
5368           // Conservative tack:  Zero to end of current word.
5369           zeroes_needed = align_up(zeroes_needed, BytesPerInt);
5370         } else {
5371           // Zero to beginning of next fully initialized word.
5372           // Or, don't zero at all, if we are already in that word.
5373           assert(next_full_store >= zeroes_needed, "must go forward");
5374           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
5375           zeroes_needed = next_full_store;
5376         }
5377       }
5378 
5379       if (zeroes_needed > zeroes_done) {
5380         intptr_t zsize = zeroes_needed - zeroes_done;
5381         // Do some incremental zeroing on rawmem, in parallel with inits.
5382         zeroes_done = align_down(zeroes_done, BytesPerInt);
5383         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5384                                               zeroes_done, zeroes_needed,
5385                                               phase);
5386         zeroes_done = zeroes_needed;
5387         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
5388           do_zeroing = false;   // leave the hole, next time
5389       }
5390     }
5391 
5392     // Collect the store and move on:
5393     phase->replace_input_of(st, MemNode::Memory, inits);
5394     inits = st;                 // put it on the linearized chain
5395     set_req(i, zmem);           // unhook from previous position
5396 
5397     if (zeroes_done == st_off)
5398       zeroes_done = next_init_off;
5399 
5400     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
5401 
5402     #ifdef ASSERT
5403     // Various order invariants.  Weaker than stores_are_sane because

5423   remove_extra_zeroes();        // clear out all the zmems left over
5424   add_req(inits);
5425 
5426   if (!(UseTLAB && ZeroTLAB)) {
5427     // If anything remains to be zeroed, zero it all now.
5428     zeroes_done = align_down(zeroes_done, BytesPerInt);
5429     // if it is the last unused 4 bytes of an instance, forget about it
5430     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
5431     if (zeroes_done + BytesPerLong >= size_limit) {
5432       AllocateNode* alloc = allocation();
5433       assert(alloc != nullptr, "must be present");
5434       if (alloc != nullptr && alloc->Opcode() == Op_Allocate) {
5435         Node* klass_node = alloc->in(AllocateNode::KlassNode);
5436         ciKlass* k = phase->type(klass_node)->is_instklassptr()->instance_klass();
5437         if (zeroes_done == k->layout_helper())
5438           zeroes_done = size_limit;
5439       }
5440     }
5441     if (zeroes_done < size_limit) {
5442       rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,


5443                                             zeroes_done, size_in_bytes, phase);
5444     }
5445   }
5446 
5447   set_complete(phase);
5448   return rawmem;
5449 }
5450 
5451 
5452 #ifdef ASSERT
5453 bool InitializeNode::stores_are_sane(PhaseValues* phase) {
5454   if (is_complete())
5455     return true;                // stores could be anything at this point
5456   assert(allocation() != nullptr, "must be present");
5457   intptr_t last_off = allocation()->minimum_header_size();
5458   for (uint i = InitializeNode::RawStores; i < req(); i++) {
5459     Node* st = in(i);
5460     intptr_t st_off = get_store_offset(st, phase);
5461     if (st_off < 0)  continue;  // ignore dead garbage
5462     if (last_off > st_off) {

  26 #include "ci/ciFlatArrayKlass.hpp"
  27 #include "classfile/javaClasses.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "gc/shared/barrierSet.hpp"
  31 #include "gc/shared/c2/barrierSetC2.hpp"
  32 #include "gc/shared/tlab_globals.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/cfgnode.hpp"
  39 #include "opto/compile.hpp"
  40 #include "opto/connode.hpp"
  41 #include "opto/convertnode.hpp"
  42 #include "opto/inlinetypenode.hpp"
  43 #include "opto/loopnode.hpp"
  44 #include "opto/machnode.hpp"
  45 #include "opto/matcher.hpp"
  46 #include "opto/memnode.hpp"
  47 #include "opto/mempointer.hpp"
  48 #include "opto/mulnode.hpp"
  49 #include "opto/narrowptrnode.hpp"
  50 #include "opto/phaseX.hpp"
  51 #include "opto/regalloc.hpp"
  52 #include "opto/regmask.hpp"
  53 #include "opto/rootnode.hpp"
  54 #include "opto/traceMergeStoresTag.hpp"
  55 #include "opto/vectornode.hpp"
  56 #include "utilities/align.hpp"
  57 #include "utilities/copy.hpp"
  58 #include "utilities/macros.hpp"
  59 #include "utilities/powerOfTwo.hpp"
  60 #include "utilities/vmError.hpp"
  61 
  62 // Portions of code courtesy of Clifford Click

 126       st->print(", idx=Bot;");
 127     else if (atp->index() == Compile::AliasIdxTop)
 128       st->print(", idx=Top;");
 129     else if (atp->index() == Compile::AliasIdxRaw)
 130       st->print(", idx=Raw;");
 131     else {
 132       ciField* field = atp->field();
 133       if (field) {
 134         st->print(", name=");
 135         field->print_name_on(st);
 136       }
 137       st->print(", idx=%d;", atp->index());
 138     }
 139   }
 140 }
 141 
 142 extern void print_alias_types();
 143 
 144 #endif
 145 
 146 // Find the memory output corresponding to the fall-through path of a call
 147 static Node* find_call_fallthrough_mem_output(CallNode* call) {
 148   ResourceMark rm;
 149   CallProjections* projs = call->extract_projections(false, false);
 150   Node* res = projs->fallthrough_memproj;
 151   assert(res != nullptr, "must have a fallthrough mem output");
 152   return res;
 153 }
 154 
 155 // Try to find a better memory input for a load from a strict final field
 156 static Node* try_optimize_strict_final_load_memory(PhaseGVN* phase, Node* adr, ProjNode*& base_local) {
 157   intptr_t offset = 0;
 158   Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
 159   if (base == nullptr) {
 160     return nullptr;
 161   }
 162 
 163   Node* base_uncasted = base->uncast();
 164   if (base_uncasted->is_Proj()) {
 165     MultiNode* multi = base_uncasted->in(0)->as_Multi();
 166     if (multi->is_Allocate()) {
 167       base_local = base_uncasted->as_Proj();
 168       return nullptr;
 169     } else if (multi->is_Call()) {
 170       // The oop is returned from a call; the memory can be the fall-through output of the call
 171       return find_call_fallthrough_mem_output(multi->as_Call());
 172     } else if (multi->is_Start()) {
 173       // The oop is a parameter
 174       if (phase->C->method()->is_object_constructor() && base_uncasted->as_Proj()->_con == TypeFunc::Parms) {
 175         // The receiver of a constructor is similar to the result of an AllocateNode
 176         base_local = base_uncasted->as_Proj();
 177         return nullptr;
 178       } else {
 179         // Use the start memory otherwise
 180         return multi->proj_out(TypeFunc::Memory);
 181       }
 182     }
 183   }
 184 
 185   return nullptr;
 186 }
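// Illustrative sketch (not from the source): the base shapes handled by
// try_optimize_strict_final_load_memory above, as simplified graph fragments:
//   base = Proj(Allocate)                    -> report it via base_local, keep the chain
//   base = Proj(Call)                        -> use the call's fall-through memory
//   base = Parm(Start, Parms) in constructor -> treat like an allocation result
//   base = Parm(Start, other)                -> use the Start memory projection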
 187 
 188 // Whether a call can modify a strict final field, given that the object is allocated inside the
 189 // current compilation unit, or is the first parameter when the compilation root is a constructor.
 190 // This is equivalent to asking whether 'call' is a constructor invocation and the class declaring
 191 // the target method is a subclass of the class declaring 'field'.
 192 static bool call_can_modify_local_object(ciField* field, CallNode* call) {
 193   if (!call->is_CallJava()) {
 194     return false;
 195   }
 196 
 197   ciMethod* target = call->as_CallJava()->method();
 198   if (target == nullptr || !target->is_object_constructor()) {
 199     return false;
 200   }
 201 
 202   // If 'field' is declared in a class that is a subclass of the one declaring the constructor,
 203   // then the field is set inside the constructor, else the field must be set before the
 204   // constructor invocation. E.g. A field Super.x will be set during the execution of Sub::<init>,
 205   // while a field Sub.y must be set before Super::<init> is invoked.
 206   // We could be more heroic and decide whether the receiver of the constructor invocation is the
 207   // object we are loading from. That, however, may be problematic, as deciding whether two
 208   // nodes are definitely different may not be trivial, especially if the graph is not canonical.
 209   // As a result, we stay conservative for now.
 210   assert(call->req() > TypeFunc::Parms, "constructor must have at least 1 argument");
 211   return target->holder()->is_subclass_of(field->holder());
 212 }
 213 
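// Conceptual sketch of the walk performed below (illustrative only): starting
// from the load's memory input, skip nodes that provably cannot affect this slice:
//   Proj(Call c)       : skip if c cannot modify t_oop (or, for strict final loads
//                        from a local object, if c is not a relevant constructor)
//   Proj(Initialize i) : skip unless i initializes the instance we load from
//   Proj(MemBar m)     : skip unless an ArrayCopy behind m may modify the slice
//   ClearArray         : step through when it belongs to another instance
// The walk stops at the start memory or at the allocation of the instance itself.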
 214 Node* MemNode::optimize_simple_memory_chain(Node* mchain, const TypeOopPtr* t_oop, Node* load, PhaseGVN* phase) {
 215   assert(t_oop != nullptr, "sanity");
 216   bool is_instance = t_oop->is_known_instance_field();
 217 
 218   ciField* field = phase->C->alias_type(t_oop)->field();
 219   bool is_strict_final_load = false;
 220 
 221   // After macro expansion, an allocation may become a call; changing the memory input to the
 222   // memory output of that call would then be illegal. As a result, disallow this transformation after
 223   // macro expansion.
 224   if (phase->is_IterGVN() && phase->C->allow_macro_nodes() && load != nullptr && load->is_Load() && !load->as_Load()->is_mismatched_access()) {
 225     if (EnableValhalla) {
 226       if (field != nullptr && (field->holder()->is_inlinetype() || field->holder()->is_abstract_value_klass())) {
 227         is_strict_final_load = true;
 228       }
 229 #ifdef ASSERT
 230       if (t_oop->is_inlinetypeptr() && t_oop->inline_klass()->contains_field_offset(t_oop->offset())) {
 231         assert(is_strict_final_load, "sanity check for basic cases");
 232       }
 233 #endif
 234     } else {
 235       is_strict_final_load = field != nullptr && t_oop->is_ptr_to_boxed_value();
 236     }
 237   }
 238 
 239   if (!is_instance && !is_strict_final_load) {
 240     return mchain;
 241   }
 242 
 243   Node* result = mchain;
 244   ProjNode* base_local = nullptr;
 245 
 246   if (is_strict_final_load) {
 247     Node* adr = load->in(MemNode::Address);
 248     assert(phase->type(adr) == t_oop, "inconsistent type");
 249     Node* tmp = try_optimize_strict_final_load_memory(phase, adr, base_local);
 250     if (tmp != nullptr) {
 251       result = tmp;
 252     }
 253   }
 254 
 255   uint instance_id = t_oop->instance_id();
 256   Node* start_mem = phase->C->start()->proj_out_or_null(TypeFunc::Memory);
 257   Node* prev = nullptr;

 258   while (prev != result) {
 259     prev = result;
 260     if (result == start_mem) {
 261       // start_mem is the earliest memory possible
 262       break;
 263     }
 264 
 265     // skip over a call which does not affect this memory slice
 266     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
 267       Node* proj_in = result->in(0);
 268       if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
 269         // This is the allocation that creates the object we are loading from
 270         break;
 271       } else if (proj_in->is_Call()) {
 272         // ArrayCopyNodes processed here as well
 273         CallNode* call = proj_in->as_Call();
 274         if (!call->may_modify(t_oop, phase)) {
 275           result = call->in(TypeFunc::Memory);
 276         } else if (is_strict_final_load && base_local != nullptr && !call_can_modify_local_object(field, call)) {
 277           result = call->in(TypeFunc::Memory);
 278         }
 279       } else if (proj_in->is_Initialize()) {
 280         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
 281         // Stop if this is the initialization for the object instance which
 282         // contains this memory slice; otherwise skip over it.
 283         if ((alloc == nullptr) || (alloc->_idx == instance_id)) {
 284           break;
 285         }
 286         if (is_instance) {
 287           result = proj_in->in(TypeFunc::Memory);
 288         } else if (is_strict_final_load) {
 289           Node* klass = alloc->in(AllocateNode::KlassNode);
 290           const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
 291           if (tklass->klass_is_exact() && !tklass->exact_klass()->equals(t_oop->is_instptr()->exact_klass())) {
 292             // Allocation of another type, must be another object
 293             result = proj_in->in(TypeFunc::Memory);
 294           } else if (base_local != nullptr && (base_local->is_Parm() || base_local->in(0) != alloc)) {
 295             // Allocation of another object
 296             result = proj_in->in(TypeFunc::Memory);
 297           }
 298         }
 299       } else if (proj_in->is_MemBar()) {
 300         ArrayCopyNode* ac = nullptr;
 301         if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase, ac)) {
 302           break;
 303         }
 304         result = proj_in->in(TypeFunc::Memory);
 305       } else if (proj_in->is_top()) {
 306         break; // dead code
 307       } else {
 308         assert(false, "unexpected projection");
 309       }
 310     } else if (result->is_ClearArray()) {
 311       if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {
 312         // Cannot bypass the initialization of the instance
 313         // we are looking for.
 314         break;
 315       }
 316       // Otherwise skip it (the call updated 'result' value).

 329   bool is_instance = t_oop->is_known_instance_field();
 330   PhaseIterGVN *igvn = phase->is_IterGVN();
 331   if (is_instance && igvn != nullptr && result->is_Phi()) {
 332     PhiNode *mphi = result->as_Phi();
 333     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
 334     const TypePtr *t = mphi->adr_type();
 335     bool do_split = false;
 336     // In the following cases, the Load's memory input can be further optimized based on
 337     // its precise address type.
 338     if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ) {
 339       do_split = true;
 340     } else if (t->isa_oopptr() && !t->is_oopptr()->is_known_instance()) {
 341       const TypeOopPtr* mem_t =
 342         t->is_oopptr()->cast_to_exactness(true)
 343         ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
 344         ->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
 345       if (t_oop->isa_aryptr()) {
 346         mem_t = mem_t->is_aryptr()
 347                      ->cast_to_stable(t_oop->is_aryptr()->is_stable())
 348                      ->cast_to_size(t_oop->is_aryptr()->size())
 349                      ->cast_to_not_flat(t_oop->is_aryptr()->is_not_flat())
 350                      ->cast_to_not_null_free(t_oop->is_aryptr()->is_not_null_free())
 351                      ->with_offset(t_oop->is_aryptr()->offset())
 352                      ->is_aryptr();
 353       }
 354       do_split = mem_t == t_oop;
 355     }
 356     if (do_split) {
 357       // clone the Phi with our address type
 358       result = mphi->split_out_instance(t_adr, igvn);
 359     } else {
 360       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
 361     }
 362   }
 363   return result;
 364 }
 365 
 366 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
 367   uint alias_idx = phase->C->get_alias_index(tp);
 368   Node *mem = mmem;
 369 #ifdef ASSERT
 370   {
 371     // Check that current type is consistent with the alias index used during graph construction
 372     assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
 373     bool consistent =  adr_check == nullptr || adr_check->empty() ||
 374                        phase->C->must_alias(adr_check, alias_idx );
 375     // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
 376     if( !consistent && adr_check != nullptr && !adr_check->empty() &&
 377         tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
 378         adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
 379         ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
 380           adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
 381           adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
 382       // don't assert if it is dead code.
 383       consistent = true;
 384     }
 385     if( !consistent ) {
 386       st->print("alias_idx==%d, adr_check==", alias_idx);
 387       if( adr_check == nullptr ) {
 388         st->print("null");
 389       } else {
 390         adr_check->dump();
 391       }
 392       st->cr();
 393       print_alias_types();
 394       assert(consistent, "adr_check must match alias idx");
 395     }
 396   }
 397 #endif

1117     Node* ld = gvn.transform(load);
1118     return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
1119   }
1120 
1121   return load;
1122 }
1123 
1124 //------------------------------hash-------------------------------------------
1125 uint LoadNode::hash() const {
1126   // unroll addition of interesting fields
1127   return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
1128 }
1129 
1130 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
1131   if ((atp != nullptr) && (atp->index() >= Compile::AliasIdxRaw)) {
1132     bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
1133     bool is_stable_ary = FoldStableValues &&
1134                          (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
1135                          tp->isa_aryptr()->is_stable();
1136 
1137     return (eliminate_boxing && non_volatile) || is_stable_ary || tp->is_inlinetypeptr();
1138   }
1139 
1140   return false;
1141 }
1142 
1143 LoadNode* LoadNode::pin_array_access_node() const {
1144   const TypePtr* adr_type = this->adr_type();
1145   if (adr_type != nullptr && adr_type->isa_aryptr()) {
1146     return clone_pinned();
1147   }
1148   return nullptr;
1149 }
1150 
1151 // Is the value loaded previously stored by an arraycopy? If so return
1152 // a load node that reads from the source array so we may be able to
1153 // optimize out the ArrayCopy node later.
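// Illustrative Java example (not from the source): for a basic clone such as
//   int[] dst = src.clone();
//   int v = dst[i];
// the load of dst[i] may be rewired to read src[i] instead, which can let the
// ArrayCopy for the clone be eliminated later if no other uses remain.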
1154 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
1155   Node* ld_adr = in(MemNode::Address);
1156   intptr_t ld_off = 0;
1157   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);

1173     if (ac->as_ArrayCopy()->is_clonebasic()) {
1174       assert(ld_alloc != nullptr, "need an alloc");
1175       assert(addp->is_AddP(), "address must be addp");
1176       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1177       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1178       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1179       addp->set_req(AddPNode::Base, src);
1180       addp->set_req(AddPNode::Address, src);
1181     } else {
1182       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1183              ac->as_ArrayCopy()->is_copyof_validated() ||
1184              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1185       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1186       addp->set_req(AddPNode::Base, src);
1187       addp->set_req(AddPNode::Address, src);
1188 
1189       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1190       BasicType ary_elem = ary_t->elem()->array_element_basic_type();
1191       if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1192 
1193       uint shift  = ary_t->is_flat() ? ary_t->flat_log_elem_size() : exact_log2(type2aelembytes(ary_elem));

1194 
1195       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1196 #ifdef _LP64
1197       diff = phase->transform(new ConvI2LNode(diff));
1198 #endif
1199       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1200 
1201       Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1202       addp->set_req(AddPNode::Offset, offset);
1203     }
1204     addp = phase->transform(addp);
1205 #ifdef ASSERT
1206     const TypePtr* adr_type = phase->type(addp)->is_ptr();
1207     ld->_adr_type = adr_type;
1208 #endif
1209     ld->set_req(MemNode::Address, addp);
1210     ld->set_req(0, ctl);
1211     ld->set_req(MemNode::Memory, mem);
1212     return ld;
1213   }
1214   return nullptr;
1215 }
1216 
1217 static Node* see_through_inline_type(PhaseValues* phase, const MemNode* load, Node* base, int offset) {
1218   if (!load->is_mismatched_access() && base != nullptr && base->is_InlineType() && offset > oopDesc::klass_offset_in_bytes()) {
1219     InlineTypeNode* vt = base->as_InlineType();
1220     Node* value = vt->field_value_by_offset(offset, true);
1221     assert(value != nullptr, "must see some value");
1222     return value;
1223   }
1224 
1225   return nullptr;
1226 }
1227 
1228 //---------------------------can_see_stored_value------------------------------
1229 // This routine exists to make sure this set of tests is done the same
1230 // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
1231 // will change the graph shape in a way which makes memory alive twice at the
1232 // same time (uses the Oracle model of aliasing), then some
1233 // LoadXNode::Identity will fold things back to the equivalence-class model
1234 // of aliasing.
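// Illustrative example (assumed shapes): given
//   mem = StoreI(mem0, adr, v)
//   ld  = LoadI(mem, adr)
// can_see_stored_value(mem) returns v, so Identity can replace ld by v
// without going through memory.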
1235 Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
1236   Node* ld_adr = in(MemNode::Address);
1237   intptr_t ld_off = 0;
1238   Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
1239   // Try to see through an InlineTypeNode
1240   // LoadN is special because the input is not compressed
1241   if (Opcode() != Op_LoadN) {
1242     Node* value = see_through_inline_type(phase, this, ld_base, ld_off);
1243     if (value != nullptr) {
1244       return value;
1245     }
1246   }
1247 
1248   Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
1249   const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
1250   Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr;
1251   // This is more general than load from boxing objects.
1252   if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
1253     uint alias_idx = atp->index();
1254     Node* result = nullptr;
1255     Node* current = st;
1256     // Skip through chains of MemBarNodes checking the MergeMems for
1257     // new states for the slice of this load.  Stop once any other
1258     // kind of node is encountered.  Loads from final memory can skip
1259     // through any kind of MemBar but normal loads shouldn't skip
1260     // through MemBarAcquire since that could allow them to move out of
1261     // a synchronized region. It is not safe to step over MemBarCPUOrder,
1262     // because alias info above them may be inaccurate (e.g., due to
1263     // mixed/mismatched unsafe accesses).
1264     bool is_final_mem = !atp->is_rewritable();
1265     while (current->is_Proj()) {
1266       int opc = current->in(0)->Opcode();
1267       if ((is_final_mem && (opc == Op_MemBarAcquire ||

1311         // Same base, same offset.
1312         // Possible improvement for arrays: check index value instead of absolute offset.
1313 
1314         // At this point we have proven something like this setup:
1315         //   B = << base >>
1316         //   L =  LoadQ(AddP(Check/CastPP(B), #Off))
1317         //   S = StoreQ(AddP(             B , #Off), V)
1318         // (Actually, we haven't yet proven the Q's are the same.)
1319         // In other words, we are loading from a casted version of
1320         // the same pointer-and-offset that we stored to.
1321         // Casted version may carry a dependency and it is respected.
1322         // Thus, we are able to replace L by V.
1323       }
1324       // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1325       if (store_Opcode() != st->Opcode()) {
1326         return nullptr;
1327       }
1328       // LoadVector/StoreVector needs additional check to ensure the types match.
1329       if (st->is_StoreVector()) {
1330         const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
1331         const TypeVect* out_vt = is_Load() ? as_LoadVector()->vect_type() : as_StoreVector()->vect_type();
1332         if (in_vt != out_vt) {
1333           return nullptr;
1334         }
1335       }
1336       return st->in(MemNode::ValueIn);
1337     }
1338 
1339     // A load from a freshly-created object always returns zero.
1340     // (This can happen after LoadNode::Ideal resets the load's memory input
1341     // to find_captured_store, which returned InitializeNode::zero_memory.)
1342     if (st->is_Proj() && st->in(0)->is_Allocate() &&
1343         (st->in(0) == ld_alloc) &&
1344         (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1345       // return a zero value for the load's basic type
1346       // (This is one of the few places where a generic PhaseTransform
1347       // can create new nodes.  Think of it as lazily manifesting
1348       // virtually pre-existing constants.)
1349       Node* init_value = ld_alloc->in(AllocateNode::InitValue);
1350       if (init_value != nullptr) {
1351         // TODO 8350865 Scalar replacement does not work well for flat arrays.
1352         // Is this correct for non-all-zero init values? Don't we need field_value_by_offset?
1353         return init_value;
1354       }
1355       assert(ld_alloc->in(AllocateNode::RawInitValue) == nullptr, "raw init value must be null here");
1356       if (value_basic_type() != T_VOID) {
1357         if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
1358           // If ReduceBulkZeroing is disabled, we need to check if the allocation does not belong to an
1359           // ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
1360           // by the ArrayCopyNode.
1361           return phase->zerocon(value_basic_type());
1362         }
1363       } else {
1364         // TODO: materialize all-zero vector constant
1365         assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1366       }
1367     }
1368 
1369     // A load from an initialization barrier can match a captured store.
1370     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1371       InitializeNode* init = st->in(0)->as_Initialize();
1372       AllocateNode* alloc = init->allocation();
1373       if ((alloc != nullptr) && (alloc == ld_alloc)) {
1374         // examine a captured store value
1375         st = init->find_captured_store(ld_off, memory_size(), phase);

1996   bool addr_mark = ((phase->type(address)->isa_oopptr() || phase->type(address)->isa_narrowoop()) &&
1997          phase->type(address)->is_ptr()->offset() == oopDesc::mark_offset_in_bytes());
1998 
1999   // Skip up past a SafePoint control.  Cannot do this for Stores because
2000   // pointer stores & cardmarks must stay on the same side of a SafePoint.
2001   if( ctrl != nullptr && ctrl->Opcode() == Op_SafePoint &&
2002       phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw  &&
2003       !addr_mark &&
2004       (depends_only_on_test() || has_unknown_control_dependency())) {
2005     ctrl = ctrl->in(0);
2006     set_req(MemNode::Control,ctrl);
2007     progress = true;
2008   }
2009 
2010   intptr_t ignore = 0;
2011   Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
2012   if (base != nullptr
2013       && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
2014     // Check for useless control edge in some common special cases
2015     if (in(MemNode::Control) != nullptr
2016         && !(phase->type(address)->is_inlinetypeptr() && is_mismatched_access())
2017         && can_remove_control()
2018         && phase->type(base)->higher_equal(TypePtr::NOTNULL)
2019         && all_controls_dominate(base, phase->C->start())) {
2020       // A method-invariant, non-null address (constant or 'this' argument).
2021       set_req(MemNode::Control, nullptr);
2022       progress = true;
2023     }
2024   }
2025 
2026   Node* mem = in(MemNode::Memory);
2027   const TypePtr *addr_t = phase->type(address)->isa_ptr();
2028 
2029   if (can_reshape && (addr_t != nullptr)) {
2030     // try to optimize our memory input
2031     Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase);
2032     if (opt_mem != mem) {
2033       set_req_X(MemNode::Memory, opt_mem, phase);
2034       if (phase->type( opt_mem ) == Type::TOP) return nullptr;
2035       return this;
2036     }

2093   // fold up, do so.
2094   Node* prev_mem = find_previous_store(phase);
2095   if (prev_mem != nullptr) {
2096     Node* value = can_see_arraycopy_value(prev_mem, phase);
2097     if (value != nullptr) {
2098       return value;
2099     }
2100   }
2101   // Steps (a), (b):  Walk past independent stores to find an exact match.
2102   if (prev_mem != nullptr && prev_mem != in(MemNode::Memory)) {
2103     // (c) See if we can fold up on the spot, but don't fold up here.
2104     // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
2105     // just return a prior value, which is done by Identity calls.
2106     if (can_see_stored_value(prev_mem, phase)) {
2107       // Make ready for step (d):
2108       set_req_X(MemNode::Memory, prev_mem, phase);
2109       return this;
2110     }
2111   }
2112 
2113   if (progress) {
2114     return this;
2115   }
2116 
2117   if (!can_reshape) {
2118     phase->record_for_igvn(this);
2119   }
2120   return nullptr;
2121 }
2122 
2123 // Helper to recognize certain Klass fields which are invariant across
2124 // some group of array types (e.g., int[] or all T[] where T < Object).
2125 const Type*
2126 LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
2127                                  ciKlass* klass) const {
2128   assert(!UseCompactObjectHeaders || tkls->offset() != in_bytes(Klass::prototype_header_offset()),
2129          "must not happen");
2130   if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
2131     // The field is Klass::_access_flags.  Return its (constant) value.
2132     assert(Opcode() == Op_LoadUS, "must load an unsigned short from _access_flags");
2133     return TypeInt::make(klass->access_flags());
2134   }
2135   if (tkls->offset() == in_bytes(Klass::misc_flags_offset())) {
2136     // The field is Klass::_misc_flags.  Return its (constant) value.
2137     assert(Opcode() == Op_LoadUB, "must load an unsigned byte from _misc_flags");
2138     return TypeInt::make(klass->misc_flags());
2139   }
2140   if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {

2200       }
2201     }
2202 
2203     // Don't do this for integer types. There is only potential profit if
2204     // the element type t is lower than _type; that is, for int types, if _type is
2205     // more restrictive than t.  This only happens here if one is short and the other
2206     // char (both 16 bits), and in those cases we've made an intentional decision
2207     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
2208     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
2209     //
2210     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
2211     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
2212     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
2213     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
2214     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
2215     // In fact, that could have been the original type of p1, and p1 could have
2216     // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
2217     // expression (LShiftL quux 3) independently optimized to the constant 8.
2218     if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
2219         && (_type->isa_vect() == nullptr)
2220         && !ary->is_flat()
2221         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
2222       // t might actually be lower than _type, if _type is a unique
2223       // concrete subclass of abstract class t.
2224       if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
2225         const Type* jt = t->join_speculative(_type);
2226         // In any case, do not allow the join, per se, to empty out the type.
2227         if (jt->empty() && !t->empty()) {
2228           // This can happen if an interface-typed array narrows to a class type.
2229           jt = _type;
2230         }
2231 #ifdef ASSERT
2232         if (phase->C->eliminate_boxing() && adr->is_AddP()) {
2233           // The pointers in the autobox arrays are always non-null
2234           Node* base = adr->in(AddPNode::Base);
2235           if ((base != nullptr) && base->is_DecodeN()) {
2236             // Get LoadN node which loads IntegerCache.cache field
2237             base = base->in(1);
2238           }
2239           if ((base != nullptr) && base->is_Con()) {
2240             const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
2241             if ((base_type != nullptr) && base_type->is_autobox_cache()) {
2242               // It could be narrow oop
2243               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
2244             }
2245           }
2246         }
2247 #endif
2248         return jt;
2249       }
2250     }
2251   } else if (tp->base() == Type::InstPtr) {
2252     assert( off != Type::OffsetBot ||
2253             // arrays can be cast to Objects
2254             !tp->isa_instptr() ||
2255             tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
2256             // Default value load
2257             tp->is_instptr()->instance_klass() == ciEnv::current()->Class_klass() ||
2258             // unsafe field access may not have a constant offset
2259             C->has_unsafe_access(),
2260             "Field accesses must be precise" );
2261     // For oop loads, we expect the _type to be precise.
2262 

2263     const TypeInstPtr* tinst = tp->is_instptr();
2264     BasicType bt = value_basic_type();
2265 
2266     // Optimize loads from constant fields.
2267     ciObject* const_oop = tinst->const_oop();
2268     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
2269       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt);
2270       if (con_type != nullptr) {
2271         return con_type;
2272       }
2273     }
2274   } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
2275     assert(off != Type::OffsetBot ||
2276             !tp->isa_instklassptr() ||
2277            // arrays can be cast to Objects
2278            tp->isa_instklassptr()->instance_klass()->is_java_lang_Object() ||
2279            // also allow array-loading from the primary supertype
2280            // array during subtype checks
2281            Opcode() == Op_LoadKlass,
2282            "Field accesses must be precise");
2283     // For klass/static loads, we expect the _type to be precise
2284   } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
2285     /* With mirrors being an indirection in the Klass*,
2286      * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
2287      * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
2288      *
2289      * So check the type and klass of the node before the LoadP.

2296         assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2297         assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2298         return TypeInstPtr::make(klass->java_mirror());
2299       }
2300     }
2301   }
2302 
2303   const TypeKlassPtr *tkls = tp->isa_klassptr();
2304   if (tkls != nullptr) {
2305     if (tkls->is_loaded() && tkls->klass_is_exact()) {
2306       ciKlass* klass = tkls->exact_klass();
2307       // We are loading a field from a Klass metaobject whose identity
2308       // is known at compile time (the type is "exact" or "precise").
2309       // Check for fields we know are maintained as constants by the VM.
2310       if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2311         // The field is Klass::_super_check_offset.  Return its (constant) value.
2312         // (Folds up type checking code.)
2313         assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2314         return TypeInt::make(klass->super_check_offset());
2315       }
2316       if (UseCompactObjectHeaders) { // TODO: Should EnableValhalla also take this path?
2317         if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
2318           // The field is Klass::_prototype_header. Return its (constant) value.
2319           assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
2320           return TypeX::make(klass->prototype_header());
2321         }
2322       }
2323       // Compute index into primary_supers array
2324       juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2325       // Check for overflowing; use unsigned compare to handle the negative case.
2326       if( depth < ciKlass::primary_super_limit() ) {
2327         // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2328         // (Folds up type checking code.)
2329         assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2330         ciKlass *ss = klass->super_of_depth(depth);
2331         return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR;
2332       }
2333       const Type* aift = load_array_final_field(tkls, klass);
2334       if (aift != nullptr)  return aift;
2335     }
2336 

2372         !tkls->is_instklassptr()->might_be_an_array() // not the supertype of all T[] (java.lang.Object) or has an interface that is not Serializable or Cloneable
2373     ) {
2374       assert(Opcode() == Op_LoadI, "must load an int from _layout_helper");
2375       jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
2376       // The key property of this type is that it folds up tests
2377       // for array-ness, since it proves that the layout_helper is positive.
2378       // Thus, a generic value like the basic object layout helper works fine.
2379       return TypeInt::make(min_size, max_jint, Type::WidenMin);
2380     }
2381   }
2382 
2383   bool is_vect = (_type->isa_vect() != nullptr);
2384   if (is_instance && !is_vect) {
2385     // If we have an instance type and our memory input is the
2386     // program's initial memory state, there is no matching store,
2387     // so just return a zero of the appropriate type -
2388     // except if it is vectorized - then we have no zero constant.
2389     Node *mem = in(MemNode::Memory);
2390     if (mem->is_Parm() && mem->in(0)->is_Start()) {
2391       assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2392       // TODO 8350865 Scalar replacement does not work well for flat arrays.
2393       // Escape Analysis assumes that arrays are always zeroed during allocation, which is not true for null-free arrays.
2394       // ConnectionGraph::split_unique_types will re-wire the memory of loads from such arrays around the allocation
2395       // TestArrays::test6 and test152 and TestBasicFunctionality::test20 are affected by this.
2396       if (tp->isa_aryptr() && tp->is_aryptr()->is_flat() && tp->is_aryptr()->is_null_free()) {
2397         intptr_t offset = 0;
2398         Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2399         AllocateNode* alloc = AllocateNode::Ideal_allocation(base);
2400         if (alloc != nullptr && alloc->is_AllocateArray() && alloc->in(AllocateNode::InitValue) != nullptr) {
2401           return _type;
2402         }
2403       }
2404       return Type::get_zero_type(_type->basic_type());
2405     }
2406   }

2407   if (!UseCompactObjectHeaders) {
2408     Node* alloc = is_new_object_mark_load();
2409     if (alloc != nullptr) {
2410       if (EnableValhalla) {
2411         // The mark word may contain property bits (inline, flat, null-free)
2412         Node* klass_node = alloc->in(AllocateNode::KlassNode);
2413         const TypeKlassPtr* tkls = phase->type(klass_node)->isa_klassptr();
2414         if (tkls != nullptr && tkls->is_loaded() && tkls->klass_is_exact()) {
2415           return TypeX::make(tkls->exact_klass()->prototype_header());
2416         }
2417       } else {
2418         return TypeX::make(markWord::prototype().value());
2419       }
2420     }
2421   }
2422 
2423   return _type;
2424 }
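// Illustrative example: a mark-word load from a freshly allocated object folds
// to a constant above: the klass' prototype header when Valhalla is enabled and
// the klass is exact, or the plain markWord::prototype() value otherwise.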
2425 
2426 //------------------------------match_edge-------------------------------------
2427 // Do we Match on this edge index or not?  Match only the address.
2428 uint LoadNode::match_edge(uint idx) const {
2429   return idx == MemNode::Address;
2430 }
2431 
2432 //--------------------------LoadBNode::Ideal--------------------------------------
2433 //
2434 //  If the previous store is to the same address as this load,
2435 //  and the value stored was larger than a byte, replace this load
2436 //  with the value stored truncated to a byte.  If no truncation is
2437 //  needed, the replacement is done in LoadNode::Identity().
2438 //
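// Illustrative example (assumed shape): for
//   StoreI(mem, adr, v)
//   LoadB(mem', adr)
// the load may be replaced by RShiftI(LShiftI(v, 24), 24), i.e. v truncated
// to a byte, analogous to the (con << 16) >> 16 truncation done for LoadS below.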
2439 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {

2548     }
2549   }
2550   // Identity call will handle the case where truncation is not needed.
2551   return LoadNode::Ideal(phase, can_reshape);
2552 }
2553 
2554 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2555   Node* mem = in(MemNode::Memory);
2556   Node* value = can_see_stored_value(mem,phase);
2557   if (value != nullptr && value->is_Con() &&
2558       !value->bottom_type()->higher_equal(_type)) {
2559     // If the input to the store does not fit with the load's result type,
2560     // it must be truncated. We can't delay until Ideal call since
2561     // a singleton Value is needed for split_thru_phi optimization.
2562     int con = value->get_int();
2563     return TypeInt::make((con << 16) >> 16);
2564   }
2565   return LoadNode::Value(phase);
2566 }
2567 
2568 Node* LoadNNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2569   // Loading from an InlineType: find the input and make an EncodeP
2570   Node* addr = in(Address);
2571   intptr_t offset;
2572   Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset);
2573   Node* value = see_through_inline_type(phase, this, base, offset);
2574   if (value != nullptr) {
2575     return new EncodePNode(value, type());
2576   }
2577 
2578   return LoadNode::Ideal(phase, can_reshape);
2579 }
2580 
2581 //=============================================================================
2582 //----------------------------LoadKlassNode::make------------------------------
2583 // Polymorphic factory method:
2584 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
2585   // sanity check the alias category against the created node type
2586   const TypePtr* adr_type = adr->bottom_type()->isa_ptr();
2587   assert(adr_type != nullptr, "expecting TypeKlassPtr");
2588 #ifdef _LP64
2589   if (adr_type->is_ptr_to_narrowklass()) {
2590     assert(UseCompressedClassPointers, "no compressed klasses");
2591     Node* load_klass = gvn.transform(new LoadNKlassNode(mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2592     return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2593   }
2594 #endif
2595   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "narrow klass/oop should have been handled above");
2596   return new LoadKlassNode(mem, adr, at, tk, MemNode::unordered);
2597 }
2598 
2599 //------------------------------Value------------------------------------------
2600 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {

2634           }
2635           return TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
2636         }
2637         if (!t->is_klass()) {
2638           // a primitive Class (e.g., int.class) has null for a klass field
2639           return TypePtr::NULL_PTR;
2640         }
2641         // Fold up the load of the hidden field
2642         return TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
2643       }
2644       // non-constant mirror, so we can't tell what's going on
2645     }
2646     if (!tinst->is_loaded())
2647       return _type;             // Bail out if not loaded
2648     if (offset == oopDesc::klass_offset_in_bytes()) {
2649       return tinst->as_klass_type(true);
2650     }
2651   }
2652 
2653   // Check for loading klass from an array
2654   const TypeAryPtr* tary = tp->isa_aryptr();
2655   if (tary != nullptr &&
2656       tary->offset() == oopDesc::klass_offset_in_bytes()) {
2657     return tary->as_klass_type(true);
2658   }
2659 
2660   // Check for loading klass from an array klass
2661   const TypeKlassPtr *tkls = tp->isa_klassptr();
2662   if (tkls != nullptr && !StressReflectiveCode) {
2663     if (!tkls->is_loaded())
2664      return _type;             // Bail out if not loaded
2665     if (tkls->isa_aryklassptr() && tkls->is_aryklassptr()->elem()->isa_klassptr() &&
2666         tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2667       // // Always returning precise element type is incorrect,
2668       // // e.g., element type could be object and array may contain strings
2669       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2670 
2671       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2672       // according to the element type's subclassing.
2673       return tkls->is_aryklassptr()->elem()->isa_klassptr()->cast_to_exactness(tkls->klass_is_exact());
2674     }

2725       return allocated_klass;
2726     }
2727   }
2728 
2729   // Simplify k.java_mirror.as_klass to plain k, where k is a Klass*.
2730   // See inline_native_Class_query for occurrences of these patterns.
2731   // Java Example:  x.getClass().isAssignableFrom(y)
2732   //
2733   // This improves reflective code, often making the Class
2734   // mirror go completely dead.  (Current exception:  Class
2735   // mirrors may appear in debug info, but we could clean them out by
2736   // introducing a new debug info operator for Klass.java_mirror).
2737 
2738   if (toop->isa_instptr() && toop->is_instptr()->instance_klass() == phase->C->env()->Class_klass()
2739       && offset == java_lang_Class::klass_offset()) {
2740     if (base->is_Load()) {
2741       Node* base2 = base->in(MemNode::Address);
2742       if (base2->is_Load()) { /* direct load of a load which is the OopHandle */
2743         Node* adr2 = base2->in(MemNode::Address);
2744         const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
2745         // TODO 8366668 Re-enable this for arrays
2746         if (tkls != nullptr && !tkls->empty()
2747             && ((tkls->isa_instklassptr() && !tkls->is_instklassptr()->might_be_an_array()) || (tkls->isa_aryklassptr() && false))
2748             && adr2->is_AddP()
2749            ) {
2750           int mirror_field = in_bytes(Klass::java_mirror_offset());
2751           if (tkls->offset() == mirror_field) {
2752             return adr2->in(AddPNode::Base);
2753           }
2754         }
2755       }
2756     }
2757   }
2758 
2759   return this;
2760 }
2761 
2762 LoadNode* LoadNode::clone_pinned() const {
2763   LoadNode* ld = clone()->as_Load();
2764   ld->_control_dependency = UnknownControl;
2765   return ld;
2766 }
2767 

3551   }
3552   ss.print_cr("[TraceMergeStores]: with");
3553   merged_input_value->dump("\n", false, &ss);
3554   merged_store->dump("\n", false, &ss);
3555   tty->print("%s", ss.as_string());
3556 }
3557 #endif
3558 
3559 //------------------------------Ideal------------------------------------------
3560 // Fold back-to-back stores: Store(Store(m, p, y), p, x) becomes Store(m, p, x).
3561 // When a store immediately follows a relevant allocation/initialization,
3562 // try to capture it into the initialization, or hoist it above.
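// Illustrative example: with no intervening uses of the first store,
//   st1 = StoreI(m,   p, y)
//   st2 = StoreI(st1, p, x)
// st1 is dead and st2 can be folded to StoreI(m, p, x).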
3563 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3564   Node* p = MemNode::Ideal_common(phase, can_reshape);
3565   if (p)  return (p == NodeSentinel) ? nullptr : p;
3566 
3567   Node* mem     = in(MemNode::Memory);
3568   Node* address = in(MemNode::Address);
3569   Node* value   = in(MemNode::ValueIn);
3570   // Back-to-back stores to same address?  Fold em up.  Generally
3571   // unsafe if I have intervening uses...
3572   if (phase->C->get_adr_type(phase->C->get_alias_index(adr_type())) != TypeAryPtr::INLINES) {
3573     Node* st = mem;
3574     // If Store 'st' has more than one use, we cannot fold 'st' away.
3575     // For example, 'st' might be the final state at a conditional
3576     // return.  Or, 'st' might be used by some node which is live at
3577     // the same time 'st' is live, which might be unschedulable.  So,
3578     // require exactly ONE user until such time as we clone 'mem' for
3579     // each of 'mem's uses (thus making the exactly-1-user-rule hold
3580     // true).
3581     while (st->is_Store() && st->outcnt() == 1) {
3582       // Looking at a dead closed cycle of memory?
3583       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
3584       assert(Opcode() == st->Opcode() ||
3585              st->Opcode() == Op_StoreVector ||
3586              Opcode() == Op_StoreVector ||
3587              st->Opcode() == Op_StoreVectorScatter ||
3588              Opcode() == Op_StoreVectorScatter ||
3589              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
3590              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
3591              (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
3592              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) ||
3593              (st->adr_type()->isa_aryptr() && st->adr_type()->is_aryptr()->is_flat()) || // TODO 8343835
3594              (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
3595              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
3596 
3597       if (st->in(MemNode::Address)->eqv_uncast(address) &&
3598           st->as_Store()->memory_size() <= this->memory_size()) {
3599         Node* use = st->raw_out(0);
3600         if (phase->is_IterGVN()) {
3601           phase->is_IterGVN()->rehash_node_delayed(use);
3602         }
3603         // It's OK to do this in the parser, since DU info is always accurate,
3604         // and the parser always refers to nodes via SafePointNode maps.
3605         use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
3606         return this;
3607       }
3608       st = st->in(MemNode::Memory);
3609     }
3610   }
3611 
3612 
3613   // Capture an unaliased, unconditional, simple store into an initializer.

3711       const StoreVectorNode* store_vector = as_StoreVector();
3712       const StoreVectorNode* mem_vector = mem->as_StoreVector();
3713       const Node* store_indices = store_vector->indices();
3714       const Node* mem_indices = mem_vector->indices();
3715       const Node* store_mask = store_vector->mask();
3716       const Node* mem_mask = mem_vector->mask();
3717       // Ensure types, indices, and masks match
3718       if (store_vector->vect_type() == mem_vector->vect_type() &&
3719           ((store_indices == nullptr) == (mem_indices == nullptr) &&
3720            (store_indices == nullptr || store_indices->eqv_uncast(mem_indices))) &&
3721           ((store_mask == nullptr) == (mem_mask == nullptr) &&
3722            (store_mask == nullptr || store_mask->eqv_uncast(mem_mask)))) {
3723         result = mem;
3724       }
3725     }
3726   }
3727 
3728   // Store of zero anywhere into a freshly-allocated object?
3729   // Then the store is useless.
3730   // (It must already have been captured by the InitializeNode.)
3731   if (result == this && ReduceFieldZeroing) {

3732     // a newly allocated object is already all-zeroes everywhere
3733     if (mem->is_Proj() && mem->in(0)->is_Allocate() &&
3734         (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::InitValue) == val)) {
3735       result = mem;
3736     }
3737 
3738     if (result == this && phase->type(val)->is_zero_type()) {
3739       // the store may also apply to zero-bits in an earlier object
3740       Node* prev_mem = find_previous_store(phase);
3741       // Steps (a), (b):  Walk past independent stores to find an exact match.
3742       if (prev_mem != nullptr) {
3743         Node* prev_val = can_see_stored_value(prev_mem, phase);
3744         if (prev_val != nullptr && prev_val == val) {
3745           // prev_val and val might differ by a cast; it would be good
3746           // to keep the more informative of the two.
3747           result = mem;
3748         }
3749       }
3750     }
3751   }
3752 
3753   PhaseIterGVN* igvn = phase->is_IterGVN();
3754   if (result != this && igvn != nullptr) {
3755     MemBarNode* trailing = trailing_membar();
3756     if (trailing != nullptr) {
3757 #ifdef ASSERT
3758       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();

4222 // Clearing a short array is faster with stores
4223 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
4224   // Already know this is a large node, do not try to ideal it
4225   if (_is_large) return nullptr;
4226 
4227   const int unit = BytesPerLong;
4228   const TypeX* t = phase->type(in(2))->isa_intptr_t();
4229   if (!t)  return nullptr;
4230   if (!t->is_con())  return nullptr;
4231   intptr_t raw_count = t->get_con();
4232   intptr_t size = raw_count;
4233   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
4234   // Clearing nothing uses the Identity call.
4235   // Negative clears are possible on dead ClearArrays
4236   // (see jck test stmt114.stmt11402.val).
4237   if (size <= 0 || size % unit != 0)  return nullptr;
4238   intptr_t count = size / unit;
4239   // Length too long; communicate this to matchers and assemblers.
4240   // Assemblers are responsible for producing fast hardware clears for it.
4241   if (size > InitArrayShortSize) {
4242     return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true);
4243   } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
4244     return nullptr;
4245   }
4246   if (!IdealizeClearArrayNode) return nullptr;
4247   Node *mem = in(1);
4248   if( phase->type(mem)==Type::TOP ) return nullptr;
4249   Node *adr = in(3);
4250   const Type* at = phase->type(adr);
4251   if( at==Type::TOP ) return nullptr;
4252   const TypePtr* atp = at->isa_ptr();
4253   // adjust atp to be the correct array element address type
4254   if (atp == nullptr)  atp = TypePtr::BOTTOM;
4255   else              atp = atp->add_offset(Type::OffsetBot);
4256   // Get base for derived pointer purposes
4257   if( adr->Opcode() != Op_AddP ) Unimplemented();
4258   Node *base = adr->in(1);
4259 
4260   Node *val = in(4);
4261   Node *off  = phase->MakeConX(BytesPerLong);
4262   mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
4263   count--;
4264   while( count-- ) {
4265     mem = phase->transform(mem);
4266     adr = phase->transform(new AddPNode(base,adr,off));
4267     mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
4268   }
4269   return mem;
4270 }
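// Illustrative example (assuming IdealizeClearArrayNode is set): a small
// ClearArray covering three words at adr expands above into a chain of long
// stores of the fill value (zero for ordinary arrays):
//   mem = StoreL(mem, adr,           val)
//   mem = StoreL(mem, AddP(adr,  8), val)
//   mem = StoreL(mem, AddP(adr, 16), val)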
4271 
4272 //----------------------------step_through----------------------------------
4273 // Return the allocation's input memory edge if it is a different instance,
4274 // or the node itself if it is the instance we are looking for.
4275 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseValues* phase) {
4276   Node* n = *np;
4277   assert(n->is_ClearArray(), "sanity");
4278   intptr_t offset;
4279   AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
4280   // This method is called only before Allocate nodes are expanded
4281   // during macro node expansion. Before that, ClearArray nodes are
4282   // only generated in PhaseMacroExpand::generate_arraycopy() (before
4283   // Allocate nodes are expanded) which follows allocations.
4284   assert(alloc != nullptr, "should have allocation");
4285   if (alloc->_idx == instance_id) {
4286     // Cannot bypass the initialization of the instance we are looking for.
4287     return false;
4288   }
4289   // Otherwise skip it.
4290   InitializeNode* init = alloc->initialization();
4291   if (init != nullptr)
4292     *np = init->in(TypeFunc::Memory);
4293   else
4294     *np = alloc->in(TypeFunc::Memory);
4295   return true;
4296 }
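// Sketch of a hypothetical caller walking up a memory chain (names here are
// illustrative, not a specific call site):
//   Node* mem = ...;
//   while (mem->is_ClearArray() &&
//          ClearArrayNode::step_through(&mem, instance_id, phase)) {
//     // mem has been advanced past an unrelated initialization
//   }
// The walk stops once a ClearArray for the instance of interest is reached.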
4297 
4298 //----------------------------clear_memory-------------------------------------
4299 // Generate code to initialize object storage to zero.
4300 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4301                                    Node* val,
4302                                    Node* raw_val,
4303                                    intptr_t start_offset,
4304                                    Node* end_offset,
4305                                    PhaseGVN* phase) {
4306   intptr_t offset = start_offset;
4307 
4308   int unit = BytesPerLong;
4309   if ((offset % unit) != 0) {
4310     Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
4311     adr = phase->transform(adr);
4312     const TypePtr* atp = TypeRawPtr::BOTTOM;
4313     if (val != nullptr) {
4314       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
4315       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
4316     } else {
4317       assert(raw_val == nullptr, "raw_val must be null when val is null");
4318       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4319     }
4320     mem = phase->transform(mem);
4321     offset += BytesPerInt;
4322   }
4323   assert((offset % unit) == 0, "offset must be long-aligned");
4324 
4325   // Initialize the remaining stuff, if any, with a ClearArray.
4326   return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase);
4327 }
4328 
4329 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4330                                    Node* raw_val,
4331                                    Node* start_offset,
4332                                    Node* end_offset,
4333                                    PhaseGVN* phase) {
4334   if (start_offset == end_offset) {
4335     // nothing to do
4336     return mem;
4337   }
4338 
4339   int unit = BytesPerLong;
4340   Node* zbase = start_offset;
4341   Node* zend  = end_offset;
4342 
4343   // Scale to the unit required by the CPU:
4344   if (!Matcher::init_array_count_is_in_bytes) {
4345     Node* shift = phase->intcon(exact_log2(unit));
4346     zbase = phase->transform(new URShiftXNode(zbase, shift));
4347     zend  = phase->transform(new URShiftXNode(zend,  shift));
4348   }
4349 
4350   // Bulk clear double-words
4351   Node* zsize = phase->transform(new SubXNode(zend, zbase));
4352   Node* adr = phase->transform(new AddPNode(dest, dest, start_offset));
4353   if (raw_val == nullptr) {
4354     raw_val = phase->MakeConX(0);
4355   }
4356   mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false);
4357   return phase->transform(mem);
4358 }
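// For example, with byte offsets start_offset = 16 and end_offset = 64 on a
// machine where the clear count is in 8-byte words (i.e.
// Matcher::init_array_count_is_in_bytes is false), this builds, roughly:
//   zsize = (end >> 3) - (start >> 3)            // 8 - 2 = 6 words
//   adr   = AddP(dest, dest, start_offset)       // first byte to clear
//   mem   = ClearArray(ctl, mem, zsize, adr, 0)  // bulk double-word clear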
4359 
4360 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4361                                    Node* val,
4362                                    Node* raw_val,
4363                                    intptr_t start_offset,
4364                                    intptr_t end_offset,
4365                                    PhaseGVN* phase) {
4366   if (start_offset == end_offset) {
4367     // nothing to do
4368     return mem;
4369   }
4370 
4371   assert((end_offset % BytesPerInt) == 0, "odd end offset");
4372   intptr_t done_offset = end_offset;
4373   if ((done_offset % BytesPerLong) != 0) {
4374     done_offset -= BytesPerInt;
4375   }
4376   if (done_offset > start_offset) {
4377     mem = clear_memory(ctl, mem, dest, val, raw_val,
4378                        start_offset, phase->MakeConX(done_offset), phase);
4379   }
4380   if (done_offset < end_offset) { // emit the final 32-bit store
4381     Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
4382     adr = phase->transform(adr);
4383     const TypePtr* atp = TypeRawPtr::BOTTOM;
4384     if (val != nullptr) {
4385       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
4386       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
4387     } else {
4388       assert(raw_val == nullptr, "raw_val must be null when val is null");
4389       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4390     }
4391     mem = phase->transform(mem);
4392     done_offset += BytesPerInt;
4393   }
4394   assert(done_offset == end_offset, "entire range must be covered");
4395   return mem;
4396 }
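// For example, clearing byte range [12, 28) with val == raw_val == nullptr
// rounds done_offset down to 24, routes [12, 24) through the word-aligned
// overload above (a 4-byte zero store at offset 12 plus a ClearArray over
// [16, 24)), and finishes the trailing bytes [24, 28) with the final
// 32-bit store emitted here.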
4397 
4398 //=============================================================================
4399 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
4400   : MultiNode(TypeFunc::Parms + (precedent == nullptr ? 0 : 1)),
4401     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
4402 #ifdef ASSERT
4403   , _pair_idx(0)
4404 #endif
4405 {
4406   init_class_id(Class_MemBar);
4407   Node* top = C->top();
4408   init_req(TypeFunc::I_O, top);
4409   init_req(TypeFunc::FramePtr, top);
4410   init_req(TypeFunc::ReturnAdr, top);

4516       PhaseIterGVN* igvn = phase->is_IterGVN();
4517       remove(igvn);
4518       // Must return either the original node (now dead) or a new node
4519       // (Do not return a top here, since that would break the uniqueness of top.)
4520       return new ConINode(TypeInt::ZERO);
4521     }
4522   }
4523   return progress ? this : nullptr;
4524 }
4525 
4526 //------------------------------Value------------------------------------------
4527 const Type* MemBarNode::Value(PhaseGVN* phase) const {
4528   if (in(0) == nullptr) return Type::TOP;
4529   if (phase->type(in(0)) == Type::TOP)
4530     return Type::TOP;
4531   return TypeTuple::MEMBAR;
4532 }
4533 
4534 //------------------------------match------------------------------------------
4535 // Construct projections for memory.
4536 Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) {
4537   switch (proj->_con) {
4538   case TypeFunc::Control:
4539   case TypeFunc::Memory:
4540     return new MachProjNode(this, proj->_con, RegMask::Empty, MachProjNode::unmatched_proj);
4541   }
4542   ShouldNotReachHere();
4543   return nullptr;
4544 }
4545 
4546 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4547   trailing->_kind = TrailingStore;
4548   leading->_kind = LeadingStore;
4549 #ifdef ASSERT
4550   trailing->_pair_idx = leading->_idx;
4551   leading->_pair_idx = leading->_idx;
4552 #endif
4553 }
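// Schematically, for a volatile store the barriers bracket the access as
//   MemBarRelease (leading)  ->  StoreX  ->  MemBarVolatile (trailing)
// and, in debug builds, both record the leading barrier's _idx so that
// leading/trailing pairing can be verified later.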
4554 
4555 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4556   trailing->_kind = TrailingLoadStore;

4803   return (req() > RawStores);
4804 }
4805 
4806 void InitializeNode::set_complete(PhaseGVN* phase) {
4807   assert(!is_complete(), "caller responsibility");
4808   _is_complete = Complete;
4809 
4810   // After this node is complete, it contains a bunch of
4811   // raw-memory initializations.  There is no need for
4812   // it to have anything to do with non-raw memory effects.
4813   // Therefore, tell all non-raw users to re-optimize themselves,
4814   // after skipping the memory effects of this initialization.
4815   PhaseIterGVN* igvn = phase->is_IterGVN();
4816   if (igvn)  igvn->add_users_to_worklist(this);
4817 }
4818 
4819 // Convenience function.
4820 // Returns false if the init already contains any stores.
4821 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
4822   InitializeNode* init = initialization();
4823   if (init == nullptr || init->is_complete()) {
4824     return false;
4825   }
4826   init->remove_extra_zeroes();
4827   // for now, if this allocation has already collected any inits, bail:
4828   if (init->is_non_zero())  return false;
4829   init->set_complete(phase);
4830   return true;
4831 }
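// Sketch of a hypothetical call site (illustrative only): a caller that has
// emitted an allocation with no captured field stores can do, roughly,
//   if (alloc->maybe_set_complete(&gvn)) {
//     // the InitializeNode now stands for a pure zero-fill
//   }
// after which loads from the fresh object can fold to zero through it.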
4832 
4833 void InitializeNode::remove_extra_zeroes() {
4834   if (req() == RawStores)  return;
4835   Node* zmem = zero_memory();
4836   uint fill = RawStores;
4837   for (uint i = fill; i < req(); i++) {
4838     Node* n = in(i);
4839     if (n->is_top() || n == zmem)  continue;  // skip
4840     if (fill < i)  set_req(fill, n);          // compact
4841     ++fill;
4842   }
4843   // delete any empty spaces created:
4844   while (fill < req()) {
4845     del_req(fill);

4989             // store node that we'd like to capture. We need to check
4990             // the uses of the MergeMemNode.
4991             mems.push(n);
4992           }
4993         } else if (n->is_Mem()) {
4994           Node* other_adr = n->in(MemNode::Address);
4995           if (other_adr == adr) {
4996             failed = true;
4997             break;
4998           } else {
4999             const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
5000             if (other_t_adr != nullptr) {
5001               int other_alias_idx = phase->C->get_alias_index(other_t_adr);
5002               if (other_alias_idx == alias_idx) {
5003                 // A load from the same memory slice as the store right
5004                 // after the InitializeNode. We check the control of the
5005                 // object/array that is loaded from. If it's the same as
5006                 // the store control then we cannot capture the store.
5007                 assert(!n->is_Store(), "2 stores to same slice on same control?");
5008                 Node* base = other_adr;
5009                 if (base->is_Phi()) {
5010                   // In rare cases, base may be a PhiNode that reads the
5011                   // same memory slice between the InitializeNode and the store.
5012                   failed = true;
5013                   break;
5014                 }
5015                 assert(base->is_AddP(), "should be addp but is %s", base->Name());
5016                 base = base->in(AddPNode::Base);
5017                 if (base != nullptr) {
5018                   base = base->uncast();
5019                   if (base->is_Proj() && base->in(0) == alloc) {
5020                     failed = true;
5021                     break;
5022                   }
5023                 }
5024               }
5025             }
5026           }
5027         } else {
5028           failed = true;
5029           break;
5030         }
5031       }
5032     }
5033   }
5034   if (failed) {

5581         //   z's_done      12  16  16  16    12  16    12
5582         //   z's_needed    12  16  16  16    16  16    16
5583         //   zsize          0   0   0   0     4   0     4
5584         if (next_full_store < 0) {
5585           // Conservative tack:  Zero to end of current word.
5586           zeroes_needed = align_up(zeroes_needed, BytesPerInt);
5587         } else {
5588           // Zero to beginning of next fully initialized word.
5589           // Or, don't zero at all, if we are already in that word.
5590           assert(next_full_store >= zeroes_needed, "must go forward");
5591           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
5592           zeroes_needed = next_full_store;
5593         }
5594       }
5595 
5596       if (zeroes_needed > zeroes_done) {
5597         intptr_t zsize = zeroes_needed - zeroes_done;
5598         // Do some incremental zeroing on rawmem, in parallel with inits.
5599         zeroes_done = align_down(zeroes_done, BytesPerInt);
5600         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5601                                               allocation()->in(AllocateNode::InitValue),
5602                                               allocation()->in(AllocateNode::RawInitValue),
5603                                               zeroes_done, zeroes_needed,
5604                                               phase);
5605         zeroes_done = zeroes_needed;
5606         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
5607           do_zeroing = false;   // leave the hole, next time
5608       }
5609     }
5610 
5611     // Collect the store and move on:
5612     phase->replace_input_of(st, MemNode::Memory, inits);
5613     inits = st;                 // put it on the linearized chain
5614     set_req(i, zmem);           // unhook from previous position
5615 
5616     if (zeroes_done == st_off)
5617       zeroes_done = next_init_off;
5618 
5619     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
5620 
5621     #ifdef ASSERT
5622     // Various order invariants.  Weaker than stores_are_sane because

5642   remove_extra_zeroes();        // clear out all the zmems left over
5643   add_req(inits);
5644 
5645   if (!(UseTLAB && ZeroTLAB)) {
5646     // If anything remains to be zeroed, zero it all now.
5647     zeroes_done = align_down(zeroes_done, BytesPerInt);
5648     // if it is the last unused 4 bytes of an instance, forget about it
5649     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
5650     if (zeroes_done + BytesPerLong >= size_limit) {
5651       AllocateNode* alloc = allocation();
5652       assert(alloc != nullptr, "must be present");
5653       if (alloc != nullptr && alloc->Opcode() == Op_Allocate) {
5654         Node* klass_node = alloc->in(AllocateNode::KlassNode);
5655         ciKlass* k = phase->type(klass_node)->is_instklassptr()->instance_klass();
5656         if (zeroes_done == k->layout_helper())
5657           zeroes_done = size_limit;
5658       }
5659     }
5660     if (zeroes_done < size_limit) {
5661       rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5662                                             allocation()->in(AllocateNode::InitValue),
5663                                             allocation()->in(AllocateNode::RawInitValue),
5664                                             zeroes_done, size_in_bytes, phase);
5665     }
5666   }
5667 
5668   set_complete(phase);
5669   return rawmem;
5670 }
5671 
5672 
5673 #ifdef ASSERT
5674 bool InitializeNode::stores_are_sane(PhaseValues* phase) {
5675   if (is_complete())
5676     return true;                // stores could be anything at this point
5677   assert(allocation() != nullptr, "must be present");
5678   intptr_t last_off = allocation()->minimum_header_size();
5679   for (uint i = InitializeNode::RawStores; i < req(); i++) {
5680     Node* st = in(i);
5681     intptr_t st_off = get_store_offset(st, phase);
5682     if (st_off < 0)  continue;  // ignore dead garbage
5683     if (last_off > st_off) {