src/hotspot/share/opto/memnode.cpp

   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 


  26 #include "classfile/javaClasses.hpp"

  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "gc/shared/tlab_globals.hpp"
  31 #include "memory/allocation.inline.hpp"
  32 #include "memory/resourceArea.hpp"

  33 #include "oops/objArrayKlass.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/arraycopynode.hpp"

  36 #include "opto/cfgnode.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/connode.hpp"
  39 #include "opto/convertnode.hpp"

  40 #include "opto/loopnode.hpp"
  41 #include "opto/machnode.hpp"
  42 #include "opto/matcher.hpp"
  43 #include "opto/memnode.hpp"
  44 #include "opto/mempointer.hpp"
  45 #include "opto/mulnode.hpp"
  46 #include "opto/narrowptrnode.hpp"
  47 #include "opto/phaseX.hpp"
  48 #include "opto/regalloc.hpp"
  49 #include "opto/regmask.hpp"
  50 #include "opto/rootnode.hpp"
  51 #include "opto/traceMergeStoresTag.hpp"
  52 #include "opto/vectornode.hpp"

  53 #include "utilities/align.hpp"
  54 #include "utilities/copy.hpp"

  55 #include "utilities/macros.hpp"
  56 #include "utilities/powerOfTwo.hpp"
  57 #include "utilities/vmError.hpp"
  58 
  59 // Portions of code courtesy of Clifford Click
  60 
  61 // Optimization - Graph Style
  62 
  63 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
  64 
  65 //=============================================================================
  66 uint MemNode::size_of() const { return sizeof(*this); }
  67 
  68 const TypePtr *MemNode::adr_type() const {
  69   Node* adr = in(Address);
  70   if (adr == nullptr)  return nullptr; // node is dead
  71   const TypePtr* cross_check = nullptr;
  72   DEBUG_ONLY(cross_check = _adr_type);
  73   return calculate_adr_type(adr->bottom_type(), cross_check);
  74 }

 123       st->print(", idx=Bot;");
 124     else if (atp->index() == Compile::AliasIdxTop)
 125       st->print(", idx=Top;");
 126     else if (atp->index() == Compile::AliasIdxRaw)
 127       st->print(", idx=Raw;");
 128     else {
 129       ciField* field = atp->field();
 130       if (field) {
 131         st->print(", name=");
 132         field->print_name_on(st);
 133       }
 134       st->print(", idx=%d;", atp->index());
 135     }
 136   }
 137 }
 138 
 139 extern void print_alias_types();
 140 
 141 #endif
 142 
 143 Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) {
 144   assert((t_oop != nullptr), "sanity");
 145   bool is_instance = t_oop->is_known_instance_field();
 146   bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() &&
 147                              (load != nullptr) && load->is_Load() &&
 148                              (phase->is_IterGVN() != nullptr);
 149   if (!(is_instance || is_boxed_value_load))
  150     return mchain;  // don't try to optimize non-instance types
 151   uint instance_id = t_oop->instance_id();
 152   Node *start_mem = phase->C->start()->proj_out_or_null(TypeFunc::Memory);
 153   Node *prev = nullptr;
 154   Node *result = mchain;
 155   while (prev != result) {
 156     prev = result;
 157     if (result == start_mem)
 158       break;  // hit one of our sentinels



 159     // skip over a call which does not affect this memory slice
 160     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
 161       Node *proj_in = result->in(0);
 162       if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
 163         break;  // hit one of our sentinels

 164       } else if (proj_in->is_Call()) {
 165         // ArrayCopyNodes processed here as well
 166         CallNode *call = proj_in->as_Call();
  167         if (!call->may_modify(t_oop, phase)) { // returns false for instances
 168           result = call->in(TypeFunc::Memory);
 169         }



 170       } else if (proj_in->is_Initialize()) {
 171         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
 172         // Stop if this is the initialization for the object instance which
 173         // contains this memory slice, otherwise skip over it.
 174         if ((alloc == nullptr) || (alloc->_idx == instance_id)) {
 175           break;
 176         }
 177         if (is_instance) {
 178           result = proj_in->in(TypeFunc::Memory);
 179         } else if (is_boxed_value_load) {
 180           Node* klass = alloc->in(AllocateNode::KlassNode);
 181           const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
 182           if (tklass->klass_is_exact() && !tklass->exact_klass()->equals(t_oop->is_instptr()->exact_klass())) {
  183             result = proj_in->in(TypeFunc::Memory); // not related allocation
 184           }
 185         }
 186       } else if (proj_in->is_MemBar()) {
 187         ArrayCopyNode* ac = nullptr;
 188         if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase, ac)) {
 189           break;
 190         }
  191         result = proj_in->in(TypeFunc::Memory);
 192       } else if (proj_in->is_top()) {
 193         break; // dead code
 194       } else {
 195         assert(false, "unexpected projection");
 196       }
 197     } else if (result->is_ClearArray()) {
 198       if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {
 199         // Can not bypass initialization of the instance
 200         // we are looking for.
 201         break;
 202       }
 203       // Otherwise skip it (the call updated 'result' value).
 204     } else if (result->is_MergeMem()) {
 205       result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, nullptr, tty);
 206     }
 207   }
 208   return result;
 209 }
 210 
 211 Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase) {
 212   const TypeOopPtr* t_oop = t_adr->isa_oopptr();
 213   if (t_oop == nullptr)
 214     return mchain;  // don't try to optimize non-oop types
 215   Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase);
 216   bool is_instance = t_oop->is_known_instance_field();
 217   PhaseIterGVN *igvn = phase->is_IterGVN();
 218   if (is_instance && igvn != nullptr && result->is_Phi()) {
 219     PhiNode *mphi = result->as_Phi();
 220     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
 221     const TypePtr *t = mphi->adr_type();
 222     bool do_split = false;
 223     // In the following cases, Load memory input can be further optimized based on
 224     // its precise address type
 225     if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ) {
 226       do_split = true;
 227     } else if (t->isa_oopptr() && !t->is_oopptr()->is_known_instance()) {
 228       const TypeOopPtr* mem_t =
 229         t->is_oopptr()->cast_to_exactness(true)
 230         ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
 231         ->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
 232       if (t_oop->isa_aryptr()) {
 233         mem_t = mem_t->is_aryptr()
 234                      ->cast_to_stable(t_oop->is_aryptr()->is_stable())
  235                      ->cast_to_size(t_oop->is_aryptr()->size())
 236                      ->with_offset(t_oop->is_aryptr()->offset())
 237                      ->is_aryptr();
 238       }
 239       do_split = mem_t == t_oop;
 240     }
 241     if (do_split) {
 242       // clone the Phi with our address type
 243       result = mphi->split_out_instance(t_adr, igvn);
 244     } else {
 245       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
 246     }
 247   }
 248   return result;
 249 }
 250 
 251 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
 252   uint alias_idx = phase->C->get_alias_index(tp);
 253   Node *mem = mmem;
 254 #ifdef ASSERT
 255   {
 256     // Check that current type is consistent with the alias index used during graph construction
 257     assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
 258     bool consistent =  adr_check == nullptr || adr_check->empty() ||
 259                        phase->C->must_alias(adr_check, alias_idx );
 260     // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
 261     if( !consistent && adr_check != nullptr && !adr_check->empty() &&
 262                tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
 263         adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
 264         ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
 265           adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
 266           adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
 267       // don't assert if it is dead code.
 268       consistent = true;
 269     }
 270     if( !consistent ) {
 271       st->print("alias_idx==%d, adr_check==", alias_idx);
 272       if( adr_check == nullptr ) {
 273         st->print("null");
 274       } else {
 275         adr_check->dump();
 276       }
 277       st->cr();
 278       print_alias_types();
 279       assert(consistent, "adr_check must match alias idx");
 280     }
 281   }
 282 #endif

 954          "use LoadKlassNode instead");
 955   assert(!(adr_type->isa_aryptr() &&
 956            adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
 957          "use LoadRangeNode instead");
 958   // Check control edge of raw loads
 959   assert( ctl != nullptr || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
 960           // oop will be recorded in oop map if load crosses safepoint
 961           rt->isa_oopptr() || is_immutable_value(adr),
 962           "raw memory operations should have control edge");
 963   LoadNode* load = nullptr;
 964   switch (bt) {
 965   case T_BOOLEAN: load = new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 966   case T_BYTE:    load = new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 967   case T_INT:     load = new LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 968   case T_CHAR:    load = new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 969   case T_SHORT:   load = new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 970   case T_LONG:    load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic_access); break;
 971   case T_FLOAT:   load = new LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
 972   case T_DOUBLE:  load = new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency, require_atomic_access); break;
 973   case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency); break;

 974   case T_OBJECT:
 975   case T_NARROWOOP:
 976 #ifdef _LP64
 977     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
 978       load = new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency);
 979     } else
 980 #endif
 981     {
 982       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
 983       load = new LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
 984     }
 985     break;
 986   default:
 987     ShouldNotReachHere();
 988     break;
 989   }
 990   assert(load != nullptr, "LoadNode should have been created");
 991   if (unaligned) {
 992     load->set_unaligned_access();
 993   }
 994   if (mismatched) {
 995     load->set_mismatched_access();
 996   }
 997   if (unsafe) {
 998     load->set_unsafe_access();
 999   }
1000   load->set_barrier_data(barrier_data);
1001   if (load->Opcode() == Op_LoadN) {
1002     Node* ld = gvn.transform(load);
1003     return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
1004   }
1005 
1006   return load;
1007 }
1008 
1009 //------------------------------hash-------------------------------------------
1010 uint LoadNode::hash() const {
1011   // unroll addition of interesting fields
1012   return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
1013 }
1014 
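       // Helper for can_see_stored_value(): a load may skip over memory barriers
       // only when boxing elimination is enabled and the accessed field is
       // non-volatile, or when the access is to a stable array element
       // (effectively final memory).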
1015 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
1016   if ((atp != nullptr) && (atp->index() >= Compile::AliasIdxRaw)) {
1017     bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
1018     bool is_stable_ary = FoldStableValues &&
1019                          (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
1020                          tp->isa_aryptr()->is_stable();
1021 
1022     return (eliminate_boxing && non_volatile) || is_stable_ary;
1023   }
1024 
1025   return false;
1026 }
1027 
1028 LoadNode* LoadNode::pin_array_access_node() const {
1029   const TypePtr* adr_type = this->adr_type();
1030   if (adr_type != nullptr && adr_type->isa_aryptr()) {
1031     return clone_pinned();
1032   }
1033   return nullptr;
1034 }
1035 
1036 // Is the value loaded previously stored by an arraycopy? If so return
1037 // a load node that reads from the source array so we may be able to
1038 // optimize out the ArrayCopy node later.
1039 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
1040   Node* ld_adr = in(MemNode::Address);
1041   intptr_t ld_off = 0;
1042   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);

1058     if (ac->as_ArrayCopy()->is_clonebasic()) {
1059       assert(ld_alloc != nullptr, "need an alloc");
1060       assert(addp->is_AddP(), "address must be addp");
1061       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1062       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1063       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1064       addp->set_req(AddPNode::Base, src);
1065       addp->set_req(AddPNode::Address, src);
1066     } else {
1067       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1068              ac->as_ArrayCopy()->is_copyof_validated() ||
1069              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1070       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1071       addp->set_req(AddPNode::Base, src);
1072       addp->set_req(AddPNode::Address, src);
1073 
1074       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1075       BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
1076       if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1077 
1078       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1079       uint shift  = exact_log2(type2aelembytes(ary_elem));
1080 
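             // Rebase the load address from the copy destination onto the copy source:
             // scale the (SrcPos - DestPos) element delta to a byte offset and add it
             // to the original AddP offset.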
1081       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1082 #ifdef _LP64
1083       diff = phase->transform(new ConvI2LNode(diff));
1084 #endif
1085       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1086 
1087       Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1088       addp->set_req(AddPNode::Offset, offset);
1089     }
1090     addp = phase->transform(addp);
1091 #ifdef ASSERT
1092     const TypePtr* adr_type = phase->type(addp)->is_ptr();
1093     ld->_adr_type = adr_type;
1094 #endif
1095     ld->set_req(MemNode::Address, addp);
1096     ld->set_req(0, ctl);
1097     ld->set_req(MemNode::Memory, mem);
1098     return ld;
1099   }
1100   return nullptr;
1101 }
 1102 
1103 
1104 //---------------------------can_see_stored_value------------------------------
1105 // This routine exists to make sure this set of tests is done the same
1106 // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
1107 // will change the graph shape in a way which makes memory alive twice at the
1108 // same time (uses the Oracle model of aliasing), then some
1109 // LoadXNode::Identity will fold things back to the equivalence-class model
1110 // of aliasing.

1111 Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
1112   Node* ld_adr = in(MemNode::Address);
1113   intptr_t ld_off = 0;
 1114   Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
1115   Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
1116   const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
1117   Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr;
1118   // This is more general than load from boxing objects.
1119   if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
1120     uint alias_idx = atp->index();
1121     Node* result = nullptr;
1122     Node* current = st;
1123     // Skip through chains of MemBarNodes checking the MergeMems for
1124     // new states for the slice of this load.  Stop once any other
1125     // kind of node is encountered.  Loads from final memory can skip
1126     // through any kind of MemBar but normal loads shouldn't skip
 1127     // through MemBarAcquire since that could allow them to move out of
1128     // a synchronized region. It is not safe to step over MemBarCPUOrder,
1129     // because alias info above them may be inaccurate (e.g., due to
1130     // mixed/mismatched unsafe accesses).
1131     bool is_final_mem = !atp->is_rewritable();
1132     while (current->is_Proj()) {
1133       int opc = current->in(0)->Opcode();
1134       if ((is_final_mem && (opc == Op_MemBarAcquire ||

1178         // Same base, same offset.
1179         // Possible improvement for arrays: check index value instead of absolute offset.
1180 
1181         // At this point we have proven something like this setup:
1182         //   B = << base >>
1183         //   L =  LoadQ(AddP(Check/CastPP(B), #Off))
1184         //   S = StoreQ(AddP(             B , #Off), V)
1185         // (Actually, we haven't yet proven the Q's are the same.)
1186         // In other words, we are loading from a casted version of
1187         // the same pointer-and-offset that we stored to.
1188         // Casted version may carry a dependency and it is respected.
1189         // Thus, we are able to replace L by V.
1190       }
1191       // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1192       if (store_Opcode() != st->Opcode()) {
1193         return nullptr;
1194       }
1195       // LoadVector/StoreVector needs additional check to ensure the types match.
1196       if (st->is_StoreVector()) {
1197         const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
1198         const TypeVect* out_vt = as_LoadVector()->vect_type();
1199         if (in_vt != out_vt) {
1200           return nullptr;
1201         }
1202       }
1203       return st->in(MemNode::ValueIn);
1204     }
1205 
1206     // A load from a freshly-created object always returns zero.
1207     // (This can happen after LoadNode::Ideal resets the load's memory input
1208     // to find_captured_store, which returned InitializeNode::zero_memory.)
1209     if (st->is_Proj() && st->in(0)->is_Allocate() &&
1210         (st->in(0) == ld_alloc) &&
1211         (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1212       // return a zero value for the load's basic type
1213       // (This is one of the few places where a generic PhaseTransform
1214       // can create new nodes.  Think of it as lazily manifesting
 1215       // virtually pre-existing constants.)
1216       if (value_basic_type() != T_VOID) {
1217         if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
 1218           // If ReduceBulkZeroing is disabled, we need to check that the allocation does not belong to an
1219           // ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
1220           // by the ArrayCopyNode.
1221           return phase->zerocon(value_basic_type());
1222         }
1223       } else {
1224         // TODO: materialize all-zero vector constant
1225         assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1226       }
1227     }
1228 
1229     // A load from an initialization barrier can match a captured store.
1230     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1231       InitializeNode* init = st->in(0)->as_Initialize();
1232       AllocateNode* alloc = init->allocation();
1233       if ((alloc != nullptr) && (alloc == ld_alloc)) {
1234         // examine a captured store value
1235         st = init->find_captured_store(ld_off, memory_size(), phase);

1248       base = bs->step_over_gc_barrier(base);
1249       if (base != nullptr && base->is_Proj() &&
1250           base->as_Proj()->_con == TypeFunc::Parms &&
1251           base->in(0)->is_CallStaticJava() &&
1252           base->in(0)->as_CallStaticJava()->is_boxing_method()) {
1253         return base->in(0)->in(TypeFunc::Parms);
1254       }
1255     }
1256 
1257     break;
1258   }
1259 
1260   return nullptr;
1261 }
1262 
1263 //----------------------is_instance_field_load_with_local_phi------------------
1264 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1265   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1266       in(Address)->is_AddP() ) {
1267     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1268     // Only instances and boxed values.
1269     if( t_oop != nullptr &&
1270         (t_oop->is_ptr_to_boxed_value() ||
1271          t_oop->is_known_instance_field()) &&
1272         t_oop->offset() != Type::OffsetBot &&
1273         t_oop->offset() != Type::OffsetTop) {
1274       return true;
1275     }
1276   }
1277   return false;
1278 }
1279 
1280 //------------------------------Identity---------------------------------------
1281 // Loads are identity if previous store is to same address
1282 Node* LoadNode::Identity(PhaseGVN* phase) {
1283   // If the previous store-maker is the right kind of Store, and the store is
1284   // to the same address, then we are equal to the value stored.
1285   Node* mem = in(Memory);
1286   Node* value = can_see_stored_value(mem, phase);
1287   if( value ) {
1288     // byte, short & char stores truncate naturally.
1289     // A load has to load the truncated value which requires
1290     // some sort of masking operation and that requires an
1291     // Ideal call instead of an Identity call.
1292     if (memory_size() < BytesPerInt) {
1293       // If the input to the store does not fit with the load's result type,
1294       // it must be truncated via an Ideal call.
1295       if (!phase->type(value)->higher_equal(phase->type(this)))
1296         return this;
 1297     }
1298     // (This works even when value is a Con, but LoadNode::Value
1299     // usually runs first, producing the singleton type of the Con.)
1300     if (!has_pinned_control_dependency() || value->is_Con()) {
1301       return value;
1302     } else {
1303       return this;
1304     }
1305   }
1306 
1307   if (has_pinned_control_dependency()) {
1308     return this;
1309   }
1310   // Search for an existing data phi which was generated before for the same
1311   // instance's field to avoid infinite generation of phis in a loop.
1312   Node *region = mem->in(0);
1313   if (is_instance_field_load_with_local_phi(region)) {
1314     const TypeOopPtr *addr_t = in(Address)->bottom_type()->isa_oopptr();
1315     int this_index  = phase->C->get_alias_index(addr_t);
1316     int this_offset = addr_t->offset();
1317     int this_iid    = addr_t->instance_id();
1318     if (!addr_t->is_known_instance() &&
1319          addr_t->is_ptr_to_boxed_value()) {
1320       // Use _idx of address base (could be Phi node) for boxed values.
1321       intptr_t   ignore = 0;
1322       Node*      base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
1323       if (base == nullptr) {
1324         return this;
1325       }
1326       this_iid = base->_idx;
1327     }
1328     const Type* this_type = bottom_type();
1329     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1330       Node* phi = region->fast_out(i);
1331       if (phi->is_Phi() && phi != mem &&
1332           phi->as_Phi()->is_same_inst_field(this_type, (int)mem->_idx, this_iid, this_index, this_offset)) {
1333         return phi;
1334       }
1335     }
1336   }
1337 
1338   return this;
1339 }
1340 

1856   bool addr_mark = ((phase->type(address)->isa_oopptr() || phase->type(address)->isa_narrowoop()) &&
1857          phase->type(address)->is_ptr()->offset() == oopDesc::mark_offset_in_bytes());
1858 
1859   // Skip up past a SafePoint control.  Cannot do this for Stores because
1860   // pointer stores & cardmarks must stay on the same side of a SafePoint.
1861   if( ctrl != nullptr && ctrl->Opcode() == Op_SafePoint &&
1862       phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw  &&
1863       !addr_mark &&
1864       (depends_only_on_test() || has_unknown_control_dependency())) {
1865     ctrl = ctrl->in(0);
1866     set_req(MemNode::Control,ctrl);
1867     progress = true;
1868   }
1869 
1870   intptr_t ignore = 0;
1871   Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
1872   if (base != nullptr
1873       && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
1874     // Check for useless control edge in some common special cases
 1875     if (in(MemNode::Control) != nullptr
1876         && can_remove_control()
1877         && phase->type(base)->higher_equal(TypePtr::NOTNULL)
1878         && all_controls_dominate(base, phase->C->start())) {
1879       // A method-invariant, non-null address (constant or 'this' argument).
1880       set_req(MemNode::Control, nullptr);
1881       progress = true;
1882     }
1883   }
1884 
1885   Node* mem = in(MemNode::Memory);
1886   const TypePtr *addr_t = phase->type(address)->isa_ptr();
1887 
1888   if (can_reshape && (addr_t != nullptr)) {
1889     // try to optimize our memory input
1890     Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase);
1891     if (opt_mem != mem) {
1892       set_req_X(MemNode::Memory, opt_mem, phase);
1893       if (phase->type( opt_mem ) == Type::TOP) return nullptr;
1894       return this;
1895     }

1952   // fold up, do so.
1953   Node* prev_mem = find_previous_store(phase);
1954   if (prev_mem != nullptr) {
1955     Node* value = can_see_arraycopy_value(prev_mem, phase);
1956     if (value != nullptr) {
1957       return value;
1958     }
1959   }
1960   // Steps (a), (b):  Walk past independent stores to find an exact match.
1961   if (prev_mem != nullptr && prev_mem != in(MemNode::Memory)) {
1962     // (c) See if we can fold up on the spot, but don't fold up here.
1963     // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
1964     // just return a prior value, which is done by Identity calls.
1965     if (can_see_stored_value(prev_mem, phase)) {
1966       // Make ready for step (d):
1967       set_req_X(MemNode::Memory, prev_mem, phase);
1968       return this;
1969     }
1970   }
1971 
 1972   return progress ? this : nullptr;
1973 }
1974 
1975 // Helper to recognize certain Klass fields which are invariant across
1976 // some group of array types (e.g., int[] or all T[] where T < Object).
1977 const Type*
1978 LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
1979                                  ciKlass* klass) const {
1980   assert(!UseCompactObjectHeaders || tkls->offset() != in_bytes(Klass::prototype_header_offset()),
1981          "must not happen");
1982 
1983   if (tkls->isa_instklassptr() && tkls->offset() == in_bytes(InstanceKlass::access_flags_offset())) {
1984     // The field is InstanceKlass::_access_flags.  Return its (constant) value.
1985     assert(Opcode() == Op_LoadUS, "must load an unsigned short from _access_flags");
1986     ciInstanceKlass* iklass = tkls->is_instklassptr()->instance_klass();
1987     return TypeInt::make(iklass->access_flags());
1988   }
1989   if (tkls->offset() == in_bytes(Klass::misc_flags_offset())) {
1990     // The field is Klass::_misc_flags.  Return its (constant) value.
1991     assert(Opcode() == Op_LoadUB, "must load an unsigned byte from _misc_flags");
1992     return TypeInt::make(klass->misc_flags());

2000   // No match.
2001   return nullptr;
2002 }
2003 
2004 //------------------------------Value-----------------------------------------
2005 const Type* LoadNode::Value(PhaseGVN* phase) const {
2006   // Either input is TOP ==> the result is TOP
2007   Node* mem = in(MemNode::Memory);
2008   const Type *t1 = phase->type(mem);
2009   if (t1 == Type::TOP)  return Type::TOP;
2010   Node* adr = in(MemNode::Address);
2011   const TypePtr* tp = phase->type(adr)->isa_ptr();
2012   if (tp == nullptr || tp->empty())  return Type::TOP;
2013   int off = tp->offset();
2014   assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
2015   Compile* C = phase->C;
2016 
2017   // If load can see a previous constant store, use that.
2018   Node* value = can_see_stored_value(mem, phase);
2019   if (value != nullptr && value->is_Con()) {
2020     assert(value->bottom_type()->higher_equal(_type), "sanity");
 2021     return value->bottom_type();
2022   }
2023 
2024   // Try to guess loaded type from pointer type
2025   if (tp->isa_aryptr()) {
2026     const TypeAryPtr* ary = tp->is_aryptr();
2027     const Type* t = ary->elem();
2028 
2029     // Determine whether the reference is beyond the header or not, by comparing
2030     // the offset against the offset of the start of the array's data.
2031     // Different array types begin at slightly different offsets (12 vs. 16).
2032     // We choose T_BYTE as an example base type that is least restrictive
2033     // as to alignment, which will therefore produce the smallest
2034     // possible base offset.
2035     const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
2036     const bool off_beyond_header = (off >= min_base_off);
2037 
2038     // Try to constant-fold a stable array element.
2039     if (FoldStableValues && !is_mismatched_access() && ary->is_stable()) {
2040       // Make sure the reference is not into the header and the offset is constant
2041       ciObject* aobj = ary->const_oop();
2042       if (aobj != nullptr && off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
2043         int stable_dimension = (ary->stable_dimension() > 0 ? ary->stable_dimension() - 1 : 0);

2050       }
2051     }
2052 
2053     // Don't do this for integer types. There is only potential profit if
2054     // the element type t is lower than _type; that is, for int types, if _type is
2055     // more restrictive than t.  This only happens here if one is short and the other
2056     // char (both 16 bits), and in those cases we've made an intentional decision
2057     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
2058     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
2059     //
2060     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
2061     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
2062     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
2063     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
2064     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
2065     // In fact, that could have been the original type of p1, and p1 could have
2066     // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
2067     // expression (LShiftL quux 3) independently optimized to the constant 8.
2068     if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
2069         && (_type->isa_vect() == nullptr)

2070         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
2071       // t might actually be lower than _type, if _type is a unique
2072       // concrete subclass of abstract class t.
2073       if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
2074         const Type* jt = t->join_speculative(_type);
2075         // In any case, do not allow the join, per se, to empty out the type.
2076         if (jt->empty() && !t->empty()) {
 2077           // This can happen if an interface-typed array narrows to a class type.
2078           jt = _type;
2079         }
2080 #ifdef ASSERT
2081         if (phase->C->eliminate_boxing() && adr->is_AddP()) {
2082           // The pointers in the autobox arrays are always non-null
2083           Node* base = adr->in(AddPNode::Base);
2084           if ((base != nullptr) && base->is_DecodeN()) {
2085             // Get LoadN node which loads IntegerCache.cache field
2086             base = base->in(1);
2087           }
2088           if ((base != nullptr) && base->is_Con()) {
2089             const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
2090             if ((base_type != nullptr) && base_type->is_autobox_cache()) {
2091               // It could be narrow oop
2092               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
2093             }
2094           }
2095         }
2096 #endif
2097         return jt;
2098       }
2099     }
2100   } else if (tp->base() == Type::InstPtr) {
2101     assert( off != Type::OffsetBot ||
2102             // arrays can be cast to Objects
2103             !tp->isa_instptr() ||
 2104             tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
2105             // unsafe field access may not have a constant offset
2106             C->has_unsafe_access(),
2107             "Field accesses must be precise" );
2108     // For oop loads, we expect the _type to be precise.
2109 
2110     // Optimize loads from constant fields.
 2111     const TypeInstPtr* tinst = tp->is_instptr();
2112     ciObject* const_oop = tinst->const_oop();
2113     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
2114       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), value_basic_type());
2115       if (con_type != nullptr) {
2116         return con_type;
2117       }
2118     }
2119   } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
2120     assert(off != Type::OffsetBot ||
2121             !tp->isa_instklassptr() ||
2122            // arrays can be cast to Objects
2123            tp->isa_instklassptr()->instance_klass()->is_java_lang_Object() ||
2124            // also allow array-loading from the primary supertype
2125            // array during subtype checks
2126            Opcode() == Op_LoadKlass,
2127            "Field accesses must be precise");
2128     // For klass/static loads, we expect the _type to be precise
2129   } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
 2130     /* With mirrors being accessed indirectly through the Klass*,
 2131      * the VM now uses two loads: LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
2132      * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
2133      *
2134      * So check the type and klass of the node before the LoadP.

2141         assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2142         assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2143         return TypeInstPtr::make(klass->java_mirror());
2144       }
2145     }
2146   }
2147 
2148   const TypeKlassPtr *tkls = tp->isa_klassptr();
2149   if (tkls != nullptr) {
2150     if (tkls->is_loaded() && tkls->klass_is_exact()) {
2151       ciKlass* klass = tkls->exact_klass();
2152       // We are loading a field from a Klass metaobject whose identity
2153       // is known at compile time (the type is "exact" or "precise").
2154       // Check for fields we know are maintained as constants by the VM.
2155       if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2156         // The field is Klass::_super_check_offset.  Return its (constant) value.
2157         // (Folds up type checking code.)
2158         assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2159         return TypeInt::make(klass->super_check_offset());
2160       }
2161       if (UseCompactObjectHeaders) {
2162         if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
2163           // The field is Klass::_prototype_header. Return its (constant) value.
2164           assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
2165           return TypeX::make(klass->prototype_header());
 2166         }
2167       }
2168       // Compute index into primary_supers array
2169       juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2170       // Check for overflowing; use unsigned compare to handle the negative case.
2171       if( depth < ciKlass::primary_super_limit() ) {
2172         // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2173         // (Folds up type checking code.)
2174         assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2175         ciKlass *ss = klass->super_of_depth(depth);
2176         return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR;
2177       }
2178       const Type* aift = load_array_final_field(tkls, klass);
2179       if (aift != nullptr)  return aift;
2180     }
2181 
2182     // We can still check if we are loading from the primary_supers array at a
2183     // shallow enough depth.  Even though the klass is not exact, entries less
2184     // than or equal to its super depth are correct.
2185     if (tkls->is_loaded()) {
2186       ciKlass* klass = nullptr;

2220       jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
2221       // The key property of this type is that it folds up tests
2222       // for array-ness, since it proves that the layout_helper is positive.
2223       // Thus, a generic value like the basic object layout helper works fine.
2224       return TypeInt::make(min_size, max_jint, Type::WidenMin);
2225     }
2226   }
2227 
2228   // If we are loading from a freshly-allocated object/array, produce a zero.
2229   // Things to check:
2230   //   1. Load is beyond the header: headers are not guaranteed to be zero
2231   //   2. Load is not vectorized: vectors have no zero constant
2232   //   3. Load has no matching store, i.e. the input is the initial memory state
2233   const TypeOopPtr* tinst = tp->isa_oopptr();
2234   bool is_not_header = (tinst != nullptr) && tinst->is_known_instance_field();
2235   bool is_not_vect = (_type->isa_vect() == nullptr);
2236   if (is_not_header && is_not_vect) {
2237     Node* mem = in(MemNode::Memory);
2238     if (mem->is_Parm() && mem->in(0)->is_Start()) {
 2239       assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2240       return Type::get_zero_type(_type->basic_type());
2241     }
2242   }
2243 
2244   if (!UseCompactObjectHeaders) {
2245     Node* alloc = is_new_object_mark_load();
2246     if (alloc != nullptr) {
 2247       return TypeX::make(markWord::prototype().value());
2248     }
2249   }
2250 
2251   return _type;
2252 }
2253 
2254 //------------------------------match_edge-------------------------------------
2255 // Do we Match on this edge index or not?  Match only the address.
2256 uint LoadNode::match_edge(uint idx) const {
2257   return idx == MemNode::Address;
2258 }
2259 
2260 //--------------------------LoadBNode::Ideal--------------------------------------
2261 //
2262 //  If the previous store is to the same address as this load,
2263 //  and the value stored was larger than a byte, replace this load
2264 //  with the value stored truncated to a byte.  If no truncation is
2265 //  needed, the replacement is done in LoadNode::Identity().
2266 //
2267 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {

2376     }
2377   }
2378   // Identity call will handle the case where truncation is not needed.
2379   return LoadNode::Ideal(phase, can_reshape);
2380 }
2381 
2382 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2383   Node* mem = in(MemNode::Memory);
2384   Node* value = can_see_stored_value(mem,phase);
2385   if (value != nullptr && value->is_Con() &&
2386       !value->bottom_type()->higher_equal(_type)) {
2387     // If the input to the store does not fit with the load's result type,
2388     // it must be truncated. We can't delay until Ideal call since
2389     // a singleton Value is needed for split_thru_phi optimization.
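           // ((con << 16) >> 16) sign-extends the low 16 bits, mimicking a short
           // store followed by a signed (short) load.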
2390     int con = value->get_int();
2391     return TypeInt::make((con << 16) >> 16);
2392   }
2393   return LoadNode::Value(phase);
2394 }
 2395 
2396 //=============================================================================
2397 //----------------------------LoadKlassNode::make------------------------------
2398 // Polymorphic factory method:
2399 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
2400   // sanity check the alias category against the created node type
2401   const TypePtr* adr_type = adr->bottom_type()->isa_ptr();
2402   assert(adr_type != nullptr, "expecting TypeKlassPtr");
2403 #ifdef _LP64
2404   if (adr_type->is_ptr_to_narrowklass()) {
2405     assert(UseCompressedClassPointers, "no compressed klasses");
2406     Node* load_klass = gvn.transform(new LoadNKlassNode(mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2407     return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2408   }
2409 #endif
2410   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2411   return new LoadKlassNode(mem, adr, at, tk, MemNode::unordered);
2412 }
2413 
2414 //------------------------------Value------------------------------------------
2415 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {

2449           }
2450           return TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
2451         }
2452         if (!t->is_klass()) {
2453           // a primitive Class (e.g., int.class) has null for a klass field
2454           return TypePtr::NULL_PTR;
2455         }
2456         // Fold up the load of the hidden field
2457         return TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
2458       }
2459       // non-constant mirror, so we can't tell what's going on
2460     }
2461     if (!tinst->is_loaded())
2462       return _type;             // Bail out if not loaded
2463     if (offset == oopDesc::klass_offset_in_bytes()) {
2464       return tinst->as_klass_type(true);
2465     }
2466   }
2467 
2468   // Check for loading klass from an array
2469   const TypeAryPtr *tary = tp->isa_aryptr();
2470   if (tary != nullptr &&
2471       tary->offset() == oopDesc::klass_offset_in_bytes()) {
2472     return tary->as_klass_type(true);
2473   }
2474 
2475   // Check for loading klass from an array klass
2476   const TypeKlassPtr *tkls = tp->isa_klassptr();
2477   if (tkls != nullptr && !StressReflectiveCode) {
2478     if (!tkls->is_loaded())
2479      return _type;             // Bail out if not loaded
2480     if (tkls->isa_aryklassptr() && tkls->is_aryklassptr()->elem()->isa_klassptr() &&
2481         tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2482       // // Always returning precise element type is incorrect,
2483       // // e.g., element type could be object and array may contain strings
2484       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2485 
2486       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2487       // according to the element type's subclassing.
2488       return tkls->is_aryklassptr()->elem()->isa_klassptr()->cast_to_exactness(tkls->klass_is_exact());
 2489     }
2490     if (tkls->isa_instklassptr() != nullptr && tkls->klass_is_exact() &&
2491         tkls->offset() == in_bytes(Klass::super_offset())) {
2492       ciKlass* sup = tkls->is_instklassptr()->instance_klass()->super();
2493       // The field is Klass::_super.  Return its (constant) value.
2494       // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
2495       return sup ? TypeKlassPtr::make(sup, Type::trust_interfaces) : TypePtr::NULL_PTR;
2496     }
2497   }
2498 
2499   if (tkls != nullptr && !UseSecondarySupersCache
2500       && tkls->offset() == in_bytes(Klass::secondary_super_cache_offset()))  {
2501     // Treat Klass::_secondary_super_cache as a constant when the cache is disabled.
2502     return TypePtr::NULL_PTR;
2503   }
2504 
2505   // Bailout case
2506   return LoadNode::Value(phase);
2507 }
2508 
2509 //------------------------------Identity---------------------------------------

2532     base = bs->step_over_gc_barrier(base);
2533   }
2534 
2535   // We can fetch the klass directly through an AllocateNode.
2536   // This works even if the klass is not constant (clone or newArray).
2537   if (offset == oopDesc::klass_offset_in_bytes()) {
2538     Node* allocated_klass = AllocateNode::Ideal_klass(base, phase);
2539     if (allocated_klass != nullptr) {
2540       return allocated_klass;
2541     }
2542   }
2543 
2544   // Simplify k.java_mirror.as_klass to plain k, where k is a Klass*.
2545   // See inline_native_Class_query for occurrences of these patterns.
2546   // Java Example:  x.getClass().isAssignableFrom(y)
2547   //
2548   // This improves reflective code, often making the Class
2549   // mirror go completely dead.  (Current exception:  Class
2550   // mirrors may appear in debug info, but we could clean them out by
 2551   // introducing a new debug info operator for Klass.java_mirror).
2552 
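         // 'base' loads the mirror oop from an OopHandle and 'base2' loads that
         // OopHandle from k + Klass::_java_mirror, so the chain folds back to k.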
2553   if (toop->isa_instptr() && toop->is_instptr()->instance_klass() == phase->C->env()->Class_klass()
2554       && offset == java_lang_Class::klass_offset()) {
2555     if (base->is_Load()) {
2556       Node* base2 = base->in(MemNode::Address);
2557       if (base2->is_Load()) { /* direct load of a load which is the OopHandle */
2558         Node* adr2 = base2->in(MemNode::Address);
2559         const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
2560         if (tkls != nullptr && !tkls->empty()
2561             && (tkls->isa_instklassptr() || tkls->isa_aryklassptr())
2562             && adr2->is_AddP()
2563            ) {
2564           int mirror_field = in_bytes(Klass::java_mirror_offset());
2565           if (tkls->offset() == mirror_field) {
2566             return adr2->in(AddPNode::Base);
2567           }
2568         }
2569       }
2570     }
2571   }
2572 
2573   return this;
2574 }
2575 
2576 LoadNode* LoadNode::clone_pinned() const {
2577   LoadNode* ld = clone()->as_Load();
2578   ld->_control_dependency = UnknownControl;
2579   return ld;
2580 }
2581 
2582 
2583 //------------------------------Value------------------------------------------

2688 //---------------------------StoreNode::make-----------------------------------
2689 // Polymorphic factory method:
2690 StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo, bool require_atomic_access) {
2691   assert((mo == unordered || mo == release), "unexpected");
2692   Compile* C = gvn.C;
2693   assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
2694          ctl != nullptr, "raw memory operations should have control edge");
2695 
2696   switch (bt) {
2697   case T_BOOLEAN: val = gvn.transform(new AndINode(val, gvn.intcon(0x1))); // Fall through to T_BYTE case
2698   case T_BYTE:    return new StoreBNode(ctl, mem, adr, adr_type, val, mo);
2699   case T_INT:     return new StoreINode(ctl, mem, adr, adr_type, val, mo);
2700   case T_CHAR:
2701   case T_SHORT:   return new StoreCNode(ctl, mem, adr, adr_type, val, mo);
2702   case T_LONG:    return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic_access);
2703   case T_FLOAT:   return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
2704   case T_DOUBLE:  return new StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic_access);
2705   case T_METADATA:
2706   case T_ADDRESS:
2707   case T_OBJECT:

2708 #ifdef _LP64
2709     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2710       val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop()));
2711       return new StoreNNode(ctl, mem, adr, adr_type, val, mo);
2712     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
2713                (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
2714                 adr->bottom_type()->isa_rawptr())) {
2715       val = gvn.transform(new EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
2716       return new StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
2717     }
2718 #endif
2719     {
2720       return new StorePNode(ctl, mem, adr, adr_type, val, mo);
2721     }
2722   default:
2723     ShouldNotReachHere();
2724     return (StoreNode*)nullptr;
2725   }
2726 }
2727 
2728 //--------------------------bottom_type----------------------------------------
2729 const Type *StoreNode::bottom_type() const {
2730   return Type::MEMORY;
2731 }
2732 
2733 //------------------------------hash-------------------------------------------
2734 uint StoreNode::hash() const {
2735   // unroll addition of interesting fields
2736   //return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address) + (uintptr_t)in(ValueIn);
2737 
2738   // Since they are not commoned, do not hash them:
2739   return NO_HASH;
2740 }
2741 
2742 // Link together multiple stores (B/S/C/I) into a longer one.
2743 //

3365   }
3366   ss.print_cr("[TraceMergeStores]: with");
3367   merged_input_value->dump("\n", false, &ss);
3368   merged_store->dump("\n", false, &ss);
3369   tty->print("%s", ss.as_string());
3370 }
3371 #endif
3372 
3373 //------------------------------Ideal------------------------------------------
 3374 // Change back-to-back stores Store(Store(m, p, y), p, x) into Store(m, p, x).
3375 // When a store immediately follows a relevant allocation/initialization,
3376 // try to capture it into the initialization, or hoist it above.
3377 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3378   Node* p = MemNode::Ideal_common(phase, can_reshape);
3379   if (p)  return (p == NodeSentinel) ? nullptr : p;
3380 
3381   Node* mem     = in(MemNode::Memory);
3382   Node* address = in(MemNode::Address);
3383   Node* value   = in(MemNode::ValueIn);
3384   // Back-to-back stores to same address?  Fold em up.  Generally
3385   // unsafe if I have intervening uses.
3386   {
3387     Node* st = mem;
3388     // If Store 'st' has more than one use, we cannot fold 'st' away.
3389     // For example, 'st' might be the final state at a conditional
3390     // return.  Or, 'st' might be used by some node which is live at
3391     // the same time 'st' is live, which might be unschedulable.  So,
3392     // require exactly ONE user until such time as we clone 'mem' for
3393     // each of 'mem's uses (thus making the exactly-1-user-rule hold
3394     // true).
3395     while (st->is_Store() && st->outcnt() == 1) {
3396       // Looking at a dead closed cycle of memory?
3397       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
3398       assert(Opcode() == st->Opcode() ||
3399              st->Opcode() == Op_StoreVector ||
3400              Opcode() == Op_StoreVector ||
3401              st->Opcode() == Op_StoreVectorScatter ||
3402              Opcode() == Op_StoreVectorScatter ||
3403              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
3404              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
 3405              (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
3406              (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
3407              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
3408 
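             // This store is to the same address and at least as wide as the earlier
             // store 'st', so it completely overwrites it; rewire st's single use to
             // bypass it.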
3409       if (st->in(MemNode::Address)->eqv_uncast(address) &&
3410           st->as_Store()->memory_size() <= this->memory_size()) {
3411         Node* use = st->raw_out(0);
3412         if (phase->is_IterGVN()) {
3413           phase->is_IterGVN()->rehash_node_delayed(use);
3414         }
3415         // It's OK to do this in the parser, since DU info is always accurate,
3416         // and the parser always refers to nodes via SafePointNode maps.
3417         use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
3418         return this;
3419       }
3420       st = st->in(MemNode::Memory);
3421     }
3422   }
3423 
3424 
3425   // Capture an unaliased, unconditional, simple store into an initializer.

3523       const StoreVectorNode* store_vector = as_StoreVector();
3524       const StoreVectorNode* mem_vector = mem->as_StoreVector();
3525       const Node* store_indices = store_vector->indices();
3526       const Node* mem_indices = mem_vector->indices();
3527       const Node* store_mask = store_vector->mask();
3528       const Node* mem_mask = mem_vector->mask();
3529       // Ensure types, indices, and masks match
3530       if (store_vector->vect_type() == mem_vector->vect_type() &&
3531           ((store_indices == nullptr) == (mem_indices == nullptr) &&
3532            (store_indices == nullptr || store_indices->eqv_uncast(mem_indices))) &&
3533           ((store_mask == nullptr) == (mem_mask == nullptr) &&
3534            (store_mask == nullptr || store_mask->eqv_uncast(mem_mask)))) {
3535         result = mem;
3536       }
3537     }
3538   }
3539 
3540   // Store of zero anywhere into a freshly-allocated object?
3541   // Then the store is useless.
3542   // (It must already have been captured by the InitializeNode.)
3543   if (result == this &&
3544       ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
3545     // a newly allocated object is already all-zeroes everywhere
3546     if (mem->is_Proj() && mem->in(0)->is_Allocate()) {

3547       result = mem;
3548     }
3549 
3550     if (result == this) {
3551       // the store may also apply to zero-bits in an earlier object
3552       Node* prev_mem = find_previous_store(phase);
3553       // Steps (a), (b):  Walk past independent stores to find an exact match.
3554       if (prev_mem != nullptr) {
3555         Node* prev_val = can_see_stored_value(prev_mem, phase);
3556         if (prev_val != nullptr && prev_val == val) {
3557           // prev_val and val might differ by a cast; it would be good
3558           // to keep the more informative of the two.
3559           result = mem;
3560         }
3561       }
3562     }
3563   }
3564 
3565   PhaseIterGVN* igvn = phase->is_IterGVN();
3566   if (result != this && igvn != nullptr) {
3567     MemBarNode* trailing = trailing_membar();
3568     if (trailing != nullptr) {
3569 #ifdef ASSERT
3570       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();

4034 // Clearing a short array is faster with stores
4035 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
4036   // Already know this is a large node, do not try to ideal it
4037   if (_is_large) return nullptr;
4038 
4039   const int unit = BytesPerLong;
4040   const TypeX* t = phase->type(in(2))->isa_intptr_t();
4041   if (!t)  return nullptr;
4042   if (!t->is_con())  return nullptr;
4043   intptr_t raw_count = t->get_con();
4044   intptr_t size = raw_count;
4045   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
4046   // Clearing nothing uses the Identity call.
4047   // Negative clears are possible on dead ClearArrays
4048   // (see jck test stmt114.stmt11402.val).
4049   if (size <= 0 || size % unit != 0)  return nullptr;
4050   intptr_t count = size / unit;
4051   // Length too long; communicate this to matchers and assemblers.
4052 // Assemblers are responsible for producing fast hardware clears for it.
4053   if (size > InitArrayShortSize) {
4054     return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
4055   } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
4056     return nullptr;
4057   }
4058   if (!IdealizeClearArrayNode) return nullptr;
4059   Node *mem = in(1);
4060   if( phase->type(mem)==Type::TOP ) return nullptr;
4061   Node *adr = in(3);
4062   const Type* at = phase->type(adr);
4063   if( at==Type::TOP ) return nullptr;
4064   const TypePtr* atp = at->isa_ptr();
4065   // adjust atp to be the correct array element address type
4066   if (atp == nullptr)  atp = TypePtr::BOTTOM;
4067   else              atp = atp->add_offset(Type::OffsetBot);
4068   // Get base for derived pointer purposes
4069   if( adr->Opcode() != Op_AddP ) Unimplemented();
4070   Node *base = adr->in(1);
4071 
4072   Node *zero = phase->makecon(TypeLong::ZERO);
4073   Node *off  = phase->MakeConX(BytesPerLong);
4074   mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
4075   count--;
4076   while( count-- ) {
4077     mem = phase->transform(mem);
4078     adr = phase->transform(new AddPNode(base,adr,off));
4079     mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
4080   }
4081   return mem;
4082 }
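
For orientation only: when the clear is short enough, the code above replaces the ClearArray with an unrolled chain of 8-byte zero stores. A minimal standalone C++ sketch of the equivalent scalar operation (ordinary memory rather than C2 IR; zero_words and its parameters are illustrative names, not HotSpot API):

#include <cstdint>
#include <cstring>

// Zero 'count' 64-bit words starting at 'base', one explicit word store per
// iteration -- the scalar analogue of the StoreL chain built above.
static void zero_words(void* base, intptr_t count) {
  const uint64_t zero = 0;
  char* p = static_cast<char*>(base);
  for (intptr_t i = 0; i < count; i++) {
    std::memcpy(p + i * static_cast<intptr_t>(sizeof(uint64_t)), &zero, sizeof(uint64_t)); // one 8-byte zero store
  }
}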
4083 
4084 //----------------------------step_through----------------------------------
4085 // If this ClearArray initializes a different instance, step *np past it and return true;
4086 // return false if it initializes the instance we are looking for (cannot be bypassed).
4087 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseValues* phase) {
4088   Node* n = *np;
4089   assert(n->is_ClearArray(), "sanity");
4090   intptr_t offset;
4091   AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
4092   // This method is called only before Allocate nodes are expanded
4093   // during macro nodes expansion. Before that ClearArray nodes are
4094   // only generated in PhaseMacroExpand::generate_arraycopy() (before
4095   // Allocate nodes are expanded) which follows allocations.
4096   assert(alloc != nullptr, "should have allocation");
4097   if (alloc->_idx == instance_id) {
4098     // Can not bypass initialization of the instance we are looking for.
4099     return false;
4100   }
4101   // Otherwise skip it.
4102   InitializeNode* init = alloc->initialization();
4103   if (init != nullptr)
4104     *np = init->in(TypeFunc::Memory);
4105   else
4106     *np = alloc->in(TypeFunc::Memory);
4107   return true;
4108 }
4109 
4110 //----------------------------clear_memory-------------------------------------
4111 // Generate code to initialize object storage to zero.
4112 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,


4113                                    intptr_t start_offset,
4114                                    Node* end_offset,
4115                                    PhaseGVN* phase) {
4116   intptr_t offset = start_offset;
4117 
4118   int unit = BytesPerLong;
4119   if ((offset % unit) != 0) {
4120     Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
4121     adr = phase->transform(adr);
4122     const TypePtr* atp = TypeRawPtr::BOTTOM;
4123     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);






4124     mem = phase->transform(mem);
4125     offset += BytesPerInt;
4126   }
4127   assert((offset % unit) == 0, "");
4128 
4129   // Initialize the remaining stuff, if any, with a ClearArray.
4130   return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
4131 }
4132 
4133 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,

4134                                    Node* start_offset,
4135                                    Node* end_offset,
4136                                    PhaseGVN* phase) {
4137   if (start_offset == end_offset) {
4138     // nothing to do
4139     return mem;
4140   }
4141 
4142   int unit = BytesPerLong;
4143   Node* zbase = start_offset;
4144   Node* zend  = end_offset;
4145 
4146   // Scale to the unit required by the CPU:
4147   if (!Matcher::init_array_count_is_in_bytes) {
4148     Node* shift = phase->intcon(exact_log2(unit));
4149     zbase = phase->transform(new URShiftXNode(zbase, shift) );
4150     zend  = phase->transform(new URShiftXNode(zend,  shift) );
4151   }
4152 
4153   // Bulk clear double-words
4154   Node* zsize = phase->transform(new SubXNode(zend, zbase) );
4155   Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
4156   mem = new ClearArrayNode(ctl, mem, zsize, adr, false);



4157   return phase->transform(mem);
4158 }
4159 
4160 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,


4161                                    intptr_t start_offset,
4162                                    intptr_t end_offset,
4163                                    PhaseGVN* phase) {
4164   if (start_offset == end_offset) {
4165     // nothing to do
4166     return mem;
4167   }
4168 
4169   assert((end_offset % BytesPerInt) == 0, "odd end offset");
4170   intptr_t done_offset = end_offset;
4171   if ((done_offset % BytesPerLong) != 0) {
4172     done_offset -= BytesPerInt;
4173   }
4174   if (done_offset > start_offset) {
4175     mem = clear_memory(ctl, mem, dest,
4176                        start_offset, phase->MakeConX(done_offset), phase);
4177   }
4178   if (done_offset < end_offset) { // emit the final 32-bit store
4179     Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
4180     adr = phase->transform(adr);
4181     const TypePtr* atp = TypeRawPtr::BOTTOM;
4182     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);






4183     mem = phase->transform(mem);
4184     done_offset += BytesPerInt;
4185   }
4186   assert(done_offset == end_offset, "");
4187   return mem;
4188 }
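
Taken together, the clear_memory overloads above split an arbitrary zeroing range into an optional 32-bit head store (to reach 8-byte alignment), a bulk double-word clear, and an optional 32-bit tail store. A standalone C++ sketch of the same offset arithmetic, assuming both offsets are 4-byte aligned (plain memory; clear_range is an illustrative name, not HotSpot API):

#include <cassert>
#include <cstdint>
#include <cstring>

// Zero bytes [start, end) of 'obj', both offsets 4-byte aligned: a 32-bit
// store to reach 8-byte alignment, whole 64-bit word clears in the middle,
// and a trailing 32-bit store if 4 bytes remain.
static void clear_range(char* obj, intptr_t start, intptr_t end) {
  assert(start % 4 == 0 && end % 4 == 0 && start <= end);
  const uint32_t zero32 = 0;
  const uint64_t zero64 = 0;
  if (start < end && start % 8 != 0) {        // odd 32-bit head
    std::memcpy(obj + start, &zero32, 4);
    start += 4;
  }
  intptr_t done = end - ((end - start) % 8);  // end of the double-word portion
  for (intptr_t off = start; off < done; off += 8) {
    std::memcpy(obj + off, &zero64, 8);       // bulk double-word clear
  }
  if (done < end) {                           // odd 32-bit tail
    std::memcpy(obj + done, &zero32, 4);
  }
}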
4189 
4190 //=============================================================================
4191 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
4192   : MultiNode(TypeFunc::Parms + (precedent == nullptr? 0: 1)),
4193     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
4194 #ifdef ASSERT
4195   , _pair_idx(0)
4196 #endif
4197 {
4198   init_class_id(Class_MemBar);
4199   Node* top = C->top();
4200   init_req(TypeFunc::I_O,top);
4201   init_req(TypeFunc::FramePtr,top);
4202   init_req(TypeFunc::ReturnAdr,top);

4309       PhaseIterGVN* igvn = phase->is_IterGVN();
4310       remove(igvn);
4311       // Must return either the original node (now dead) or a new node
4312       // (Do not return a top here, since that would break the uniqueness of top.)
4313       return new ConINode(TypeInt::ZERO);
4314     }
4315   }
4316   return progress ? this : nullptr;
4317 }
4318 
4319 //------------------------------Value------------------------------------------
4320 const Type* MemBarNode::Value(PhaseGVN* phase) const {
4321   if( !in(0) ) return Type::TOP;
4322   if( phase->type(in(0)) == Type::TOP )
4323     return Type::TOP;
4324   return TypeTuple::MEMBAR;
4325 }
4326 
4327 //------------------------------match------------------------------------------
4328 // Construct projections for memory.
4329 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
4330   switch (proj->_con) {
4331   case TypeFunc::Control:
4332   case TypeFunc::Memory:
4333     return new MachProjNode(this, proj->_con, RegMask::EMPTY, MachProjNode::unmatched_proj);
4334   }
4335   ShouldNotReachHere();
4336   return nullptr;
4337 }
4338 
4339 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4340   trailing->_kind = TrailingStore;
4341   leading->_kind = LeadingStore;
4342 #ifdef ASSERT
4343   trailing->_pair_idx = leading->_idx;
4344   leading->_pair_idx = leading->_idx;
4345 #endif
4346 }
4347 
4348 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4349   trailing->_kind = TrailingLoadStore;

4596   return (req() > RawStores);
4597 }
4598 
4599 void InitializeNode::set_complete(PhaseGVN* phase) {
4600   assert(!is_complete(), "caller responsibility");
4601   _is_complete = Complete;
4602 
4603   // After this node is complete, it contains a bunch of
4604   // raw-memory initializations.  There is no need for
4605   // it to have anything to do with non-raw memory effects.
4606   // Therefore, tell all non-raw users to re-optimize themselves,
4607   // after skipping the memory effects of this initialization.
4608   PhaseIterGVN* igvn = phase->is_IterGVN();
4609   if (igvn)  igvn->add_users_to_worklist(this);
4610 }
4611 
4612 // convenience function
4613 // return false if the init contains any stores already
4614 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
4615   InitializeNode* init = initialization();
4616   if (init == nullptr || init->is_complete())  return false;


4617   init->remove_extra_zeroes();
4618   // for now, if this allocation has already collected any inits, bail:
4619   if (init->is_non_zero())  return false;
4620   init->set_complete(phase);
4621   return true;
4622 }
4623 
4624 void InitializeNode::remove_extra_zeroes() {
4625   if (req() == RawStores)  return;
4626   Node* zmem = zero_memory();
4627   uint fill = RawStores;
4628   for (uint i = fill; i < req(); i++) {
4629     Node* n = in(i);
4630     if (n->is_top() || n == zmem)  continue;  // skip
4631     if (fill < i)  set_req(fill, n);          // compact
4632     ++fill;
4633   }
4634   // delete any empty spaces created:
4635   while (fill < req()) {
4636     del_req(fill);

4780             // store node that we'd like to capture. We need to check
4781             // the uses of the MergeMemNode.
4782             mems.push(n);
4783           }
4784         } else if (n->is_Mem()) {
4785           Node* other_adr = n->in(MemNode::Address);
4786           if (other_adr == adr) {
4787             failed = true;
4788             break;
4789           } else {
4790             const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
4791             if (other_t_adr != nullptr) {
4792               int other_alias_idx = phase->C->get_alias_index(other_t_adr);
4793               if (other_alias_idx == alias_idx) {
4794                 // A load from the same memory slice as the store right
4795                 // after the InitializeNode. We check the control of the
4796                 // object/array that is loaded from. If it's the same as
4797                 // the store control then we cannot capture the store.
4798                 assert(!n->is_Store(), "2 stores to same slice on same control?");
4799                 Node* base = other_adr;






4800                 assert(base->is_AddP(), "should be addp but is %s", base->Name());
4801                 base = base->in(AddPNode::Base);
4802                 if (base != nullptr) {
4803                   base = base->uncast();
4804                   if (base->is_Proj() && base->in(0) == alloc) {
4805                     failed = true;
4806                     break;
4807                   }
4808                 }
4809               }
4810             }
4811           }
4812         } else {
4813           failed = true;
4814           break;
4815         }
4816       }
4817     }
4818   }
4819   if (failed) {

5366         //   z's_done      12  16  16  16    12  16    12
5367         //   z's_needed    12  16  16  16    16  16    16
5368         //   zsize          0   0   0   0     4   0     4
5369         if (next_full_store < 0) {
5370           // Conservative tack:  Zero to end of current word.
5371           zeroes_needed = align_up(zeroes_needed, BytesPerInt);
5372         } else {
5373           // Zero to beginning of next fully initialized word.
5374           // Or, don't zero at all, if we are already in that word.
5375           assert(next_full_store >= zeroes_needed, "must go forward");
5376           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
5377           zeroes_needed = next_full_store;
5378         }
5379       }
5380 
5381       if (zeroes_needed > zeroes_done) {
5382         intptr_t zsize = zeroes_needed - zeroes_done;
5383         // Do some incremental zeroing on rawmem, in parallel with inits.
5384         zeroes_done = align_down(zeroes_done, BytesPerInt);
5385         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,


5386                                               zeroes_done, zeroes_needed,
5387                                               phase);
5388         zeroes_done = zeroes_needed;
5389         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
5390           do_zeroing = false;   // leave the hole, next time
5391       }
5392     }
5393 
5394     // Collect the store and move on:
5395     phase->replace_input_of(st, MemNode::Memory, inits);
5396     inits = st;                 // put it on the linearized chain
5397     set_req(i, zmem);           // unhook from previous position
5398 
5399     if (zeroes_done == st_off)
5400       zeroes_done = next_init_off;
5401 
5402     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
5403 
5404     #ifdef ASSERT
5405     // Various order invariants.  Weaker than stores_are_sane because

5425   remove_extra_zeroes();        // clear out all the zmems left over
5426   add_req(inits);
5427 
5428   if (!(UseTLAB && ZeroTLAB)) {
5429     // If anything remains to be zeroed, zero it all now.
5430     zeroes_done = align_down(zeroes_done, BytesPerInt);
5431     // if it is the last unused 4 bytes of an instance, forget about it
5432     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
5433     if (zeroes_done + BytesPerLong >= size_limit) {
5434       AllocateNode* alloc = allocation();
5435       assert(alloc != nullptr, "must be present");
5436       if (alloc != nullptr && alloc->Opcode() == Op_Allocate) {
5437         Node* klass_node = alloc->in(AllocateNode::KlassNode);
5438         ciKlass* k = phase->type(klass_node)->is_instklassptr()->instance_klass();
5439         if (zeroes_done == k->layout_helper())
5440           zeroes_done = size_limit;
5441       }
5442     }
5443     if (zeroes_done < size_limit) {
5444       rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,


5445                                             zeroes_done, size_in_bytes, phase);
5446     }
5447   }
5448 
5449   set_complete(phase);
5450   return rawmem;
5451 }
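
The incremental zeroing above repeatedly rounds byte offsets to 4-byte (BytesPerInt) boundaries with align_up/align_down. A small standalone C++ sketch of that rounding for power-of-two alignments (illustrative only; HotSpot's own versions live in utilities/align.hpp):

#include <cassert>
#include <cstdint>

// Round 'off' down or up to a power-of-two 'alignment', as done for the
// zeroes_done / zeroes_needed bookkeeping above.
static intptr_t align_down_(intptr_t off, intptr_t alignment) {
  assert(alignment > 0 && (alignment & (alignment - 1)) == 0);
  return off & ~(alignment - 1);
}
static intptr_t align_up_(intptr_t off, intptr_t alignment) {
  return align_down_(off + alignment - 1, alignment);
}
// Example: align_down_(14, 4) == 12 and align_up_(14, 4) == 16.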
5452 
5453 void InitializeNode::replace_mem_projs_by(Node* mem, Compile* C) {
5454   auto replace_proj = [&](ProjNode* proj) {
5455     C->gvn_replace_by(proj, mem);
5456     return CONTINUE;
5457   };
5458   apply_to_projs(replace_proj, TypeFunc::Memory);
5459 }
5460 
5461 void InitializeNode::replace_mem_projs_by(Node* mem, PhaseIterGVN* igvn) {
5462   DUIterator_Fast imax, i = fast_outs(imax);
5463   auto replace_proj = [&](ProjNode* proj) {
5464     igvn->replace_node(proj, mem);

  26 #include "ci/ciFlatArrayKlass.hpp"
  27 #include "ci/ciInlineKlass.hpp"
  28 #include "classfile/javaClasses.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "compiler/compileLog.hpp"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "gc/shared/c2/barrierSetC2.hpp"
  33 #include "gc/shared/tlab_globals.hpp"
  34 #include "memory/allocation.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "oops/flatArrayKlass.hpp"
  37 #include "oops/objArrayKlass.hpp"
  38 #include "opto/addnode.hpp"
  39 #include "opto/arraycopynode.hpp"
  40 #include "opto/callnode.hpp"
  41 #include "opto/cfgnode.hpp"
  42 #include "opto/compile.hpp"
  43 #include "opto/connode.hpp"
  44 #include "opto/convertnode.hpp"
  45 #include "opto/inlinetypenode.hpp"
  46 #include "opto/loopnode.hpp"
  47 #include "opto/machnode.hpp"
  48 #include "opto/matcher.hpp"
  49 #include "opto/memnode.hpp"
  50 #include "opto/mempointer.hpp"
  51 #include "opto/mulnode.hpp"
  52 #include "opto/narrowptrnode.hpp"
  53 #include "opto/phaseX.hpp"
  54 #include "opto/regalloc.hpp"
  55 #include "opto/regmask.hpp"
  56 #include "opto/rootnode.hpp"
  57 #include "opto/traceMergeStoresTag.hpp"
  58 #include "opto/vectornode.hpp"
  59 #include "runtime/arguments.hpp"
  60 #include "utilities/align.hpp"
  61 #include "utilities/copy.hpp"
  62 #include "utilities/globalDefinitions.hpp"
  63 #include "utilities/macros.hpp"
  64 #include "utilities/powerOfTwo.hpp"
  65 #include "utilities/vmError.hpp"
  66 
  67 // Portions of code courtesy of Clifford Click
  68 
  69 // Optimization - Graph Style
  70 
  71 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
  72 
  73 //=============================================================================
  74 uint MemNode::size_of() const { return sizeof(*this); }
  75 
  76 const TypePtr *MemNode::adr_type() const {
  77   Node* adr = in(Address);
  78   if (adr == nullptr)  return nullptr; // node is dead
  79   const TypePtr* cross_check = nullptr;
  80   DEBUG_ONLY(cross_check = _adr_type);
  81   return calculate_adr_type(adr->bottom_type(), cross_check);
  82 }

 131       st->print(", idx=Bot;");
 132     else if (atp->index() == Compile::AliasIdxTop)
 133       st->print(", idx=Top;");
 134     else if (atp->index() == Compile::AliasIdxRaw)
 135       st->print(", idx=Raw;");
 136     else {
 137       ciField* field = atp->field();
 138       if (field) {
 139         st->print(", name=");
 140         field->print_name_on(st);
 141       }
 142       st->print(", idx=%d;", atp->index());
 143     }
 144   }
 145 }
 146 
 147 extern void print_alias_types();
 148 
 149 #endif
 150 
 151 // Find the memory output corresponding to the fall-through path of a call
 152 static Node* find_call_fallthrough_mem_output(CallNode* call) {
 153   ResourceMark rm;
 154   CallProjections* projs = call->extract_projections(false, false);
 155   Node* res = projs->fallthrough_memproj;
 156   assert(res != nullptr, "must have a fallthrough mem output");
 157   return res;
 158 }
 159 
 160 // Try to find a better memory input for a load from a strict final field
 161 static Node* try_optimize_strict_final_load_memory(PhaseGVN* phase, Node* adr, ProjNode*& base_local) {
 162   intptr_t offset = 0;
 163   Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
 164   if (base == nullptr) {
 165     return nullptr;
 166   }
 167 
 168   Node* base_uncasted = base->uncast();
 169   if (base_uncasted->is_Proj()) {
 170     MultiNode* multi = base_uncasted->in(0)->as_Multi();
 171     if (multi->is_Allocate()) {
 172       base_local = base_uncasted->as_Proj();
 173       return nullptr;
 174     } else if (multi->is_Call()) {
 175       // The oop is returned from a call, so the memory can be the fallthrough memory output of that call
 176       return find_call_fallthrough_mem_output(multi->as_Call());
 177     } else if (multi->is_Start()) {
 178       // The oop is a parameter
 179       if (phase->C->method()->is_object_constructor() && base_uncasted->as_Proj()->_con == TypeFunc::Parms) {
 180         // The receiver of a constructor is similar to the result of an AllocateNode
 181         base_local = base_uncasted->as_Proj();
 182         return nullptr;
 183       } else {
 184         // Use the start memory otherwise
 185         return multi->proj_out(TypeFunc::Memory);
 186       }
 187     }
 188   }
 189 
 190   return nullptr;
 191 }
 192 
 193 // Whether a call can modify a strict final field, given that the object is allocated inside the
 194 // current compilation unit, or is the first parameter when the compilation root is a constructor.
 195 // This is equivalent to asking whether 'call' is a constructor invocation and the class declaring
 196 // the target method is a subclass of the class declaring 'field'.
 197 static bool call_can_modify_local_object(ciField* field, CallNode* call) {
 198   if (!call->is_CallJava()) {
 199     return false;
 200   }
 201 
 202   ciMethod* target = call->as_CallJava()->method();
 203   if (target == nullptr || !target->is_object_constructor()) {
 204     return false;
 205   }
 206 
 207   // If the constructor is declared in a class that is a subclass of the one declaring 'field',
 208   // then the field is set during the constructor's execution (possibly via the chained super
 209   // constructor); otherwise the field must already have been set before the constructor is
 210   // invoked. E.g. a field Super.x will be set during the execution of Sub::<init>, while a
 211   // field Sub.y must be set before Super::<init> is invoked.
 212   // We could try to be more precise and check whether the receiver of the constructor invocation
 213   // is the object we are loading from. However, deciding whether two nodes are definitely
 214   // different is not trivial, especially if the graph is not canonical, so we stay conservative for now.
 215   assert(call->req() > TypeFunc::Parms, "constructor must have at least 1 argument");
 216   return target->holder()->is_subclass_of(field->holder());
 217 }
 218 
 219 Node* MemNode::optimize_simple_memory_chain(Node* mchain, const TypeOopPtr* t_oop, Node* load, PhaseGVN* phase) {
 220   assert(t_oop != nullptr, "sanity");
 221   bool is_known_instance = t_oop->is_known_instance_field();
 222   bool is_strict_final_load = false;
 223 
 224   // After macro expansion, an allocation may become a call, changing the memory input to the
 225   // memory output of that call would be illegal. As a result, disallow this transformation after
 226   // macro expansion.
 227   if (phase->is_IterGVN() && phase->C->allow_macro_nodes() && load != nullptr && load->is_Load() && !load->as_Load()->is_mismatched_access()) {
 228     is_strict_final_load = t_oop->is_ptr_to_strict_final_field();
 229 #ifdef ASSERT
 230     if ((t_oop->is_inlinetypeptr() && t_oop->inline_klass()->contains_field_offset(t_oop->offset())) || t_oop->is_ptr_to_boxed_value()) {
 231       assert(is_strict_final_load, "sanity check for basic cases");
 232     }
 233 #endif // ASSERT
 234   }
 235 
 236   if (!is_known_instance && !is_strict_final_load) {
 237     return mchain;
 238   }
 239 
 240   Node* result = mchain;
 241   ProjNode* base_local = nullptr;
 242 
 243   ciField* field = nullptr;
 244   if (is_strict_final_load) {
 245     field = phase->C->alias_type(t_oop)->field();
 246     assert(field != nullptr, "must point to a field");
 247 
 248     Node* adr = load->in(MemNode::Address);
 249     assert(phase->type(adr) == t_oop, "inconsistent type");
 250     Node* tmp = try_optimize_strict_final_load_memory(phase, adr, base_local);
 251     if (tmp != nullptr) {
 252       result = tmp;
 253     }
 254   }
 255 
 256   uint instance_id = t_oop->instance_id();
 257   Node* start_mem = phase->C->start()->proj_out_or_null(TypeFunc::Memory);
 258   Node* prev = nullptr;

 259   while (prev != result) {
 260     prev = result;
 261     if (result == start_mem) {
 262       // start_mem is the earliest memory possible
 263       break;
 264     }
 265 
 266     // skip over a call which does not affect this memory slice
 267     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
 268       Node* proj_in = result->in(0);
 269       if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
 270         // This is the allocation that creates the object we are loading from
 271         break;
 272       } else if (proj_in->is_Call()) {
 273         // ArrayCopyNodes processed here as well
 274         CallNode* call = proj_in->as_Call();
 275         if (!call->may_modify(t_oop, phase)) {
 276           result = call->in(TypeFunc::Memory);
 277         } else if (is_strict_final_load && base_local != nullptr && !call_can_modify_local_object(field, call)) {
 278           result = call->in(TypeFunc::Memory);
 279         }
 280       } else if (proj_in->Opcode() == Op_Tuple) {
 281         // The call will be folded, skip over it.
 282         break;
 283       } else if (proj_in->is_Initialize()) {
 284         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
 285         // Stop if this is the initialization for the object instance which
 286         // contains this memory slice, otherwise skip over it.
 287         if ((alloc == nullptr) || (alloc->_idx == instance_id)) {
 288           break;
 289         }
 290         if (is_known_instance) {
 291           result = proj_in->in(TypeFunc::Memory);
 292         } else if (is_strict_final_load) {
 293           Node* klass = alloc->in(AllocateNode::KlassNode);
 294           const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
 295           if (tklass->klass_is_exact() && !tklass->exact_klass()->equals(t_oop->is_instptr()->exact_klass())) {
 296             // Allocation of another type, must be another object
 297             result = proj_in->in(TypeFunc::Memory);
 298           } else if (base_local != nullptr && (base_local->is_Parm() || base_local->in(0) != alloc)) {
 299             // Allocation of another object
 300             result = proj_in->in(TypeFunc::Memory);
 301           }
 302         }
 303       } else if (proj_in->is_MemBar()) {
 304         ArrayCopyNode* ac = nullptr;
 305         if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase, ac)) {
 306           break;
 307         }
 308         result = proj_in->in(TypeFunc::Memory);
 309       } else if (proj_in->is_LoadFlat() || proj_in->is_StoreFlat()) {
 310         if (is_strict_final_load) {
 311           // LoadFlat and StoreFlat cannot happen to strict final fields
 312           result = proj_in->in(TypeFunc::Memory);
 313         }
 314       } else if (proj_in->is_top()) {
 315         break; // dead code
 316       } else {
 317         assert(false, "unexpected projection of %s", proj_in->Name());
 318       }
 319     } else if (result->is_ClearArray()) {
 320       if (!is_known_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {
 321         // Can not bypass initialization of the instance
 322         // we are looking for.
 323         break;
 324       }
 325       // Otherwise skip it (the call updated 'result' value).
 326     } else if (result->is_MergeMem()) {
 327       result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, nullptr, tty);
 328     }
 329   }
 330   return result;
 331 }
 332 
 333 Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase) {
 334   const TypeOopPtr* t_oop = t_adr->isa_oopptr();
 335   if (t_oop == nullptr)
 336     return mchain;  // don't try to optimize non-oop types
 337   Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase);
 338   bool is_instance = t_oop->is_known_instance_field();
 339   PhaseIterGVN *igvn = phase->is_IterGVN();
 340   if (is_instance && igvn != nullptr && result->is_Phi()) {
 341     PhiNode *mphi = result->as_Phi();
 342     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
 343     const TypePtr *t = mphi->adr_type();
 344     bool do_split = false;
 345     // In the following cases, Load memory input can be further optimized based on
 346     // its precise address type
 347     if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ) {
 348       do_split = true;
 349     } else if (t->isa_oopptr() && !t->is_oopptr()->is_known_instance()) {
 350       const TypeOopPtr* mem_t =
 351         t->is_oopptr()->cast_to_exactness(true)
 352         ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
 353         ->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
 354       if (t_oop->isa_aryptr()) {
 355         mem_t = mem_t->is_aryptr()
 356                      ->cast_to_stable(t_oop->is_aryptr()->is_stable())
 357                      ->cast_to_size(t_oop->is_aryptr()->size())
 358                      ->cast_to_not_flat(t_oop->is_aryptr()->is_not_flat())
 359                      ->cast_to_not_null_free(t_oop->is_aryptr()->is_not_null_free())
 360                      ->with_offset(t_oop->is_aryptr()->offset())
 361                      ->is_aryptr();
 362       }
 363       do_split = mem_t == t_oop;
 364     }
 365     if (do_split) {
 366       // clone the Phi with our address type
 367       result = mphi->split_out_instance(t_adr, igvn);
 368     } else {
 369       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
 370     }
 371   }
 372   return result;
 373 }
 374 
 375 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
 376   uint alias_idx = phase->C->get_alias_index(tp);
 377   Node *mem = mmem;
 378 #ifdef ASSERT
 379   {
 380     // Check that current type is consistent with the alias index used during graph construction
 381     assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
 382     bool consistent =  adr_check == nullptr || adr_check->empty() ||
 383                        phase->C->must_alias(adr_check, alias_idx );
 384     // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
 385     if( !consistent && adr_check != nullptr && !adr_check->empty() &&
 386         tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
 387         adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
 388         ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
 389           adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
 390           adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
 391       // don't assert if it is dead code.
 392       consistent = true;
 393     }
 394     if( !consistent ) {
 395       st->print("alias_idx==%d, adr_check==", alias_idx);
 396       if( adr_check == nullptr ) {
 397         st->print("null");
 398       } else {
 399         adr_check->dump();
 400       }
 401       st->cr();
 402       print_alias_types();
 403       assert(consistent, "adr_check must match alias idx");
 404     }
 405   }
 406 #endif

1078          "use LoadKlassNode instead");
1079   assert(!(adr_type->isa_aryptr() &&
1080            adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
1081          "use LoadRangeNode instead");
1082   // Check control edge of raw loads
1083   assert( ctl != nullptr || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
1084           // oop will be recorded in oop map if load crosses safepoint
1085           rt->isa_oopptr() || is_immutable_value(adr),
1086           "raw memory operations should have control edge");
1087   LoadNode* load = nullptr;
1088   switch (bt) {
1089   case T_BOOLEAN: load = new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
1090   case T_BYTE:    load = new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
1091   case T_INT:     load = new LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
1092   case T_CHAR:    load = new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
1093   case T_SHORT:   load = new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
1094   case T_LONG:    load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic_access); break;
1095   case T_FLOAT:   load = new LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
1096   case T_DOUBLE:  load = new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency, require_atomic_access); break;
1097   case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency); break;
1098   case T_ARRAY:
1099   case T_OBJECT:
1100   case T_NARROWOOP:
1101 #ifdef _LP64
1102     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
1103       load = new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency);
1104     } else
1105 #endif
1106     {
1107       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
1108       load = new LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
1109     }
1110     break;
1111   default:
1112     assert(false, "unexpected basic type %s", type2name(bt));
1113     break;
1114   }
1115   assert(load != nullptr, "LoadNode should have been created");
1116   if (unaligned) {
1117     load->set_unaligned_access();
1118   }
1119   if (mismatched) {
1120     load->set_mismatched_access();
1121   }
1122   if (unsafe) {
1123     load->set_unsafe_access();
1124   }
1125   load->set_barrier_data(barrier_data);
1126   if (load->Opcode() == Op_LoadN) {
1127     Node* ld = gvn.transform(load);
1128     return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
1129   }
1130 
1131   return load;
1132 }
1133 
1134 //------------------------------hash-------------------------------------------
1135 uint LoadNode::hash() const {
1136   // unroll addition of interesting fields
1137   return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
1138 }
1139 
1140 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
1141   if ((atp != nullptr) && (atp->index() >= Compile::AliasIdxRaw)) {
1142     bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
1143     bool is_stable_ary = FoldStableValues &&
1144                          (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
1145                          tp->isa_aryptr()->is_stable();
1146 
1147     return (eliminate_boxing && non_volatile) || is_stable_ary || tp->is_inlinetypeptr();
1148   }
1149 
1150   return false;
1151 }
1152 
1153 LoadNode* LoadNode::pin_array_access_node() const {
1154   const TypePtr* adr_type = this->adr_type();
1155   if (adr_type != nullptr && adr_type->isa_aryptr()) {
1156     return clone_pinned();
1157   }
1158   return nullptr;
1159 }
1160 
1161 // Is the value loaded previously stored by an arraycopy? If so return
1162 // a load node that reads from the source array so we may be able to
1163 // optimize out the ArrayCopy node later.
1164 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
1165   Node* ld_adr = in(MemNode::Address);
1166   intptr_t ld_off = 0;
1167   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);

1183     if (ac->as_ArrayCopy()->is_clonebasic()) {
1184       assert(ld_alloc != nullptr, "need an alloc");
1185       assert(addp->is_AddP(), "address must be addp");
1186       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1187       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1188       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1189       addp->set_req(AddPNode::Base, src);
1190       addp->set_req(AddPNode::Address, src);
1191     } else {
1192       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1193              ac->as_ArrayCopy()->is_copyof_validated() ||
1194              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1195       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1196       addp->set_req(AddPNode::Base, src);
1197       addp->set_req(AddPNode::Address, src);
1198 
1199       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1200       BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
1201       if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1202 
1203       uint shift  = ary_t->is_flat() ? ary_t->flat_log_elem_size() : exact_log2(type2aelembytes(ary_elem));

1204 
1205       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1206 #ifdef _LP64
1207       diff = phase->transform(new ConvI2LNode(diff));
1208 #endif
1209       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1210 
1211       Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1212       addp->set_req(AddPNode::Offset, offset);
1213     }
1214     addp = phase->transform(addp);
1215 #ifdef ASSERT
1216     const TypePtr* adr_type = phase->type(addp)->is_ptr();
1217     ld->_adr_type = adr_type;
1218 #endif
1219     ld->set_req(MemNode::Address, addp);
1220     ld->set_req(0, ctl);
1221     ld->set_req(MemNode::Memory, mem);
1222     return ld;
1223   }
1224   return nullptr;
1225 }
1226 
1227 static Node* see_through_inline_type(PhaseValues* phase, const MemNode* load, Node* base, int offset) {
1228   if (!load->is_mismatched_access() && base != nullptr && base->is_InlineType() && offset > oopDesc::klass_offset_in_bytes()) {
1229     InlineTypeNode* vt = base->as_InlineType();
1230     Node* value = vt->field_value_by_offset(offset, true);
1231     assert(value != nullptr, "must see some value");
1232     return value;
1233   }
1234 
1235   return nullptr;
1236 }
1237 
1238 //---------------------------can_see_stored_value------------------------------
1239 // This routine exists to make sure this set of tests is done the same
1240 // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
1241 // will change the graph shape in a way which makes memory alive twice at the
1242 // same time (uses the Oracle model of aliasing), then some
1243 // LoadXNode::Identity will fold things back to the equivalence-class model
1244 // of aliasing.
1245 // This method may find an unencoded node instead of the corresponding encoded one.
1246 Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
1247   Node* ld_adr = in(MemNode::Address);
1248   intptr_t ld_off = 0;
1249   Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
1250   // Try to see through an InlineTypeNode
1251   // LoadN is special because the input is not compressed
1252   if (Opcode() != Op_LoadN) {
1253     Node* value = see_through_inline_type(phase, this, ld_base, ld_off);
1254     if (value != nullptr) {
1255       return value;
1256     }
1257   }
1258 
1259   Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
1260   const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
1261   Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr;
1262   // This is more general than load from boxing objects.
1263   if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
1264     uint alias_idx = atp->index();
1265     Node* result = nullptr;
1266     Node* current = st;
1267     // Skip through chains of MemBarNodes checking the MergeMems for
1268     // new states for the slice of this load.  Stop once any other
1269     // kind of node is encountered.  Loads from final memory can skip
1270     // through any kind of MemBar but normal loads shouldn't skip
1271     // through MemBarAcquire since that could allow them to move out of
1272     // a synchronized region. It is not safe to step over MemBarCPUOrder,
1273     // because alias info above them may be inaccurate (e.g., due to
1274     // mixed/mismatched unsafe accesses).
1275     bool is_final_mem = !atp->is_rewritable();
1276     while (current->is_Proj()) {
1277       int opc = current->in(0)->Opcode();
1278       if ((is_final_mem && (opc == Op_MemBarAcquire ||

1322         // Same base, same offset.
1323         // Possible improvement for arrays: check index value instead of absolute offset.
1324 
1325         // At this point we have proven something like this setup:
1326         //   B = << base >>
1327         //   L =  LoadQ(AddP(Check/CastPP(B), #Off))
1328         //   S = StoreQ(AddP(             B , #Off), V)
1329         // (Actually, we haven't yet proven the Q's are the same.)
1330         // In other words, we are loading from a casted version of
1331         // the same pointer-and-offset that we stored to.
1332         // Casted version may carry a dependency and it is respected.
1333         // Thus, we are able to replace L by V.
1334       }
1335       // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1336       if (store_Opcode() != st->Opcode()) {
1337         return nullptr;
1338       }
1339       // LoadVector/StoreVector needs additional check to ensure the types match.
1340       if (st->is_StoreVector()) {
1341         const TypeVect*  in_vt = st->as_StoreVector()->vect_type();
1342         const TypeVect* out_vt = is_Load() ? as_LoadVector()->vect_type() : as_StoreVector()->vect_type();
1343         if (in_vt != out_vt) {
1344           return nullptr;
1345         }
1346       }
1347       return st->in(MemNode::ValueIn);
1348     }
1349 
1350     // A load from a freshly-created object always returns zero.
1351     // (This can happen after LoadNode::Ideal resets the load's memory input
1352     // to find_captured_store, which returned InitializeNode::zero_memory.)
1353     if (st->is_Proj() && st->in(0)->is_Allocate() &&
1354         (st->in(0) == ld_alloc) &&
1355         (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1356       // return a zero value for the load's basic type
1357       // (This is one of the few places where a generic PhaseTransform
1358       // can create new nodes.  Think of it as lazily manifesting
1359       // virtually pre-existing constants.)
1360       Node* init_value = ld_alloc->in(AllocateNode::InitValue);
1361       if (init_value != nullptr) {
1362         const TypeAryPtr* ld_adr_type = phase->type(ld_adr)->isa_aryptr();
1363         if (ld_adr_type == nullptr) {
1364           return nullptr;
1365         }
1366 
1367         // We know that this is not a flat array, so the load should return the whole oop
1368         if (ld_adr_type->is_not_flat()) {
1369           return init_value;
1370         }
1371 
1372         // If this is a flat array, try to see through init_value
1373         if (init_value->is_EncodeP()) {
1374           init_value = init_value->in(1);
1375         }
1376         if (!init_value->is_InlineType() || ld_adr_type->field_offset() == Type::Offset::bottom) {
1377           return nullptr;
1378         }
1379 
1380         ciInlineKlass* vk = phase->type(init_value)->inline_klass();
1381         int field_offset_in_payload = ld_adr_type->field_offset().get();
1382         if (field_offset_in_payload == vk->null_marker_offset_in_payload()) {
1383           return init_value->as_InlineType()->get_null_marker();
1384         } else {
1385           return init_value->as_InlineType()->field_value_by_offset(field_offset_in_payload + vk->payload_offset(), true);
1386         }
1387       }
1388       assert(ld_alloc->in(AllocateNode::RawInitValue) == nullptr, "raw init value must be null when there is no init value");
1389       if (value_basic_type() != T_VOID) {
1390         if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
1392           // If ReduceBulkZeroing is disabled, we need to check that the allocation does not belong to an
1392           // ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
1393           // by the ArrayCopyNode.
1394           return phase->zerocon(value_basic_type());
1395         }
1396       } else {
1397         // TODO: materialize all-zero vector constant
1398         assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1399       }
1400     }
1401 
1402     // A load from an initialization barrier can match a captured store.
1403     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1404       InitializeNode* init = st->in(0)->as_Initialize();
1405       AllocateNode* alloc = init->allocation();
1406       if ((alloc != nullptr) && (alloc == ld_alloc)) {
1407         // examine a captured store value
1408         st = init->find_captured_store(ld_off, memory_size(), phase);

1421       base = bs->step_over_gc_barrier(base);
1422       if (base != nullptr && base->is_Proj() &&
1423           base->as_Proj()->_con == TypeFunc::Parms &&
1424           base->in(0)->is_CallStaticJava() &&
1425           base->in(0)->as_CallStaticJava()->is_boxing_method()) {
1426         return base->in(0)->in(TypeFunc::Parms);
1427       }
1428     }
1429 
1430     break;
1431   }
1432 
1433   return nullptr;
1434 }
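
At the source level, the value forwarding performed in can_see_stored_value corresponds to replacing a load that follows a store to the same address with the stored value, as sketched by the B/L/S comment above. A trivial standalone C++ illustration of the shape being recognized (not HotSpot code; Box and store_then_load are illustrative names):

// After 'b->q = v;' a later read of 'b->q' with no intervening aliasing write
// can be replaced by 'v' -- the IR analogue is matching
// L = LoadQ(AddP(B, #Off)) against S = StoreQ(AddP(B, #Off), V) above.
struct Box { int q; };

static int store_then_load(Box* b, int v) {
  b->q = v;     // S: the store
  return b->q;  // L: the load, foldable to 'v'
}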
1435 
1436 //----------------------is_instance_field_load_with_local_phi------------------
1437 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1438   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1439       in(Address)->is_AddP() ) {
1440     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1441     // Only known instances and immutable fields
1442     if( t_oop != nullptr &&
1443         (t_oop->is_ptr_to_strict_final_field() ||
1444          t_oop->is_known_instance_field()) &&
1445         t_oop->offset() != Type::OffsetBot &&
1446         t_oop->offset() != Type::OffsetTop) {
1447       return true;
1448     }
1449   }
1450   return false;
1451 }
1452 
1453 //------------------------------Identity---------------------------------------
1454 // Loads are identity if previous store is to same address
1455 Node* LoadNode::Identity(PhaseGVN* phase) {
1456   // If the previous store-maker is the right kind of Store, and the store is
1457   // to the same address, then we are equal to the value stored.
1458   Node* mem = in(Memory);
1459   Node* value = can_see_stored_value(mem, phase);
1460   if( value ) {
1461     // byte, short & char stores truncate naturally.
1462     // A load has to load the truncated value which requires
1463     // some sort of masking operation and that requires an
1464     // Ideal call instead of an Identity call.
1465     if (memory_size() < BytesPerInt) {
1466       // If the input to the store does not fit with the load's result type,
1467       // it must be truncated via an Ideal call.
1468       if (!phase->type(value)->higher_equal(phase->type(this)))
1469         return this;
1470     }
1471 
1472     if (phase->type(value)->isa_ptr() && phase->type(this)->isa_narrowoop()) {
1473       return this;
1474     }
1475     // (This works even when value is a Con, but LoadNode::Value
1476     // usually runs first, producing the singleton type of the Con.)
1477     if (!has_pinned_control_dependency() || value->is_Con()) {
1478       return value;
1479     } else {
1480       return this;
1481     }
1482   }
1483 
1484   if (has_pinned_control_dependency()) {
1485     return this;
1486   }
1487   // Search for an existing data phi which was generated before for the same
1488   // instance's field to avoid infinite generation of phis in a loop.
1489   Node *region = mem->in(0);
1490   if (is_instance_field_load_with_local_phi(region)) {
1491     const TypeOopPtr *addr_t = in(Address)->bottom_type()->isa_oopptr();
1492     int this_index  = phase->C->get_alias_index(addr_t);
1493     int this_offset = addr_t->offset();
1494     int this_iid    = addr_t->instance_id();
1495     if (!addr_t->is_known_instance() &&
1496          addr_t->is_ptr_to_strict_final_field()) {
1497       // Use _idx of address base (could be Phi node) for immutable fields in unknown instances
1498       intptr_t   ignore = 0;
1499       Node*      base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
1500       if (base == nullptr) {
1501         return this;
1502       }
1503       this_iid = base->_idx;
1504     }
1505     const Type* this_type = bottom_type();
1506     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1507       Node* phi = region->fast_out(i);
1508       if (phi->is_Phi() && phi != mem &&
1509           phi->as_Phi()->is_same_inst_field(this_type, (int)mem->_idx, this_iid, this_index, this_offset)) {
1510         return phi;
1511       }
1512     }
1513   }
1514 
1515   return this;
1516 }
1517 

2033   bool addr_mark = ((phase->type(address)->isa_oopptr() || phase->type(address)->isa_narrowoop()) &&
2034          phase->type(address)->is_ptr()->offset() == oopDesc::mark_offset_in_bytes());
2035 
2036   // Skip up past a SafePoint control.  Cannot do this for Stores because
2037   // pointer stores & cardmarks must stay on the same side of a SafePoint.
2038   if( ctrl != nullptr && ctrl->Opcode() == Op_SafePoint &&
2039       phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw  &&
2040       !addr_mark &&
2041       (depends_only_on_test() || has_unknown_control_dependency())) {
2042     ctrl = ctrl->in(0);
2043     set_req(MemNode::Control,ctrl);
2044     progress = true;
2045   }
2046 
2047   intptr_t ignore = 0;
2048   Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
2049   if (base != nullptr
2050       && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
2051     // Check for useless control edge in some common special cases
2052     if (in(MemNode::Control) != nullptr
2053         // TODO 8350865 Can we re-enable this?
2054         && !(phase->type(address)->is_inlinetypeptr() && is_mismatched_access())
2055         && can_remove_control()
2056         && phase->type(base)->higher_equal(TypePtr::NOTNULL)
2057         && all_controls_dominate(base, phase->C->start())) {
2058       // A method-invariant, non-null address (constant or 'this' argument).
2059       set_req(MemNode::Control, nullptr);
2060       progress = true;
2061     }
2062   }
2063 
2064   Node* mem = in(MemNode::Memory);
2065   const TypePtr *addr_t = phase->type(address)->isa_ptr();
2066 
2067   if (can_reshape && (addr_t != nullptr)) {
2068     // try to optimize our memory input
2069     Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase);
2070     if (opt_mem != mem) {
2071       set_req_X(MemNode::Memory, opt_mem, phase);
2072       if (phase->type( opt_mem ) == Type::TOP) return nullptr;
2073       return this;
2074     }

2131   // fold up, do so.
2132   Node* prev_mem = find_previous_store(phase);
2133   if (prev_mem != nullptr) {
2134     Node* value = can_see_arraycopy_value(prev_mem, phase);
2135     if (value != nullptr) {
2136       return value;
2137     }
2138   }
2139   // Steps (a), (b):  Walk past independent stores to find an exact match.
2140   if (prev_mem != nullptr && prev_mem != in(MemNode::Memory)) {
2141     // (c) See if we can fold up on the spot, but don't fold up here.
2142     // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
2143     // just return a prior value, which is done by Identity calls.
2144     if (can_see_stored_value(prev_mem, phase)) {
2145       // Make ready for step (d):
2146       set_req_X(MemNode::Memory, prev_mem, phase);
2147       return this;
2148     }
2149   }
2150 
2151   if (progress) {
2152     return this;
2153   }
2154 
2155   if (!can_reshape) {
2156     phase->record_for_igvn(this);
2157   }
2158   return nullptr;
2159 }
2160 
2161 // Helper to recognize certain Klass fields which are invariant across
2162 // some group of array types (e.g., int[] or all T[] where T < Object).
2163 const Type*
2164 LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
2165                                  ciKlass* klass) const {
2166   assert(!UseCompactObjectHeaders || tkls->offset() != in_bytes(Klass::prototype_header_offset()),
2167          "must not happen");
2168 
2169   if (tkls->isa_instklassptr() && tkls->offset() == in_bytes(InstanceKlass::access_flags_offset())) {
2170     // The field is InstanceKlass::_access_flags.  Return its (constant) value.
2171     assert(Opcode() == Op_LoadUS, "must load an unsigned short from _access_flags");
2172     ciInstanceKlass* iklass = tkls->is_instklassptr()->instance_klass();
2173     return TypeInt::make(iklass->access_flags());
2174   }
2175   if (tkls->offset() == in_bytes(Klass::misc_flags_offset())) {
2176     // The field is Klass::_misc_flags.  Return its (constant) value.
2177     assert(Opcode() == Op_LoadUB, "must load an unsigned byte from _misc_flags");
2178     return TypeInt::make(klass->misc_flags());

2186   // No match.
2187   return nullptr;
2188 }
2189 
2190 //------------------------------Value-----------------------------------------
2191 const Type* LoadNode::Value(PhaseGVN* phase) const {
2192   // Either input is TOP ==> the result is TOP
2193   Node* mem = in(MemNode::Memory);
2194   const Type *t1 = phase->type(mem);
2195   if (t1 == Type::TOP)  return Type::TOP;
2196   Node* adr = in(MemNode::Address);
2197   const TypePtr* tp = phase->type(adr)->isa_ptr();
2198   if (tp == nullptr || tp->empty())  return Type::TOP;
2199   int off = tp->offset();
2200   assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
2201   Compile* C = phase->C;
2202 
2203   // If load can see a previous constant store, use that.
2204   Node* value = can_see_stored_value(mem, phase);
2205   if (value != nullptr && value->is_Con()) {
2206     if (phase->type(value)->isa_ptr() && _type->isa_narrowoop()) {
2207       return phase->type(value)->make_narrowoop();
2208     } else {
2209       assert(value->bottom_type()->higher_equal(_type), "sanity");
2210       return phase->type(value);
2211     }
2212   }

2213   // Try to guess loaded type from pointer type
2214   if (tp->isa_aryptr()) {
2215     const TypeAryPtr* ary = tp->is_aryptr();
2216     const Type* t = ary->elem();
2217 
2218     // Determine whether the reference is beyond the header or not, by comparing
2219     // the offset against the offset of the start of the array's data.
2220     // Different array types begin at slightly different offsets (12 vs. 16).
2221     // We choose T_BYTE as an example base type that is least restrictive
2222     // as to alignment, which will therefore produce the smallest
2223     // possible base offset.
2224     const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
2225     const bool off_beyond_header = (off >= min_base_off);
2226 
2227     // Try to constant-fold a stable array element.
2228     if (FoldStableValues && !is_mismatched_access() && ary->is_stable()) {
2229       // Make sure the reference is not into the header and the offset is constant
2230       ciObject* aobj = ary->const_oop();
2231       if (aobj != nullptr && off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
2232         int stable_dimension = (ary->stable_dimension() > 0 ? ary->stable_dimension() - 1 : 0);

2239       }
2240     }
2241 
2242     // Don't do this for integer types. There is only potential profit if
2243     // the element type t is lower than _type; that is, for int types, if _type is
2244     // more restrictive than t.  This only happens here if one is short and the other
2245     // char (both 16 bits), and in those cases we've made an intentional decision
2246     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
2247     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
2248     //
2249     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
2250     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
2251     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
2252     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
2253     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
2254     // In fact, that could have been the original type of p1, and p1 could have
2255     // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
2256     // expression (LShiftL quux 3) independently optimized to the constant 8.
2257     if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
2258         && (_type->isa_vect() == nullptr)
2259         && !ary->is_flat()
2260         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
2261       // t might actually be lower than _type, if _type is a unique
2262       // concrete subclass of abstract class t.
2263       if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
2264         const Type* jt = t->join_speculative(_type);
2265         // In any case, do not allow the join, per se, to empty out the type.
2266         if (jt->empty() && !t->empty()) {
2267           // This can happen if an interface-typed array narrows to a class type.
2268           jt = _type;
2269         }
2270 #ifdef ASSERT
2271         if (phase->C->eliminate_boxing() && adr->is_AddP()) {
2272           // The pointers in the autobox arrays are always non-null
2273           Node* base = adr->in(AddPNode::Base);
2274           if ((base != nullptr) && base->is_DecodeN()) {
2275             // Get LoadN node which loads IntegerCache.cache field
2276             base = base->in(1);
2277           }
2278           if ((base != nullptr) && base->is_Con()) {
2279             const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
2280             if ((base_type != nullptr) && base_type->is_autobox_cache()) {
2281               // It could be narrow oop
2282               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
2283             }
2284           }
2285         }
2286 #endif
2287         return jt;
2288       }
2289     }
2290   } else if (tp->base() == Type::InstPtr) {
2291     assert( off != Type::OffsetBot ||
2292             // arrays can be cast to Objects
2293             !tp->isa_instptr() ||
2294             tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
2295             // Default value load
2296             tp->is_instptr()->instance_klass() == ciEnv::current()->Class_klass() ||
2297             // unsafe field access may not have a constant offset
2298             C->has_unsafe_access(),
2299             "Field accesses must be precise" );
2300     // For oop loads, we expect the _type to be precise.
2301 

2302     const TypeInstPtr* tinst = tp->is_instptr();
2303     BasicType bt = value_basic_type();
2304 
2305     // Fold loads of the field map
2306     if (UseAltSubstitutabilityMethod && tinst != nullptr) {
2307       ciInstanceKlass* ik = tinst->instance_klass();
2308       int offset = tinst->offset();
2309       if (ik == phase->C->env()->Class_klass()) {
2310         ciType* t = tinst->java_mirror_type();
2311         if (t != nullptr && t->is_inlinetype() && offset == t->as_inline_klass()->field_map_offset()) {
2312           ciConstant map = t->as_inline_klass()->get_field_map();
2313           bool is_narrow_oop = (bt == T_NARROWOOP);
2314           return Type::make_from_constant(map, true, 1, is_narrow_oop);
2315         }
2316       }
2317     }
2318 
2319     // Optimize loads from constant fields.
2320     ciObject* const_oop = tinst->const_oop();
2321     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
2322       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt);
2323       if (con_type != nullptr) {
2324         return con_type;
2325       }
2326     }
2327   } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
2328     assert(off != Type::OffsetBot ||
2329             !tp->isa_instklassptr() ||
2330            // arrays can be cast to Objects
2331            tp->isa_instklassptr()->instance_klass()->is_java_lang_Object() ||
2332            // also allow array-loading from the primary supertype
2333            // array during subtype checks
2334            Opcode() == Op_LoadKlass,
2335            "Field accesses must be precise");
2336     // For klass/static loads, we expect the _type to be precise
2337   } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
2338     /* With the mirror now reached indirectly through the Klass* (it is stored
2339      * in an OopHandle), the VM uses two loads: LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset)).
2340      * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
2341      *
2342      * So check the type and klass of the node before the LoadP.

2349         assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2350         assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2351         return TypeInstPtr::make(klass->java_mirror());
2352       }
2353     }
2354   }
2355 
2356   const TypeKlassPtr *tkls = tp->isa_klassptr();
2357   if (tkls != nullptr) {
2358     if (tkls->is_loaded() && tkls->klass_is_exact()) {
2359       ciKlass* klass = tkls->exact_klass();
2360       // We are loading a field from a Klass metaobject whose identity
2361       // is known at compile time (the type is "exact" or "precise").
2362       // Check for fields we know are maintained as constants by the VM.
2363       if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2364         // The field is Klass::_super_check_offset.  Return its (constant) value.
2365         // (Folds up type checking code.)
2366         assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2367         return TypeInt::make(klass->super_check_offset());
2368       }
2369       if (klass->is_inlinetype() && tkls->offset() == in_bytes(InstanceKlass::acmp_maps_offset_offset())) {
2370         return TypeInt::make(klass->as_inline_klass()->field_map_offset());
2371       }
2372       if (klass->is_obj_array_klass() && tkls->offset() == in_bytes(ObjArrayKlass::next_refined_array_klass_offset())) {
2373         // Fold loads from LibraryCallKit::load_default_refined_array_klass
2374         return tkls->is_aryklassptr()->cast_to_refined_array_klass_ptr();
2375       }
2376       if (klass->is_array_klass() && tkls->offset() == in_bytes(ObjArrayKlass::properties_offset())) {
2377         assert(klass->is_type_array_klass() || tkls->is_aryklassptr()->is_refined_type(), "must be a type array or refined array klass pointer");
2378         return TypeInt::make(klass->as_array_klass()->properties());
2379       }
2380       if (klass->is_flat_array_klass() && tkls->offset() == in_bytes(FlatArrayKlass::layout_kind_offset())) {
2381         assert(Opcode() == Op_LoadI, "must load an int from _layout_kind");
2382         return TypeInt::make(static_cast<jint>(klass->as_flat_array_klass()->layout_kind()));
2383       }
2384       if (UseCompactObjectHeaders && tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
2385         // The field is Klass::_prototype_header. Return its (constant) value.
2386         assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
2387         return TypeX::make(klass->prototype_header());
2388       }
2389       // Compute index into primary_supers array
2390       juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2391       // Check for overflowing; use unsigned compare to handle the negative case.
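      // Worked example (hypothetical offsets, 64-bit VM so sizeof(Klass*) == 8):
      // if primary_supers_offset is 40 and tkls->offset() is 56, then
      // depth == (56 - 40) / 8 == 2 and the load folds to the constant super
      // klass at depth 2.  A smaller offset makes the subtraction negative,
      // which wraps to a huge juint and fails the bound check below.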
2392       if( depth < ciKlass::primary_super_limit() ) {
2393         // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2394         // (Folds up type checking code.)
2395         assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2396         ciKlass *ss = klass->super_of_depth(depth);
2397         return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR;
2398       }
2399       const Type* aift = load_array_final_field(tkls, klass);
2400       if (aift != nullptr)  return aift;
2401     }
2402 
2403     // We can still check if we are loading from the primary_supers array at a
2404     // shallow enough depth.  Even though the klass is not exact, entries less
2405     // than or equal to its super depth are correct.
2406     if (tkls->is_loaded()) {
2407       ciKlass* klass = nullptr;

2441       jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
2442       // The key property of this type is that it folds up tests
2443       // for array-ness, since it proves that the layout_helper is positive.
2444       // Thus, a generic value like the basic object layout helper works fine.
2445       return TypeInt::make(min_size, max_jint, Type::WidenMin);
2446     }
2447   }
2448 
2449   // If we are loading from a freshly-allocated object/array, produce a zero.
2450   // Things to check:
2451   //   1. Load is beyond the header: headers are not guaranteed to be zero
2452   //   2. Load is not vectorized: vectors have no zero constant
2453   //   3. Load has no matching store, i.e. the input is the initial memory state
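  // For example (sketch): a LoadI from a known-instance field whose memory
  // input is still the initial memory Parm of the method folds to the int
  // constant zero, because the allocation is known to have zeroed the payload.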
2454   const TypeOopPtr* tinst = tp->isa_oopptr();
2455   bool is_not_header = (tinst != nullptr) && tinst->is_known_instance_field();
2456   bool is_not_vect = (_type->isa_vect() == nullptr);
2457   if (is_not_header && is_not_vect) {
2458     Node* mem = in(MemNode::Memory);
2459     if (mem->is_Parm() && mem->in(0)->is_Start()) {
2460       assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2461       // TODO 8350865 Scalar replacement does not work well for flat arrays.
2462       // Escape Analysis assumes that arrays are always zeroed during allocation, which is not true for null-free arrays.
2463       // ConnectionGraph::split_unique_types will re-wire the memory of loads from such arrays around the allocation.
2464       // TestArrays::test6, test152, and TestBasicFunctionality::test20 are affected by this.
2465       if (tp->isa_aryptr() && tp->is_aryptr()->is_flat() && tp->is_aryptr()->is_null_free()) {
2466         intptr_t offset = 0;
2467         Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2468         AllocateNode* alloc = AllocateNode::Ideal_allocation(base);
2469         if (alloc != nullptr && alloc->is_AllocateArray() && alloc->in(AllocateNode::InitValue) != nullptr) {
2470           return _type;
2471         }
2472       }
2473       return Type::get_zero_type(_type->basic_type());
2474     }
2475   }

2476   if (!UseCompactObjectHeaders) {
2477     Node* alloc = is_new_object_mark_load();
2478     if (alloc != nullptr) {
2479       if (Arguments::is_valhalla_enabled()) {
2480         // The mark word may contain property bits (inline, flat, null-free)
2481         Node* klass_node = alloc->in(AllocateNode::KlassNode);
2482         const TypeKlassPtr* tkls = phase->type(klass_node)->isa_klassptr();
2483         if (tkls != nullptr && tkls->is_loaded() && tkls->klass_is_exact()) {
2484           return TypeX::make(tkls->exact_klass()->prototype_header());
2485         }
2486       } else {
2487         return TypeX::make(markWord::prototype().value());
2488       }
2489     }
2490   }
2491 
2492   return _type;
2493 }
2494 
2495 //------------------------------match_edge-------------------------------------
2496 // Do we Match on this edge index or not?  Match only the address.
2497 uint LoadNode::match_edge(uint idx) const {
2498   return idx == MemNode::Address;
2499 }
2500 
2501 //--------------------------LoadBNode::Ideal--------------------------------------
2502 //
2503 //  If the previous store is to the same address as this load,
2504 //  and the value stored was larger than a byte, replace this load
2505 //  with the value stored truncated to a byte.  If no truncation is
2506 //  needed, the replacement is done in LoadNode::Identity().
2507 //
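// Illustrative shape of the transformation (sketch only; the replacement is
// built from the stored value below, roughly (RShiftI (LShiftI v 24) 24)):
//   LoadB(mem, p)  with  mem = StoreI(m, p, v)   ==>   sign-extended low byte of v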
2508 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {

2617     }
2618   }
2619   // Identity call will handle the case where truncation is not needed.
2620   return LoadNode::Ideal(phase, can_reshape);
2621 }
2622 
2623 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2624   Node* mem = in(MemNode::Memory);
2625   Node* value = can_see_stored_value(mem,phase);
2626   if (value != nullptr && value->is_Con() &&
2627       !value->bottom_type()->higher_equal(_type)) {
2628     // If the input to the store does not fit the load's result type,
2629     // it must be truncated here.  We cannot delay this until the Ideal call,
2630     // since a singleton Value is needed for the split_thru_phi optimization.
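    // Worked example (assuming the usual two's-complement 32-bit wrap): for a
    // stored constant 0x00018000, (0x00018000 << 16) >> 16 == 0xffff8000,
    // i.e. -32768, exactly what a real 16-bit signed load of the low half
    // would produce.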
2631     int con = value->get_int();
2632     return TypeInt::make((con << 16) >> 16);
2633   }
2634   return LoadNode::Value(phase);
2635 }
2636 
2637 Node* LoadNNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2638   // Loading from an InlineType, find the input and make an EncodeP
2639   Node* addr = in(Address);
2640   intptr_t offset;
2641   Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset);
2642   Node* value = see_through_inline_type(phase, this, base, offset);
2643   if (value != nullptr) {
2644     return new EncodePNode(value, type());
2645   }
2646 
2647   // Can see the corresponding value, may need to add an EncodeP
2648   value = can_see_stored_value(in(Memory), phase);
2649   if (value != nullptr && phase->type(value)->isa_ptr() && type()->isa_narrowoop()) {
2650     return new EncodePNode(value, type());
2651   }
2652 
2653   // Identity call will handle the case where EncodeP is unnecessary
2654   return LoadNode::Ideal(phase, can_reshape);
2655 }
2656 
2657 //=============================================================================
2658 //----------------------------LoadKlassNode::make------------------------------
2659 // Polymorphic factory method:
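// Usage sketch (hypothetical caller; mem/klass_adr/at/tk stand for whatever
// the call site already has): the factory hides the compressed-class-pointer
// decision, so the caller only transforms the result:
//   Node* k = gvn.transform(LoadKlassNode::make(gvn, mem, klass_adr, at, tk));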
2660 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
2661   // sanity check the alias category against the created node type
2662   const TypePtr* adr_type = adr->bottom_type()->isa_ptr();
2663   assert(adr_type != nullptr, "expecting TypeKlassPtr");
2664 #ifdef _LP64
2665   if (adr_type->is_ptr_to_narrowklass()) {
2666     assert(UseCompressedClassPointers, "no compressed klasses");
2667     Node* load_klass = gvn.transform(new LoadNKlassNode(mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2668     return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2669   }
2670 #endif
2671   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "unexpected narrow oop or narrow klass pointer");
2672   return new LoadKlassNode(mem, adr, at, tk, MemNode::unordered);
2673 }
2674 
2675 //------------------------------Value------------------------------------------
2676 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {

2710           }
2711           return TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
2712         }
2713         if (!t->is_klass()) {
2714           // a primitive Class (e.g., int.class) has null for a klass field
2715           return TypePtr::NULL_PTR;
2716         }
2717         // Fold up the load of the hidden field
2718         return TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
2719       }
2720       // non-constant mirror, so we can't tell what's going on
2721     }
2722     if (!tinst->is_loaded())
2723       return _type;             // Bail out if not loaded
2724     if (offset == oopDesc::klass_offset_in_bytes()) {
2725       return tinst->as_klass_type(true);
2726     }
2727   }
2728 
2729   // Check for loading klass from an array
2730   const TypeAryPtr* tary = tp->isa_aryptr();
2731   if (tary != nullptr &&
2732       tary->offset() == oopDesc::klass_offset_in_bytes()) {
2733     return tary->as_klass_type(true)->is_aryklassptr();
2734   }
2735 
2736   // Check for loading klass from an array klass
2737   const TypeKlassPtr *tkls = tp->isa_klassptr();
2738   if (tkls != nullptr && !StressReflectiveCode) {
2739     if (!tkls->is_loaded())
2740      return _type;             // Bail out if not loaded
2741     if (tkls->isa_aryklassptr() && tkls->is_aryklassptr()->elem()->isa_klassptr() &&
2742         tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2743       // // Always returning precise element type is incorrect,
2744       // // e.g., element type could be object and array may contain strings
2745       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2746 
2747       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2748       // according to the element type's subclassing.
2749       return tkls->is_aryklassptr()->elem()->isa_klassptr()->cast_to_exactness(tkls->klass_is_exact());
2750     }
2751     if (tkls->isa_aryklassptr() != nullptr && tkls->klass_is_exact() &&
2752         !tkls->exact_klass()->is_type_array_klass() &&
2753         tkls->offset() == in_bytes(Klass::super_offset())) {
2754       // We are loading the super klass of a refined array klass; return the non-refined klass pointer
2755       assert(tkls->is_aryklassptr()->is_refined_type(), "Must be a refined array klass pointer");
2756       return tkls->is_aryklassptr()->with_offset(0)->cast_to_non_refined();
2757     }
2758     if (tkls->isa_instklassptr() != nullptr && tkls->klass_is_exact() &&
2759         tkls->offset() == in_bytes(Klass::super_offset())) {
2760       ciKlass* sup = tkls->is_instklassptr()->instance_klass()->super();
2761       // The field is Klass::_super.  Return its (constant) value.
2762       // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
2763       return sup ? TypeKlassPtr::make(sup, Type::trust_interfaces) : TypePtr::NULL_PTR;
2764     }
2765   }
2766 
2767   if (tkls != nullptr && !UseSecondarySupersCache
2768       && tkls->offset() == in_bytes(Klass::secondary_super_cache_offset()))  {
2769     // Treat Klass::_secondary_super_cache as a constant when the cache is disabled.
2770     return TypePtr::NULL_PTR;
2771   }
2772 
2773   // Bailout case
2774   return LoadNode::Value(phase);
2775 }
2776 
2777 //------------------------------Identity---------------------------------------

2800     base = bs->step_over_gc_barrier(base);
2801   }
2802 
2803   // We can fetch the klass directly through an AllocateNode.
2804   // This works even if the klass is not constant (clone or newArray).
2805   if (offset == oopDesc::klass_offset_in_bytes()) {
2806     Node* allocated_klass = AllocateNode::Ideal_klass(base, phase);
2807     if (allocated_klass != nullptr) {
2808       return allocated_klass;
2809     }
2810   }
2811 
2812   // Simplify k.java_mirror.as_klass to plain k, where k is a Klass*.
2813   // See inline_native_Class_query for occurrences of these patterns.
2814   // Java Example:  x.getClass().isAssignableFrom(y)
2815   //
2816   // This improves reflective code, often making the Class
2817   // mirror go completely dead.  (Current exception:  Class
2818   // mirrors may appear in debug info, but we could clean them out by
2819   // introducing a new debug info operator for Klass.java_mirror).
2820   //
2821   // This optimization does not apply to arrays because if k is not a
2822   // constant, it was obtained via load_klass which returns the VM type
2823   // and '.java_mirror.as_klass' should return the Java type instead.
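  // Shape being matched below (sketch):
  //   LoadKlass(mirror + java_lang_Class::klass_offset)
  //     where mirror = LoadP(LoadP(AddP(k, k, Klass::java_mirror_offset)))
  //   ==> k        (the inner LoadP reads the OopHandle, the outer the mirror)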
2824 
2825   if (toop->isa_instptr() && toop->is_instptr()->instance_klass() == phase->C->env()->Class_klass()
2826       && offset == java_lang_Class::klass_offset()) {
2827     if (base->is_Load()) {
2828       Node* base2 = base->in(MemNode::Address);
2829       if (base2->is_Load()) { /* direct load of a load which is the OopHandle */
2830         Node* adr2 = base2->in(MemNode::Address);
2831         const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
2832         if (tkls != nullptr && !tkls->empty()
2833             && ((tkls->isa_instklassptr() && !tkls->is_instklassptr()->might_be_an_array()))
2834             && adr2->is_AddP()) {

2835           int mirror_field = in_bytes(Klass::java_mirror_offset());
2836           if (tkls->offset() == mirror_field) {
2837             return adr2->in(AddPNode::Base);
2838           }
2839         }
2840       }
2841     }
2842   }
2843 
2844   return this;
2845 }
2846 
2847 LoadNode* LoadNode::clone_pinned() const {
2848   LoadNode* ld = clone()->as_Load();
2849   ld->_control_dependency = UnknownControl;
2850   return ld;
2851 }
2852 
2853 
2854 //------------------------------Value------------------------------------------

2959 //---------------------------StoreNode::make-----------------------------------
2960 // Polymorphic factory method:
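// Usage sketch (hypothetical caller): the factory picks the concrete store
// node and, for oops on 64-bit with compressed oops, inserts the EncodeP
// itself, so callers pass the uncompressed value and transform the result:
//   Node* st = gvn.transform(StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_INT, MemNode::unordered));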
2961 StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo, bool require_atomic_access) {
2962   assert((mo == unordered || mo == release), "unexpected");
2963   Compile* C = gvn.C;
2964   assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
2965          ctl != nullptr, "raw memory operations should have control edge");
2966 
2967   switch (bt) {
2968   case T_BOOLEAN: val = gvn.transform(new AndINode(val, gvn.intcon(0x1))); // Fall through to T_BYTE case
2969   case T_BYTE:    return new StoreBNode(ctl, mem, adr, adr_type, val, mo);
2970   case T_INT:     return new StoreINode(ctl, mem, adr, adr_type, val, mo);
2971   case T_CHAR:
2972   case T_SHORT:   return new StoreCNode(ctl, mem, adr, adr_type, val, mo);
2973   case T_LONG:    return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic_access);
2974   case T_FLOAT:   return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
2975   case T_DOUBLE:  return new StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic_access);
2976   case T_METADATA:
2977   case T_ADDRESS:
2978   case T_OBJECT:
2979   case T_ARRAY:
2980 #ifdef _LP64
2981     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2982       val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop()));
2983       return new StoreNNode(ctl, mem, adr, adr_type, val, mo);
2984     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
2985                (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
2986                 adr->bottom_type()->isa_rawptr())) {
2987       val = gvn.transform(new EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
2988       return new StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
2989     }
2990 #endif
2991     {
2992       return new StorePNode(ctl, mem, adr, adr_type, val, mo);
2993     }
2994   default:
2995     assert(false, "unexpected basic type %s", type2name(bt));
2996     return (StoreNode*)nullptr;
2997   }
2998 }
2999 
3000 //--------------------------bottom_type----------------------------------------
3001 const Type *StoreNode::bottom_type() const {
3002   return Type::MEMORY;
3003 }
3004 
3005 //------------------------------hash-------------------------------------------
3006 uint StoreNode::hash() const {
3007   // unroll addition of interesting fields
3008   //return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address) + (uintptr_t)in(ValueIn);
3009 
3010   // Since they are not commoned, do not hash them:
3011   return NO_HASH;
3012 }
3013 
3014 // Link together multiple stores (B/S/C/I) into a longer one.
3015 //

3637   }
3638   ss.print_cr("[TraceMergeStores]: with");
3639   merged_input_value->dump("\n", false, &ss);
3640   merged_store->dump("\n", false, &ss);
3641   tty->print("%s", ss.as_string());
3642 }
3643 #endif
3644 
3645 //------------------------------Ideal------------------------------------------
3646 // Fold back-to-back stores to the same address: Store(Store(m, p, y), p, x)
3647 // becomes Store(m, p, x).  When a store immediately follows a relevant
3648 // allocation/initialization, try to capture it into the initialization, or hoist it above.
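// Illustrative sketch of the back-to-back fold and its single-use requirement:
//   st1 = StoreI(mem, p, 1)    // must have exactly one use (st2)
//   st2 = StoreI(st1, p, 2)    // overwrites at least as many bytes as st1
// After Ideal, st2's memory input is rewired to mem and st1 goes dead.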
3649 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3650   Node* p = MemNode::Ideal_common(phase, can_reshape);
3651   if (p)  return (p == NodeSentinel) ? nullptr : p;
3652 
3653   Node* mem     = in(MemNode::Memory);
3654   Node* address = in(MemNode::Address);
3655   Node* value   = in(MemNode::ValueIn);
3656   // Back-to-back stores to same address?  Fold em up.  Generally
3657   // unsafe if I have intervening uses...
3658   if (phase->C->get_adr_type(phase->C->get_alias_index(adr_type())) != TypeAryPtr::INLINES) {
3659     Node* st = mem;
3660     // If Store 'st' has more than one use, we cannot fold 'st' away.
3661     // For example, 'st' might be the final state at a conditional
3662     // return.  Or, 'st' might be used by some node which is live at
3663     // the same time 'st' is live, which might be unschedulable.  So,
3664     // require exactly ONE user until such time as we clone 'mem' for
3665     // each of 'mem's uses (thus making the exactly-1-user-rule hold
3666     // true).
3667     while (st->is_Store() && st->outcnt() == 1) {
3668       // Looking at a dead closed cycle of memory?
3669       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
3670       assert(Opcode() == st->Opcode() ||
3671              st->Opcode() == Op_StoreVector ||
3672              Opcode() == Op_StoreVector ||
3673              st->Opcode() == Op_StoreVectorScatter ||
3674              Opcode() == Op_StoreVectorScatter ||
3675              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
3676              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
3677              (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
3678              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) ||
3679              (st->adr_type()->isa_aryptr() && st->adr_type()->is_aryptr()->is_flat()) || // TODO 8343835
3680              (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
3681              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
3682 
3683       if (st->in(MemNode::Address)->eqv_uncast(address) &&
3684           st->as_Store()->memory_size() <= this->memory_size()) {
3685         Node* use = st->raw_out(0);
3686         if (phase->is_IterGVN()) {
3687           phase->is_IterGVN()->rehash_node_delayed(use);
3688         }
3689         // It's OK to do this in the parser, since DU info is always accurate,
3690         // and the parser always refers to nodes via SafePointNode maps.
3691         use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
3692         return this;
3693       }
3694       st = st->in(MemNode::Memory);
3695     }
3696   }
3697 
3698 
3699   // Capture an unaliased, unconditional, simple store into an initializer.

3797       const StoreVectorNode* store_vector = as_StoreVector();
3798       const StoreVectorNode* mem_vector = mem->as_StoreVector();
3799       const Node* store_indices = store_vector->indices();
3800       const Node* mem_indices = mem_vector->indices();
3801       const Node* store_mask = store_vector->mask();
3802       const Node* mem_mask = mem_vector->mask();
3803       // Ensure types, indices, and masks match
3804       if (store_vector->vect_type() == mem_vector->vect_type() &&
3805           ((store_indices == nullptr) == (mem_indices == nullptr) &&
3806            (store_indices == nullptr || store_indices->eqv_uncast(mem_indices))) &&
3807           ((store_mask == nullptr) == (mem_mask == nullptr) &&
3808            (store_mask == nullptr || store_mask->eqv_uncast(mem_mask)))) {
3809         result = mem;
3810       }
3811     }
3812   }
3813 
3814   // Store of zero anywhere into a freshly-allocated object?
3815   // Then the store is useless.
3816   // (It must already have been captured by the InitializeNode.)
3817   if (result == this && ReduceFieldZeroing) {

3818     // a newly allocated object is already all-zeroes everywhere
3819     if (mem->is_Proj() && mem->in(0)->is_Allocate() &&
3820         (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::InitValue) == val)) {
3821       result = mem;
3822     }
3823 
3824     if (result == this && phase->type(val)->is_zero_type()) {
3825       // the store may also apply to zero-bits in an earlier object
3826       Node* prev_mem = find_previous_store(phase);
3827       // Steps (a), (b):  Walk past independent stores to find an exact match.
3828       if (prev_mem != nullptr) {
3829         Node* prev_val = can_see_stored_value(prev_mem, phase);
3830         if (prev_val != nullptr && prev_val == val) {
3831           // prev_val and val might differ by a cast; it would be good
3832           // to keep the more informative of the two.
3833           result = mem;
3834         }
3835       }
3836     }
3837   }
3838 
3839   PhaseIterGVN* igvn = phase->is_IterGVN();
3840   if (result != this && igvn != nullptr) {
3841     MemBarNode* trailing = trailing_membar();
3842     if (trailing != nullptr) {
3843 #ifdef ASSERT
3844       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();

4308 // Clearing a short array is faster with stores
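// Illustrative expansion (sketch, assuming a constant length of 2 longs and
// that the checks below decide to idealize rather than keep the ClearArray):
//   mem = StoreL(ctl, mem, adr,                atp, val)
//   mem = StoreL(ctl, mem, AddP(base, adr, 8), atp, val)
// i.e. a short chain of 8-byte stores instead of a runtime clear loop.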
4309 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
4310   // Already know this is a large node, do not try to ideal it
4311   if (_is_large) return nullptr;
4312 
4313   const int unit = BytesPerLong;
4314   const TypeX* t = phase->type(in(2))->isa_intptr_t();
4315   if (!t)  return nullptr;
4316   if (!t->is_con())  return nullptr;
4317   intptr_t raw_count = t->get_con();
4318   intptr_t size = raw_count;
4319   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
4320   // Clearing nothing uses the Identity call.
4321   // Negative clears are possible on dead ClearArrays
4322   // (see jck test stmt114.stmt11402.val).
4323   if (size <= 0 || size % unit != 0)  return nullptr;
4324   intptr_t count = size / unit;
4325   // Length too long; communicate this to matchers and assemblers.
4326   // Assemblers are responsible for producing fast hardware clears for it.
4327   if (size > InitArrayShortSize) {
4328     return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true);
4329   } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
4330     return nullptr;
4331   }
4332   if (!IdealizeClearArrayNode) return nullptr;
4333   Node *mem = in(1);
4334   if( phase->type(mem)==Type::TOP ) return nullptr;
4335   Node *adr = in(3);
4336   const Type* at = phase->type(adr);
4337   if( at==Type::TOP ) return nullptr;
4338   const TypePtr* atp = at->isa_ptr();
4339   // adjust atp to be the correct array element address type
4340   if (atp == nullptr)  atp = TypePtr::BOTTOM;
4341   else              atp = atp->add_offset(Type::OffsetBot);
4342   // Get base for derived pointer purposes
4343   if( adr->Opcode() != Op_AddP ) Unimplemented();
4344   Node *base = adr->in(1);
4345 
4346   Node *val = in(4);
4347   Node *off  = phase->MakeConX(BytesPerLong);
4348   mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
4349   count--;
4350   while( count-- ) {
4351     mem = phase->transform(mem);
4352     adr = phase->transform(new AddPNode(base,adr,off));
4353     mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
4354   }
4355   return mem;
4356 }
4357 
4358 //----------------------------step_through----------------------------------
4359 // If the ClearArray initializes a different instance, advance *np past the allocation's
4360 // memory edge and return true; return false if it initializes the instance we are looking for.
4361 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseValues* phase) {
4362   Node* n = *np;
4363   assert(n->is_ClearArray(), "sanity");
4364   intptr_t offset;
4365   AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
4366   // This method is called only before Allocate nodes are expanded
4367   // during macro nodes expansion. Before that ClearArray nodes are
4368   // only generated in PhaseMacroExpand::generate_arraycopy() (before
4369   // Allocate nodes are expanded) which follows allocations.
4370   assert(alloc != nullptr, "should have allocation");
4371   if (alloc->_idx == instance_id) {
4372     // Can not bypass initialization of the instance we are looking for.
4373     return false;
4374   }
4375   // Otherwise skip it.
4376   InitializeNode* init = alloc->initialization();
4377   if (init != nullptr)
4378     *np = init->in(TypeFunc::Memory);
4379   else
4380     *np = alloc->in(TypeFunc::Memory);
4381   return true;
4382 }
4383 
4384 //----------------------------clear_memory-------------------------------------
4385 // Generate code to initialize object storage to zero.
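// Worked example (sketch): with start_offset == 12 and BytesPerLong == 8 the
// start is not 8-byte aligned, so a single 4-byte store (zero, or the narrow
// oop init value) is emitted at offset 12 first, and the bulk clear below
// then starts at the aligned offset 16.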
4386 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4387                                    Node* val,
4388                                    Node* raw_val,
4389                                    intptr_t start_offset,
4390                                    Node* end_offset,
4391                                    PhaseGVN* phase) {
4392   intptr_t offset = start_offset;
4393 
4394   int unit = BytesPerLong;
4395   if ((offset % unit) != 0) {
4396     Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
4397     adr = phase->transform(adr);
4398     const TypePtr* atp = TypeRawPtr::BOTTOM;
4399     if (val != nullptr) {
4400       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
4401       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
4402     } else {
4403       assert(raw_val == nullptr, "raw_val must be null when val is null");
4404       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4405     }
4406     mem = phase->transform(mem);
4407     offset += BytesPerInt;
4408   }
4409   assert((offset % unit) == 0, "");
4410 
4411   // Initialize the remaining stuff, if any, with a ClearArray.
4412   return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase);
4413 }
4414 
4415 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4416                                    Node* raw_val,
4417                                    Node* start_offset,
4418                                    Node* end_offset,
4419                                    PhaseGVN* phase) {
4420   if (start_offset == end_offset) {
4421     // nothing to do
4422     return mem;
4423   }
4424 
4425   int unit = BytesPerLong;
4426   Node* zbase = start_offset;
4427   Node* zend  = end_offset;
4428 
4429   // Scale to the unit required by the CPU:
4430   if (!Matcher::init_array_count_is_in_bytes) {
4431     Node* shift = phase->intcon(exact_log2(unit));
4432     zbase = phase->transform(new URShiftXNode(zbase, shift) );
4433     zend  = phase->transform(new URShiftXNode(zend,  shift) );
4434   }
4435 
4436   // Bulk clear double-words
4437   Node* zsize = phase->transform(new SubXNode(zend, zbase) );
4438   Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
4439   if (raw_val == nullptr) {
4440     raw_val = phase->MakeConX(0);
4441   }
4442   mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false);
4443   return phase->transform(mem);
4444 }
4445 
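// Variant for constant start/end byte offsets.  Worked example (hypothetical
// numbers): for start_offset == 8 and end_offset == 20, done_offset rounds
// down to 16, the aligned range [8, 16) is delegated to the variants above,
// and the trailing 4 bytes at offset 16 are finished with one 32-bit store.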
4446 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4447                                    Node* val,
4448                                    Node* raw_val,
4449                                    intptr_t start_offset,
4450                                    intptr_t end_offset,
4451                                    PhaseGVN* phase) {
4452   if (start_offset == end_offset) {
4453     // nothing to do
4454     return mem;
4455   }
4456 
4457   assert((end_offset % BytesPerInt) == 0, "odd end offset");
4458   intptr_t done_offset = end_offset;
4459   if ((done_offset % BytesPerLong) != 0) {
4460     done_offset -= BytesPerInt;
4461   }
4462   if (done_offset > start_offset) {
4463     mem = clear_memory(ctl, mem, dest, val, raw_val,
4464                        start_offset, phase->MakeConX(done_offset), phase);
4465   }
4466   if (done_offset < end_offset) { // emit the final 32-bit store
4467     Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
4468     adr = phase->transform(adr);
4469     const TypePtr* atp = TypeRawPtr::BOTTOM;
4470     if (val != nullptr) {
4471       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
4472       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
4473     } else {
4474       assert(raw_val == nullptr, "raw_val must be null when val is null");
4475       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4476     }
4477     mem = phase->transform(mem);
4478     done_offset += BytesPerInt;
4479   }
4480   assert(done_offset == end_offset, "");
4481   return mem;
4482 }
4483 
4484 //=============================================================================
4485 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
4486   : MultiNode(TypeFunc::Parms + (precedent == nullptr? 0: 1)),
4487     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
4488 #ifdef ASSERT
4489   , _pair_idx(0)
4490 #endif
4491 {
4492   init_class_id(Class_MemBar);
4493   Node* top = C->top();
4494   init_req(TypeFunc::I_O,top);
4495   init_req(TypeFunc::FramePtr,top);
4496   init_req(TypeFunc::ReturnAdr,top);

4603       PhaseIterGVN* igvn = phase->is_IterGVN();
4604       remove(igvn);
4605       // Must return either the original node (now dead) or a new node
4606       // (Do not return a top here, since that would break the uniqueness of top.)
4607       return new ConINode(TypeInt::ZERO);
4608     }
4609   }
4610   return progress ? this : nullptr;
4611 }
4612 
4613 //------------------------------Value------------------------------------------
4614 const Type* MemBarNode::Value(PhaseGVN* phase) const {
4615   if( !in(0) ) return Type::TOP;
4616   if( phase->type(in(0)) == Type::TOP )
4617     return Type::TOP;
4618   return TypeTuple::MEMBAR;
4619 }
4620 
4621 //------------------------------match------------------------------------------
4622 // Construct projections for memory.
4623 Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) {
4624   switch (proj->_con) {
4625   case TypeFunc::Control:
4626   case TypeFunc::Memory:
4627     return new MachProjNode(this, proj->_con, RegMask::EMPTY, MachProjNode::unmatched_proj);
4628   }
4629   ShouldNotReachHere();
4630   return nullptr;
4631 }
4632 
4633 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4634   trailing->_kind = TrailingStore;
4635   leading->_kind = LeadingStore;
4636 #ifdef ASSERT
4637   trailing->_pair_idx = leading->_idx;
4638   leading->_pair_idx = leading->_idx;
4639 #endif
4640 }
4641 
4642 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4643   trailing->_kind = TrailingLoadStore;

4890   return (req() > RawStores);
4891 }
4892 
4893 void InitializeNode::set_complete(PhaseGVN* phase) {
4894   assert(!is_complete(), "caller responsibility");
4895   _is_complete = Complete;
4896 
4897   // After this node is complete, it contains a bunch of
4898   // raw-memory initializations.  There is no need for
4899   // it to have anything to do with non-raw memory effects.
4900   // Therefore, tell all non-raw users to re-optimize themselves,
4901   // after skipping the memory effects of this initialization.
4902   PhaseIterGVN* igvn = phase->is_IterGVN();
4903   if (igvn)  igvn->add_users_to_worklist(this);
4904 }
4905 
4906 // convenience function
4907 // return false if the init contains any stores already
4908 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
4909   InitializeNode* init = initialization();
4910   if (init == nullptr || init->is_complete()) {
4911     return false;
4912   }
4913   init->remove_extra_zeroes();
4914   // for now, if this allocation has already collected any inits, bail:
4915   if (init->is_non_zero())  return false;
4916   init->set_complete(phase);
4917   return true;
4918 }
4919 
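// Sketch of the compaction below (hypothetical inputs): with raw stores
// {st0, zmem, top, st1}, only {st0, st1} are kept, the trailing slots are
// deleted, and req() shrinks by two.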
4920 void InitializeNode::remove_extra_zeroes() {
4921   if (req() == RawStores)  return;
4922   Node* zmem = zero_memory();
4923   uint fill = RawStores;
4924   for (uint i = fill; i < req(); i++) {
4925     Node* n = in(i);
4926     if (n->is_top() || n == zmem)  continue;  // skip
4927     if (fill < i)  set_req(fill, n);          // compact
4928     ++fill;
4929   }
4930   // delete any empty spaces created:
4931   while (fill < req()) {
4932     del_req(fill);

5076             // store node that we'd like to capture. We need to check
5077             // the uses of the MergeMemNode.
5078             mems.push(n);
5079           }
5080         } else if (n->is_Mem()) {
5081           Node* other_adr = n->in(MemNode::Address);
5082           if (other_adr == adr) {
5083             failed = true;
5084             break;
5085           } else {
5086             const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
5087             if (other_t_adr != nullptr) {
5088               int other_alias_idx = phase->C->get_alias_index(other_t_adr);
5089               if (other_alias_idx == alias_idx) {
5090                 // A load from the same memory slice as the store right
5091                 // after the InitializeNode. We check the control of the
5092                 // object/array that is loaded from. If it's the same as
5093                 // the store control then we cannot capture the store.
5094                 assert(!n->is_Store(), "2 stores to same slice on same control?");
5095                 Node* base = other_adr;
5096                 if (base->is_Phi()) {
5097                   // In rare cases, base may be a PhiNode and it may read
5098                   // the same memory slice between InitializeNode and store.
5099                   failed = true;
5100                   break;
5101                 }
5102                 assert(base->is_AddP(), "should be addp but is %s", base->Name());
5103                 base = base->in(AddPNode::Base);
5104                 if (base != nullptr) {
5105                   base = base->uncast();
5106                   if (base->is_Proj() && base->in(0) == alloc) {
5107                     failed = true;
5108                     break;
5109                   }
5110                 }
5111               }
5112             }
5113           }
5114         } else {
5115           failed = true;
5116           break;
5117         }
5118       }
5119     }
5120   }
5121   if (failed) {

5668         //   z's_done      12  16  16  16    12  16    12
5669         //   z's_needed    12  16  16  16    16  16    16
5670         //   zsize          0   0   0   0     4   0     4
5671         if (next_full_store < 0) {
5672           // Conservative tack:  Zero to end of current word.
5673           zeroes_needed = align_up(zeroes_needed, BytesPerInt);
5674         } else {
5675           // Zero to beginning of next fully initialized word.
5676           // Or, don't zero at all, if we are already in that word.
5677           assert(next_full_store >= zeroes_needed, "must go forward");
5678           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
5679           zeroes_needed = next_full_store;
5680         }
5681       }
5682 
5683       if (zeroes_needed > zeroes_done) {
5684         intptr_t zsize = zeroes_needed - zeroes_done;
5685         // Do some incremental zeroing on rawmem, in parallel with inits.
5686         zeroes_done = align_down(zeroes_done, BytesPerInt);
5687         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5688                                               allocation()->in(AllocateNode::InitValue),
5689                                               allocation()->in(AllocateNode::RawInitValue),
5690                                               zeroes_done, zeroes_needed,
5691                                               phase);
5692         zeroes_done = zeroes_needed;
5693         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
5694           do_zeroing = false;   // leave the hole, next time
5695       }
5696     }
5697 
5698     // Collect the store and move on:
5699     phase->replace_input_of(st, MemNode::Memory, inits);
5700     inits = st;                 // put it on the linearized chain
5701     set_req(i, zmem);           // unhook from previous position
5702 
5703     if (zeroes_done == st_off)
5704       zeroes_done = next_init_off;
5705 
5706     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
5707 
5708     #ifdef ASSERT
5709     // Various order invariants.  Weaker than stores_are_sane because

5729   remove_extra_zeroes();        // clear out all the zmems left over
5730   add_req(inits);
5731 
5732   if (!(UseTLAB && ZeroTLAB)) {
5733     // If anything remains to be zeroed, zero it all now.
5734     zeroes_done = align_down(zeroes_done, BytesPerInt);
5735     // if it is the last unused 4 bytes of an instance, forget about it
5736     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
5737     if (zeroes_done + BytesPerLong >= size_limit) {
5738       AllocateNode* alloc = allocation();
5739       assert(alloc != nullptr, "must be present");
5740       if (alloc != nullptr && alloc->Opcode() == Op_Allocate) {
5741         Node* klass_node = alloc->in(AllocateNode::KlassNode);
5742         ciKlass* k = phase->type(klass_node)->is_instklassptr()->instance_klass();
5743         if (zeroes_done == k->layout_helper())
5744           zeroes_done = size_limit;
5745       }
5746     }
5747     if (zeroes_done < size_limit) {
5748       rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5749                                             allocation()->in(AllocateNode::InitValue),
5750                                             allocation()->in(AllocateNode::RawInitValue),
5751                                             zeroes_done, size_in_bytes, phase);
5752     }
5753   }
5754 
5755   set_complete(phase);
5756   return rawmem;
5757 }
5758 
5759 void InitializeNode::replace_mem_projs_by(Node* mem, Compile* C) {
5760   auto replace_proj = [&](ProjNode* proj) {
5761     C->gvn_replace_by(proj, mem);
5762     return CONTINUE;
5763   };
5764   apply_to_projs(replace_proj, TypeFunc::Memory);
5765 }
5766 
5767 void InitializeNode::replace_mem_projs_by(Node* mem, PhaseIterGVN* igvn) {
5768   DUIterator_Fast imax, i = fast_outs(imax);
5769   auto replace_proj = [&](ProjNode* proj) {
5770     igvn->replace_node(proj, mem);
< prev index next >