7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "classfile/javaClasses.hpp"
28 #include "compiler/compileLog.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/c2/barrierSetC2.hpp"
31 #include "gc/shared/tlab_globals.hpp"
32 #include "memory/allocation.inline.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "oops/objArrayKlass.hpp"
35 #include "opto/addnode.hpp"
36 #include "opto/arraycopynode.hpp"
37 #include "opto/cfgnode.hpp"
38 #include "opto/regalloc.hpp"
39 #include "opto/compile.hpp"
40 #include "opto/connode.hpp"
41 #include "opto/convertnode.hpp"
42 #include "opto/loopnode.hpp"
43 #include "opto/machnode.hpp"
44 #include "opto/matcher.hpp"
45 #include "opto/memnode.hpp"
46 #include "opto/mulnode.hpp"
47 #include "opto/narrowptrnode.hpp"
48 #include "opto/phaseX.hpp"
49 #include "opto/regmask.hpp"
50 #include "opto/rootnode.hpp"
51 #include "opto/vectornode.hpp"
52 #include "utilities/align.hpp"
53 #include "utilities/copy.hpp"
54 #include "utilities/macros.hpp"
55 #include "utilities/powerOfTwo.hpp"
56 #include "utilities/vmError.hpp"
57
58 // Portions of code courtesy of Clifford Click
59
60 // Optimization - Graph Style
61
215 bool is_instance = t_oop->is_known_instance_field();
216 PhaseIterGVN *igvn = phase->is_IterGVN();
217 if (is_instance && igvn != nullptr && result->is_Phi()) {
218 PhiNode *mphi = result->as_Phi();
219 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
220 const TypePtr *t = mphi->adr_type();
221 bool do_split = false;
222 // In the following cases, Load memory input can be further optimized based on
223 // its precise address type
224 if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ) {
225 do_split = true;
226 } else if (t->isa_oopptr() && !t->is_oopptr()->is_known_instance()) {
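      // Rebuild the phi's declared oop type with the exactness, pointer kind and
      // instance id (and, for arrays, the stability, size and offset) of this load's
      // type; split the phi only if that reconstruction equals the precise instance type.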
227 const TypeOopPtr* mem_t =
228 t->is_oopptr()->cast_to_exactness(true)
229 ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
230 ->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
231 if (t_oop->isa_aryptr()) {
232 mem_t = mem_t->is_aryptr()
233 ->cast_to_stable(t_oop->is_aryptr()->is_stable())
234 ->cast_to_size(t_oop->is_aryptr()->size())
235 ->with_offset(t_oop->is_aryptr()->offset())
236 ->is_aryptr();
237 }
238 do_split = mem_t == t_oop;
239 }
240 if (do_split) {
241 // clone the Phi with our address type
242 result = mphi->split_out_instance(t_adr, igvn);
243 } else {
244 assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
245 }
246 }
247 return result;
248 }
249
250 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
251 uint alias_idx = phase->C->get_alias_index(tp);
252 Node *mem = mmem;
253 #ifdef ASSERT
254 {
255 // Check that current type is consistent with the alias index used during graph construction
256 assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
257 bool consistent = adr_check == nullptr || adr_check->empty() ||
258 phase->C->must_alias(adr_check, alias_idx );
259 // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
260 if( !consistent && adr_check != nullptr && !adr_check->empty() &&
261 tp->isa_aryptr() && tp->offset() == Type::OffsetBot &&
262 adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
263 ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
264 adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
265 adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
266 // don't assert if it is dead code.
267 consistent = true;
268 }
269 if( !consistent ) {
270 st->print("alias_idx==%d, adr_check==", alias_idx);
271 if( adr_check == nullptr ) {
272 st->print("null");
273 } else {
274 adr_check->dump();
275 }
276 st->cr();
277 print_alias_types();
278 assert(consistent, "adr_check must match alias idx");
279 }
280 }
281 #endif
994 Node* ld = gvn.transform(load);
995 return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
996 }
997
998 return load;
999 }
1000
1001 //------------------------------hash-------------------------------------------
1002 uint LoadNode::hash() const {
1003 // unroll addition of interesting fields
1004 return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
1005 }
1006
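// Decide whether the memory search for this load may skip over MemBar nodes:
// allowed for non-volatile field accesses when boxing elimination is enabled,
// and for loads from stable arrays whose contents can be constant-folded.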
1007 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
1008 if ((atp != nullptr) && (atp->index() >= Compile::AliasIdxRaw)) {
1009 bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
1010 bool is_stable_ary = FoldStableValues &&
1011 (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
1012 tp->isa_aryptr()->is_stable();
1013
1014 return (eliminate_boxing && non_volatile) || is_stable_ary;
1015 }
1016
1017 return false;
1018 }
1019
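// Return a control-pinned clone of this load if it is an array access (so that its
// control dependency is preserved when the load is rewired), or null otherwise.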
1020 LoadNode* LoadNode::pin_array_access_node() const {
1021 const TypePtr* adr_type = this->adr_type();
1022 if (adr_type != nullptr && adr_type->isa_aryptr()) {
1023 return clone_pinned();
1024 }
1025 return nullptr;
1026 }
1027
1028 // Is the value loaded previously stored by an arraycopy? If so, return
1029 // a load node that reads from the source array so we may be able to
1030 // optimize out the ArrayCopy node later.
1031 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
1032 Node* ld_adr = in(MemNode::Address);
1033 intptr_t ld_off = 0;
1034 AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
1051 assert(ld_alloc != nullptr, "need an alloc");
1052 assert(addp->is_AddP(), "address must be addp");
1053 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1054 assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1055 assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1056 addp->set_req(AddPNode::Base, src);
1057 addp->set_req(AddPNode::Address, src);
1058 } else {
1059 assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1060 ac->as_ArrayCopy()->is_copyof_validated() ||
1061 ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1062 assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1063 addp->set_req(AddPNode::Base, src);
1064 addp->set_req(AddPNode::Address, src);
1065
1066 const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1067 BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
1068 if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1069
1070 uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1071 uint shift = exact_log2(type2aelembytes(ary_elem));
1072
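      // Rebase the load's offset from the destination array onto the source array:
      //   new_offset = old_offset + ((SrcPos - DestPos) << log2(element_size))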
1073 Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1074 #ifdef _LP64
1075 diff = phase->transform(new ConvI2LNode(diff));
1076 #endif
1077 diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1078
1079 Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1080 addp->set_req(AddPNode::Offset, offset);
1081 }
1082 addp = phase->transform(addp);
1083 #ifdef ASSERT
1084 const TypePtr* adr_type = phase->type(addp)->is_ptr();
1085 ld->_adr_type = adr_type;
1086 #endif
1087 ld->set_req(MemNode::Address, addp);
1088 ld->set_req(0, ctl);
1089 ld->set_req(MemNode::Memory, mem);
1090 return ld;
1091 }
1170 // Same base, same offset.
1171 // Possible improvement for arrays: check index value instead of absolute offset.
1172
1173 // At this point we have proven something like this setup:
1174 // B = << base >>
1175 // L = LoadQ(AddP(Check/CastPP(B), #Off))
1176 // S = StoreQ(AddP( B , #Off), V)
1177 // (Actually, we haven't yet proven the Q's are the same.)
1178 // In other words, we are loading from a casted version of
1179 // the same pointer-and-offset that we stored to.
1180 // Casted version may carry a dependency and it is respected.
1181 // Thus, we are able to replace L by V.
1182 }
1183 // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1184 if (store_Opcode() != st->Opcode()) {
1185 return nullptr;
1186 }
1187 // LoadVector/StoreVector needs additional check to ensure the types match.
1188 if (st->is_StoreVector()) {
1189 const TypeVect* in_vt = st->as_StoreVector()->vect_type();
1190 const TypeVect* out_vt = as_LoadVector()->vect_type();
1191 if (in_vt != out_vt) {
1192 return nullptr;
1193 }
1194 }
1195 return st->in(MemNode::ValueIn);
1196 }
1197
1198 // A load from a freshly-created object always returns zero.
1199 // (This can happen after LoadNode::Ideal resets the load's memory input
1200 // to find_captured_store, which returned InitializeNode::zero_memory.)
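  // (The offset check below ensures the load reads the zero-initialized payload,
  //  past the header words that the allocation itself writes.)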
1201 if (st->is_Proj() && st->in(0)->is_Allocate() &&
1202 (st->in(0) == ld_alloc) &&
1203 (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1204 // return a zero value for the load's basic type
1205 // (This is one of the few places where a generic PhaseTransform
1206 // can create new nodes. Think of it as lazily manifesting
1207 // virtually pre-existing constants.)
1208 if (memory_type() != T_VOID) {
1209 if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
1210         // If ReduceBulkZeroing is disabled, we need to check that the allocation does not belong to an
1211 // ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
1212 // by the ArrayCopyNode.
1213 return phase->zerocon(memory_type());
1214 }
1215 } else {
1216 // TODO: materialize all-zero vector constant
1217 assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1218 }
1219 }
1220
1221 // A load from an initialization barrier can match a captured store.
1222 if (st->is_Proj() && st->in(0)->is_Initialize()) {
1223 InitializeNode* init = st->in(0)->as_Initialize();
1224 AllocateNode* alloc = init->allocation();
1225 if ((alloc != nullptr) && (alloc == ld_alloc)) {
1226 // examine a captured store value
1227 st = init->find_captured_store(ld_off, memory_size(), phase);
1255 //----------------------is_instance_field_load_with_local_phi------------------
1256 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1257 if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1258 in(Address)->is_AddP() ) {
1259 const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1260 // Only instances and boxed values.
1261 if( t_oop != nullptr &&
1262 (t_oop->is_ptr_to_boxed_value() ||
1263 t_oop->is_known_instance_field()) &&
1264 t_oop->offset() != Type::OffsetBot &&
1265 t_oop->offset() != Type::OffsetTop) {
1266 return true;
1267 }
1268 }
1269 return false;
1270 }
1271
1272 //------------------------------Identity---------------------------------------
1273 // Loads are identity if previous store is to same address
1274 Node* LoadNode::Identity(PhaseGVN* phase) {
1275 // If the previous store-maker is the right kind of Store, and the store is
1276 // to the same address, then we are equal to the value stored.
1277 Node* mem = in(Memory);
1278 Node* value = can_see_stored_value(mem, phase);
1279 if( value ) {
1280 // byte, short & char stores truncate naturally.
1281 // A load has to load the truncated value which requires
1282 // some sort of masking operation and that requires an
1283 // Ideal call instead of an Identity call.
1284 if (memory_size() < BytesPerInt) {
1285 // If the input to the store does not fit with the load's result type,
1286 // it must be truncated via an Ideal call.
1287 if (!phase->type(value)->higher_equal(phase->type(this)))
1288 return this;
1289 }
1290 // (This works even when value is a Con, but LoadNode::Value
1291 // usually runs first, producing the singleton type of the Con.)
1292 if (!has_pinned_control_dependency() || value->is_Con()) {
1293 return value;
1294 } else {
2039 }
2040 }
2041
2042 // Don't do this for integer types. There is only potential profit if
2043 // the element type t is lower than _type; that is, for int types, if _type is
2044 // more restrictive than t. This only happens here if one is short and the other
2045 // char (both 16 bits), and in those cases we've made an intentional decision
2046 // to use one kind of load over the other. See AndINode::Ideal and 4965907.
2047 // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
2048 //
2049 // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
2050 // where the _gvn.type of the AddP is wider than 8. This occurs when an earlier
2051 // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
2052 // subsumed by p1. If p1 is on the worklist but has not yet been re-transformed,
2053 // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
2054 // In fact, that could have been the original type of p1, and p1 could have
2055 // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
2056 // expression (LShiftL quux 3) independently optimized to the constant 8.
2057 if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
2058 && (_type->isa_vect() == nullptr)
2059 && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
2060 // t might actually be lower than _type, if _type is a unique
2061 // concrete subclass of abstract class t.
2062 if (off_beyond_header || off == Type::OffsetBot) { // is the offset beyond the header?
2063 const Type* jt = t->join_speculative(_type);
2064 // In any case, do not allow the join, per se, to empty out the type.
2065 if (jt->empty() && !t->empty()) {
2066           // This can happen if an interface-typed array narrows to a class type.
2067 jt = _type;
2068 }
2069 #ifdef ASSERT
2070 if (phase->C->eliminate_boxing() && adr->is_AddP()) {
2071 // The pointers in the autobox arrays are always non-null
2072 Node* base = adr->in(AddPNode::Base);
2073 if ((base != nullptr) && base->is_DecodeN()) {
2074 // Get LoadN node which loads IntegerCache.cache field
2075 base = base->in(1);
2076 }
2077 if ((base != nullptr) && base->is_Con()) {
2078 const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
2079 if ((base_type != nullptr) && base_type->is_autobox_cache()) {
2080 // It could be narrow oop
2081 assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
2082 }
2083 }
2084 }
2085 #endif
2086 return jt;
2087 }
2088 }
2089 } else if (tp->base() == Type::InstPtr) {
2090 assert( off != Type::OffsetBot ||
2091 // arrays can be cast to Objects
2092 !tp->isa_instptr() ||
2093 tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
2094 // unsafe field access may not have a constant offset
2095 C->has_unsafe_access(),
2096 "Field accesses must be precise" );
2097 // For oop loads, we expect the _type to be precise.
2098
2099 // Optimize loads from constant fields.
2100 const TypeInstPtr* tinst = tp->is_instptr();
2101 ciObject* const_oop = tinst->const_oop();
2102 if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
2103 const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());
2104 if (con_type != nullptr) {
2105 return con_type;
2106 }
2107 }
2108 } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
2109 assert(off != Type::OffsetBot ||
2110 !tp->isa_instklassptr() ||
2111 // arrays can be cast to Objects
2112 tp->isa_instklassptr()->instance_klass()->is_java_lang_Object() ||
2113 // also allow array-loading from the primary supertype
2114 // array during subtype checks
2115 Opcode() == Op_LoadKlass,
2116 "Field accesses must be precise");
2117 // For klass/static loads, we expect the _type to be precise
2118 } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
2119 /* With mirrors being an indirect in the Klass*
2120 * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
2121 * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
2122 *
2123 * So check the type and klass of the node before the LoadP.
2218 if (ReduceFieldZeroing || is_instance || is_boxed_value) {
2219 Node* value = can_see_stored_value(mem,phase);
2220 if (value != nullptr && value->is_Con()) {
2221 assert(value->bottom_type()->higher_equal(_type),"sanity");
2222 return value->bottom_type();
2223 }
2224 }
2225
2226 bool is_vect = (_type->isa_vect() != nullptr);
2227 if (is_instance && !is_vect) {
2228 // If we have an instance type and our memory input is the
2229     // program's initial memory state, there is no matching store,
2230 // so just return a zero of the appropriate type -
2231 // except if it is vectorized - then we have no zero constant.
2232 Node *mem = in(MemNode::Memory);
2233 if (mem->is_Parm() && mem->in(0)->is_Start()) {
2234 assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2235 return Type::get_zero_type(_type->basic_type());
2236 }
2237 }
2238
2239 Node* alloc = is_new_object_mark_load();
2240 if (alloc != nullptr) {
2241 return TypeX::make(markWord::prototype().value());
2242 }
2243
2244 return _type;
2245 }
2246
2247 //------------------------------match_edge-------------------------------------
2248 // Do we Match on this edge index or not? Match only the address.
2249 uint LoadNode::match_edge(uint idx) const {
2250 return idx == MemNode::Address;
2251 }
2252
2253 //--------------------------LoadBNode::Ideal--------------------------------------
2254 //
2255 // If the previous store is to the same address as this load,
2256 // and the value stored was larger than a byte, replace this load
2257 // with the value stored truncated to a byte. If no truncation is
2258 // needed, the replacement is done in LoadNode::Identity().
2259 //
2260 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2261 Node* mem = in(MemNode::Memory);
2372 return LoadNode::Ideal(phase, can_reshape);
2373 }
2374
2375 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2376 Node* mem = in(MemNode::Memory);
2377 Node* value = can_see_stored_value(mem,phase);
2378 if (value != nullptr && value->is_Con() &&
2379 !value->bottom_type()->higher_equal(_type)) {
2380 // If the input to the store does not fit with the load's result type,
2381 // it must be truncated. We can't delay until Ideal call since
2382 // a singleton Value is needed for split_thru_phi optimization.
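    // e.g., a stored constant 65535 (0xFFFF) is observed by this signed LoadS as -1.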
2383 int con = value->get_int();
2384 return TypeInt::make((con << 16) >> 16);
2385 }
2386 return LoadNode::Value(phase);
2387 }
2388
2389 //=============================================================================
2390 //----------------------------LoadKlassNode::make------------------------------
2391 // Polymorphic factory method:
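// If the address points to a narrow klass (64-bit VM with compressed class pointers),
// the load is emitted as a LoadNKlass followed by a DecodeNKlass; otherwise a plain
// LoadKlass node is created.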
2392 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
2393 // sanity check the alias category against the created node type
2394 const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
2395 assert(adr_type != nullptr, "expecting TypeKlassPtr");
2396 #ifdef _LP64
2397 if (adr_type->is_ptr_to_narrowklass()) {
2398 assert(UseCompressedClassPointers, "no compressed klasses");
2399 Node* load_klass = gvn.transform(new LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2400 return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2401 }
2402 #endif
2403 assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2404 return new LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
2405 }
2406
2407 //------------------------------Value------------------------------------------
2408 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
2409 return klass_value_common(phase);
2410 }
2411
2412 // In most cases, LoadKlassNode does not have the control input set. If the control
2419 // Either input is TOP ==> the result is TOP
2420 const Type *t1 = phase->type( in(MemNode::Memory) );
2421 if (t1 == Type::TOP) return Type::TOP;
2422 Node *adr = in(MemNode::Address);
2423 const Type *t2 = phase->type( adr );
2424 if (t2 == Type::TOP) return Type::TOP;
2425 const TypePtr *tp = t2->is_ptr();
2426 if (TypePtr::above_centerline(tp->ptr()) ||
2427 tp->ptr() == TypePtr::Null) return Type::TOP;
2428
2429 // Return a more precise klass, if possible
2430 const TypeInstPtr *tinst = tp->isa_instptr();
2431 if (tinst != nullptr) {
2432 ciInstanceKlass* ik = tinst->instance_klass();
2433 int offset = tinst->offset();
2434 if (ik == phase->C->env()->Class_klass()
2435 && (offset == java_lang_Class::klass_offset() ||
2436 offset == java_lang_Class::array_klass_offset())) {
2437 // We are loading a special hidden field from a Class mirror object,
2438 // the field which points to the VM's Klass metaobject.
2439 ciType* t = tinst->java_mirror_type();
2440 // java_mirror_type returns non-null for compile-time Class constants.
2441 if (t != nullptr) {
2442 // constant oop => constant klass
2443 if (offset == java_lang_Class::array_klass_offset()) {
2444 if (t->is_void()) {
2445             // We cannot create a void array. Since void is a primitive type, return null
2446 // klass. Users of this result need to do a null check on the returned klass.
2447 return TypePtr::NULL_PTR;
2448 }
2449 return TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
2450 }
2451 if (!t->is_klass()) {
2452 // a primitive Class (e.g., int.class) has null for a klass field
2453 return TypePtr::NULL_PTR;
2454 }
2455 // (Folds up the 1st indirection in aClassConstant.getModifiers().)
2456 return TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
2457 }
2458 // non-constant mirror, so we can't tell what's going on
2459 }
2460 if (!tinst->is_loaded())
2461 return _type; // Bail out if not loaded
2462 if (offset == oopDesc::klass_offset_in_bytes()) {
2463 return tinst->as_klass_type(true);
2464 }
2465 }
2466
2467 // Check for loading klass from an array
2468 const TypeAryPtr *tary = tp->isa_aryptr();
2469 if (tary != nullptr &&
2470 tary->offset() == oopDesc::klass_offset_in_bytes()) {
2471 return tary->as_klass_type(true);
2472 }
2473
2474 // Check for loading klass from an array klass
2475 const TypeKlassPtr *tkls = tp->isa_klassptr();
2476 if (tkls != nullptr && !StressReflectiveCode) {
2477 if (!tkls->is_loaded())
2478 return _type; // Bail out if not loaded
2479 if (tkls->isa_aryklassptr() && tkls->is_aryklassptr()->elem()->isa_klassptr() &&
2480 tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2481 // // Always returning precise element type is incorrect,
2482 // // e.g., element type could be object and array may contain strings
2483 // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2484
2485 // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2486 // according to the element type's subclassing.
2487 return tkls->is_aryklassptr()->elem()->isa_klassptr()->cast_to_exactness(tkls->klass_is_exact());
2488 }
3445 }
3446 ss.print_cr("[TraceMergeStores]: with");
3447 merged_input_value->dump("\n", false, &ss);
3448 merged_store->dump("\n", false, &ss);
3449 tty->print("%s", ss.as_string());
3450 }
3451 #endif
3452
3453 //------------------------------Ideal------------------------------------------
3454 // Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x).
3455 // When a store immediately follows a relevant allocation/initialization,
3456 // try to capture it into the initialization, or hoist it above.
3457 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3458 Node* p = MemNode::Ideal_common(phase, can_reshape);
3459 if (p) return (p == NodeSentinel) ? nullptr : p;
3460
3461 Node* mem = in(MemNode::Memory);
3462 Node* address = in(MemNode::Address);
3463 Node* value = in(MemNode::ValueIn);
3464 // Back-to-back stores to same address? Fold em up. Generally
3465 // unsafe if I have intervening uses.
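  // In the simplest case the shape being folded is (y overwrites x at address p):
  //   this = StoreQ(st, p, y)   with st = StoreQ(m, p, x) and this as st's only use
  //   ==>  this = StoreQ(m, p, y)   (st is bypassed and becomes dead)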
3466 {
3467 Node* st = mem;
3468 // If Store 'st' has more than one use, we cannot fold 'st' away.
3469 // For example, 'st' might be the final state at a conditional
3470 // return. Or, 'st' might be used by some node which is live at
3471 // the same time 'st' is live, which might be unschedulable. So,
3472 // require exactly ONE user until such time as we clone 'mem' for
3473 // each of 'mem's uses (thus making the exactly-1-user-rule hold
3474 // true).
3475 while (st->is_Store() && st->outcnt() == 1) {
3476 // Looking at a dead closed cycle of memory?
3477 assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
3478 assert(Opcode() == st->Opcode() ||
3479 st->Opcode() == Op_StoreVector ||
3480 Opcode() == Op_StoreVector ||
3481 st->Opcode() == Op_StoreVectorScatter ||
3482 Opcode() == Op_StoreVectorScatter ||
3483 phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
3484 (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
3485 (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
3486 (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
3487 "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
3488
3489 if (st->in(MemNode::Address)->eqv_uncast(address) &&
3490 st->as_Store()->memory_size() <= this->memory_size()) {
3491 Node* use = st->raw_out(0);
3492 if (phase->is_IterGVN()) {
3493 phase->is_IterGVN()->rehash_node_delayed(use);
3494 }
3495 // It's OK to do this in the parser, since DU info is always accurate,
3496 // and the parser always refers to nodes via SafePointNode maps.
3497 use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
3498 return this;
3499 }
3500 st = st->in(MemNode::Memory);
3501 }
3502 }
3503
3504
3505 // Capture an unaliased, unconditional, simple store into an initializer.
3592 const StoreVectorNode* store_vector = as_StoreVector();
3593 const StoreVectorNode* mem_vector = mem->as_StoreVector();
3594 const Node* store_indices = store_vector->indices();
3595 const Node* mem_indices = mem_vector->indices();
3596 const Node* store_mask = store_vector->mask();
3597 const Node* mem_mask = mem_vector->mask();
3598 // Ensure types, indices, and masks match
3599 if (store_vector->vect_type() == mem_vector->vect_type() &&
3600 ((store_indices == nullptr) == (mem_indices == nullptr) &&
3601 (store_indices == nullptr || store_indices->eqv_uncast(mem_indices))) &&
3602 ((store_mask == nullptr) == (mem_mask == nullptr) &&
3603 (store_mask == nullptr || store_mask->eqv_uncast(mem_mask)))) {
3604 result = mem;
3605 }
3606 }
3607 }
3608
3609 // Store of zero anywhere into a freshly-allocated object?
3610 // Then the store is useless.
3611 // (It must already have been captured by the InitializeNode.)
3612 if (result == this &&
3613 ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
3614 // a newly allocated object is already all-zeroes everywhere
3615 if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
3616 result = mem;
3617 }
3618
3619 if (result == this) {
3620 // the store may also apply to zero-bits in an earlier object
3621 Node* prev_mem = find_previous_store(phase);
3622 // Steps (a), (b): Walk past independent stores to find an exact match.
3623 if (prev_mem != nullptr) {
3624 Node* prev_val = can_see_stored_value(prev_mem, phase);
3625 if (prev_val != nullptr && prev_val == val) {
3626 // prev_val and val might differ by a cast; it would be good
3627 // to keep the more informative of the two.
3628 result = mem;
3629 }
3630 }
3631 }
3632 }
3633
3634 PhaseIterGVN* igvn = phase->is_IterGVN();
3635 if (result != this && igvn != nullptr) {
3636 MemBarNode* trailing = trailing_membar();
3637 if (trailing != nullptr) {
3638 #ifdef ASSERT
3639 const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();
3915 // Clearing a short array is faster with stores
3916 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3917 // Already know this is a large node, do not try to ideal it
3918 if (_is_large) return nullptr;
3919
3920 const int unit = BytesPerLong;
3921 const TypeX* t = phase->type(in(2))->isa_intptr_t();
3922 if (!t) return nullptr;
3923 if (!t->is_con()) return nullptr;
3924 intptr_t raw_count = t->get_con();
3925 intptr_t size = raw_count;
3926 if (!Matcher::init_array_count_is_in_bytes) size *= unit;
3927 // Clearing nothing uses the Identity call.
3928 // Negative clears are possible on dead ClearArrays
3929 // (see jck test stmt114.stmt11402.val).
3930 if (size <= 0 || size % unit != 0) return nullptr;
3931 intptr_t count = size / unit;
3932 // Length too long; communicate this to matchers and assemblers.
3933   // Assemblers are responsible for producing fast hardware clears for it.
3934 if (size > InitArrayShortSize) {
3935 return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
3936 } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
3937 return nullptr;
3938 }
3939 if (!IdealizeClearArrayNode) return nullptr;
3940 Node *mem = in(1);
3941 if( phase->type(mem)==Type::TOP ) return nullptr;
3942 Node *adr = in(3);
3943 const Type* at = phase->type(adr);
3944 if( at==Type::TOP ) return nullptr;
3945 const TypePtr* atp = at->isa_ptr();
3946 // adjust atp to be the correct array element address type
3947 if (atp == nullptr) atp = TypePtr::BOTTOM;
3948 else atp = atp->add_offset(Type::OffsetBot);
3949 // Get base for derived pointer purposes
3950 if( adr->Opcode() != Op_AddP ) Unimplemented();
3951 Node *base = adr->in(1);
3952
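  // Expand the short constant-length clear into a chain of 8-byte zero stores, e.g.
  // a 16-byte clear becomes StoreL(mem, adr, 0) followed by StoreL(.., adr+8, 0).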
3953 Node *zero = phase->makecon(TypeLong::ZERO);
3954 Node *off = phase->MakeConX(BytesPerLong);
3955 mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
3956 count--;
3957 while( count-- ) {
3958 mem = phase->transform(mem);
3959 adr = phase->transform(new AddPNode(base,adr,off));
3960 mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
3961 }
3962 return mem;
3963 }
3964
3965 //----------------------------step_through----------------------------------
3966 // Step *np past this ClearArray: if it initializes a different instance, advance *np
3967 // to the allocation's input memory edge; return false if it is the one we are looking for.
3968 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseValues* phase) {
3969 Node* n = *np;
3970 assert(n->is_ClearArray(), "sanity");
3971 intptr_t offset;
3972 AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
3973 // This method is called only before Allocate nodes are expanded
3974 // during macro nodes expansion. Before that ClearArray nodes are
3975 // only generated in PhaseMacroExpand::generate_arraycopy() (before
3976 // Allocate nodes are expanded) which follows allocations.
3977 assert(alloc != nullptr, "should have allocation");
3978 if (alloc->_idx == instance_id) {
3979 // Can not bypass initialization of the instance we are looking for.
3980 return false;
3981 }
3982 // Otherwise skip it.
3983 InitializeNode* init = alloc->initialization();
3984 if (init != nullptr)
3985 *np = init->in(TypeFunc::Memory);
3986 else
3987 *np = alloc->in(TypeFunc::Memory);
3988 return true;
3989 }
3990
3991 //----------------------------clear_memory-------------------------------------
3992 // Generate code to initialize object storage to zero.
3993 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
3994 intptr_t start_offset,
3995 Node* end_offset,
3996 PhaseGVN* phase) {
3997 intptr_t offset = start_offset;
3998
3999 int unit = BytesPerLong;
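  // If the start offset is not 8-byte aligned, emit one 4-byte zero store first so
  // that the remaining range begins on a long boundary.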
4000 if ((offset % unit) != 0) {
4001 Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
4002 adr = phase->transform(adr);
4003 const TypePtr* atp = TypeRawPtr::BOTTOM;
4004 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4005 mem = phase->transform(mem);
4006 offset += BytesPerInt;
4007 }
4008 assert((offset % unit) == 0, "");
4009
4010 // Initialize the remaining stuff, if any, with a ClearArray.
4011 return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
4012 }
4013
4014 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4015 Node* start_offset,
4016 Node* end_offset,
4017 PhaseGVN* phase) {
4018 if (start_offset == end_offset) {
4019 // nothing to do
4020 return mem;
4021 }
4022
4023 int unit = BytesPerLong;
4024 Node* zbase = start_offset;
4025 Node* zend = end_offset;
4026
4027 // Scale to the unit required by the CPU:
4028 if (!Matcher::init_array_count_is_in_bytes) {
4029 Node* shift = phase->intcon(exact_log2(unit));
4030 zbase = phase->transform(new URShiftXNode(zbase, shift) );
4031 zend = phase->transform(new URShiftXNode(zend, shift) );
4032 }
4033
4034 // Bulk clear double-words
4035 Node* zsize = phase->transform(new SubXNode(zend, zbase) );
4036 Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
4037 mem = new ClearArrayNode(ctl, mem, zsize, adr, false);
4038 return phase->transform(mem);
4039 }
4040
4041 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4042 intptr_t start_offset,
4043 intptr_t end_offset,
4044 PhaseGVN* phase) {
4045 if (start_offset == end_offset) {
4046 // nothing to do
4047 return mem;
4048 }
4049
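  // Split the byte range on a long boundary, e.g. for [16, 28): clear [16, 24) in
  // 8-byte units, then emit a single 4-byte zero store at offset 24 for the int tail.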
4050 assert((end_offset % BytesPerInt) == 0, "odd end offset");
4051 intptr_t done_offset = end_offset;
4052 if ((done_offset % BytesPerLong) != 0) {
4053 done_offset -= BytesPerInt;
4054 }
4055 if (done_offset > start_offset) {
4056 mem = clear_memory(ctl, mem, dest,
4057 start_offset, phase->MakeConX(done_offset), phase);
4058 }
4059 if (done_offset < end_offset) { // emit the final 32-bit store
4060 Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
4061 adr = phase->transform(adr);
4062 const TypePtr* atp = TypeRawPtr::BOTTOM;
4063 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4064 mem = phase->transform(mem);
4065 done_offset += BytesPerInt;
4066 }
4067 assert(done_offset == end_offset, "");
4068 return mem;
4069 }
4070
4071 //=============================================================================
4072 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
4073 : MultiNode(TypeFunc::Parms + (precedent == nullptr? 0: 1)),
4074 _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
4075 #ifdef ASSERT
4076 , _pair_idx(0)
4077 #endif
4078 {
4079 init_class_id(Class_MemBar);
4080 Node* top = C->top();
4081 init_req(TypeFunc::I_O,top);
4082 init_req(TypeFunc::FramePtr,top);
4083 init_req(TypeFunc::ReturnAdr,top);
4189 PhaseIterGVN* igvn = phase->is_IterGVN();
4190 remove(igvn);
4191 // Must return either the original node (now dead) or a new node
4192 // (Do not return a top here, since that would break the uniqueness of top.)
4193 return new ConINode(TypeInt::ZERO);
4194 }
4195 }
4196 return progress ? this : nullptr;
4197 }
4198
4199 //------------------------------Value------------------------------------------
4200 const Type* MemBarNode::Value(PhaseGVN* phase) const {
4201 if( !in(0) ) return Type::TOP;
4202 if( phase->type(in(0)) == Type::TOP )
4203 return Type::TOP;
4204 return TypeTuple::MEMBAR;
4205 }
4206
4207 //------------------------------match------------------------------------------
4208 // Construct projections for memory.
4209 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
4210 switch (proj->_con) {
4211 case TypeFunc::Control:
4212 case TypeFunc::Memory:
4213 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
4214 }
4215 ShouldNotReachHere();
4216 return nullptr;
4217 }
4218
4219 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4220 trailing->_kind = TrailingStore;
4221 leading->_kind = LeadingStore;
4222 #ifdef ASSERT
4223 trailing->_pair_idx = leading->_idx;
4224 leading->_pair_idx = leading->_idx;
4225 #endif
4226 }
4227
4228 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4229 trailing->_kind = TrailingLoadStore;
4476 return (req() > RawStores);
4477 }
4478
4479 void InitializeNode::set_complete(PhaseGVN* phase) {
4480 assert(!is_complete(), "caller responsibility");
4481 _is_complete = Complete;
4482
4483 // After this node is complete, it contains a bunch of
4484 // raw-memory initializations. There is no need for
4485 // it to have anything to do with non-raw memory effects.
4486 // Therefore, tell all non-raw users to re-optimize themselves,
4487 // after skipping the memory effects of this initialization.
4488 PhaseIterGVN* igvn = phase->is_IterGVN();
4489 if (igvn) igvn->add_users_to_worklist(this);
4490 }
4491
4492 // convenience function
4493 // return false if the init contains any stores already
4494 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
4495 InitializeNode* init = initialization();
4496 if (init == nullptr || init->is_complete()) return false;
4497 init->remove_extra_zeroes();
4498 // for now, if this allocation has already collected any inits, bail:
4499 if (init->is_non_zero()) return false;
4500 init->set_complete(phase);
4501 return true;
4502 }
4503
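// Compact the captured-store inputs in place: drop top and zero_memory entries,
// shift the remaining stores down, and trim the now-unused tail slots.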
4504 void InitializeNode::remove_extra_zeroes() {
4505 if (req() == RawStores) return;
4506 Node* zmem = zero_memory();
4507 uint fill = RawStores;
4508 for (uint i = fill; i < req(); i++) {
4509 Node* n = in(i);
4510 if (n->is_top() || n == zmem) continue; // skip
4511 if (fill < i) set_req(fill, n); // compact
4512 ++fill;
4513 }
4514 // delete any empty spaces created:
4515 while (fill < req()) {
4516 del_req(fill);
4660 // store node that we'd like to capture. We need to check
4661 // the uses of the MergeMemNode.
4662 mems.push(n);
4663 }
4664 } else if (n->is_Mem()) {
4665 Node* other_adr = n->in(MemNode::Address);
4666 if (other_adr == adr) {
4667 failed = true;
4668 break;
4669 } else {
4670 const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
4671 if (other_t_adr != nullptr) {
4672 int other_alias_idx = phase->C->get_alias_index(other_t_adr);
4673 if (other_alias_idx == alias_idx) {
4674 // A load from the same memory slice as the store right
4675 // after the InitializeNode. We check the control of the
4676 // object/array that is loaded from. If it's the same as
4677 // the store control then we cannot capture the store.
4678 assert(!n->is_Store(), "2 stores to same slice on same control?");
4679 Node* base = other_adr;
4680 assert(base->is_AddP(), "should be addp but is %s", base->Name());
4681 base = base->in(AddPNode::Base);
4682 if (base != nullptr) {
4683 base = base->uncast();
4684 if (base->is_Proj() && base->in(0) == alloc) {
4685 failed = true;
4686 break;
4687 }
4688 }
4689 }
4690 }
4691 }
4692 } else {
4693 failed = true;
4694 break;
4695 }
4696 }
4697 }
4698 }
4699 if (failed) {
5246 // z's_done 12 16 16 16 12 16 12
5247 // z's_needed 12 16 16 16 16 16 16
5248 // zsize 0 0 0 0 4 0 4
5249 if (next_full_store < 0) {
5250 // Conservative tack: Zero to end of current word.
5251 zeroes_needed = align_up(zeroes_needed, BytesPerInt);
5252 } else {
5253 // Zero to beginning of next fully initialized word.
5254 // Or, don't zero at all, if we are already in that word.
5255 assert(next_full_store >= zeroes_needed, "must go forward");
5256 assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
5257 zeroes_needed = next_full_store;
5258 }
5259 }
5260
5261 if (zeroes_needed > zeroes_done) {
5262 intptr_t zsize = zeroes_needed - zeroes_done;
5263 // Do some incremental zeroing on rawmem, in parallel with inits.
5264 zeroes_done = align_down(zeroes_done, BytesPerInt);
5265 rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5266 zeroes_done, zeroes_needed,
5267 phase);
5268 zeroes_done = zeroes_needed;
5269 if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
5270 do_zeroing = false; // leave the hole, next time
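        // After several large gaps, give up on eager incremental zeroing; whatever
        // remains is cleared by the final pass at the end of this method.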
5271 }
5272 }
5273
5274 // Collect the store and move on:
5275 phase->replace_input_of(st, MemNode::Memory, inits);
5276 inits = st; // put it on the linearized chain
5277 set_req(i, zmem); // unhook from previous position
5278
5279 if (zeroes_done == st_off)
5280 zeroes_done = next_init_off;
5281
5282 assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
5283
5284 #ifdef ASSERT
5285 // Various order invariants. Weaker than stores_are_sane because
5305 remove_extra_zeroes(); // clear out all the zmems left over
5306 add_req(inits);
5307
5308 if (!(UseTLAB && ZeroTLAB)) {
5309 // If anything remains to be zeroed, zero it all now.
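    // (When UseTLAB && ZeroTLAB, the memory handed out by the TLAB is already
    //  zeroed, so no explicit clearing is needed in that case.)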
5310 zeroes_done = align_down(zeroes_done, BytesPerInt);
5311 // if it is the last unused 4 bytes of an instance, forget about it
5312 intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
5313 if (zeroes_done + BytesPerLong >= size_limit) {
5314 AllocateNode* alloc = allocation();
5315 assert(alloc != nullptr, "must be present");
5316 if (alloc != nullptr && alloc->Opcode() == Op_Allocate) {
5317 Node* klass_node = alloc->in(AllocateNode::KlassNode);
5318 ciKlass* k = phase->type(klass_node)->is_instklassptr()->instance_klass();
5319 if (zeroes_done == k->layout_helper())
5320 zeroes_done = size_limit;
5321 }
5322 }
5323 if (zeroes_done < size_limit) {
5324 rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5325 zeroes_done, size_in_bytes, phase);
5326 }
5327 }
5328
5329 set_complete(phase);
5330 return rawmem;
5331 }
5332
5333
5334 #ifdef ASSERT
5335 bool InitializeNode::stores_are_sane(PhaseValues* phase) {
5336 if (is_complete())
5337 return true; // stores could be anything at this point
5338 assert(allocation() != nullptr, "must be present");
5339 intptr_t last_off = allocation()->minimum_header_size();
5340 for (uint i = InitializeNode::RawStores; i < req(); i++) {
5341 Node* st = in(i);
5342 intptr_t st_off = get_store_offset(st, phase);
5343 if (st_off < 0) continue; // ignore dead garbage
5344 if (last_off > st_off) {
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "ci/ciFlatArrayKlass.hpp"
28 #include "classfile/javaClasses.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "compiler/compileLog.hpp"
31 #include "gc/shared/barrierSet.hpp"
32 #include "gc/shared/c2/barrierSetC2.hpp"
33 #include "gc/shared/tlab_globals.hpp"
34 #include "memory/allocation.inline.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "oops/objArrayKlass.hpp"
37 #include "opto/addnode.hpp"
38 #include "opto/arraycopynode.hpp"
39 #include "opto/cfgnode.hpp"
40 #include "opto/regalloc.hpp"
41 #include "opto/compile.hpp"
42 #include "opto/connode.hpp"
43 #include "opto/convertnode.hpp"
44 #include "opto/inlinetypenode.hpp"
45 #include "opto/loopnode.hpp"
46 #include "opto/machnode.hpp"
47 #include "opto/matcher.hpp"
48 #include "opto/memnode.hpp"
49 #include "opto/mulnode.hpp"
50 #include "opto/narrowptrnode.hpp"
51 #include "opto/phaseX.hpp"
52 #include "opto/regmask.hpp"
53 #include "opto/rootnode.hpp"
54 #include "opto/vectornode.hpp"
55 #include "utilities/align.hpp"
56 #include "utilities/copy.hpp"
57 #include "utilities/macros.hpp"
58 #include "utilities/powerOfTwo.hpp"
59 #include "utilities/vmError.hpp"
60
61 // Portions of code courtesy of Clifford Click
62
63 // Optimization - Graph Style
64
218 bool is_instance = t_oop->is_known_instance_field();
219 PhaseIterGVN *igvn = phase->is_IterGVN();
220 if (is_instance && igvn != nullptr && result->is_Phi()) {
221 PhiNode *mphi = result->as_Phi();
222 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
223 const TypePtr *t = mphi->adr_type();
224 bool do_split = false;
225 // In the following cases, Load memory input can be further optimized based on
226 // its precise address type
227 if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ) {
228 do_split = true;
229 } else if (t->isa_oopptr() && !t->is_oopptr()->is_known_instance()) {
230 const TypeOopPtr* mem_t =
231 t->is_oopptr()->cast_to_exactness(true)
232 ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
233 ->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
234 if (t_oop->isa_aryptr()) {
235 mem_t = mem_t->is_aryptr()
236 ->cast_to_stable(t_oop->is_aryptr()->is_stable())
237 ->cast_to_size(t_oop->is_aryptr()->size())
238 ->cast_to_not_flat(t_oop->is_aryptr()->is_not_flat())
239 ->cast_to_not_null_free(t_oop->is_aryptr()->is_not_null_free())
240 ->with_offset(t_oop->is_aryptr()->offset())
241 ->is_aryptr();
242 }
243 do_split = mem_t == t_oop;
244 }
245 if (do_split) {
246 // clone the Phi with our address type
247 result = mphi->split_out_instance(t_adr, igvn);
248 } else {
249 assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
250 }
251 }
252 return result;
253 }
254
255 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
256 uint alias_idx = phase->C->get_alias_index(tp);
257 Node *mem = mmem;
258 #ifdef ASSERT
259 {
260 // Check that current type is consistent with the alias index used during graph construction
261 assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
262 bool consistent = adr_check == nullptr || adr_check->empty() ||
263 phase->C->must_alias(adr_check, alias_idx );
264 // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
265 if( !consistent && adr_check != nullptr && !adr_check->empty() &&
266 tp->isa_aryptr() && tp->offset() == Type::OffsetBot &&
267 adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
268 ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
269 adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
270 adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
271 // don't assert if it is dead code.
272 consistent = true;
273 }
274 if( !consistent ) {
275 st->print("alias_idx==%d, adr_check==", alias_idx);
276 if( adr_check == nullptr ) {
277 st->print("null");
278 } else {
279 adr_check->dump();
280 }
281 st->cr();
282 print_alias_types();
283 assert(consistent, "adr_check must match alias idx");
284 }
285 }
286 #endif
999 Node* ld = gvn.transform(load);
1000 return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
1001 }
1002
1003 return load;
1004 }
1005
1006 //------------------------------hash-------------------------------------------
1007 uint LoadNode::hash() const {
1008 // unroll addition of interesting fields
1009 return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
1010 }
1011
1012 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
1013 if ((atp != nullptr) && (atp->index() >= Compile::AliasIdxRaw)) {
1014 bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
1015 bool is_stable_ary = FoldStableValues &&
1016 (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
1017 tp->isa_aryptr()->is_stable();
1018
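    // Loads from inline-type pointers may also skip membars, since inline-type
    // fields are immutable.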
1019 return (eliminate_boxing && non_volatile) || is_stable_ary || tp->is_inlinetypeptr();
1020 }
1021
1022 return false;
1023 }
1024
1025 LoadNode* LoadNode::pin_array_access_node() const {
1026 const TypePtr* adr_type = this->adr_type();
1027 if (adr_type != nullptr && adr_type->isa_aryptr()) {
1028 return clone_pinned();
1029 }
1030 return nullptr;
1031 }
1032
1033 // Is the value loaded previously stored by an arraycopy? If so, return
1034 // a load node that reads from the source array so we may be able to
1035 // optimize out the ArrayCopy node later.
1036 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
1037 Node* ld_adr = in(MemNode::Address);
1038 intptr_t ld_off = 0;
1039 AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
1056 assert(ld_alloc != nullptr, "need an alloc");
1057 assert(addp->is_AddP(), "address must be addp");
1058 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1059 assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1060 assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1061 addp->set_req(AddPNode::Base, src);
1062 addp->set_req(AddPNode::Address, src);
1063 } else {
1064 assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1065 ac->as_ArrayCopy()->is_copyof_validated() ||
1066 ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1067 assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1068 addp->set_req(AddPNode::Base, src);
1069 addp->set_req(AddPNode::Address, src);
1070
1071 const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1072 BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
1073 if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1074
1075 uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
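      // For flat inline-type arrays the element size (and thus the shift) comes from
      // the array type itself; other arrays derive it from the basic element type.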
1076 uint shift = ary_t->is_flat() ? ary_t->flat_log_elem_size() : exact_log2(type2aelembytes(ary_elem));
1077
1078 Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1079 #ifdef _LP64
1080 diff = phase->transform(new ConvI2LNode(diff));
1081 #endif
1082 diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1083
1084 Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1085 addp->set_req(AddPNode::Offset, offset);
1086 }
1087 addp = phase->transform(addp);
1088 #ifdef ASSERT
1089 const TypePtr* adr_type = phase->type(addp)->is_ptr();
1090 ld->_adr_type = adr_type;
1091 #endif
1092 ld->set_req(MemNode::Address, addp);
1093 ld->set_req(0, ctl);
1094 ld->set_req(MemNode::Memory, mem);
1095 return ld;
1096 }
1175 // Same base, same offset.
1176 // Possible improvement for arrays: check index value instead of absolute offset.
1177
1178 // At this point we have proven something like this setup:
1179 // B = << base >>
1180 // L = LoadQ(AddP(Check/CastPP(B), #Off))
1181 // S = StoreQ(AddP( B , #Off), V)
1182 // (Actually, we haven't yet proven the Q's are the same.)
1183 // In other words, we are loading from a casted version of
1184 // the same pointer-and-offset that we stored to.
1185 // Casted version may carry a dependency and it is respected.
1186 // Thus, we are able to replace L by V.
1187 }
1188 // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1189 if (store_Opcode() != st->Opcode()) {
1190 return nullptr;
1191 }
1192 // LoadVector/StoreVector needs additional check to ensure the types match.
1193 if (st->is_StoreVector()) {
1194 const TypeVect* in_vt = st->as_StoreVector()->vect_type();
1195 const TypeVect* out_vt = is_Load() ? as_LoadVector()->vect_type() : as_StoreVector()->vect_type();
1196 if (in_vt != out_vt) {
1197 return nullptr;
1198 }
1199 }
1200 return st->in(MemNode::ValueIn);
1201 }
1202
1203 // A load from a freshly-created object always returns zero.
1204 // (This can happen after LoadNode::Ideal resets the load's memory input
1205 // to find_captured_store, which returned InitializeNode::zero_memory.)
1206 if (st->is_Proj() && st->in(0)->is_Allocate() &&
1207 (st->in(0) == ld_alloc) &&
1208 (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1209 // return a zero value for the load's basic type
1210 // (This is one of the few places where a generic PhaseTransform
1211 // can create new nodes. Think of it as lazily manifesting
1212 // virtually pre-existing constants.)
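    // An allocation may carry an explicit default-value oop (used, e.g., for
    // null-free inline-type arrays); a load from the fresh object then folds to
    // that default rather than to zero.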
1213 Node* default_value = ld_alloc->in(AllocateNode::DefaultValue);
1214 if (default_value != nullptr) {
1215 return default_value;
1216 }
1217 assert(ld_alloc->in(AllocateNode::RawDefaultValue) == nullptr, "default value may not be null");
1218 if (memory_type() != T_VOID) {
1219 if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
1220         // If ReduceBulkZeroing is disabled, we need to check that the allocation does not belong to an
1221 // ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
1222 // by the ArrayCopyNode.
1223 return phase->zerocon(memory_type());
1224 }
1225 } else {
1226 // TODO: materialize all-zero vector constant
1227 assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1228 }
1229 }
1230
1231 // A load from an initialization barrier can match a captured store.
1232 if (st->is_Proj() && st->in(0)->is_Initialize()) {
1233 InitializeNode* init = st->in(0)->as_Initialize();
1234 AllocateNode* alloc = init->allocation();
1235 if ((alloc != nullptr) && (alloc == ld_alloc)) {
1236 // examine a captured store value
1237 st = init->find_captured_store(ld_off, memory_size(), phase);
1265 //----------------------is_instance_field_load_with_local_phi------------------
1266 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1267 if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1268 in(Address)->is_AddP() ) {
1269 const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1270 // Only instances and boxed values.
1271 if( t_oop != nullptr &&
1272 (t_oop->is_ptr_to_boxed_value() ||
1273 t_oop->is_known_instance_field()) &&
1274 t_oop->offset() != Type::OffsetBot &&
1275 t_oop->offset() != Type::OffsetTop) {
1276 return true;
1277 }
1278 }
1279 return false;
1280 }
1281
1282 //------------------------------Identity---------------------------------------
1283 // Loads are identity if previous store is to same address
1284 Node* LoadNode::Identity(PhaseGVN* phase) {
1285 // Loading from an InlineType? The InlineType has the values of
1286 // all fields as input. Look for the field with matching offset.
1287 Node* addr = in(Address);
1288 intptr_t offset;
1289 Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset);
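  // (Offsets at or below the klass offset are header accesses and are not handled here.)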
1290 if (base != nullptr && base->is_InlineType() && offset > oopDesc::klass_offset_in_bytes()) {
1291 Node* value = base->as_InlineType()->field_value_by_offset((int)offset, true);
1292 if (value != nullptr) {
1293 if (Opcode() == Op_LoadN) {
1294 // Encode oop value if we are loading a narrow oop
1295 assert(!phase->type(value)->isa_narrowoop(), "should already be decoded");
1296 value = phase->transform(new EncodePNode(value, bottom_type()));
1297 }
1298 return value;
1299 }
1300 }
1301
1302 // If the previous store-maker is the right kind of Store, and the store is
1303 // to the same address, then we are equal to the value stored.
1304 Node* mem = in(Memory);
1305 Node* value = can_see_stored_value(mem, phase);
1306 if( value ) {
1307 // byte, short & char stores truncate naturally.
1308 // A load has to load the truncated value which requires
1309 // some sort of masking operation and that requires an
1310 // Ideal call instead of an Identity call.
1311 if (memory_size() < BytesPerInt) {
1312 // If the input to the store does not fit with the load's result type,
1313 // it must be truncated via an Ideal call.
1314 if (!phase->type(value)->higher_equal(phase->type(this)))
1315 return this;
1316 }
1317 // (This works even when value is a Con, but LoadNode::Value
1318 // usually runs first, producing the singleton type of the Con.)
1319 if (!has_pinned_control_dependency() || value->is_Con()) {
1320 return value;
1321 } else {
2066 }
2067 }
2068
2069 // Don't do this for integer types. There is only potential profit if
2070 // the element type t is lower than _type; that is, for int types, if _type is
2071 // more restrictive than t. This only happens here if one is short and the other
2072 // char (both 16 bits), and in those cases we've made an intentional decision
2073 // to use one kind of load over the other. See AndINode::Ideal and 4965907.
2074 // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
2075 //
2076 // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
2077 // where the _gvn.type of the AddP is wider than 8. This occurs when an earlier
2078 // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
2079 // subsumed by p1. If p1 is on the worklist but has not yet been re-transformed,
2080 // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
2081 // In fact, that could have been the original type of p1, and p1 could have
2082 // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
2083 // expression (LShiftL quux 3) independently optimized to the constant 8.
2084 if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
2085 && (_type->isa_vect() == nullptr)
2086 && !ary->is_flat()
2087 && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
2088 // t might actually be lower than _type, if _type is a unique
2089 // concrete subclass of abstract class t.
2090 if (off_beyond_header || off == Type::OffsetBot) { // is the offset beyond the header?
2091 const Type* jt = t->join_speculative(_type);
2092 // In any case, do not allow the join, per se, to empty out the type.
2093 if (jt->empty() && !t->empty()) {
2094 // This can happen if an interface-typed array narrows to a class type.
2095 jt = _type;
2096 }
2097 #ifdef ASSERT
2098 if (phase->C->eliminate_boxing() && adr->is_AddP()) {
2099 // The pointers in the autobox arrays are always non-null
2100 Node* base = adr->in(AddPNode::Base);
2101 if ((base != nullptr) && base->is_DecodeN()) {
2102 // Get LoadN node which loads IntegerCache.cache field
2103 base = base->in(1);
2104 }
2105 if ((base != nullptr) && base->is_Con()) {
2106 const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
2107 if ((base_type != nullptr) && base_type->is_autobox_cache()) {
2108 // It could be narrow oop
2109 assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
2110 }
2111 }
2112 }
2113 #endif
2114 return jt;
2115 }
2116 }
2117 } else if (tp->base() == Type::InstPtr) {
2118 assert( off != Type::OffsetBot ||
2119 // arrays can be cast to Objects
2120 !tp->isa_instptr() ||
2121 tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
2122 // Default value load
2123 tp->is_instptr()->instance_klass() == ciEnv::current()->Class_klass() ||
2124 // unsafe field access may not have a constant offset
2125 C->has_unsafe_access(),
2126 "Field accesses must be precise" );
2127 // For oop loads, we expect the _type to be precise.
2128
2129 const TypeInstPtr* tinst = tp->is_instptr();
2130 BasicType bt = memory_type();
2131
2132 // Optimize loads from constant fields.
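// For example (assuming the constant-folding policy in
// Type::make_constant_from_field allows it), a load of a final field from a
// constant oop, such as a field of a known java.lang.Class or String constant,
// folds to the constant type of the field's value.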
2133 ciObject* const_oop = tinst->const_oop();
2134 if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
2135 const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt);
2136 if (con_type != nullptr) {
2137 return con_type;
2138 }
2139 }
2140 } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
2141 assert(off != Type::OffsetBot ||
2142 !tp->isa_instklassptr() ||
2143 // arrays can be cast to Objects
2144 tp->isa_instklassptr()->instance_klass()->is_java_lang_Object() ||
2145 // also allow array-loading from the primary supertype
2146 // array during subtype checks
2147 Opcode() == Op_LoadKlass,
2148 "Field accesses must be precise");
2149 // For klass/static loads, we expect the _type to be precise
2150 } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
2151 /* Since the mirror is referenced indirectly from the Klass*,
2152  * the VM now uses two loads: LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset)).
2153  * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
2154  *
2155  * So check the type and klass of the node before the LoadP.
2250 if (ReduceFieldZeroing || is_instance || is_boxed_value) {
2251 Node* value = can_see_stored_value(mem,phase);
2252 if (value != nullptr && value->is_Con()) {
2253 assert(value->bottom_type()->higher_equal(_type),"sanity");
2254 return value->bottom_type();
2255 }
2256 }
2257
2258 bool is_vect = (_type->isa_vect() != nullptr);
2259 if (is_instance && !is_vect) {
2260 // If we have an instance type and our memory input is the
2261 // program's initial memory state, there is no matching store,
2262 // so just return a zero of the appropriate type (unless the load
2263 // is vectorized, in which case we have no zero constant).
2264 Node *mem = in(MemNode::Memory);
2265 if (mem->is_Parm() && mem->in(0)->is_Start()) {
2266 assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2267 return Type::get_zero_type(_type->basic_type());
2268 }
2269 }
2270 Node* alloc = is_new_object_mark_load();
2271 if (alloc != nullptr) {
2272 if (EnableValhalla) {
2273 // The mark word may contain property bits (inline, flat, null-free)
2274 Node* klass_node = alloc->in(AllocateNode::KlassNode);
2275 const TypeKlassPtr* tkls = phase->type(klass_node)->isa_klassptr();
2276 if (tkls != nullptr && tkls->is_loaded() && tkls->klass_is_exact()) {
2277 return TypeX::make(tkls->exact_klass()->prototype_header().value());
2278 }
2279 } else {
2280 return TypeX::make(markWord::prototype().value());
2281 }
2282 }
2283
2284 return _type;
2285 }
2286
2287 //------------------------------match_edge-------------------------------------
2288 // Do we Match on this edge index or not? Match only the address.
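// Only the Address input may be folded into this node's match rule (e.g. into
// a complex addressing mode); control, memory and other inputs are matched
// separately.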
2289 uint LoadNode::match_edge(uint idx) const {
2290 return idx == MemNode::Address;
2291 }
2292
2293 //--------------------------LoadBNode::Ideal--------------------------------------
2294 //
2295 // If the previous store is to the same address as this load,
2296 // and the value stored was larger than a byte, replace this load
2297 // with the value stored truncated to a byte. If no truncation is
2298 // needed, the replacement is done in LoadNode::Identity().
2299 //
2300 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2301 Node* mem = in(MemNode::Memory);
2412 return LoadNode::Ideal(phase, can_reshape);
2413 }
2414
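// Sign-extension sketch for the short case below: (con << 16) >> 16 keeps the
// low 16 bits and replicates bit 15, e.g. con = 0x00012345 yields 0x2345,
// while con = 0x0000FFFF yields -1.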
2415 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2416 Node* mem = in(MemNode::Memory);
2417 Node* value = can_see_stored_value(mem,phase);
2418 if (value != nullptr && value->is_Con() &&
2419 !value->bottom_type()->higher_equal(_type)) {
2420 // If the input to the store does not fit with the load's result type,
2421 // it must be truncated. We can't delay until Ideal call since
2422 // a singleton Value is needed for split_thru_phi optimization.
2423 int con = value->get_int();
2424 return TypeInt::make((con << 16) >> 16);
2425 }
2426 return LoadNode::Value(phase);
2427 }
2428
2429 //=============================================================================
2430 //----------------------------LoadKlassNode::make------------------------------
2431 // Polymorphic factory method:
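// Depending on the platform this produces one of two shapes (sketch):
//   with compressed class pointers: DecodeNKlass(LoadNKlass(ctl, mem, adr))
//   otherwise:                      LoadKlass(ctl, mem, adr)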
2432 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
2433 const TypeKlassPtr* tk) {
2434 // sanity check the alias category against the created node type
2435 const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
2436 assert(adr_type != nullptr, "expecting TypeKlassPtr");
2437 #ifdef _LP64
2438 if (adr_type->is_ptr_to_narrowklass()) {
2439 assert(UseCompressedClassPointers, "no compressed klasses");
2440 Node* load_klass = gvn.transform(new LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2441 return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2442 }
2443 #endif
2444 assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2445 return new LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
2446 }
2447
2448 //------------------------------Value------------------------------------------
2449 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
2450 return klass_value_common(phase);
2451 }
2452
2453 // In most cases, LoadKlassNode does not have the control input set. If the control
2460 // Either input is TOP ==> the result is TOP
2461 const Type *t1 = phase->type( in(MemNode::Memory) );
2462 if (t1 == Type::TOP) return Type::TOP;
2463 Node *adr = in(MemNode::Address);
2464 const Type *t2 = phase->type( adr );
2465 if (t2 == Type::TOP) return Type::TOP;
2466 const TypePtr *tp = t2->is_ptr();
2467 if (TypePtr::above_centerline(tp->ptr()) ||
2468 tp->ptr() == TypePtr::Null) return Type::TOP;
2469
2470 // Return a more precise klass, if possible
2471 const TypeInstPtr *tinst = tp->isa_instptr();
2472 if (tinst != nullptr) {
2473 ciInstanceKlass* ik = tinst->instance_klass();
2474 int offset = tinst->offset();
2475 if (ik == phase->C->env()->Class_klass()
2476 && (offset == java_lang_Class::klass_offset() ||
2477 offset == java_lang_Class::array_klass_offset())) {
2478 // We are loading a special hidden field from a Class mirror object,
2479 // the field which points to the VM's Klass metaobject.
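// Illustrative examples of the folding below: for a constant mirror such as
// String.class, the klass field folds to the constant klass of java.lang.String;
// for a primitive mirror such as int.class, it folds to the null pointer, and
// the array-klass field of void.class likewise folds to null.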
2480 bool is_null_free_array = false;
2481 ciType* t = tinst->java_mirror_type(&is_null_free_array);
2482 // java_mirror_type returns non-null for compile-time Class constants.
2483 if (t != nullptr) {
2484 // constant oop => constant klass
2485 if (offset == java_lang_Class::array_klass_offset()) {
2486 if (t->is_void()) {
2487 // We cannot create a void array. Since void is a primitive type, return the null
2488 // klass. Users of this result need to do a null check on the returned klass.
2489 return TypePtr::NULL_PTR;
2490 }
2491 const TypeKlassPtr* tklass = TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
2492 if (is_null_free_array) {
2493 tklass = tklass->is_aryklassptr()->cast_to_null_free();
2494 }
2495 return tklass;
2496 }
2497 if (!t->is_klass()) {
2498 // a primitive Class (e.g., int.class) has null for a klass field
2499 return TypePtr::NULL_PTR;
2500 }
2501 // (Folds up the 1st indirection in aClassConstant.getModifiers().)
2502 const TypeKlassPtr* tklass = TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
2503 if (is_null_free_array) {
2504 tklass = tklass->is_aryklassptr()->cast_to_null_free();
2505 }
2506 return tklass;
2507 }
2508 // non-constant mirror, so we can't tell what's going on
2509 }
2510 if (!tinst->is_loaded())
2511 return _type; // Bail out if not loaded
2512 if (offset == oopDesc::klass_offset_in_bytes()) {
2513 return tinst->as_klass_type(true);
2514 }
2515 }
2516
2517 // Check for loading klass from an array
2518 const TypeAryPtr* tary = tp->isa_aryptr();
2519 if (tary != nullptr &&
2520 tary->offset() == oopDesc::klass_offset_in_bytes()) {
2521 return tary->as_klass_type(true);
2522 }
2523
2524 // Check for loading klass from an array klass
2525 const TypeKlassPtr *tkls = tp->isa_klassptr();
2526 if (tkls != nullptr && !StressReflectiveCode) {
2527 if (!tkls->is_loaded())
2528 return _type; // Bail out if not loaded
2529 if (tkls->isa_aryklassptr() && tkls->is_aryklassptr()->elem()->isa_klassptr() &&
2530 tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2531 // // Always returning precise element type is incorrect,
2532 // // e.g., element type could be object and array may contain strings
2533 // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2534
2535 // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2536 // according to the element type's subclassing.
2537 return tkls->is_aryklassptr()->elem()->isa_klassptr()->cast_to_exactness(tkls->klass_is_exact());
2538 }
3495 }
3496 ss.print_cr("[TraceMergeStores]: with");
3497 merged_input_value->dump("\n", false, &ss);
3498 merged_store->dump("\n", false, &ss);
3499 tty->print("%s", ss.as_string());
3500 }
3501 #endif
3502
3503 //------------------------------Ideal------------------------------------------
3504 // Change a back-to-back pair Store(Store(m, p, y), p, x) into Store(m, p, x).
3505 // When a store immediately follows a relevant allocation/initialization,
3506 // try to capture it into the initialization, or hoist it above.
3507 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3508 Node* p = MemNode::Ideal_common(phase, can_reshape);
3509 if (p) return (p == NodeSentinel) ? nullptr : p;
3510
3511 Node* mem = in(MemNode::Memory);
3512 Node* address = in(MemNode::Address);
3513 Node* value = in(MemNode::ValueIn);
3514 // Back-to-back stores to same address? Fold em up. Generally
3515 // unsafe if I have intervening uses...
3516 if (phase->C->get_adr_type(phase->C->get_alias_index(adr_type())) != TypeAryPtr::INLINES) {
3517 Node* st = mem;
3518 // If Store 'st' has more than one use, we cannot fold 'st' away.
3519 // For example, 'st' might be the final state at a conditional
3520 // return. Or, 'st' might be used by some node which is live at
3521 // the same time 'st' is live, which might be unschedulable. So,
3522 // require exactly ONE user until such time as we clone 'mem' for
3523 // each of 'mem's uses (thus making the exactly-1-user-rule hold
3524 // true).
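// Sketch of the folding performed below: for
//   st   = StoreI(mem, p, y)     // single user: the store below
//   this = StoreI(st,  p, x)
// the older store is fully overwritten, so its user is rewired to 'mem',
// yielding StoreI(mem, p, x). If 'st' also fed, say, a SafePoint, it would
// have more than one user and must be kept.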
3525 while (st->is_Store() && st->outcnt() == 1) {
3526 // Looking at a dead closed cycle of memory?
3527 assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
3528 assert(Opcode() == st->Opcode() ||
3529 st->Opcode() == Op_StoreVector ||
3530 Opcode() == Op_StoreVector ||
3531 st->Opcode() == Op_StoreVectorScatter ||
3532 Opcode() == Op_StoreVectorScatter ||
3533 phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
3534 (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
3535 (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
3536 (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) ||
3537 (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
3538 "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
3539
3540 if (st->in(MemNode::Address)->eqv_uncast(address) &&
3541 st->as_Store()->memory_size() <= this->memory_size()) {
3542 Node* use = st->raw_out(0);
3543 if (phase->is_IterGVN()) {
3544 phase->is_IterGVN()->rehash_node_delayed(use);
3545 }
3546 // It's OK to do this in the parser, since DU info is always accurate,
3547 // and the parser always refers to nodes via SafePointNode maps.
3548 use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
3549 return this;
3550 }
3551 st = st->in(MemNode::Memory);
3552 }
3553 }
3554
3555
3556 // Capture an unaliased, unconditional, simple store into an initializer.
3643 const StoreVectorNode* store_vector = as_StoreVector();
3644 const StoreVectorNode* mem_vector = mem->as_StoreVector();
3645 const Node* store_indices = store_vector->indices();
3646 const Node* mem_indices = mem_vector->indices();
3647 const Node* store_mask = store_vector->mask();
3648 const Node* mem_mask = mem_vector->mask();
3649 // Ensure types, indices, and masks match
3650 if (store_vector->vect_type() == mem_vector->vect_type() &&
3651 ((store_indices == nullptr) == (mem_indices == nullptr) &&
3652 (store_indices == nullptr || store_indices->eqv_uncast(mem_indices))) &&
3653 ((store_mask == nullptr) == (mem_mask == nullptr) &&
3654 (store_mask == nullptr || store_mask->eqv_uncast(mem_mask)))) {
3655 result = mem;
3656 }
3657 }
3658 }
3659
3660 // Store of zero anywhere into a freshly-allocated object?
3661 // Then the store is useless.
3662 // (It must already have been captured by the InitializeNode.)
3663 if (result == this && ReduceFieldZeroing) {
3664 // a newly allocated object is already all-zeroes everywhere
3665 if (mem->is_Proj() && mem->in(0)->is_Allocate() &&
3666 (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::DefaultValue) == val)) {
3667 result = mem;
3668 }
3669
3670 if (result == this && phase->type(val)->is_zero_type()) {
3671 // the store may also apply to zero-bits in an earlier object
3672 Node* prev_mem = find_previous_store(phase);
3673 // Steps (a), (b): Walk past independent stores to find an exact match.
3674 if (prev_mem != nullptr) {
3675 Node* prev_val = can_see_stored_value(prev_mem, phase);
3676 if (prev_val != nullptr && prev_val == val) {
3677 // prev_val and val might differ by a cast; it would be good
3678 // to keep the more informative of the two.
3679 result = mem;
3680 }
3681 }
3682 }
3683 }
3684
3685 PhaseIterGVN* igvn = phase->is_IterGVN();
3686 if (result != this && igvn != nullptr) {
3687 MemBarNode* trailing = trailing_membar();
3688 if (trailing != nullptr) {
3689 #ifdef ASSERT
3690 const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();
3966 // Clearing a short array is faster with stores
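// Sketch of the transformation done below for a small constant-length clear
// (count words, count * BytesPerLong <= InitArrayShortSize, and
// IdealizeClearArrayNode enabled):
//   ClearArray(ctl, mem, #count, adr, val)
// becomes a chain of 8-byte stores at consecutive offsets:
//   StoreL(adr+0), StoreL(adr+8), ..., StoreL(adr+8*(count-1))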
3967 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3968 // Already know this is a large node, do not try to ideal it
3969 if (_is_large) return nullptr;
3970
3971 const int unit = BytesPerLong;
3972 const TypeX* t = phase->type(in(2))->isa_intptr_t();
3973 if (!t) return nullptr;
3974 if (!t->is_con()) return nullptr;
3975 intptr_t raw_count = t->get_con();
3976 intptr_t size = raw_count;
3977 if (!Matcher::init_array_count_is_in_bytes) size *= unit;
3978 // Clearing nothing uses the Identity call.
3979 // Negative clears are possible on dead ClearArrays
3980 // (see jck test stmt114.stmt11402.val).
3981 if (size <= 0 || size % unit != 0) return nullptr;
3982 intptr_t count = size / unit;
3983 // Length too long; communicate this to matchers and assemblers.
3984 // Assemblers are responsible for producing fast hardware clears for it.
3985 if (size > InitArrayShortSize) {
3986 return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true);
3987 } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
3988 return nullptr;
3989 }
3990 if (!IdealizeClearArrayNode) return nullptr;
3991 Node *mem = in(1);
3992 if( phase->type(mem)==Type::TOP ) return nullptr;
3993 Node *adr = in(3);
3994 const Type* at = phase->type(adr);
3995 if( at==Type::TOP ) return nullptr;
3996 const TypePtr* atp = at->isa_ptr();
3997 // adjust atp to be the correct array element address type
3998 if (atp == nullptr) atp = TypePtr::BOTTOM;
3999 else atp = atp->add_offset(Type::OffsetBot);
4000 // Get base for derived pointer purposes
4001 if( adr->Opcode() != Op_AddP ) Unimplemented();
4002 Node *base = adr->in(1);
4003
4004 Node *val = in(4);
4005 Node *off = phase->MakeConX(BytesPerLong);
4006 mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
4007 count--;
4008 while( count-- ) {
4009 mem = phase->transform(mem);
4010 adr = phase->transform(new AddPNode(base,adr,off));
4011 mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
4012 }
4013 return mem;
4014 }
4015
4016 //----------------------------step_through----------------------------------
4017 // If this ClearArray initializes a different instance, advance *np past it (to the
4018 // allocation's input memory) and return true; otherwise return false.
4019 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseValues* phase) {
4020 Node* n = *np;
4021 assert(n->is_ClearArray(), "sanity");
4022 intptr_t offset;
4023 AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
4024 // This method is called only before Allocate nodes are expanded
4025 // during macro expansion. Until then, ClearArray nodes are generated
4026 // only in PhaseMacroExpand::generate_arraycopy(), which follows the
4027 // allocations, so the allocation must be found here.
4028 assert(alloc != nullptr, "should have allocation");
4029 if (alloc->_idx == instance_id) {
4030 // Cannot bypass the initialization of the instance we are looking for.
4031 return false;
4032 }
4033 // Otherwise skip it.
4034 InitializeNode* init = alloc->initialization();
4035 if (init != nullptr)
4036 *np = init->in(TypeFunc::Memory);
4037 else
4038 *np = alloc->in(TypeFunc::Memory);
4039 return true;
4040 }
4041
4042 //----------------------------clear_memory-------------------------------------
4043 // Generate code to initialize object storage to zero.
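// This overload handles a possibly unaligned (4-byte) prefix with a single
// 32-bit store (or a narrow-oop store of the default value when 'val' is
// given), then delegates the aligned remainder to the Node-offset overload
// below, which emits the bulk ClearArray.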
4044 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4045 Node* val,
4046 Node* raw_val,
4047 intptr_t start_offset,
4048 Node* end_offset,
4049 PhaseGVN* phase) {
4050 intptr_t offset = start_offset;
4051
4052 int unit = BytesPerLong;
4053 if ((offset % unit) != 0) {
4054 Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
4055 adr = phase->transform(adr);
4056 const TypePtr* atp = TypeRawPtr::BOTTOM;
4057 if (val != nullptr) {
4058 assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
4059 mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
4060 } else {
4061 assert(raw_val == nullptr, "raw_val must also be null when val is null");
4062 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4063 }
4064 mem = phase->transform(mem);
4065 offset += BytesPerInt;
4066 }
4067 assert((offset % unit) == 0, "");
4068
4069 // Initialize the remaining stuff, if any, with a ClearArray.
4070 return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase);
4071 }
4072
4073 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4074 Node* raw_val,
4075 Node* start_offset,
4076 Node* end_offset,
4077 PhaseGVN* phase) {
4078 if (start_offset == end_offset) {
4079 // nothing to do
4080 return mem;
4081 }
4082
4083 int unit = BytesPerLong;
4084 Node* zbase = start_offset;
4085 Node* zend = end_offset;
4086
4087 // Scale to the unit required by the CPU:
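// With unit == BytesPerLong the shift is exact_log2(8) == 3, i.e. byte
// offsets are converted to double-word counts when the matcher expects the
// array-init count in words rather than bytes.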
4088 if (!Matcher::init_array_count_is_in_bytes) {
4089 Node* shift = phase->intcon(exact_log2(unit));
4090 zbase = phase->transform(new URShiftXNode(zbase, shift) );
4091 zend = phase->transform(new URShiftXNode(zend, shift) );
4092 }
4093
4094 // Bulk clear double-words
4095 Node* zsize = phase->transform(new SubXNode(zend, zbase) );
4096 Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
4097 if (raw_val == nullptr) {
4098 raw_val = phase->MakeConX(0);
4099 }
4100 mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false);
4101 return phase->transform(mem);
4102 }
4103
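// Worked example for the overload below (sketch): start_offset == 12 and
// end_offset == 28 give done_offset == 24, so bytes [12, 24) are cleared in
// bulk and the trailing 4 bytes at offset 24 get a single 32-bit store.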
4104 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4105 Node* val,
4106 Node* raw_val,
4107 intptr_t start_offset,
4108 intptr_t end_offset,
4109 PhaseGVN* phase) {
4110 if (start_offset == end_offset) {
4111 // nothing to do
4112 return mem;
4113 }
4114
4115 assert((end_offset % BytesPerInt) == 0, "odd end offset");
4116 intptr_t done_offset = end_offset;
4117 if ((done_offset % BytesPerLong) != 0) {
4118 done_offset -= BytesPerInt;
4119 }
4120 if (done_offset > start_offset) {
4121 mem = clear_memory(ctl, mem, dest, val, raw_val,
4122 start_offset, phase->MakeConX(done_offset), phase);
4123 }
4124 if (done_offset < end_offset) { // emit the final 32-bit store
4125 Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
4126 adr = phase->transform(adr);
4127 const TypePtr* atp = TypeRawPtr::BOTTOM;
4128 if (val != nullptr) {
4129 assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
4130 mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
4131 } else {
4132 assert(raw_val == nullptr, "raw_val must also be null when val is null");
4133 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4134 }
4135 mem = phase->transform(mem);
4136 done_offset += BytesPerInt;
4137 }
4138 assert(done_offset == end_offset, "");
4139 return mem;
4140 }
4141
4142 //=============================================================================
4143 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
4144 : MultiNode(TypeFunc::Parms + (precedent == nullptr? 0: 1)),
4145 _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
4146 #ifdef ASSERT
4147 , _pair_idx(0)
4148 #endif
4149 {
4150 init_class_id(Class_MemBar);
4151 Node* top = C->top();
4152 init_req(TypeFunc::I_O,top);
4153 init_req(TypeFunc::FramePtr,top);
4154 init_req(TypeFunc::ReturnAdr,top);
4260 PhaseIterGVN* igvn = phase->is_IterGVN();
4261 remove(igvn);
4262 // Must return either the original node (now dead) or a new node
4263 // (Do not return a top here, since that would break the uniqueness of top.)
4264 return new ConINode(TypeInt::ZERO);
4265 }
4266 }
4267 return progress ? this : nullptr;
4268 }
4269
4270 //------------------------------Value------------------------------------------
4271 const Type* MemBarNode::Value(PhaseGVN* phase) const {
4272 if( !in(0) ) return Type::TOP;
4273 if( phase->type(in(0)) == Type::TOP )
4274 return Type::TOP;
4275 return TypeTuple::MEMBAR;
4276 }
4277
4278 //------------------------------match------------------------------------------
4279 // Construct projections for memory.
4280 Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) {
4281 switch (proj->_con) {
4282 case TypeFunc::Control:
4283 case TypeFunc::Memory:
4284 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
4285 }
4286 ShouldNotReachHere();
4287 return nullptr;
4288 }
4289
4290 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4291 trailing->_kind = TrailingStore;
4292 leading->_kind = LeadingStore;
4293 #ifdef ASSERT
4294 trailing->_pair_idx = leading->_idx;
4295 leading->_pair_idx = leading->_idx;
4296 #endif
4297 }
4298
4299 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4300 trailing->_kind = TrailingLoadStore;
4547 return (req() > RawStores);
4548 }
4549
4550 void InitializeNode::set_complete(PhaseGVN* phase) {
4551 assert(!is_complete(), "caller responsibility");
4552 _is_complete = Complete;
4553
4554 // After this node is complete, it contains a bunch of
4555 // raw-memory initializations. There is no need for
4556 // it to have anything to do with non-raw memory effects.
4557 // Therefore, tell all non-raw users to re-optimize themselves,
4558 // after skipping the memory effects of this initialization.
4559 PhaseIterGVN* igvn = phase->is_IterGVN();
4560 if (igvn) igvn->add_users_to_worklist(this);
4561 }
4562
4563 // Convenience function.
4564 // Returns false if the init is missing, already complete, or already contains stores.
4565 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
4566 InitializeNode* init = initialization();
4567 if (init == nullptr || init->is_complete()) {
4568 return false;
4569 }
4570 init->remove_extra_zeroes();
4571 // for now, if this allocation has already collected any inits, bail:
4572 if (init->is_non_zero()) return false;
4573 init->set_complete(phase);
4574 return true;
4575 }
4576
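// Compact the captured-store inputs: drop entries that are top or the
// initial zero memory, shifting the remaining stores down so the RawStores
// edge list stays dense.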
4577 void InitializeNode::remove_extra_zeroes() {
4578 if (req() == RawStores) return;
4579 Node* zmem = zero_memory();
4580 uint fill = RawStores;
4581 for (uint i = fill; i < req(); i++) {
4582 Node* n = in(i);
4583 if (n->is_top() || n == zmem) continue; // skip
4584 if (fill < i) set_req(fill, n); // compact
4585 ++fill;
4586 }
4587 // delete any empty spaces created:
4588 while (fill < req()) {
4589 del_req(fill);
4733 // store node that we'd like to capture. We need to check
4734 // the uses of the MergeMemNode.
4735 mems.push(n);
4736 }
4737 } else if (n->is_Mem()) {
4738 Node* other_adr = n->in(MemNode::Address);
4739 if (other_adr == adr) {
4740 failed = true;
4741 break;
4742 } else {
4743 const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
4744 if (other_t_adr != nullptr) {
4745 int other_alias_idx = phase->C->get_alias_index(other_t_adr);
4746 if (other_alias_idx == alias_idx) {
4747 // A load from the same memory slice as the store right
4748 // after the InitializeNode. We check the object/array that is
4749 // loaded from: if it is the freshly allocated object (i.e. a
4750 // projection of 'alloc'), we cannot capture the store.
4751 assert(!n->is_Store(), "2 stores to same slice on same control?");
4752 Node* base = other_adr;
4753 if (base->is_Phi()) {
4754 // In a rare case, base may be a PhiNode that reads from the
4755 // same memory slice between the InitializeNode and the store.
4756 failed = true;
4757 break;
4758 }
4759 assert(base->is_AddP(), "should be addp but is %s", base->Name());
4760 base = base->in(AddPNode::Base);
4761 if (base != nullptr) {
4762 base = base->uncast();
4763 if (base->is_Proj() && base->in(0) == alloc) {
4764 failed = true;
4765 break;
4766 }
4767 }
4768 }
4769 }
4770 }
4771 } else {
4772 failed = true;
4773 break;
4774 }
4775 }
4776 }
4777 }
4778 if (failed) {
5325 //   z's_done      12  16  16  16    12  16    12
5326 //   z's_needed    12  16  16  16    16  16    16
5327 //   zsize          0   0   0   0     4   0     4
5328 if (next_full_store < 0) {
5329 // Conservative tack: Zero to end of current word.
5330 zeroes_needed = align_up(zeroes_needed, BytesPerInt);
5331 } else {
5332 // Zero to beginning of next fully initialized word.
5333 // Or, don't zero at all, if we are already in that word.
5334 assert(next_full_store >= zeroes_needed, "must go forward");
5335 assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
5336 zeroes_needed = next_full_store;
5337 }
5338 }
5339
5340 if (zeroes_needed > zeroes_done) {
5341 intptr_t zsize = zeroes_needed - zeroes_done;
5342 // Do some incremental zeroing on rawmem, in parallel with inits.
5343 zeroes_done = align_down(zeroes_done, BytesPerInt);
5344 rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5345 allocation()->in(AllocateNode::DefaultValue),
5346 allocation()->in(AllocateNode::RawDefaultValue),
5347 zeroes_done, zeroes_needed,
5348 phase);
5349 zeroes_done = zeroes_needed;
5350 if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
5351 do_zeroing = false; // leave the hole, next time
5352 }
5353 }
5354
5355 // Collect the store and move on:
5356 phase->replace_input_of(st, MemNode::Memory, inits);
5357 inits = st; // put it on the linearized chain
5358 set_req(i, zmem); // unhook from previous position
5359
5360 if (zeroes_done == st_off)
5361 zeroes_done = next_init_off;
5362
5363 assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
5364
5365 #ifdef ASSERT
5366 // Various order invariants. Weaker than stores_are_sane because
5386 remove_extra_zeroes(); // clear out all the zmems left over
5387 add_req(inits);
5388
5389 if (!(UseTLAB && ZeroTLAB)) {
5390 // If anything remains to be zeroed, zero it all now.
5391 zeroes_done = align_down(zeroes_done, BytesPerInt);
5392 // if it is the last unused 4 bytes of an instance, forget about it
5393 intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
5394 if (zeroes_done + BytesPerLong >= size_limit) {
5395 AllocateNode* alloc = allocation();
5396 assert(alloc != nullptr, "must be present");
5397 if (alloc != nullptr && alloc->Opcode() == Op_Allocate) {
5398 Node* klass_node = alloc->in(AllocateNode::KlassNode);
5399 ciKlass* k = phase->type(klass_node)->is_instklassptr()->instance_klass();
5400 if (zeroes_done == k->layout_helper())
5401 zeroes_done = size_limit;
5402 }
5403 }
5404 if (zeroes_done < size_limit) {
5405 rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5406 allocation()->in(AllocateNode::DefaultValue),
5407 allocation()->in(AllocateNode::RawDefaultValue),
5408 zeroes_done, size_in_bytes, phase);
5409 }
5410 }
5411
5412 set_complete(phase);
5413 return rawmem;
5414 }
5415
5416
5417 #ifdef ASSERT
5418 bool InitializeNode::stores_are_sane(PhaseValues* phase) {
5419 if (is_complete())
5420 return true; // stores could be anything at this point
5421 assert(allocation() != nullptr, "must be present");
5422 intptr_t last_off = allocation()->minimum_header_size();
5423 for (uint i = InitializeNode::RawStores; i < req(); i++) {
5424 Node* st = in(i);
5425 intptr_t st_off = get_store_offset(st, phase);
5426 if (st_off < 0) continue; // ignore dead garbage
5427 if (last_off > st_off) {
|