6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "classfile/javaClasses.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/c2/barrierSetC2.hpp"
30 #include "gc/shared/tlab_globals.hpp"
31 #include "memory/allocation.inline.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "oops/objArrayKlass.hpp"
34 #include "opto/addnode.hpp"
35 #include "opto/arraycopynode.hpp"
36 #include "opto/cfgnode.hpp"
37 #include "opto/regalloc.hpp"
38 #include "opto/compile.hpp"
39 #include "opto/connode.hpp"
40 #include "opto/convertnode.hpp"
41 #include "opto/loopnode.hpp"
42 #include "opto/machnode.hpp"
43 #include "opto/matcher.hpp"
44 #include "opto/memnode.hpp"
45 #include "opto/mempointer.hpp"
46 #include "opto/mulnode.hpp"
47 #include "opto/narrowptrnode.hpp"
48 #include "opto/phaseX.hpp"
49 #include "opto/regmask.hpp"
50 #include "opto/rootnode.hpp"
51 #include "opto/traceMergeStoresTag.hpp"
52 #include "opto/vectornode.hpp"
53 #include "utilities/align.hpp"
54 #include "utilities/copy.hpp"
55 #include "utilities/macros.hpp"
56 #include "utilities/powerOfTwo.hpp"
57 #include "utilities/vmError.hpp"
58
59 // Portions of code courtesy of Clifford Click
60
216 bool is_instance = t_oop->is_known_instance_field();
217 PhaseIterGVN *igvn = phase->is_IterGVN();
218 if (is_instance && igvn != nullptr && result->is_Phi()) {
219 PhiNode *mphi = result->as_Phi();
220 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
221 const TypePtr *t = mphi->adr_type();
222 bool do_split = false;
223 // In the following cases, the Load's memory input can be further optimized
224 // based on its precise address type
225 if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ) {
226 do_split = true;
227 } else if (t->isa_oopptr() && !t->is_oopptr()->is_known_instance()) {
228 const TypeOopPtr* mem_t =
229 t->is_oopptr()->cast_to_exactness(true)
230 ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
231 ->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
232 if (t_oop->isa_aryptr()) {
233 mem_t = mem_t->is_aryptr()
234 ->cast_to_stable(t_oop->is_aryptr()->is_stable())
235 ->cast_to_size(t_oop->is_aryptr()->size())
236 ->with_offset(t_oop->is_aryptr()->offset())
237 ->is_aryptr();
238 }
239 do_split = mem_t == t_oop;
240 }
241 if (do_split) {
242 // clone the Phi with our address type
243 result = mphi->split_out_instance(t_adr, igvn);
244 } else {
245 assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
246 }
247 }
248 return result;
249 }
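// Illustrative sketch (schematic IR, not an actual dump) of the split above:
// a load from a known instance that sees a memory Phi on the general slice
// gets a clone of that Phi on the instance's own slice, so it can bypass
// unrelated stores:
//   before: LoadI( Phi(adr_type: BOTTOM;     in1,  in2 ), adr@instance#5 )
//   after:  LoadI( Phi(adr_type: instance#5; in1', in2'), adr@instance#5 )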
250
251 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
252 uint alias_idx = phase->C->get_alias_index(tp);
253 Node *mem = mmem;
254 #ifdef ASSERT
255 {
256 // Check that current type is consistent with the alias index used during graph construction
257 assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
258 bool consistent = adr_check == nullptr || adr_check->empty() ||
259 phase->C->must_alias(adr_check, alias_idx );
260 // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
261 if( !consistent && adr_check != nullptr && !adr_check->empty() &&
262 tp->isa_aryptr() && tp->offset() == Type::OffsetBot &&
263 adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
264 ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
265 adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
266 adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
267 // don't assert if it is dead code.
268 consistent = true;
269 }
270 if( !consistent ) {
271 st->print("alias_idx==%d, adr_check==", alias_idx);
272 if( adr_check == nullptr ) {
273 st->print("null");
274 } else {
275 adr_check->dump();
276 }
277 st->cr();
278 print_alias_types();
279 assert(consistent, "adr_check must match alias idx");
280 }
281 }
282 #endif
995 Node* ld = gvn.transform(load);
996 return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
997 }
998
999 return load;
1000 }
1001
1002 //------------------------------hash-------------------------------------------
1003 uint LoadNode::hash() const {
1004 // unroll addition of interesting fields
1005 return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
1006 }
1007
1008 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
1009 if ((atp != nullptr) && (atp->index() >= Compile::AliasIdxRaw)) {
1010 bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
1011 bool is_stable_ary = FoldStableValues &&
1012 (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
1013 tp->isa_aryptr()->is_stable();
1014
1015 return (eliminate_boxing && non_volatile) || is_stable_ary;
1016 }
1017
1018 return false;
1019 }
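// For example (a hedged reading of the checks above): a load from a
// non-volatile field of an eliminated box (eliminate_boxing) or from a
// FoldStableValues-stable array may look through intervening MemBars,
// because its memory slice cannot be changed by the code the barrier orders.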
1020
1021 LoadNode* LoadNode::pin_array_access_node() const {
1022 const TypePtr* adr_type = this->adr_type();
1023 if (adr_type != nullptr && adr_type->isa_aryptr()) {
1024 return clone_pinned();
1025 }
1026 return nullptr;
1027 }
1028
1029 // Is the value loaded previously stored by an arraycopy? If so, return
1030 // a load node that reads from the source array so we may be able to
1031 // optimize out the ArrayCopy node later.
1032 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
1033 Node* ld_adr = in(MemNode::Address);
1034 intptr_t ld_off = 0;
1035 AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
1052 assert(ld_alloc != nullptr, "need an alloc");
1053 assert(addp->is_AddP(), "address must be addp");
1054 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1055 assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1056 assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1057 addp->set_req(AddPNode::Base, src);
1058 addp->set_req(AddPNode::Address, src);
1059 } else {
1060 assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1061 ac->as_ArrayCopy()->is_copyof_validated() ||
1062 ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1063 assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1064 addp->set_req(AddPNode::Base, src);
1065 addp->set_req(AddPNode::Address, src);
1066
1067 const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1068 BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
1069 if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1070
1071 uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1072 uint shift = exact_log2(type2aelembytes(ary_elem));
1073
1074 Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1075 #ifdef _LP64
1076 diff = phase->transform(new ConvI2LNode(diff));
1077 #endif
1078 diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1079
1080 Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1081 addp->set_req(AddPNode::Offset, offset);
1082 }
1083 addp = phase->transform(addp);
1084 #ifdef ASSERT
1085 const TypePtr* adr_type = phase->type(addp)->is_ptr();
1086 ld->_adr_type = adr_type;
1087 #endif
1088 ld->set_req(MemNode::Address, addp);
1089 ld->set_req(0, ctl);
1090 ld->set_req(MemNode::Memory, mem);
1091 return ld;
1092 }
1093 return nullptr;
1094 }
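// Rough sketch of the rewrite above (names and positions are illustrative):
//   arraycopy(src, src_pos, dest, dest_pos, len)
//   LoadI(dest, header + (i << shift))
// is redirected to read the source array instead, with the offset adjusted
// by the position difference:
//   LoadI(src, header + ((i + (src_pos - dest_pos)) << shift))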
1095
1096
1097 //---------------------------can_see_stored_value------------------------------
1098 // This routine exists to make sure this set of tests is done the same
1099 // everywhere. We need to make a coordinated change: first LoadNode::Ideal
1100 // will change the graph shape in a way which makes memory alive twice at the
1101 // same time (uses the Oracle model of aliasing), then some
1102 // LoadXNode::Identity will fold things back to the equivalence-class model
1103 // of aliasing.
1104 Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
1105 Node* ld_adr = in(MemNode::Address);
1106 intptr_t ld_off = 0;
1107 Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
1108 Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
1109 const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
1110 Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr;
1111 // This is more general than a load from boxing objects.
1112 if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
1113 uint alias_idx = atp->index();
1114 Node* result = nullptr;
1115 Node* current = st;
1116 // Skip through chains of MemBarNodes checking the MergeMems for
1117 // new states for the slice of this load. Stop once any other
1118 // kind of node is encountered. Loads from final memory can skip
1119 // through any kind of MemBar but normal loads shouldn't skip
1120 // through MemBarAcquire since that could allow them to move out of
1121 // a synchronized region. It is not safe to step over MemBarCPUOrder,
1122 // because alias info above them may be inaccurate (e.g., due to
1123 // mixed/mismatched unsafe accesses).
1124 bool is_final_mem = !atp->is_rewritable();
1125 while (current->is_Proj()) {
1126 int opc = current->in(0)->Opcode();
1127 if ((is_final_mem && (opc == Op_MemBarAcquire ||
1171 // Same base, same offset.
1172 // Possible improvement for arrays: check index value instead of absolute offset.
1173
1174 // At this point we have proven something like this setup:
1175 // B = << base >>
1176 // L = LoadQ(AddP(Check/CastPP(B), #Off))
1177 // S = StoreQ(AddP( B , #Off), V)
1178 // (Actually, we haven't yet proven the Q's are the same.)
1179 // In other words, we are loading from a casted version of
1180 // the same pointer-and-offset that we stored to.
1181 // Casted version may carry a dependency and it is respected.
1182 // Thus, we are able to replace L by V.
1183 }
1184 // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1185 if (store_Opcode() != st->Opcode()) {
1186 return nullptr;
1187 }
1188 // LoadVector/StoreVector needs additional check to ensure the types match.
1189 if (st->is_StoreVector()) {
1190 const TypeVect* in_vt = st->as_StoreVector()->vect_type();
1191 const TypeVect* out_vt = as_LoadVector()->vect_type();
1192 if (in_vt != out_vt) {
1193 return nullptr;
1194 }
1195 }
1196 return st->in(MemNode::ValueIn);
1197 }
1198
1199 // A load from a freshly-created object always returns zero.
1200 // (This can happen after LoadNode::Ideal resets the load's memory input
1201 // to find_captured_store, which returned InitializeNode::zero_memory.)
1202 if (st->is_Proj() && st->in(0)->is_Allocate() &&
1203 (st->in(0) == ld_alloc) &&
1204 (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1205 // return a zero value for the load's basic type
1206 // (This is one of the few places where a generic PhaseTransform
1207 // can create new nodes. Think of it as lazily manifesting
1208 // virtually pre-existing constants.)
1209 if (memory_type() != T_VOID) {
1210 if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
1211 // If ReduceBulkZeroing is disabled, we need to check that the allocation does not belong to an
1212 // ArrayCopyNode clone. If it does, we cannot assume zero, since the initialization is done
1213 // by the ArrayCopyNode.
1214 return phase->zerocon(memory_type());
1215 }
1216 } else {
1217 // TODO: materialize all-zero vector constant
1218 assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1219 }
1220 }
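// For instance (illustrative source-level view):
//   int[] a = new int[8];
//   int x = a[3];          // no intervening store to a[3]
// folds the LoadI to the constant 0, provided the offset is beyond the
// object header and the allocation is not initialized by an ArrayCopy clone.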
1221
1222 // A load from an initialization barrier can match a captured store.
1223 if (st->is_Proj() && st->in(0)->is_Initialize()) {
1224 InitializeNode* init = st->in(0)->as_Initialize();
1225 AllocateNode* alloc = init->allocation();
1226 if ((alloc != nullptr) && (alloc == ld_alloc)) {
1227 // examine a captured store value
1228 st = init->find_captured_store(ld_off, memory_size(), phase);
1849 bool addr_mark = ((phase->type(address)->isa_oopptr() || phase->type(address)->isa_narrowoop()) &&
1850 phase->type(address)->is_ptr()->offset() == oopDesc::mark_offset_in_bytes());
1851
1852 // Skip up past a SafePoint control. Cannot do this for Stores because
1853 // pointer stores & cardmarks must stay on the same side of a SafePoint.
1854 if( ctrl != nullptr && ctrl->Opcode() == Op_SafePoint &&
1855 phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw &&
1856 !addr_mark &&
1857 (depends_only_on_test() || has_unknown_control_dependency())) {
1858 ctrl = ctrl->in(0);
1859 set_req(MemNode::Control,ctrl);
1860 progress = true;
1861 }
1862
1863 intptr_t ignore = 0;
1864 Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore);
1865 if (base != nullptr
1866 && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
1867 // Check for useless control edge in some common special cases
1868 if (in(MemNode::Control) != nullptr
1869 && can_remove_control()
1870 && phase->type(base)->higher_equal(TypePtr::NOTNULL)
1871 && all_controls_dominate(base, phase->C->start())) {
1872 // A method-invariant, non-null address (constant or 'this' argument).
1873 set_req(MemNode::Control, nullptr);
1874 progress = true;
1875 }
1876 }
1877
1878 Node* mem = in(MemNode::Memory);
1879 const TypePtr *addr_t = phase->type(address)->isa_ptr();
1880
1881 if (can_reshape && (addr_t != nullptr)) {
1882 // try to optimize our memory input
1883 Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase);
1884 if (opt_mem != mem) {
1885 set_req_X(MemNode::Memory, opt_mem, phase);
1886 if (phase->type( opt_mem ) == Type::TOP) return nullptr;
1887 return this;
1888 }
2047 }
2048 }
2049
2050 // Don't do this for integer types. There is only potential profit if
2051 // the element type t is lower than _type; that is, for int types, if _type is
2052 // more restrictive than t. This only happens here if one is short and the other
2053 // char (both 16 bits), and in those cases we've made an intentional decision
2054 // to use one kind of load over the other. See AndINode::Ideal and 4965907.
2055 // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
2056 //
2057 // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
2058 // where the _gvn.type of the AddP is wider than 8. This occurs when an earlier
2059 // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
2060 // subsumed by p1. If p1 is on the worklist but has not yet been re-transformed,
2061 // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
2062 // In fact, that could have been the original type of p1, and p1 could have
2063 // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
2064 // expression (LShiftL quux 3) independently optimized to the constant 8.
2065 if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
2066 && (_type->isa_vect() == nullptr)
2067 && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
2068 // t might actually be lower than _type, if _type is a unique
2069 // concrete subclass of abstract class t.
2070 if (off_beyond_header || off == Type::OffsetBot) { // is the offset beyond the header?
2071 const Type* jt = t->join_speculative(_type);
2072 // In any case, do not allow the join, per se, to empty out the type.
2073 if (jt->empty() && !t->empty()) {
2074 // This can happen if an interface-typed array narrows to a class type.
2075 jt = _type;
2076 }
2077 #ifdef ASSERT
2078 if (phase->C->eliminate_boxing() && adr->is_AddP()) {
2079 // The pointers in the autobox arrays are always non-null
2080 Node* base = adr->in(AddPNode::Base);
2081 if ((base != nullptr) && base->is_DecodeN()) {
2082 // Get LoadN node which loads IntegerCache.cache field
2083 base = base->in(1);
2084 }
2085 if ((base != nullptr) && base->is_Con()) {
2086 const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
2087 if ((base_type != nullptr) && base_type->is_autobox_cache()) {
2088 // It could be narrow oop
2089 assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
2090 }
2091 }
2092 }
2093 #endif
2094 return jt;
2095 }
2096 }
2097 } else if (tp->base() == Type::InstPtr) {
2098 assert( off != Type::OffsetBot ||
2099 // arrays can be cast to Objects
2100 !tp->isa_instptr() ||
2101 tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
2102 // unsafe field access may not have a constant offset
2103 C->has_unsafe_access(),
2104 "Field accesses must be precise" );
2105 // For oop loads, we expect the _type to be precise.
2106
2107 // Optimize loads from constant fields.
2108 const TypeInstPtr* tinst = tp->is_instptr();
2109 ciObject* const_oop = tinst->const_oop();
2110 if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
2111 const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());
2112 if (con_type != nullptr) {
2113 return con_type;
2114 }
2115 }
2116 } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
2117 assert(off != Type::OffsetBot ||
2118 !tp->isa_instklassptr() ||
2119 // arrays can be cast to Objects
2120 tp->isa_instklassptr()->instance_klass()->is_java_lang_Object() ||
2121 // also allow array-loading from the primary supertype
2122 // array during subtype checks
2123 Opcode() == Op_LoadKlass,
2124 "Field accesses must be precise");
2125 // For klass/static loads, we expect the _type to be precise
2126 } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
2127 /* With mirrors being referenced indirectly through the Klass*,
2128 * the VM now uses two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
2129 * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
2130 *
2131 * So check the type and klass of the node before the LoadP.
2138 assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2139 assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2140 return TypeInstPtr::make(klass->java_mirror());
2141 }
2142 }
2143 }
2144
2145 const TypeKlassPtr *tkls = tp->isa_klassptr();
2146 if (tkls != nullptr) {
2147 if (tkls->is_loaded() && tkls->klass_is_exact()) {
2148 ciKlass* klass = tkls->exact_klass();
2149 // We are loading a field from a Klass metaobject whose identity
2150 // is known at compile time (the type is "exact" or "precise").
2151 // Check for fields we know are maintained as constants by the VM.
2152 if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2153 // The field is Klass::_super_check_offset. Return its (constant) value.
2154 // (Folds up type checking code.)
2155 assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2156 return TypeInt::make(klass->super_check_offset());
2157 }
2158 if (UseCompactObjectHeaders) {
2159 if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
2160 // The field is Klass::_prototype_header. Return its (constant) value.
2161 assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
2162 return TypeX::make(klass->prototype_header());
2163 }
2164 }
2165 // Compute index into primary_supers array
2166 juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2167 // Check for overflow; use an unsigned compare to handle the negative case.
2168 if( depth < ciKlass::primary_super_limit() ) {
2169 // The field is an element of Klass::_primary_supers. Return its (constant) value.
2170 // (Folds up type checking code.)
2171 assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2172 ciKlass *ss = klass->super_of_depth(depth);
2173 return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR;
2174 }
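// Illustrative arithmetic (offsets are VM-internal; values here hypothetical):
// with 8-byte Klass* entries, a load at primary_supers_offset + 16 yields
//   depth = 16 / sizeof(Klass*) = 2
// and folds to the constant supertype at depth 2 of the exact klass.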
2175 const Type* aift = load_array_final_field(tkls, klass);
2176 if (aift != nullptr) return aift;
2177 }
2178
2216 // Note: When interfaces are reliable, we can narrow the interface
2217 // test to (klass != Serializable && klass != Cloneable).
2218 assert(Opcode() == Op_LoadI, "must load an int from _layout_helper");
2219 jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
2220 // The key property of this type is that it folds up tests
2221 // for array-ness, since it proves that the layout_helper is positive.
2222 // Thus, a generic value like the basic object layout helper works fine.
2223 return TypeInt::make(min_size, max_jint, Type::WidenMin);
2224 }
2225 }
2226
2227 bool is_vect = (_type->isa_vect() != nullptr);
2228 if (is_instance && !is_vect) {
2229 // If we have an instance type and our memory input is the
2230 // program's initial memory state, there is no matching store,
2231 // so just return a zero of the appropriate type -
2232 // except if it is vectorized - then we have no zero constant.
2233 Node *mem = in(MemNode::Memory);
2234 if (mem->is_Parm() && mem->in(0)->is_Start()) {
2235 assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2236 return Type::get_zero_type(_type->basic_type());
2237 }
2238 }
2239
2240 if (!UseCompactObjectHeaders) {
2241 Node* alloc = is_new_object_mark_load();
2242 if (alloc != nullptr) {
2243 return TypeX::make(markWord::prototype().value());
2244 }
2245 }
2246
2247 return _type;
2248 }
2249
2250 //------------------------------match_edge-------------------------------------
2251 // Do we Match on this edge index or not? Match only the address.
2252 uint LoadNode::match_edge(uint idx) const {
2253 return idx == MemNode::Address;
2254 }
2255
2256 //--------------------------LoadBNode::Ideal--------------------------------------
2257 //
2258 // If the previous store is to the same address as this load,
2259 // and the value stored was larger than a byte, replace this load
2260 // with the value stored truncated to a byte. If no truncation is
2261 // needed, the replacement is done in LoadNode::Identity().
2262 //
2263 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2372 }
2373 }
2374 // Identity call will handle the case where truncation is not needed.
2375 return LoadNode::Ideal(phase, can_reshape);
2376 }
2377
2378 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2379 Node* mem = in(MemNode::Memory);
2380 Node* value = can_see_stored_value(mem,phase);
2381 if (value != nullptr && value->is_Con() &&
2382 !value->bottom_type()->higher_equal(_type)) {
2383 // If the input to the store does not fit with the load's result type,
2384 // it must be truncated. We can't delay this until the Ideal call, since
2385 // a singleton Value is needed for the split_thru_phi optimization.
2386 int con = value->get_int();
2387 return TypeInt::make((con << 16) >> 16);
2388 }
2389 return LoadNode::Value(phase);
2390 }
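// Worked example (hypothetical constant): if an int store of con = 0x1ABCD
// feeds this LoadS, the folded value is ((0x1ABCD << 16) >> 16), i.e. the
// low 16 bits sign-extended: 0xABCD -> -21555.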
2391
2392 //=============================================================================
2393 //----------------------------LoadKlassNode::make------------------------------
2394 // Polymorphic factory method:
2395 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
2396 // sanity check the alias category against the created node type
2397 const TypePtr* adr_type = adr->bottom_type()->isa_ptr();
2398 assert(adr_type != nullptr, "expecting TypeKlassPtr");
2399 #ifdef _LP64
2400 if (adr_type->is_ptr_to_narrowklass()) {
2401 assert(UseCompressedClassPointers, "no compressed klasses");
2402 Node* load_klass = gvn.transform(new LoadNKlassNode(mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2403 return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2404 }
2405 #endif
2406 assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should not be narrow here");
2407 return new LoadKlassNode(mem, adr, at, tk, MemNode::unordered);
2408 }
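// Sketch of the two shapes this factory produces (schematic):
//   with compressed class pointers: DecodeNKlass(LoadNKlass(mem, adr))
//   otherwise:                      LoadKlass(mem, adr)
// so callers always receive a full-width klass pointer either way.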
2409
2410 //------------------------------Value------------------------------------------
2411 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
2416 // Either input is TOP ==> the result is TOP
2417 const Type *t1 = phase->type( in(MemNode::Memory) );
2418 if (t1 == Type::TOP) return Type::TOP;
2419 Node *adr = in(MemNode::Address);
2420 const Type *t2 = phase->type( adr );
2421 if (t2 == Type::TOP) return Type::TOP;
2422 const TypePtr *tp = t2->is_ptr();
2423 if (TypePtr::above_centerline(tp->ptr()) ||
2424 tp->ptr() == TypePtr::Null) return Type::TOP;
2425
2426 // Return a more precise klass, if possible
2427 const TypeInstPtr *tinst = tp->isa_instptr();
2428 if (tinst != nullptr) {
2429 ciInstanceKlass* ik = tinst->instance_klass();
2430 int offset = tinst->offset();
2431 if (ik == phase->C->env()->Class_klass()
2432 && (offset == java_lang_Class::klass_offset() ||
2433 offset == java_lang_Class::array_klass_offset())) {
2434 // We are loading a special hidden field from a Class mirror object,
2435 // the field which points to the VM's Klass metaobject.
2436 ciType* t = tinst->java_mirror_type();
2437 // java_mirror_type returns non-null for compile-time Class constants.
2438 if (t != nullptr) {
2439 // constant oop => constant klass
2440 if (offset == java_lang_Class::array_klass_offset()) {
2441 if (t->is_void()) {
2442 // We cannot create a void array. Since void is a primitive type, return a null
2443 // klass. Users of this result need to do a null check on the returned klass.
2444 return TypePtr::NULL_PTR;
2445 }
2446 return TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
2447 }
2448 if (!t->is_klass()) {
2449 // a primitive Class (e.g., int.class) has null for a klass field
2450 return TypePtr::NULL_PTR;
2451 }
2452 // Fold up the load of the hidden field
2453 return TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
2454 }
2455 // non-constant mirror, so we can't tell what's going on
2456 }
2457 if (!tinst->is_loaded())
2458 return _type; // Bail out if not loaded
2459 if (offset == oopDesc::klass_offset_in_bytes()) {
2460 return tinst->as_klass_type(true);
2461 }
2462 }
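// Concretely (illustrative): if adr is the hidden klass field of the constant
// mirror String.class, this load folds to the exact TypeKlassPtr of
// java.lang.String; for a primitive mirror such as int.class it folds to null.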
2463
2464 // Check for loading klass from an array
2465 const TypeAryPtr *tary = tp->isa_aryptr();
2466 if (tary != nullptr &&
2467 tary->offset() == oopDesc::klass_offset_in_bytes()) {
2468 return tary->as_klass_type(true);
2469 }
2470
2471 // Check for loading klass from an array klass
2472 const TypeKlassPtr *tkls = tp->isa_klassptr();
2473 if (tkls != nullptr && !StressReflectiveCode) {
2474 if (!tkls->is_loaded())
2475 return _type; // Bail out if not loaded
2476 if (tkls->isa_aryklassptr() && tkls->is_aryklassptr()->elem()->isa_klassptr() &&
2477 tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2478 // // Always returning precise element type is incorrect,
2479 // // e.g., element type could be object and array may contain strings
2480 // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2481
2482 // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2483 // according to the element type's subclassing.
2484 return tkls->is_aryklassptr()->elem()->isa_klassptr()->cast_to_exactness(tkls->klass_is_exact());
2485 }
3361 }
3362 ss.print_cr("[TraceMergeStores]: with");
3363 merged_input_value->dump("\n", false, &ss);
3364 merged_store->dump("\n", false, &ss);
3365 tty->print("%s", ss.as_string());
3366 }
3367 #endif
3368
3369 //------------------------------Ideal------------------------------------------
3370 // Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x).
3371 // When a store immediately follows a relevant allocation/initialization,
3372 // try to capture it into the initialization, or hoist it above.
3373 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3374 Node* p = MemNode::Ideal_common(phase, can_reshape);
3375 if (p) return (p == NodeSentinel) ? nullptr : p;
3376
3377 Node* mem = in(MemNode::Memory);
3378 Node* address = in(MemNode::Address);
3379 Node* value = in(MemNode::ValueIn);
3380 // Back-to-back stores to same address? Fold em up. Generally
3381 // unsafe if I have intervening uses.
3382 {
3383 Node* st = mem;
3384 // If Store 'st' has more than one use, we cannot fold 'st' away.
3385 // For example, 'st' might be the final state at a conditional
3386 // return. Or, 'st' might be used by some node which is live at
3387 // the same time 'st' is live, which might be unschedulable. So,
3388 // require exactly ONE user until such time as we clone 'mem' for
3389 // each of 'mem's uses (thus making the exactly-1-user-rule hold
3390 // true).
3391 while (st->is_Store() && st->outcnt() == 1) {
3392 // Looking at a dead closed cycle of memory?
3393 assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
3394 assert(Opcode() == st->Opcode() ||
3395 st->Opcode() == Op_StoreVector ||
3396 Opcode() == Op_StoreVector ||
3397 st->Opcode() == Op_StoreVectorScatter ||
3398 Opcode() == Op_StoreVectorScatter ||
3399 phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
3400 (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
3401 (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
3402 (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
3403 "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
3404
3405 if (st->in(MemNode::Address)->eqv_uncast(address) &&
3406 st->as_Store()->memory_size() <= this->memory_size()) {
3407 Node* use = st->raw_out(0);
3408 if (phase->is_IterGVN()) {
3409 phase->is_IterGVN()->rehash_node_delayed(use);
3410 }
3411 // It's OK to do this in the parser, since DU info is always accurate,
3412 // and the parser always refers to nodes via SafePointNode maps.
3413 use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
3414 return this;
3415 }
3416 st = st->in(MemNode::Memory);
3417 }
3418 }
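// Schematic of the fold performed above (same address, no intervening uses):
//   mem1 = StoreI(mem,  adr, v1)
//   this = StoreI(mem1, adr, v2)   ==>   this = StoreI(mem, adr, v2)
// The first store becomes dead; this is only legal while mem1 has exactly
// one use, as enforced by the loop above.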
3419
3420
3421 // Capture an unaliased, unconditional, simple store into an initializer.
3519 const StoreVectorNode* store_vector = as_StoreVector();
3520 const StoreVectorNode* mem_vector = mem->as_StoreVector();
3521 const Node* store_indices = store_vector->indices();
3522 const Node* mem_indices = mem_vector->indices();
3523 const Node* store_mask = store_vector->mask();
3524 const Node* mem_mask = mem_vector->mask();
3525 // Ensure types, indices, and masks match
3526 if (store_vector->vect_type() == mem_vector->vect_type() &&
3527 ((store_indices == nullptr) == (mem_indices == nullptr) &&
3528 (store_indices == nullptr || store_indices->eqv_uncast(mem_indices))) &&
3529 ((store_mask == nullptr) == (mem_mask == nullptr) &&
3530 (store_mask == nullptr || store_mask->eqv_uncast(mem_mask)))) {
3531 result = mem;
3532 }
3533 }
3534 }
3535
3536 // Store of zero anywhere into a freshly-allocated object?
3537 // Then the store is useless.
3538 // (It must already have been captured by the InitializeNode.)
3539 if (result == this &&
3540 ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
3541 // a newly allocated object is already all-zeroes everywhere
3542 if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
3543 result = mem;
3544 }
3545
3546 if (result == this) {
3547 // the store may also apply to zero-bits in an earlier object
3548 Node* prev_mem = find_previous_store(phase);
3549 // Steps (a), (b): Walk past independent stores to find an exact match.
3550 if (prev_mem != nullptr) {
3551 Node* prev_val = can_see_stored_value(prev_mem, phase);
3552 if (prev_val != nullptr && prev_val == val) {
3553 // prev_val and val might differ by a cast; it would be good
3554 // to keep the more informative of the two.
3555 result = mem;
3556 }
3557 }
3558 }
3559 }
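// E.g. (illustrative, with ReduceFieldZeroing): for
//   p = new Point(); p.x = 0;
// the StoreI of zero is useless because the fresh object is already all
// zeroes, so the store collapses to its memory input.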
3560
3561 PhaseIterGVN* igvn = phase->is_IterGVN();
3562 if (result != this && igvn != nullptr) {
3563 MemBarNode* trailing = trailing_membar();
3564 if (trailing != nullptr) {
3565 #ifdef ASSERT
3566 const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();
4030 // Clearing a short array is faster with stores
4031 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
4032 // Already know this is a large node, do not try to ideal it
4033 if (_is_large) return nullptr;
4034
4035 const int unit = BytesPerLong;
4036 const TypeX* t = phase->type(in(2))->isa_intptr_t();
4037 if (!t) return nullptr;
4038 if (!t->is_con()) return nullptr;
4039 intptr_t raw_count = t->get_con();
4040 intptr_t size = raw_count;
4041 if (!Matcher::init_array_count_is_in_bytes) size *= unit;
4042 // Clearing nothing uses the Identity call.
4043 // Negative clears are possible on dead ClearArrays
4044 // (see jck test stmt114.stmt11402.val).
4045 if (size <= 0 || size % unit != 0) return nullptr;
4046 intptr_t count = size / unit;
4047 // Length too long; communicate this to matchers and assemblers.
4048 // Assemblers are responsible for producing fast hardware clears for it.
4049 if (size > InitArrayShortSize) {
4050 return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
4051 } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
4052 return nullptr;
4053 }
4054 if (!IdealizeClearArrayNode) return nullptr;
4055 Node *mem = in(1);
4056 if( phase->type(mem)==Type::TOP ) return nullptr;
4057 Node *adr = in(3);
4058 const Type* at = phase->type(adr);
4059 if( at==Type::TOP ) return nullptr;
4060 const TypePtr* atp = at->isa_ptr();
4061 // adjust atp to be the correct array element address type
4062 if (atp == nullptr) atp = TypePtr::BOTTOM;
4063 else atp = atp->add_offset(Type::OffsetBot);
4064 // Get base for derived pointer purposes
4065 if( adr->Opcode() != Op_AddP ) Unimplemented();
4066 Node *base = adr->in(1);
4067
4068 Node *zero = phase->makecon(TypeLong::ZERO);
4069 Node *off = phase->MakeConX(BytesPerLong);
4070 mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
4071 count--;
4072 while( count-- ) {
4073 mem = phase->transform(mem);
4074 adr = phase->transform(new AddPNode(base,adr,off));
4075 mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
4076 }
4077 return mem;
4078 }
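// Rough expansion sketch (assuming IdealizeClearArrayNode and a small
// constant count): clearing 16 bytes, i.e. count = 2 longs, becomes a short
// chain of zero stores instead of a ClearArray:
//   mem1 = StoreL(mem,  adr,                0L)
//   mem2 = StoreL(mem1, adr + BytesPerLong, 0L)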
4079
4080 //----------------------------step_through----------------------------------
4081 // Step *np through the ClearArray to its allocation's input memory edge if the
4082 // ClearArray initializes a different instance; return false if it initializes the one we are looking for.
4083 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseValues* phase) {
4084 Node* n = *np;
4085 assert(n->is_ClearArray(), "sanity");
4086 intptr_t offset;
4087 AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
4088 // This method is called only before Allocate nodes are expanded
4089 // during macro node expansion. Before that, ClearArray nodes are
4090 // generated only in PhaseMacroExpand::generate_arraycopy(), which
4091 // follows allocations.
4092 assert(alloc != nullptr, "should have allocation");
4093 if (alloc->_idx == instance_id) {
4094 // Can not bypass initialization of the instance we are looking for.
4095 return false;
4096 }
4097 // Otherwise skip it.
4098 InitializeNode* init = alloc->initialization();
4099 if (init != nullptr)
4100 *np = init->in(TypeFunc::Memory);
4101 else
4102 *np = alloc->in(TypeFunc::Memory);
4103 return true;
4104 }
4105
4106 //----------------------------clear_memory-------------------------------------
4107 // Generate code to initialize object storage to zero.
4108 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4109 intptr_t start_offset,
4110 Node* end_offset,
4111 PhaseGVN* phase) {
4112 intptr_t offset = start_offset;
4113
4114 int unit = BytesPerLong;
4115 if ((offset % unit) != 0) {
4116 Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
4117 adr = phase->transform(adr);
4118 const TypePtr* atp = TypeRawPtr::BOTTOM;
4119 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4120 mem = phase->transform(mem);
4121 offset += BytesPerInt;
4122 }
4123 assert((offset % unit) == 0, "");
4124
4125 // Initialize the remaining stuff, if any, with a ClearArray.
4126 return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
4127 }
4128
4129 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4130 Node* start_offset,
4131 Node* end_offset,
4132 PhaseGVN* phase) {
4133 if (start_offset == end_offset) {
4134 // nothing to do
4135 return mem;
4136 }
4137
4138 int unit = BytesPerLong;
4139 Node* zbase = start_offset;
4140 Node* zend = end_offset;
4141
4142 // Scale to the unit required by the CPU:
4143 if (!Matcher::init_array_count_is_in_bytes) {
4144 Node* shift = phase->intcon(exact_log2(unit));
4145 zbase = phase->transform(new URShiftXNode(zbase, shift) );
4146 zend = phase->transform(new URShiftXNode(zend, shift) );
4147 }
4148
4149 // Bulk clear double-words
4150 Node* zsize = phase->transform(new SubXNode(zend, zbase) );
4151 Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
4152 mem = new ClearArrayNode(ctl, mem, zsize, adr, false);
4153 return phase->transform(mem);
4154 }
4155
4156 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4157 intptr_t start_offset,
4158 intptr_t end_offset,
4159 PhaseGVN* phase) {
4160 if (start_offset == end_offset) {
4161 // nothing to do
4162 return mem;
4163 }
4164
4165 assert((end_offset % BytesPerInt) == 0, "odd end offset");
4166 intptr_t done_offset = end_offset;
4167 if ((done_offset % BytesPerLong) != 0) {
4168 done_offset -= BytesPerInt;
4169 }
4170 if (done_offset > start_offset) {
4171 mem = clear_memory(ctl, mem, dest,
4172 start_offset, phase->MakeConX(done_offset), phase);
4173 }
4174 if (done_offset < end_offset) { // emit the final 32-bit store
4175 Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
4176 adr = phase->transform(adr);
4177 const TypePtr* atp = TypeRawPtr::BOTTOM;
4178 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4179 mem = phase->transform(mem);
4180 done_offset += BytesPerInt;
4181 }
4182 assert(done_offset == end_offset, "");
4183 return mem;
4184 }
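// Worked example (hypothetical offsets): for start_offset = 12 and
// end_offset = 28, the overloads above cooperate as follows:
//   12 % BytesPerLong != 0  ->  emit a 4-byte zero store at 12, advance to 16
//   bulk doubleword clear   ->  ClearArray covers [16, 24)
//   28 % BytesPerLong != 0  ->  emit the final 4-byte zero store at 24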
4185
4186 //=============================================================================
4187 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
4188 : MultiNode(TypeFunc::Parms + (precedent == nullptr? 0: 1)),
4189 _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
4190 #ifdef ASSERT
4191 , _pair_idx(0)
4192 #endif
4193 {
4194 init_class_id(Class_MemBar);
4195 Node* top = C->top();
4196 init_req(TypeFunc::I_O,top);
4197 init_req(TypeFunc::FramePtr,top);
4198 init_req(TypeFunc::ReturnAdr,top);
4304 PhaseIterGVN* igvn = phase->is_IterGVN();
4305 remove(igvn);
4306 // Must return either the original node (now dead) or a new node
4307 // (Do not return a top here, since that would break the uniqueness of top.)
4308 return new ConINode(TypeInt::ZERO);
4309 }
4310 }
4311 return progress ? this : nullptr;
4312 }
4313
4314 //------------------------------Value------------------------------------------
4315 const Type* MemBarNode::Value(PhaseGVN* phase) const {
4316 if( !in(0) ) return Type::TOP;
4317 if( phase->type(in(0)) == Type::TOP )
4318 return Type::TOP;
4319 return TypeTuple::MEMBAR;
4320 }
4321
4322 //------------------------------match------------------------------------------
4323 // Construct projections for memory.
4324 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
4325 switch (proj->_con) {
4326 case TypeFunc::Control:
4327 case TypeFunc::Memory:
4328 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
4329 }
4330 ShouldNotReachHere();
4331 return nullptr;
4332 }
4333
4334 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4335 trailing->_kind = TrailingStore;
4336 leading->_kind = LeadingStore;
4337 #ifdef ASSERT
4338 trailing->_pair_idx = leading->_idx;
4339 leading->_pair_idx = leading->_idx;
4340 #endif
4341 }
4342
4343 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4344 trailing->_kind = TrailingLoadStore;
4591 return (req() > RawStores);
4592 }
4593
4594 void InitializeNode::set_complete(PhaseGVN* phase) {
4595 assert(!is_complete(), "caller responsibility");
4596 _is_complete = Complete;
4597
4598 // After this node is complete, it contains a bunch of
4599 // raw-memory initializations. There is no need for
4600 // it to have anything to do with non-raw memory effects.
4601 // Therefore, tell all non-raw users to re-optimize themselves,
4602 // after skipping the memory effects of this initialization.
4603 PhaseIterGVN* igvn = phase->is_IterGVN();
4604 if (igvn) igvn->add_users_to_worklist(this);
4605 }
4606
4607 // convenience function
4608 // return false if the init contains any stores already
4609 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
4610 InitializeNode* init = initialization();
4611 if (init == nullptr || init->is_complete()) return false;
4612 init->remove_extra_zeroes();
4613 // for now, if this allocation has already collected any inits, bail:
4614 if (init->is_non_zero()) return false;
4615 init->set_complete(phase);
4616 return true;
4617 }
4618
4619 void InitializeNode::remove_extra_zeroes() {
4620 if (req() == RawStores) return;
4621 Node* zmem = zero_memory();
4622 uint fill = RawStores;
4623 for (uint i = fill; i < req(); i++) {
4624 Node* n = in(i);
4625 if (n->is_top() || n == zmem) continue; // skip
4626 if (fill < i) set_req(fill, n); // compact
4627 ++fill;
4628 }
4629 // delete any empty spaces created:
4630 while (fill < req()) {
4631 del_req(fill);
4775 // store node that we'd like to capture. We need to check
4776 // the uses of the MergeMemNode.
4777 mems.push(n);
4778 }
4779 } else if (n->is_Mem()) {
4780 Node* other_adr = n->in(MemNode::Address);
4781 if (other_adr == adr) {
4782 failed = true;
4783 break;
4784 } else {
4785 const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
4786 if (other_t_adr != nullptr) {
4787 int other_alias_idx = phase->C->get_alias_index(other_t_adr);
4788 if (other_alias_idx == alias_idx) {
4789 // A load from the same memory slice as the store right
4790 // after the InitializeNode. We check the control of the
4791 // object/array that is loaded from. If it's the same as
4792 // the store control then we cannot capture the store.
4793 assert(!n->is_Store(), "2 stores to same slice on same control?");
4794 Node* base = other_adr;
4795 assert(base->is_AddP(), "should be addp but is %s", base->Name());
4796 base = base->in(AddPNode::Base);
4797 if (base != nullptr) {
4798 base = base->uncast();
4799 if (base->is_Proj() && base->in(0) == alloc) {
4800 failed = true;
4801 break;
4802 }
4803 }
4804 }
4805 }
4806 }
4807 } else {
4808 failed = true;
4809 break;
4810 }
4811 }
4812 }
4813 }
4814 if (failed) {
5361 // z's_done 12 16 16 16 12 16 12
5362 // z's_needed 12 16 16 16 16 16 16
5363 // zsize 0 0 0 0 4 0 4
5364 if (next_full_store < 0) {
5365 // Conservative tack: Zero to end of current word.
5366 zeroes_needed = align_up(zeroes_needed, BytesPerInt);
5367 } else {
5368 // Zero to beginning of next fully initialized word.
5369 // Or, don't zero at all, if we are already in that word.
5370 assert(next_full_store >= zeroes_needed, "must go forward");
5371 assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
5372 zeroes_needed = next_full_store;
5373 }
5374 }
5375
5376 if (zeroes_needed > zeroes_done) {
5377 intptr_t zsize = zeroes_needed - zeroes_done;
5378 // Do some incremental zeroing on rawmem, in parallel with inits.
5379 zeroes_done = align_down(zeroes_done, BytesPerInt);
5380 rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5381 zeroes_done, zeroes_needed,
5382 phase);
5383 zeroes_done = zeroes_needed;
5384 if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
5385 do_zeroing = false; // leave the hole, next time
5386 }
5387 }
5388
5389 // Collect the store and move on:
5390 phase->replace_input_of(st, MemNode::Memory, inits);
5391 inits = st; // put it on the linearized chain
5392 set_req(i, zmem); // unhook from previous position
5393
5394 if (zeroes_done == st_off)
5395 zeroes_done = next_init_off;
5396
5397 assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
5398
5399 #ifdef ASSERT
5400 // Various order invariants. Weaker than stores_are_sane because
5420 remove_extra_zeroes(); // clear out all the zmems left over
5421 add_req(inits);
5422
5423 if (!(UseTLAB && ZeroTLAB)) {
5424 // If anything remains to be zeroed, zero it all now.
5425 zeroes_done = align_down(zeroes_done, BytesPerInt);
5426 // if it is the last unused 4 bytes of an instance, forget about it
5427 intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
5428 if (zeroes_done + BytesPerLong >= size_limit) {
5429 AllocateNode* alloc = allocation();
5430 assert(alloc != nullptr, "must be present");
5431 if (alloc != nullptr && alloc->Opcode() == Op_Allocate) {
5432 Node* klass_node = alloc->in(AllocateNode::KlassNode);
5433 ciKlass* k = phase->type(klass_node)->is_instklassptr()->instance_klass();
5434 if (zeroes_done == k->layout_helper())
5435 zeroes_done = size_limit;
5436 }
5437 }
5438 if (zeroes_done < size_limit) {
5439 rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5440 zeroes_done, size_in_bytes, phase);
5441 }
5442 }
5443
5444 set_complete(phase);
5445 return rawmem;
5446 }
5447
5448
5449 #ifdef ASSERT
5450 bool InitializeNode::stores_are_sane(PhaseValues* phase) {
5451 if (is_complete())
5452 return true; // stores could be anything at this point
5453 assert(allocation() != nullptr, "must be present");
5454 intptr_t last_off = allocation()->minimum_header_size();
5455 for (uint i = InitializeNode::RawStores; i < req(); i++) {
5456 Node* st = in(i);
5457 intptr_t st_off = get_store_offset(st, phase);
5458 if (st_off < 0) continue; // ignore dead garbage
5459 if (last_off > st_off) {
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "ci/ciFlatArrayKlass.hpp"
27 #include "classfile/javaClasses.hpp"
28 #include "classfile/systemDictionary.hpp"
29 #include "compiler/compileLog.hpp"
30 #include "gc/shared/barrierSet.hpp"
31 #include "gc/shared/c2/barrierSetC2.hpp"
32 #include "gc/shared/tlab_globals.hpp"
33 #include "memory/allocation.inline.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "oops/objArrayKlass.hpp"
36 #include "opto/addnode.hpp"
37 #include "opto/arraycopynode.hpp"
38 #include "opto/cfgnode.hpp"
39 #include "opto/regalloc.hpp"
40 #include "opto/compile.hpp"
41 #include "opto/connode.hpp"
42 #include "opto/convertnode.hpp"
43 #include "opto/inlinetypenode.hpp"
44 #include "opto/loopnode.hpp"
45 #include "opto/machnode.hpp"
46 #include "opto/matcher.hpp"
47 #include "opto/memnode.hpp"
48 #include "opto/mempointer.hpp"
49 #include "opto/mulnode.hpp"
50 #include "opto/narrowptrnode.hpp"
51 #include "opto/phaseX.hpp"
52 #include "opto/regmask.hpp"
53 #include "opto/rootnode.hpp"
54 #include "opto/traceMergeStoresTag.hpp"
55 #include "opto/vectornode.hpp"
56 #include "utilities/align.hpp"
57 #include "utilities/copy.hpp"
58 #include "utilities/macros.hpp"
59 #include "utilities/powerOfTwo.hpp"
60 #include "utilities/vmError.hpp"
61
62 // Portions of code courtesy of Clifford Click
63
219 bool is_instance = t_oop->is_known_instance_field();
220 PhaseIterGVN *igvn = phase->is_IterGVN();
221 if (is_instance && igvn != nullptr && result->is_Phi()) {
222 PhiNode *mphi = result->as_Phi();
223 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
224 const TypePtr *t = mphi->adr_type();
225 bool do_split = false;
226 // In the following cases, the Load's memory input can be further optimized
227 // based on its precise address type
228 if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ) {
229 do_split = true;
230 } else if (t->isa_oopptr() && !t->is_oopptr()->is_known_instance()) {
231 const TypeOopPtr* mem_t =
232 t->is_oopptr()->cast_to_exactness(true)
233 ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
234 ->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
235 if (t_oop->isa_aryptr()) {
236 mem_t = mem_t->is_aryptr()
237 ->cast_to_stable(t_oop->is_aryptr()->is_stable())
238 ->cast_to_size(t_oop->is_aryptr()->size())
239 ->cast_to_not_flat(t_oop->is_aryptr()->is_not_flat())
240 ->cast_to_not_null_free(t_oop->is_aryptr()->is_not_null_free())
241 ->with_offset(t_oop->is_aryptr()->offset())
242 ->is_aryptr();
243 }
244 do_split = mem_t == t_oop;
245 }
246 if (do_split) {
247 // clone the Phi with our address type
248 result = mphi->split_out_instance(t_adr, igvn);
249 } else {
250 assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
251 }
252 }
253 return result;
254 }
255
256 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
257 uint alias_idx = phase->C->get_alias_index(tp);
258 Node *mem = mmem;
259 #ifdef ASSERT
260 {
261 // Check that current type is consistent with the alias index used during graph construction
262 assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
263 bool consistent = adr_check == nullptr || adr_check->empty() ||
264 phase->C->must_alias(adr_check, alias_idx );
265 // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
266 if( !consistent && adr_check != nullptr && !adr_check->empty() &&
267 tp->isa_aryptr() && tp->offset() == Type::OffsetBot &&
268 adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
269 ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
270 adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
271 adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
272 // don't assert if it is dead code.
273 consistent = true;
274 }
275 if( !consistent ) {
276 st->print("alias_idx==%d, adr_check==", alias_idx);
277 if( adr_check == nullptr ) {
278 st->print("null");
279 } else {
280 adr_check->dump();
281 }
282 st->cr();
283 print_alias_types();
284 assert(consistent, "adr_check must match alias idx");
285 }
286 }
287 #endif
1000 Node* ld = gvn.transform(load);
1001 return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
1002 }
1003
1004 return load;
1005 }
1006
1007 //------------------------------hash-------------------------------------------
1008 uint LoadNode::hash() const {
1009 // unroll addition of interesting fields
1010 return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
1011 }
1012
1013 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
1014 if ((atp != nullptr) && (atp->index() >= Compile::AliasIdxRaw)) {
1015 bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
1016 bool is_stable_ary = FoldStableValues &&
1017 (tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
1018 tp->isa_aryptr()->is_stable();
1019
1020 return (eliminate_boxing && non_volatile) || is_stable_ary || tp->is_inlinetypeptr();
1021 }
1022
1023 return false;
1024 }
1025
1026 LoadNode* LoadNode::pin_array_access_node() const {
1027 const TypePtr* adr_type = this->adr_type();
1028 if (adr_type != nullptr && adr_type->isa_aryptr()) {
1029 return clone_pinned();
1030 }
1031 return nullptr;
1032 }
1033
1034 // Is the value loaded previously stored by an arraycopy? If so, return
1035 // a load node that reads from the source array so we may be able to
1036 // optimize out the ArrayCopy node later.
1037 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
1038 Node* ld_adr = in(MemNode::Address);
1039 intptr_t ld_off = 0;
1040 AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
1057 assert(ld_alloc != nullptr, "need an alloc");
1058 assert(addp->is_AddP(), "address must be addp");
1059 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1060 assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1061 assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
1062 addp->set_req(AddPNode::Base, src);
1063 addp->set_req(AddPNode::Address, src);
1064 } else {
1065 assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
1066 ac->as_ArrayCopy()->is_copyof_validated() ||
1067 ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
1068 assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
1069 addp->set_req(AddPNode::Base, src);
1070 addp->set_req(AddPNode::Address, src);
1071
1072 const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
1073 BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
1074 if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
1075
1076 uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
1077 uint shift = ary_t->is_flat() ? ary_t->flat_log_elem_size() : exact_log2(type2aelembytes(ary_elem));
1078
1079 Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
1080 #ifdef _LP64
1081 diff = phase->transform(new ConvI2LNode(diff));
1082 #endif
1083 diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
1084
1085 Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
1086 addp->set_req(AddPNode::Offset, offset);
1087 }
1088 addp = phase->transform(addp);
1089 #ifdef ASSERT
1090 const TypePtr* adr_type = phase->type(addp)->is_ptr();
1091 ld->_adr_type = adr_type;
1092 #endif
1093 ld->set_req(MemNode::Address, addp);
1094 ld->set_req(0, ctl);
1095 ld->set_req(MemNode::Memory, mem);
1096 return ld;
1097 }
1098 return nullptr;
1099 }
1100
1101 static Node* see_through_inline_type(PhaseValues* phase, const MemNode* load, Node* base, int offset) {
1102 if (!load->is_mismatched_access() && base != nullptr && base->is_InlineType() && offset > oopDesc::klass_offset_in_bytes()) {
1103 InlineTypeNode* vt = base->as_InlineType();
1104 assert(!vt->is_larval(), "must not load from a larval object");
1105 Node* value = vt->field_value_by_offset(offset, true);
1106 assert(value != nullptr, "must see some value");
1107 return value;
1108 }
1109
1110 return nullptr;
1111 }
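// Sketch (schematic): when the base of the load is an InlineTypeNode carrying
// scalarized field values, the load is answered directly from that node:
//   LoadI(mem, AddP(vt, vt, #field_off))  ==>  vt->field_value_by_offset(#field_off)
// bypassing memory entirely (LoadN is excluded, see the caller below).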
1112
1113 //---------------------------can_see_stored_value------------------------------
1114 // This routine exists to make sure this set of tests is done the same
1115 // everywhere. We need to make a coordinated change: first LoadNode::Ideal
1116 // will change the graph shape in a way which makes memory alive twice at the
1117 // same time (uses the Oracle model of aliasing), then some
1118 // LoadXNode::Identity will fold things back to the equivalence-class model
1119 // of aliasing.
1120 Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
1121 Node* ld_adr = in(MemNode::Address);
1122 intptr_t ld_off = 0;
1123 Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
1124 // Try to see through an InlineTypeNode
1125 // LoadN is special because the input is not compressed
1126 if (Opcode() != Op_LoadN) {
1127 Node* value = see_through_inline_type(phase, this, ld_base, ld_off);
1128 if (value != nullptr) {
1129 return value;
1130 }
1131 }
1132
1133 Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
1134 const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
1135 Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr;
1136 // This is more general than load from boxing objects.
1137 if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
1138 uint alias_idx = atp->index();
1139 Node* result = nullptr;
1140 Node* current = st;
1141 // Skip through chains of MemBarNodes checking the MergeMems for
1142 // new states for the slice of this load. Stop once any other
1143 // kind of node is encountered. Loads from final memory can skip
1144 // through any kind of MemBar but normal loads shouldn't skip
1145 // through MemBarAcquire since that could allow them to move out of
1146 // a synchronized region. It is not safe to step over MemBarCPUOrder,
1147 // because alias info above them may be inaccurate (e.g., due to
1148 // mixed/mismatched unsafe accesses).
1149 bool is_final_mem = !atp->is_rewritable();
1150 while (current->is_Proj()) {
1151 int opc = current->in(0)->Opcode();
1152 if ((is_final_mem && (opc == Op_MemBarAcquire ||
1196 // Same base, same offset.
1197 // Possible improvement for arrays: check index value instead of absolute offset.
1198
1199 // At this point we have proven something like this setup:
1200 // B = << base >>
1201 // L = LoadQ(AddP(Check/CastPP(B), #Off))
1202 // S = StoreQ(AddP( B , #Off), V)
1203 // (Actually, we haven't yet proven the Q's are the same.)
1204 // In other words, we are loading from a casted version of
1205 // the same pointer-and-offset that we stored to.
1206 // Casted version may carry a dependency and it is respected.
1207 // Thus, we are able to replace L by V.
1208 }
1209 // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1210 if (store_Opcode() != st->Opcode()) {
1211 return nullptr;
1212 }
1213 // LoadVector/StoreVector needs additional check to ensure the types match.
1214 if (st->is_StoreVector()) {
1215 const TypeVect* in_vt = st->as_StoreVector()->vect_type();
1216 const TypeVect* out_vt = is_Load() ? as_LoadVector()->vect_type() : as_StoreVector()->vect_type();
1217 if (in_vt != out_vt) {
1218 return nullptr;
1219 }
1220 }
1221 return st->in(MemNode::ValueIn);
1222 }
1223
1224 // A load from a freshly-created object always returns zero.
1225 // (This can happen after LoadNode::Ideal resets the load's memory input
1226 // to find_captured_store, which returned InitializeNode::zero_memory.)
1227 if (st->is_Proj() && st->in(0)->is_Allocate() &&
1228 (st->in(0) == ld_alloc) &&
1229 (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1230 // return a zero value for the load's basic type
1231 // (This is one of the few places where a generic PhaseTransform
1232 // can create new nodes. Think of it as lazily manifesting
1233 // virtually pre-existing constants.)
1234 Node* init_value = ld_alloc->in(AllocateNode::InitValue);
1235 if (init_value != nullptr) {
1236 // TODO 8350865 Is this correct for non-all-zero init values? Don't we need field_value_by_offset?
1237 return init_value;
1238 }
1239 assert(ld_alloc->in(AllocateNode::RawInitValue) == nullptr, "raw init value must be null here");
1240 if (memory_type() != T_VOID) {
1241 if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
1242 // If ReduceBulkZeroing is disabled, we need to check whether the allocation belongs to an
1243 // ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is
1244 // done by the ArrayCopyNode.
1245 return phase->zerocon(memory_type());
1246 }
1247 } else {
1248 // TODO: materialize all-zero vector constant
1249 assert(!isa_Load() || as_Load()->type()->isa_vect(), "only vector loads are expected to have a T_VOID memory type");
1250 }
1251 }
1252
1253 // A load from an initialization barrier can match a captured store.
1254 if (st->is_Proj() && st->in(0)->is_Initialize()) {
1255 InitializeNode* init = st->in(0)->as_Initialize();
1256 AllocateNode* alloc = init->allocation();
1257 if ((alloc != nullptr) && (alloc == ld_alloc)) {
1258 // examine a captured store value
1259 st = init->find_captured_store(ld_off, memory_size(), phase);
1880 bool addr_mark = ((phase->type(address)->isa_oopptr() || phase->type(address)->isa_narrowoop()) &&
1881 phase->type(address)->is_ptr()->offset() == oopDesc::mark_offset_in_bytes());
1882
1883 // Skip up past a SafePoint control. Cannot do this for Stores because
1884 // pointer stores & cardmarks must stay on the same side of a SafePoint.
1885 if( ctrl != nullptr && ctrl->Opcode() == Op_SafePoint &&
1886 phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw &&
1887 !addr_mark &&
1888 (depends_only_on_test() || has_unknown_control_dependency())) {
1889 ctrl = ctrl->in(0);
1890 set_req(MemNode::Control,ctrl);
1891 progress = true;
1892 }
1893
1894 intptr_t ignore = 0;
1895 Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore);
1896 if (base != nullptr
1897 && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
1898 // Check for useless control edge in some common special cases
1899 if (in(MemNode::Control) != nullptr
1900 && !(phase->type(address)->is_inlinetypeptr() && is_mismatched_access())
1901 && can_remove_control()
1902 && phase->type(base)->higher_equal(TypePtr::NOTNULL)
1903 && all_controls_dominate(base, phase->C->start())) {
1904 // A method-invariant, non-null address (constant or 'this' argument).
1905 set_req(MemNode::Control, nullptr);
1906 progress = true;
1907 }
1908 }
1909
1910 Node* mem = in(MemNode::Memory);
1911 const TypePtr *addr_t = phase->type(address)->isa_ptr();
1912
1913 if (can_reshape && (addr_t != nullptr)) {
1914 // try to optimize our memory input
1915 Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase);
1916 if (opt_mem != mem) {
1917 set_req_X(MemNode::Memory, opt_mem, phase);
1918 if (phase->type( opt_mem ) == Type::TOP) return nullptr;
1919 return this;
1920 }
2079 }
2080 }
2081
2082 // Don't do this for integer types. There is only potential profit if
2083 // the element type t is lower than _type; that is, for int types, if _type is
2084 // more restrictive than t. This only happens here if one is short and the other
2085 // char (both 16 bits), and in those cases we've made an intentional decision
2086 // to use one kind of load over the other. See AndINode::Ideal and 4965907.
2087 // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
2088 //
2089 // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
2090 // where the _gvn.type of the AddP is wider than 8. This occurs when an earlier
2091 // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
2092 // subsumed by p1. If p1 is on the worklist but has not yet been re-transformed,
2093 // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
2094 // In fact, that could have been the original type of p1, and p1 could have
2095 // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
2096 // expression (LShiftL quux 3) independently optimized to the constant 8.
2097 if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
2098 && (_type->isa_vect() == nullptr)
2099 && !ary->is_flat()
2100 && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
2101 // t might actually be lower than _type, if _type is a unique
2102 // concrete subclass of abstract class t.
2103 if (off_beyond_header || off == Type::OffsetBot) { // is the offset beyond the header?
2104 const Type* jt = t->join_speculative(_type);
2105 // In any case, do not allow the join, per se, to empty out the type.
2106 if (jt->empty() && !t->empty()) {
2107 // This can happen if an interface-typed array narrows to a class type.
2108 jt = _type;
2109 }
2110 #ifdef ASSERT
2111 if (phase->C->eliminate_boxing() && adr->is_AddP()) {
2112 // The pointers in the autobox arrays are always non-null
2113 Node* base = adr->in(AddPNode::Base);
2114 if ((base != nullptr) && base->is_DecodeN()) {
2115 // Get LoadN node which loads IntegerCache.cache field
2116 base = base->in(1);
2117 }
2118 if ((base != nullptr) && base->is_Con()) {
2119 const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
2120 if ((base_type != nullptr) && base_type->is_autobox_cache()) {
2121 // It could be a narrow oop
2122 assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
2123 }
2124 }
2125 }
2126 #endif
2127 return jt;
2128 }
2129 }
2130 } else if (tp->base() == Type::InstPtr) {
2131 assert( off != Type::OffsetBot ||
2132 // arrays can be cast to Objects
2133 !tp->isa_instptr() ||
2134 tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
2135 // Default value load
2136 tp->is_instptr()->instance_klass() == ciEnv::current()->Class_klass() ||
2137 // unsafe field access may not have a constant offset
2138 C->has_unsafe_access(),
2139 "Field accesses must be precise" );
2140 // For oop loads, we expect the _type to be precise.
2141
2142 const TypeInstPtr* tinst = tp->is_instptr();
2143 BasicType bt = memory_type();
2144
2145 // Optimize loads from constant fields.
2146 ciObject* const_oop = tinst->const_oop();
2147 if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
2148 const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt);
2149 if (con_type != nullptr) {
2150 return con_type;
2151 }
2152 }
2153 } else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
2154 assert(off != Type::OffsetBot ||
2155 !tp->isa_instklassptr() ||
2156 // arrays can be cast to Objects
2157 tp->isa_instklassptr()->instance_klass()->is_java_lang_Object() ||
2158 // also allow array-loading from the primary supertype
2159 // array during subtype checks
2160 Opcode() == Op_LoadKlass,
2161 "Field accesses must be precise");
2162 // For klass/static loads, we expect the _type to be precise
2163 } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
2164 /* With mirrors being an indirect reference in the Klass*,
2165 * the VM now uses two loads: LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset)).
2166 * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
2167 *
2168 * So check the type and klass of the node before the LoadP.
2175 assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2176 assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
2177 return TypeInstPtr::make(klass->java_mirror());
2178 }
2179 }
2180 }
2181
2182 const TypeKlassPtr *tkls = tp->isa_klassptr();
2183 if (tkls != nullptr) {
2184 if (tkls->is_loaded() && tkls->klass_is_exact()) {
2185 ciKlass* klass = tkls->exact_klass();
2186 // We are loading a field from a Klass metaobject whose identity
2187 // is known at compile time (the type is "exact" or "precise").
2188 // Check for fields we know are maintained as constants by the VM.
2189 if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
2190 // The field is Klass::_super_check_offset. Return its (constant) value.
2191 // (Folds up type checking code.)
2192 assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
2193 return TypeInt::make(klass->super_check_offset());
2194 }
2195 if (UseCompactObjectHeaders) { // TODO: Should EnableValhalla also take this path?
2196 if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) {
2197 // The field is Klass::_prototype_header. Return its (constant) value.
2198 assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header");
2199 return TypeX::make(klass->prototype_header());
2200 }
2201 }
2202 // Compute index into primary_supers array
2203 juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
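// For illustration: with 8-byte Klass* entries, a tkls->offset() that is
// 16 bytes past _primary_supers selects the constant entry at depth == 2.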
2204 // Check for overflowing; use unsigned compare to handle the negative case.
2205 if( depth < ciKlass::primary_super_limit() ) {
2206 // The field is an element of Klass::_primary_supers. Return its (constant) value.
2207 // (Folds up type checking code.)
2208 assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2209 ciKlass *ss = klass->super_of_depth(depth);
2210 return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR;
2211 }
2212 const Type* aift = load_array_final_field(tkls, klass);
2213 if (aift != nullptr) return aift;
2214 }
2215
2253 // Note: When interfaces are reliable, we can narrow the interface
2254 // test to (klass != Serializable && klass != Cloneable).
2255 assert(Opcode() == Op_LoadI, "must load an int from _layout_helper");
2256 jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
2257 // The key property of this type is that it folds up tests
2258 // for array-ness, since it proves that the layout_helper is positive.
2259 // Thus, a generic value like the basic object layout helper works fine.
2260 return TypeInt::make(min_size, max_jint, Type::WidenMin);
2261 }
2262 }
2263
2264 bool is_vect = (_type->isa_vect() != nullptr);
2265 if (is_instance && !is_vect) {
2266 // If we have an instance type and our memory input is the
2267 // program's initial memory state, there is no matching store,
2268 // so just return a zero of the appropriate type, unless the load
2269 // is vectorized, in which case we have no zero constant.
2270 Node *mem = in(MemNode::Memory);
2271 if (mem->is_Parm() && mem->in(0)->is_Start()) {
2272 assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
2273 // TODO 8350865 This is needed for flat array accesses, somehow the memory of the loads bypasses the intrinsic
2274 // Run TestArrays.test6 in Scenario4, we need more tests for this. TestBasicFunctionality::test20 also needs this.
2275 if (tp->isa_aryptr() && tp->is_aryptr()->is_flat() && !UseFieldFlattening) {
2276 return _type;
2277 }
2278 return Type::get_zero_type(_type->basic_type());
2279 }
2280 }
2281 if (!UseCompactObjectHeaders) {
2282 Node* alloc = is_new_object_mark_load();
2283 if (alloc != nullptr) {
2284 if (EnableValhalla) {
2285 // The mark word may contain property bits (inline, flat, null-free)
2286 Node* klass_node = alloc->in(AllocateNode::KlassNode);
2287 const TypeKlassPtr* tkls = phase->type(klass_node)->isa_klassptr();
2288 if (tkls != nullptr && tkls->is_loaded() && tkls->klass_is_exact()) {
2289 return TypeX::make(tkls->exact_klass()->prototype_header());
2290 }
2291 } else {
2292 return TypeX::make(markWord::prototype().value());
2293 }
2294 }
2295 }
2296
2297 return _type;
2298 }
2299
2300 //------------------------------match_edge-------------------------------------
2301 // Do we Match on this edge index or not? Match only the address.
2302 uint LoadNode::match_edge(uint idx) const {
2303 return idx == MemNode::Address;
2304 }
2305
2306 //--------------------------LoadBNode::Ideal--------------------------------------
2307 //
2308 // If the previous store is to the same address as this load,
2309 // and the value stored was larger than a byte, replace this load
2310 // with the value stored truncated to a byte. If no truncation is
2311 // needed, the replacement is done in LoadNode::Identity().
2312 //
2313 Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2422 }
2423 }
2424 // Identity call will handle the case where truncation is not needed.
2425 return LoadNode::Ideal(phase, can_reshape);
2426 }
2427
2428 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2429 Node* mem = in(MemNode::Memory);
2430 Node* value = can_see_stored_value(mem,phase);
2431 if (value != nullptr && value->is_Con() &&
2432 !value->bottom_type()->higher_equal(_type)) {
2433 // If the input to the store does not fit with the load's result type,
2434 // it must be truncated. We can't delay until Ideal call since
2435 // a singleton Value is needed for split_thru_phi optimization.
2436 int con = value->get_int();
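// Sign-extend the low 16 bits: e.g. con == 0x1ABCD shifts to 0xABCD0000 and
// arithmetic-shifts back to 0xFFFFABCD, i.e. (jshort)0xABCD == -21555.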
2437 return TypeInt::make((con << 16) >> 16);
2438 }
2439 return LoadNode::Value(phase);
2440 }
2441
2442 Node* LoadNNode::Ideal(PhaseGVN* phase, bool can_reshape) {
2443 // Loading from an InlineType, find the input and make an EncodeP
2444 Node* addr = in(Address);
2445 intptr_t offset;
2446 Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset);
2447 Node* value = see_through_inline_type(phase, this, base, offset);
2448 if (value != nullptr) {
2449 return new EncodePNode(value, type());
2450 }
2451
2452 return LoadNode::Ideal(phase, can_reshape);
2453 }
2454
2455 //=============================================================================
2456 //----------------------------LoadKlassNode::make------------------------------
2457 // Polymorphic factory method:
2458 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
2459 // sanity check the alias category against the created node type
2460 const TypePtr* adr_type = adr->bottom_type()->isa_ptr();
2461 assert(adr_type != nullptr, "expecting a pointer address type");
2462 #ifdef _LP64
2463 if (adr_type->is_ptr_to_narrowklass()) {
2464 assert(UseCompressedClassPointers, "no compressed klasses");
2465 Node* load_klass = gvn.transform(new LoadNKlassNode(mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2466 return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2467 }
2468 #endif
2469 assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "narrow klass pointers should have been handled above");
2470 return new LoadKlassNode(mem, adr, at, tk, MemNode::unordered);
2471 }
2472
2473 //------------------------------Value------------------------------------------
2474 const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
2479 // Either input is TOP ==> the result is TOP
2480 const Type *t1 = phase->type( in(MemNode::Memory) );
2481 if (t1 == Type::TOP) return Type::TOP;
2482 Node *adr = in(MemNode::Address);
2483 const Type *t2 = phase->type( adr );
2484 if (t2 == Type::TOP) return Type::TOP;
2485 const TypePtr *tp = t2->is_ptr();
2486 if (TypePtr::above_centerline(tp->ptr()) ||
2487 tp->ptr() == TypePtr::Null) return Type::TOP;
2488
2489 // Return a more precise klass, if possible
2490 const TypeInstPtr *tinst = tp->isa_instptr();
2491 if (tinst != nullptr) {
2492 ciInstanceKlass* ik = tinst->instance_klass();
2493 int offset = tinst->offset();
2494 if (ik == phase->C->env()->Class_klass()
2495 && (offset == java_lang_Class::klass_offset() ||
2496 offset == java_lang_Class::array_klass_offset())) {
2497 // We are loading a special hidden field from a Class mirror object,
2498 // the field which points to the VM's Klass metaobject.
2499 bool is_null_free_array = false;
2500 ciType* t = tinst->java_mirror_type(&is_null_free_array);
2501 // java_mirror_type returns non-null for compile-time Class constants.
2502 if (t != nullptr) {
2503 // constant oop => constant klass
2504 if (offset == java_lang_Class::array_klass_offset()) {
2505 if (t->is_void()) {
2506 // We cannot create a void array. Since void is a primitive type, return the null
2507 // klass. Users of this result need to do a null check on the returned klass.
2508 return TypePtr::NULL_PTR;
2509 }
2510 const TypeKlassPtr* tklass = TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
2511 if (is_null_free_array) {
2512 tklass = tklass->is_aryklassptr()->cast_to_null_free();
2513 }
2514 return tklass;
2515 }
2516 if (!t->is_klass()) {
2517 // a primitive Class (e.g., int.class) has null for a klass field
2518 return TypePtr::NULL_PTR;
2519 }
2520 // Fold up the load of the hidden field
2521 const TypeKlassPtr* tklass = TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
2522 if (is_null_free_array) {
2523 tklass = tklass->is_aryklassptr()->cast_to_null_free();
2524 }
2525 return tklass;
2526 }
2527 // non-constant mirror, so we can't tell what's going on
2528 }
2529 if (!tinst->is_loaded())
2530 return _type; // Bail out if not loaded
2531 if (offset == oopDesc::klass_offset_in_bytes()) {
2532 return tinst->as_klass_type(true);
2533 }
2534 }
2535
2536 // Check for loading klass from an array
2537 const TypeAryPtr* tary = tp->isa_aryptr();
2538 if (tary != nullptr &&
2539 tary->offset() == oopDesc::klass_offset_in_bytes()) {
2540 return tary->as_klass_type(true);
2541 }
2542
2543 // Check for loading klass from an array klass
2544 const TypeKlassPtr *tkls = tp->isa_klassptr();
2545 if (tkls != nullptr && !StressReflectiveCode) {
2546 if (!tkls->is_loaded())
2547 return _type; // Bail out if not loaded
2548 if (tkls->isa_aryklassptr() && tkls->is_aryklassptr()->elem()->isa_klassptr() &&
2549 tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2550 // // Always returning precise element type is incorrect,
2551 // // e.g., element type could be object and array may contain strings
2552 // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2553
2554 // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2555 // according to the element type's subclassing.
2556 return tkls->is_aryklassptr()->elem()->isa_klassptr()->cast_to_exactness(tkls->klass_is_exact());
2557 }
3433 }
3434 ss.print_cr("[TraceMergeStores]: with");
3435 merged_input_value->dump("\n", false, &ss);
3436 merged_store->dump("\n", false, &ss);
3437 tty->print("%s", ss.as_string());
3438 }
3439 #endif
3440
3441 //------------------------------Ideal------------------------------------------
3442 // Change back-to-back Store(Store(m, p, y), p, x) to Store(m, p, x): the older store of y to the same address is dead.
3443 // When a store immediately follows a relevant allocation/initialization,
3444 // try to capture it into the initialization, or hoist it above.
3445 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3446 Node* p = MemNode::Ideal_common(phase, can_reshape);
3447 if (p) return (p == NodeSentinel) ? nullptr : p;
3448
3449 Node* mem = in(MemNode::Memory);
3450 Node* address = in(MemNode::Address);
3451 Node* value = in(MemNode::ValueIn);
3452 // Back-to-back stores to same address? Fold em up. Generally
3453 // unsafe if I have intervening uses...
3454 if (phase->C->get_adr_type(phase->C->get_alias_index(adr_type())) != TypeAryPtr::INLINES) {
3455 Node* st = mem;
3456 // If Store 'st' has more than one use, we cannot fold 'st' away.
3457 // For example, 'st' might be the final state at a conditional
3458 // return. Or, 'st' might be used by some node which is live at
3459 // the same time 'st' is live, which might be unschedulable. So,
3460 // require exactly ONE user until such time as we clone 'mem' for
3461 // each of 'mem's uses (thus making the exactly-1-user-rule hold
3462 // true).
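// For illustration: StoreI(StoreI(mem, p, y), p, x), where the inner store's
// only use is the outer one, folds to StoreI(mem, p, x): y is overwritten
// before it can be observed.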
3463 while (st->is_Store() && st->outcnt() == 1) {
3464 // Looking at a dead closed cycle of memory?
3465 assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
3466 assert(Opcode() == st->Opcode() ||
3467 st->Opcode() == Op_StoreVector ||
3468 Opcode() == Op_StoreVector ||
3469 st->Opcode() == Op_StoreVectorScatter ||
3470 Opcode() == Op_StoreVectorScatter ||
3471 phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
3472 (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
3473 (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
3474 (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) ||
3475 (st->adr_type()->isa_aryptr() && st->adr_type()->is_aryptr()->is_flat()) || // TODO 8343835
3476 (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
3477 "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
3478
3479 if (st->in(MemNode::Address)->eqv_uncast(address) &&
3480 st->as_Store()->memory_size() <= this->memory_size()) {
3481 Node* use = st->raw_out(0);
3482 if (phase->is_IterGVN()) {
3483 phase->is_IterGVN()->rehash_node_delayed(use);
3484 }
3485 // It's OK to do this in the parser, since DU info is always accurate,
3486 // and the parser always refers to nodes via SafePointNode maps.
3487 use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase);
3488 return this;
3489 }
3490 st = st->in(MemNode::Memory);
3491 }
3492 }
3493
3494
3495 // Capture an unaliased, unconditional, simple store into an initializer.
3593 const StoreVectorNode* store_vector = as_StoreVector();
3594 const StoreVectorNode* mem_vector = mem->as_StoreVector();
3595 const Node* store_indices = store_vector->indices();
3596 const Node* mem_indices = mem_vector->indices();
3597 const Node* store_mask = store_vector->mask();
3598 const Node* mem_mask = mem_vector->mask();
3599 // Ensure types, indices, and masks match
3600 if (store_vector->vect_type() == mem_vector->vect_type() &&
3601 ((store_indices == nullptr) == (mem_indices == nullptr) &&
3602 (store_indices == nullptr || store_indices->eqv_uncast(mem_indices))) &&
3603 ((store_mask == nullptr) == (mem_mask == nullptr) &&
3604 (store_mask == nullptr || store_mask->eqv_uncast(mem_mask)))) {
3605 result = mem;
3606 }
3607 }
3608 }
3609
3610 // Store of zero anywhere into a freshly-allocated object?
3611 // Then the store is useless.
3612 // (It must already have been captured by the InitializeNode.)
3613 if (result == this && ReduceFieldZeroing) {
3614 // a newly allocated object is already all-zeroes everywhere
3615 if (mem->is_Proj() && mem->in(0)->is_Allocate() &&
3616 (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::InitValue) == val)) {
3617 result = mem;
3618 }
3619
3620 if (result == this && phase->type(val)->is_zero_type()) {
3621 // the store may also apply to zero-bits in an earlier object
3622 Node* prev_mem = find_previous_store(phase);
3623 // Steps (a), (b): Walk past independent stores to find an exact match.
3624 if (prev_mem != nullptr) {
3625 Node* prev_val = can_see_stored_value(prev_mem, phase);
3626 if (prev_val != nullptr && prev_val == val) {
3627 // prev_val and val might differ by a cast; it would be good
3628 // to keep the more informative of the two.
3629 result = mem;
3630 }
3631 }
3632 }
3633 }
3634
3635 PhaseIterGVN* igvn = phase->is_IterGVN();
3636 if (result != this && igvn != nullptr) {
3637 MemBarNode* trailing = trailing_membar();
3638 if (trailing != nullptr) {
3639 #ifdef ASSERT
3640 const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();
4104 // Clearing a short array is faster with stores
4105 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
4106 // Already know this is a large node, do not try to ideal it
4107 if (_is_large) return nullptr;
4108
4109 const int unit = BytesPerLong;
4110 const TypeX* t = phase->type(in(2))->isa_intptr_t();
4111 if (!t) return nullptr;
4112 if (!t->is_con()) return nullptr;
4113 intptr_t raw_count = t->get_con();
4114 intptr_t size = raw_count;
4115 if (!Matcher::init_array_count_is_in_bytes) size *= unit;
4116 // Clearing nothing uses the Identity call.
4117 // Negative clears are possible on dead ClearArrays
4118 // (see jck test stmt114.stmt11402.val).
4119 if (size <= 0 || size % unit != 0) return nullptr;
4120 intptr_t count = size / unit;
4121 // Length too long; communicate this to matchers and assemblers.
4122 // Assemblers are responsible for producing fast hardware clears for it.
4123 if (size > InitArrayShortSize) {
4124 return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true);
4125 } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
4126 return nullptr;
4127 }
4128 if (!IdealizeClearArrayNode) return nullptr;
4129 Node *mem = in(1);
4130 if( phase->type(mem)==Type::TOP ) return nullptr;
4131 Node *adr = in(3);
4132 const Type* at = phase->type(adr);
4133 if( at==Type::TOP ) return nullptr;
4134 const TypePtr* atp = at->isa_ptr();
4135 // adjust atp to be the correct array element address type
4136 if (atp == nullptr) atp = TypePtr::BOTTOM;
4137 else atp = atp->add_offset(Type::OffsetBot);
4138 // Get base for derived pointer purposes
4139 if( adr->Opcode() != Op_AddP ) Unimplemented();
4140 Node *base = adr->in(1);
4141
4142 Node *val = in(4);
4143 Node *off = phase->MakeConX(BytesPerLong);
4144 mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
4145 count--;
4146 while( count-- ) {
4147 mem = phase->transform(mem);
4148 adr = phase->transform(new AddPNode(base,adr,off));
4149 mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
4150 }
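// For illustration: a constant 24-byte clear (count == 3) that reaches the
// expansion above becomes three StoreL nodes at offsets +0, +8 and +16 from
// the start address.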
4151 return mem;
4152 }
4153
4154 //----------------------------step_through----------------------------------
4155 // Advance *np past the ClearArray and return true if it initializes a different
4156 // instance; return false if it initializes the one we are looking for.
4157 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseValues* phase) {
4158 Node* n = *np;
4159 assert(n->is_ClearArray(), "sanity");
4160 intptr_t offset;
4161 AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
4162 // This method is called only before Allocate nodes are expanded
4163 // during macro node expansion. Before that, ClearArray nodes are
4164 // only generated in PhaseMacroExpand::generate_arraycopy() (before
4165 // Allocate nodes are expanded), which follows allocations.
4166 assert(alloc != nullptr, "should have allocation");
4167 if (alloc->_idx == instance_id) {
4168 // Cannot bypass initialization of the instance we are looking for.
4169 return false;
4170 }
4171 // Otherwise skip it.
4172 InitializeNode* init = alloc->initialization();
4173 if (init != nullptr)
4174 *np = init->in(TypeFunc::Memory);
4175 else
4176 *np = alloc->in(TypeFunc::Memory);
4177 return true;
4178 }
4179
4180 //----------------------------clear_memory-------------------------------------
4181 // Generate code to initialize object storage to zero.
4182 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4183 Node* val,
4184 Node* raw_val,
4185 intptr_t start_offset,
4186 Node* end_offset,
4187 PhaseGVN* phase) {
4188 intptr_t offset = start_offset;
4189
4190 int unit = BytesPerLong;
4191 if ((offset % unit) != 0) {
4192 Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
4193 adr = phase->transform(adr);
4194 const TypePtr* atp = TypeRawPtr::BOTTOM;
4195 if (val != nullptr) {
4196 assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
4197 mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
4198 } else {
4199 assert(raw_val == nullptr, "raw_val must be null when val is null");
4200 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4201 }
4202 mem = phase->transform(mem);
4203 offset += BytesPerInt;
4204 }
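// For illustration: start_offset == 12 is not 8-byte aligned, so the single
// 4-byte store above covers [12, 16) and the bulk clear below starts at 16.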
4205 assert((offset % unit) == 0, "");
4206
4207 // Initialize the remaining stuff, if any, with a ClearArray.
4208 return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase);
4209 }
4210
4211 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4212 Node* raw_val,
4213 Node* start_offset,
4214 Node* end_offset,
4215 PhaseGVN* phase) {
4216 if (start_offset == end_offset) {
4217 // nothing to do
4218 return mem;
4219 }
4220
4221 int unit = BytesPerLong;
4222 Node* zbase = start_offset;
4223 Node* zend = end_offset;
4224
4225 // Scale to the unit required by the CPU:
4226 if (!Matcher::init_array_count_is_in_bytes) {
4227 Node* shift = phase->intcon(exact_log2(unit));
4228 zbase = phase->transform(new URShiftXNode(zbase, shift) );
4229 zend = phase->transform(new URShiftXNode(zend, shift) );
4230 }
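// For illustration: on a target where the clear count is in double-words
// (unit == 8), byte offsets [16, 48) scale to zbase == 2 and zend == 6, so
// zsize below is 4 double-words.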
4231
4232 // Bulk clear double-words
4233 Node* zsize = phase->transform(new SubXNode(zend, zbase) );
4234 Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
4235 if (raw_val == nullptr) {
4236 raw_val = phase->MakeConX(0);
4237 }
4238 mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false);
4239 return phase->transform(mem);
4240 }
4241
4242 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
4243 Node* val,
4244 Node* raw_val,
4245 intptr_t start_offset,
4246 intptr_t end_offset,
4247 PhaseGVN* phase) {
4248 if (start_offset == end_offset) {
4249 // nothing to do
4250 return mem;
4251 }
4252
4253 assert((end_offset % BytesPerInt) == 0, "odd end offset");
4254 intptr_t done_offset = end_offset;
4255 if ((done_offset % BytesPerLong) != 0) {
4256 done_offset -= BytesPerInt;
4257 }
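// For illustration: end_offset == 20 rounds done_offset down to 16; the range
// [start_offset, 16) is bulk-cleared and the final 4-byte store covers [16, 20).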
4258 if (done_offset > start_offset) {
4259 mem = clear_memory(ctl, mem, dest, val, raw_val,
4260 start_offset, phase->MakeConX(done_offset), phase);
4261 }
4262 if (done_offset < end_offset) { // emit the final 32-bit store
4263 Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
4264 adr = phase->transform(adr);
4265 const TypePtr* atp = TypeRawPtr::BOTTOM;
4266 if (val != nullptr) {
4267 assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
4268 mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
4269 } else {
4270 assert(raw_val == nullptr, "raw_val must be null when val is null");
4271 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
4272 }
4273 mem = phase->transform(mem);
4274 done_offset += BytesPerInt;
4275 }
4276 assert(done_offset == end_offset, "");
4277 return mem;
4278 }
4279
4280 //=============================================================================
4281 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
4282 : MultiNode(TypeFunc::Parms + (precedent == nullptr? 0: 1)),
4283 _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
4284 #ifdef ASSERT
4285 , _pair_idx(0)
4286 #endif
4287 {
4288 init_class_id(Class_MemBar);
4289 Node* top = C->top();
4290 init_req(TypeFunc::I_O,top);
4291 init_req(TypeFunc::FramePtr,top);
4292 init_req(TypeFunc::ReturnAdr,top);
4398 PhaseIterGVN* igvn = phase->is_IterGVN();
4399 remove(igvn);
4400 // Must return either the original node (now dead) or a new node
4401 // (Do not return a top here, since that would break the uniqueness of top.)
4402 return new ConINode(TypeInt::ZERO);
4403 }
4404 }
4405 return progress ? this : nullptr;
4406 }
4407
4408 //------------------------------Value------------------------------------------
4409 const Type* MemBarNode::Value(PhaseGVN* phase) const {
4410 if( !in(0) ) return Type::TOP;
4411 if( phase->type(in(0)) == Type::TOP )
4412 return Type::TOP;
4413 return TypeTuple::MEMBAR;
4414 }
4415
4416 //------------------------------match------------------------------------------
4417 // Construct projections for memory.
4418 Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) {
4419 switch (proj->_con) {
4420 case TypeFunc::Control:
4421 case TypeFunc::Memory:
4422 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
4423 }
4424 ShouldNotReachHere();
4425 return nullptr;
4426 }
4427
4428 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4429 trailing->_kind = TrailingStore;
4430 leading->_kind = LeadingStore;
4431 #ifdef ASSERT
4432 trailing->_pair_idx = leading->_idx;
4433 leading->_pair_idx = leading->_idx;
4434 #endif
4435 }
4436
4437 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
4438 trailing->_kind = TrailingLoadStore;
4685 return (req() > RawStores);
4686 }
4687
4688 void InitializeNode::set_complete(PhaseGVN* phase) {
4689 assert(!is_complete(), "caller responsibility");
4690 _is_complete = Complete;
4691
4692 // After this node is complete, it contains a bunch of
4693 // raw-memory initializations. There is no need for
4694 // it to have anything to do with non-raw memory effects.
4695 // Therefore, tell all non-raw users to re-optimize themselves,
4696 // after skipping the memory effects of this initialization.
4697 PhaseIterGVN* igvn = phase->is_IterGVN();
4698 if (igvn) igvn->add_users_to_worklist(this);
4699 }
4700
4701 // convenience function
4702 // return false if the init contains any stores already
4703 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
4704 InitializeNode* init = initialization();
4705 if (init == nullptr || init->is_complete()) {
4706 return false;
4707 }
4708 init->remove_extra_zeroes();
4709 // for now, if this allocation has already collected any inits, bail:
4710 if (init->is_non_zero()) return false;
4711 init->set_complete(phase);
4712 return true;
4713 }
4714
4715 void InitializeNode::remove_extra_zeroes() {
4716 if (req() == RawStores) return;
4717 Node* zmem = zero_memory();
4718 uint fill = RawStores;
4719 for (uint i = fill; i < req(); i++) {
4720 Node* n = in(i);
4721 if (n->is_top() || n == zmem) continue; // skip
4722 if (fill < i) set_req(fill, n); // compact
4723 ++fill;
4724 }
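// For illustration: raw stores (S1, zmem, S2) compact to (S1, S2); the
// leftover trailing slot is removed below.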
4725 // delete any empty spaces created:
4726 while (fill < req()) {
4727 del_req(fill);
4871 // store node that we'd like to capture. We need to check
4872 // the uses of the MergeMemNode.
4873 mems.push(n);
4874 }
4875 } else if (n->is_Mem()) {
4876 Node* other_adr = n->in(MemNode::Address);
4877 if (other_adr == adr) {
4878 failed = true;
4879 break;
4880 } else {
4881 const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
4882 if (other_t_adr != nullptr) {
4883 int other_alias_idx = phase->C->get_alias_index(other_t_adr);
4884 if (other_alias_idx == alias_idx) {
4885 // A load from the same memory slice as the store right
4886 // after the InitializeNode. We check the control of the
4887 // object/array that is loaded from. If it's the same as
4888 // the store control then we cannot capture the store.
4889 assert(!n->is_Store(), "2 stores to same slice on same control?");
4890 Node* base = other_adr;
4891 if (base->is_Phi()) {
4892 // In a rare case, base may be a PhiNode and it may read
4893 // the same memory slice between the InitializeNode and the store.
4894 failed = true;
4895 break;
4896 }
4897 assert(base->is_AddP(), "should be addp but is %s", base->Name());
4898 base = base->in(AddPNode::Base);
4899 if (base != nullptr) {
4900 base = base->uncast();
4901 if (base->is_Proj() && base->in(0) == alloc) {
4902 failed = true;
4903 break;
4904 }
4905 }
4906 }
4907 }
4908 }
4909 } else {
4910 failed = true;
4911 break;
4912 }
4913 }
4914 }
4915 }
4916 if (failed) {
5463 // z's_done 12 16 16 16 12 16 12
5464 // z's_needed 12 16 16 16 16 16 16
5465 // zsize 0 0 0 0 4 0 4
5466 if (next_full_store < 0) {
5467 // Conservative tack: Zero to end of current word.
5468 zeroes_needed = align_up(zeroes_needed, BytesPerInt);
5469 } else {
5470 // Zero to beginning of next fully initialized word.
5471 // Or, don't zero at all, if we are already in that word.
5472 assert(next_full_store >= zeroes_needed, "must go forward");
5473 assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
5474 zeroes_needed = next_full_store;
5475 }
5476 }
5477
5478 if (zeroes_needed > zeroes_done) {
5479 intptr_t zsize = zeroes_needed - zeroes_done;
5480 // Do some incremental zeroing on rawmem, in parallel with inits.
5481 zeroes_done = align_down(zeroes_done, BytesPerInt);
5482 rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5483 allocation()->in(AllocateNode::InitValue),
5484 allocation()->in(AllocateNode::RawInitValue),
5485 zeroes_done, zeroes_needed,
5486 phase);
5487 zeroes_done = zeroes_needed;
5488 if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
5489 do_zeroing = false; // leave the hole, next time
5490 }
5491 }
5492
5493 // Collect the store and move on:
5494 phase->replace_input_of(st, MemNode::Memory, inits);
5495 inits = st; // put it on the linearized chain
5496 set_req(i, zmem); // unhook from previous position
5497
5498 if (zeroes_done == st_off)
5499 zeroes_done = next_init_off;
5500
5501 assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
5502
5503 #ifdef ASSERT
5504 // Various order invariants. Weaker than stores_are_sane because
5524 remove_extra_zeroes(); // clear out all the zmems left over
5525 add_req(inits);
5526
5527 if (!(UseTLAB && ZeroTLAB)) {
5528 // If anything remains to be zeroed, zero it all now.
5529 zeroes_done = align_down(zeroes_done, BytesPerInt);
5530 // if it is the last unused 4 bytes of an instance, forget about it
5531 intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
5532 if (zeroes_done + BytesPerLong >= size_limit) {
5533 AllocateNode* alloc = allocation();
5534 assert(alloc != nullptr, "must be present");
5535 if (alloc != nullptr && alloc->Opcode() == Op_Allocate) {
5536 Node* klass_node = alloc->in(AllocateNode::KlassNode);
5537 ciKlass* k = phase->type(klass_node)->is_instklassptr()->instance_klass();
5538 if (zeroes_done == k->layout_helper())
5539 zeroes_done = size_limit;
5540 }
5541 }
5542 if (zeroes_done < size_limit) {
5543 rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
5544 allocation()->in(AllocateNode::InitValue),
5545 allocation()->in(AllocateNode::RawInitValue),
5546 zeroes_done, size_in_bytes, phase);
5547 }
5548 }
5549
5550 set_complete(phase);
5551 return rawmem;
5552 }
5553
5554
5555 #ifdef ASSERT
5556 bool InitializeNode::stores_are_sane(PhaseValues* phase) {
5557 if (is_complete())
5558 return true; // stores could be anything at this point
5559 assert(allocation() != nullptr, "must be present");
5560 intptr_t last_off = allocation()->minimum_header_size();
5561 for (uint i = InitializeNode::RawStores; i < req(); i++) {
5562 Node* st = in(i);
5563 intptr_t st_off = get_store_offset(st, phase);
5564 if (st_off < 0) continue; // ignore dead garbage
5565 if (last_off > st_off) {