src/hotspot/share/opto/memnode.cpp
* questions.
*
*/
#include "precompiled.hpp"
+ #include "ci/ciFlatArrayKlass.hpp"
#include "classfile/javaClasses.hpp"
+ #include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/cfgnode.hpp"
#include "opto/regalloc.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
+ #include "opto/inlinetypenode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
->is_oopptr()->cast_to_instance_id(t_oop->instance_id());
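+ // For arrays, also carry over the flatness and null-freeness properties so the rebuilt type can still match t_oop exactly (checked via do_split below).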
if (t_oop->isa_aryptr()) {
mem_t = mem_t->is_aryptr()
->cast_to_stable(t_oop->is_aryptr()->is_stable())
->cast_to_size(t_oop->is_aryptr()->size())
+ ->cast_to_not_flat(t_oop->is_aryptr()->is_not_flat())
+ ->cast_to_not_null_free(t_oop->is_aryptr()->is_not_null_free())
->with_offset(t_oop->is_aryptr()->offset())
->is_aryptr();
}
do_split = mem_t == t_oop;
}
assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
bool consistent = adr_check == nullptr || adr_check->empty() ||
phase->C->must_alias(adr_check, alias_idx );
// Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
if( !consistent && adr_check != nullptr && !adr_check->empty() &&
- tp->isa_aryptr() && tp->offset() == Type::OffsetBot &&
+ tp->isa_aryptr() && tp->offset() == Type::OffsetBot &&
adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
// don't assert if it is dead code.
bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile();
bool is_stable_ary = FoldStableValues &&
(tp != nullptr) && (tp->isa_aryptr() != nullptr) &&
tp->isa_aryptr()->is_stable();
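+ // Inline type fields are immutable once the value is constructed, so loads from inline type pointers can be treated like stable/non-volatile accesses here.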
- return (eliminate_boxing && non_volatile) || is_stable_ary;
+ return (eliminate_boxing && non_volatile) || is_stable_ary || tp->is_inlinetypeptr();
}
return false;
}
const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
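+ // Flat arrays store flattened inline type payloads; take the log2 element size from the flat layout instead of deriving it from the basic type.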
- uint shift = exact_log2(type2aelembytes(ary_elem));
+ uint shift = ary_t->is_flat() ? ary_t->flat_log_elem_size() : exact_log2(type2aelembytes(ary_elem));
Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
#ifdef _LP64
diff = phase->transform(new ConvI2LNode(diff));
#endif
return nullptr;
}
// LoadVector/StoreVector needs additional check to ensure the types match.
if (st->is_StoreVector()) {
const TypeVect* in_vt = st->as_StoreVector()->vect_type();
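+ // The current node may be a load or a store; fetch the vector type accordingly.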
- const TypeVect* out_vt = as_LoadVector()->vect_type();
+ const TypeVect* out_vt = is_Load() ? as_LoadVector()->vect_type() : as_StoreVector()->vect_type();
if (in_vt != out_vt) {
return nullptr;
}
}
return st->in(MemNode::ValueIn);
(ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
// return a zero value for the load's basic type
// (This is one of the few places where a generic PhaseTransform
// can create new nodes. Think of it as lazily manifesting
// virtually pre-existing constants.)
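+ // An inline type buffer or null-free array allocation carries its default value as an input; a load from the fresh allocation yields that value instead of zero.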
+ Node* default_value = ld_alloc->in(AllocateNode::DefaultValue);
+ if (default_value != nullptr) {
+ return default_value;
+ }
+ assert(ld_alloc->in(AllocateNode::RawDefaultValue) == nullptr, "RawDefaultValue must be null when DefaultValue is null");
if (memory_type() != T_VOID) {
if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
// If ReduceBulkZeroing is disabled, we need to check if the allocation does not belong to an
// ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
// by the ArrayCopyNode.
}
//------------------------------Identity---------------------------------------
// Loads are identity if previous store is to same address
Node* LoadNode::Identity(PhaseGVN* phase) {
+ // Loading from an InlineType? The InlineType has the values of
+ // all fields as input. Look for the field with matching offset.
+ Node* addr = in(Address);
+ intptr_t offset;
+ Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset);
+ if (base != nullptr && base->is_InlineType() && offset > oopDesc::klass_offset_in_bytes()) {
+ Node* value = base->as_InlineType()->field_value_by_offset((int)offset, true);
+ if (value != nullptr) {
+ if (Opcode() == Op_LoadN) {
+ // Encode oop value if we are loading a narrow oop
+ assert(!phase->type(value)->isa_narrowoop(), "should already be decoded");
+ value = phase->transform(new EncodePNode(value, bottom_type()));
+ }
+ return value;
+ }
+ }
+
// If the previous store-maker is the right kind of Store, and the store is
// to the same address, then we are equal to the value stored.
Node* mem = in(Memory);
Node* value = can_see_stored_value(mem, phase);
if( value ) {
// In fact, that could have been the original type of p1, and p1 could have
// had an original form like p1:(AddP x x (LShiftL quux 3)), where the
// expression (LShiftL quux 3) independently optimized to the constant 8.
if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr)
&& (_type->isa_vect() == nullptr)
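+ // Skip flat arrays: loads there access individual fields of the flattened element, so the array element type does not apply directly.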
+ && !ary->is_flat()
&& Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
// t might actually be lower than _type, if _type is a unique
// concrete subclass of abstract class t.
if (off_beyond_header || off == Type::OffsetBot) { // is the offset beyond the header?
const Type* jt = t->join_speculative(_type);
} else if (tp->base() == Type::InstPtr) {
assert( off != Type::OffsetBot ||
// arrays can be cast to Objects
!tp->isa_instptr() ||
tp->is_instptr()->instance_klass()->is_java_lang_Object() ||
+ // Default value load
+ tp->is_instptr()->instance_klass() == ciEnv::current()->Class_klass() ||
// unsafe field access may not have a constant offset
C->has_unsafe_access(),
"Field accesses must be precise" );
// For oop loads, we expect the _type to be precise.
- // Optimize loads from constant fields.
const TypeInstPtr* tinst = tp->is_instptr();
+ BasicType bt = memory_type();
+
+ // Optimize loads from constant fields.
ciObject* const_oop = tinst->const_oop();
if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) {
- const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());
+ const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt);
if (con_type != nullptr) {
return con_type;
}
}
} else if (tp->base() == Type::KlassPtr || tp->base() == Type::InstKlassPtr || tp->base() == Type::AryKlassPtr) {
if (mem->is_Parm() && mem->in(0)->is_Start()) {
assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
return Type::get_zero_type(_type->basic_type());
}
}
-
Node* alloc = is_new_object_mark_load();
if (alloc != nullptr) {
- return TypeX::make(markWord::prototype().value());
+ if (EnableValhalla) {
+ // The mark word may contain property bits (inline, flat, null-free)
+ Node* klass_node = alloc->in(AllocateNode::KlassNode);
+ const TypeKlassPtr* tkls = phase->type(klass_node)->isa_klassptr();
+ if (tkls != nullptr && tkls->is_loaded() && tkls->klass_is_exact()) {
+ return TypeX::make(tkls->exact_klass()->prototype_header().value());
+ }
+ } else {
+ return TypeX::make(markWord::prototype().value());
+ }
}
return _type;
}
}
//=============================================================================
//----------------------------LoadKlassNode::make------------------------------
// Polymorphic factory method:
- Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
+ Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
+ const TypeKlassPtr* tk) {
// sanity check the alias category against the created node type
const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
assert(adr_type != nullptr, "expecting TypeKlassPtr");
#ifdef _LP64
if (adr_type->is_ptr_to_narrowklass()) {
if (ik == phase->C->env()->Class_klass()
&& (offset == java_lang_Class::klass_offset() ||
offset == java_lang_Class::array_klass_offset())) {
// We are loading a special hidden field from a Class mirror object,
// the field which points to the VM's Klass metaobject.
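+ // java_mirror_type also reports whether the mirror denotes a null-free array, so the array klass computed below can be cast accordingly.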
- ciType* t = tinst->java_mirror_type();
+ bool is_null_free_array = false;
+ ciType* t = tinst->java_mirror_type(&is_null_free_array);
// java_mirror_type returns non-null for compile-time Class constants.
if (t != nullptr) {
// constant oop => constant klass
if (offset == java_lang_Class::array_klass_offset()) {
if (t->is_void()) {
// We cannot create a void array. Since void is a primitive type return null
// klass. Users of this result need to do a null check on the returned klass.
return TypePtr::NULL_PTR;
}
- return TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
+ const TypeKlassPtr* tklass = TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces);
+ if (is_null_free_array) {
+ tklass = tklass->is_aryklassptr()->cast_to_null_free();
+ }
+ return tklass;
}
if (!t->is_klass()) {
// a primitive Class (e.g., int.class) has null for a klass field
return TypePtr::NULL_PTR;
}
// (Folds up the 1st indirection in aClassConstant.getModifiers().)
- return TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
+ const TypeKlassPtr* tklass = TypeKlassPtr::make(t->as_klass(), Type::trust_interfaces);
+ if (is_null_free_array) {
+ tklass = tklass->is_aryklassptr()->cast_to_null_free();
+ }
+ return tklass;
}
// non-constant mirror, so we can't tell what's going on
}
if (!tinst->is_loaded())
return _type; // Bail out if not loaded
return tinst->as_klass_type(true);
}
}
// Check for loading klass from an array
- const TypeAryPtr *tary = tp->isa_aryptr();
+ const TypeAryPtr* tary = tp->isa_aryptr();
if (tary != nullptr &&
tary->offset() == oopDesc::klass_offset_in_bytes()) {
return tary->as_klass_type(true);
}
Node* mem = in(MemNode::Memory);
Node* address = in(MemNode::Address);
Node* value = in(MemNode::ValueIn);
// Back-to-back stores to same address? Fold em up. Generally
- // unsafe if I have intervening uses.
- {
+ // unsafe if I have intervening uses...
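+ // Skip folding for the flat array memory slice (TypeAryPtr::INLINES), where stores may be mismatched accesses to different flattened element fields.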
+ if (phase->C->get_adr_type(phase->C->get_alias_index(adr_type())) != TypeAryPtr::INLINES) {
Node* st = mem;
// If Store 'st' has more than one use, we cannot fold 'st' away.
// For example, 'st' might be the final state at a conditional
// return. Or, 'st' might be used by some node which is live at
// the same time 'st' is live, which might be unschedulable. So,
st->Opcode() == Op_StoreVectorScatter ||
Opcode() == Op_StoreVectorScatter ||
phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
(Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
(Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
+ (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) || // raw store overlapping a narrow oop store (e.g. default value initialization)
(is_mismatched_access() || st->as_Store()->is_mismatched_access()),
"no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
if (st->in(MemNode::Address)->eqv_uncast(address) &&
st->as_Store()->memory_size() <= this->memory_size()) {
}
// Store of zero anywhere into a freshly-allocated object?
// Then the store is useless.
// (It must already have been captured by the InitializeNode.)
- if (result == this &&
- ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
+ if (result == this && ReduceFieldZeroing) {
// a newly allocated object is already all-zeroes everywhere
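+ // (and an inline type buffer or null-free array already holds its default value, so storing that same value again is redundant)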
- if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
+ if (mem->is_Proj() && mem->in(0)->is_Allocate() &&
+ (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::DefaultValue) == val)) {
result = mem;
}
- if (result == this) {
+ if (result == this && phase->type(val)->is_zero_type()) {
// the store may also apply to zero-bits in an earlier object
Node* prev_mem = find_previous_store(phase);
// Steps (a), (b): Walk past independent stores to find an exact match.
if (prev_mem != nullptr) {
Node* prev_val = can_see_stored_value(prev_mem, phase);
if (size <= 0 || size % unit != 0) return nullptr;
intptr_t count = size / unit;
// Length too long; communicate this to matchers and assemblers.
// Assemblers are responsible to produce fast hardware clears for it.
if (size > InitArrayShortSize) {
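+ // in(4) carries the raw value used to fill the array (zero for ordinary arrays, the element's default bit pattern for null-free flat arrays).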
- return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
+ return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true);
} else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) {
return nullptr;
}
if (!IdealizeClearArrayNode) return nullptr;
Node *mem = in(1);
else atp = atp->add_offset(Type::OffsetBot);
// Get base for derived pointer purposes
if( adr->Opcode() != Op_AddP ) Unimplemented();
Node *base = adr->in(1);
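+ // Fill with the node's value input rather than a hard-coded zero constant.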
- Node *zero = phase->makecon(TypeLong::ZERO);
+ Node *val = in(4);
Node *off = phase->MakeConX(BytesPerLong);
- mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
+ mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
count--;
while( count-- ) {
mem = phase->transform(mem);
adr = phase->transform(new AddPNode(base,adr,off));
- mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
+ mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
}
return mem;
}
//----------------------------step_through----------------------------------
}
//----------------------------clear_memory-------------------------------------
// Generate code to initialize object storage to zero.
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+ Node* val,
+ Node* raw_val,
intptr_t start_offset,
Node* end_offset,
PhaseGVN* phase) {
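+ // 'val' is the narrow oop default value (if any) and 'raw_val' the raw fill pattern; both are null when plain zero-filling is wanted.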
intptr_t offset = start_offset;
int unit = BytesPerLong;
if ((offset % unit) != 0) {
Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
adr = phase->transform(adr);
const TypePtr* atp = TypeRawPtr::BOTTOM;
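+ // Store the narrow oop default value if one is given, otherwise fall back to the original zero int store.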
- mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+ if (val != nullptr) {
+ assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
+ mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
+ } else {
+ assert(raw_val == nullptr, "raw_val must be null when val is null");
+ mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+ }
mem = phase->transform(mem);
offset += BytesPerInt;
}
assert((offset % unit) == 0, "");
// Initialize the remaining stuff, if any, with a ClearArray.
- return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
+ return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase);
}
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+ Node* raw_val,
Node* start_offset,
Node* end_offset,
PhaseGVN* phase) {
if (start_offset == end_offset) {
// nothing to do
}
// Bulk clear double-words
Node* zsize = phase->transform(new SubXNode(zend, zbase) );
Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
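+ // Without an explicit raw default value, fill with zero as before.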
- mem = new ClearArrayNode(ctl, mem, zsize, adr, false);
+ if (raw_val == nullptr) {
+ raw_val = phase->MakeConX(0);
+ }
+ mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false);
return phase->transform(mem);
}
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
+ Node* val,
+ Node* raw_val,
intptr_t start_offset,
intptr_t end_offset,
PhaseGVN* phase) {
if (start_offset == end_offset) {
// nothing to do
intptr_t done_offset = end_offset;
if ((done_offset % BytesPerLong) != 0) {
done_offset -= BytesPerInt;
}
if (done_offset > start_offset) {
- mem = clear_memory(ctl, mem, dest,
+ mem = clear_memory(ctl, mem, dest, val, raw_val,
start_offset, phase->MakeConX(done_offset), phase);
}
if (done_offset < end_offset) { // emit the final 32-bit store
Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
adr = phase->transform(adr);
const TypePtr* atp = TypeRawPtr::BOTTOM;
- mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+ if (val != nullptr) {
+ assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
+ mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
+ } else {
+ assert(raw_val == nullptr, "raw_val must be null when val is null");
+ mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+ }
mem = phase->transform(mem);
done_offset += BytesPerInt;
}
assert(done_offset == end_offset, "");
return mem;
return TypeTuple::MEMBAR;
}
//------------------------------match------------------------------------------
// Construct projections for memory.
- Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
+ Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) {
switch (proj->_con) {
case TypeFunc::Control:
case TypeFunc::Memory:
return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
}
// convenience function
// return false if the init contains any stores already
bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
InitializeNode* init = initialization();
- if (init == nullptr || init->is_complete()) return false;
+ if (init == nullptr || init->is_complete()) {
+ return false;
+ }
init->remove_extra_zeroes();
// for now, if this allocation has already collected any inits, bail:
if (init->is_non_zero()) return false;
init->set_complete(phase);
return true;
// after the InitializeNode. We check the control of the
// object/array that is loaded from. If it's the same as
// the store control then we cannot capture the store.
assert(!n->is_Store(), "2 stores to same slice on same control?");
Node* base = other_adr;
+ if (base->is_Phi()) {
+ // In rare cases, the base may be a Phi node that reads the same
+ // memory slice between the InitializeNode and the store.
+ failed = true;
+ break;
+ }
assert(base->is_AddP(), "should be addp but is %s", base->Name());
base = base->in(AddPNode::Base);
if (base != nullptr) {
base = base->uncast();
if (base->is_Proj() && base->in(0) == alloc) {
if (zeroes_needed > zeroes_done) {
intptr_t zsize = zeroes_needed - zeroes_done;
// Do some incremental zeroing on rawmem, in parallel with inits.
zeroes_done = align_down(zeroes_done, BytesPerInt);
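+ // Pass the allocation's default value inputs so any gaps are filled with the element default rather than plain zeroes.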
rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
+ allocation()->in(AllocateNode::DefaultValue),
+ allocation()->in(AllocateNode::RawDefaultValue),
zeroes_done, zeroes_needed,
phase);
zeroes_done = zeroes_needed;
if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
do_zeroing = false; // leave the hole, next time
zeroes_done = size_limit;
}
}
if (zeroes_done < size_limit) {
rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
+ allocation()->in(AllocateNode::DefaultValue),
+ allocation()->in(AllocateNode::RawDefaultValue),
zeroes_done, size_in_bytes, phase);
}
}
set_complete(phase);