src/hotspot/share/opto/macro.cpp
* questions.
*
*/
#include "precompiled.hpp"
+ #include "ci/ciFlatArrayKlass.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "libadt/vectset.hpp"
#include "memory/universe.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
+ #include "opto/inlinetypenode.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/subtypenode.hpp"
#include "opto/type.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/continuation.hpp"
#include "runtime/sharedRuntime.hpp"
+ #include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1ThreadLocalData.hpp"
#endif // INCLUDE_G1GC
}
}
return nreplacements;
}
- void PhaseMacroExpand::migrate_outs(Node *old, Node *target) {
- assert(old != nullptr, "sanity");
- for (DUIterator_Fast imax, i = old->fast_outs(imax); i < imax; i++) {
- Node* use = old->fast_out(i);
- _igvn.rehash_node_delayed(use);
- imax -= replace_input(use, old, target);
- // back up iterator
- --i;
- }
- assert(old->outcnt() == 0, "all uses must be deleted");
- }
-
Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path) {
Node* cmp;
if (mask != 0) {
Node* and_node = transform_later(new AndXNode(word, MakeConX(mask)));
cmp = transform_later(new CmpXNode(and_node, MakeConX(bits)));
return call;
}
void PhaseMacroExpand::eliminate_gc_barrier(Node* p2x) {
BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
- bs->eliminate_gc_barrier(this, p2x);
+ bs->eliminate_gc_barrier(&_igvn, p2x);
#ifndef PRODUCT
if (PrintOptoStatistics) {
Atomic::inc(&PhaseMacroExpand::_GC_barriers_removed_counter);
}
#endif
} else if (mem->is_Store()) {
const TypePtr* atype = mem->as_Store()->adr_type();
int adr_idx = phase->C->get_alias_index(atype);
if (adr_idx == alias_idx) {
assert(atype->isa_oopptr(), "address type must be oopptr");
- int adr_offset = atype->offset();
+ int adr_offset = atype->flat_offset();
uint adr_iid = atype->is_oopptr()->instance_id();
// Array elements references have the same alias_idx
// but different offset and different instance_id.
if (adr_offset == offset && adr_iid == alloc->_idx) {
return mem;
DEBUG_ONLY(mem->dump();)
assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
return nullptr;
}
mem = mem->in(MemNode::Memory);
- } else if (mem->Opcode() == Op_StrInflatedCopy) {
+ } else if (mem->Opcode() == Op_StrInflatedCopy) {
Node* adr = mem->in(3); // Destination array
const TypePtr* atype = adr->bottom_type()->is_ptr();
int adr_idx = phase->C->get_alias_index(atype);
if (adr_idx == alias_idx) {
DEBUG_ONLY(mem->dump();)
Node* dest_pos = ac->in(ArrayCopyNode::DestPos);
const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int();
const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int();
Node* adr = nullptr;
- const TypePtr* adr_type = nullptr;
+ Node* base = ac->in(ArrayCopyNode::Src);
+ const TypeAryPtr* adr_type = _igvn.type(base)->is_aryptr();
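+ // For flat arrays, the element size depends on the inline type's field layout,
+ // so derive the address shift from the flat layout rather than the declared element type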
+ if (adr_type->is_flat()) {
+ shift = adr_type->flat_log_elem_size();
+ }
if (src_pos_t->is_con() && dest_pos_t->is_con()) {
intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset;
- Node* base = ac->in(ArrayCopyNode::Src);
adr = _igvn.transform(new AddPNode(base, base, _igvn.MakeConX(off)));
- adr_type = _igvn.type(base)->is_ptr()->add_offset(off);
+ adr_type = _igvn.type(adr)->is_aryptr();
+ assert(adr_type == _igvn.type(base)->is_aryptr()->add_field_offset_and_offset(off), "incorrect address type");
if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
// Don't emit a new load from src if src == dst but try to get the value from memory instead
- return value_from_mem(ac->in(TypeFunc::Memory), ctl, ft, ftype, adr_type->isa_oopptr(), alloc);
+ return value_from_mem(ac->in(TypeFunc::Memory), ctl, ft, ftype, adr_type, alloc);
}
} else {
+ if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
+ // Non constant offset in the array: we can't statically
+ // determine the value
+ return nullptr;
+ }
Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
#ifdef _LP64
diff = _igvn.transform(new ConvI2LNode(diff));
#endif
diff = _igvn.transform(new LShiftXNode(diff, _igvn.intcon(shift)));
Node* off = _igvn.transform(new AddXNode(_igvn.MakeConX(offset), diff));
- Node* base = ac->in(ArrayCopyNode::Src);
adr = _igvn.transform(new AddPNode(base, base, off));
- adr_type = _igvn.type(base)->is_ptr()->add_offset(Type::OffsetBot);
- if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
- // Non constant offset in the array: we can't statically
- // determine the value
- return nullptr;
- }
+ // In the case of a flat inline type array, each field has its
+ // own slice so we need to extract the field being accessed from
+ // the address computation
+ adr_type = adr_type->add_field_offset_and_offset(offset)->add_offset(Type::OffsetBot)->is_aryptr();
+ adr = _igvn.transform(new CastPPNode(ctl, adr, adr_type));
}
MergeMemNode* mergemen = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem();
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt);
}
}
if (res != nullptr) {
if (ftype->isa_narrowoop()) {
// PhaseMacroExpand::scalar_replacement adds DecodeN nodes
+ assert(res->isa_DecodeN(), "should be narrow oop");
res = _igvn.transform(new EncodePNode(res, ftype));
}
return res;
}
return nullptr;
// Note: this function is recursive, its depth is limited by the "level" argument
// Returns the computed Phi, or null if it cannot compute it.
Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level) {
assert(mem->is_Phi(), "sanity");
int alias_idx = C->get_alias_index(adr_t);
- int offset = adr_t->offset();
+ int offset = adr_t->flat_offset();
int instance_id = adr_t->instance_id();
// Check if an appropriate value phi already exists.
Node* region = mem->in(0);
for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
for (uint j = 1; j < length; j++) {
Node *in = mem->in(j);
if (in == nullptr || in->is_top()) {
values.at_put(j, in);
- } else {
+ } else {
Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
if (val == start_mem || val == alloc_mem) {
// hit a sentinel, return appropriate 0 value
- values.at_put(j, _igvn.zerocon(ft));
+ Node* default_value = alloc->in(AllocateNode::DefaultValue);
+ if (default_value != nullptr) {
+ values.at_put(j, default_value);
+ } else {
+ assert(alloc->in(AllocateNode::RawDefaultValue) == nullptr, "default value may not be null");
+ values.at_put(j, _igvn.zerocon(ft));
+ }
continue;
}
if (val->is_Initialize()) {
val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
}
if (is_subword_type(ft)) {
n = Compile::narrow_value(ft, n, phi_type, &_igvn, true);
}
values.at_put(j, n);
} else if(val->is_Proj() && val->in(0) == alloc) {
- values.at_put(j, _igvn.zerocon(ft));
+ Node* default_value = alloc->in(AllocateNode::DefaultValue);
+ if (default_value != nullptr) {
+ values.at_put(j, default_value);
+ } else {
+ assert(alloc->in(AllocateNode::RawDefaultValue) == nullptr, "default value may not be null");
+ values.at_put(j, _igvn.zerocon(ft));
+ }
} else if (val->is_Phi()) {
val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
if (val == nullptr) {
return nullptr;
}
assert(adr_t->is_known_instance_field(), "instance required");
int instance_id = adr_t->instance_id();
assert((uint)instance_id == alloc->_idx, "wrong allocation");
int alias_idx = C->get_alias_index(adr_t);
- int offset = adr_t->offset();
+ int offset = adr_t->flat_offset();
Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
- Node *alloc_ctrl = alloc->in(TypeFunc::Control);
Node *alloc_mem = alloc->proj_out_or_null(TypeFunc::Memory, /*io_use:*/false);
assert(alloc_mem != nullptr, "Allocation without a memory projection.");
VectorSet visited;
bool done = sfpt_mem == alloc_mem;
if (mem == start_mem || mem == alloc_mem) {
done = true; // hit a sentinel, return appropriate 0 value
} else if (mem->is_Initialize()) {
mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
if (mem == nullptr) {
- done = true; // Something go wrong.
+ done = true; // Something went wrong.
} else if (mem->is_Store()) {
const TypePtr* atype = mem->as_Store()->adr_type();
assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
done = true;
}
} else if (mem->is_Store()) {
const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
assert(atype != nullptr, "address type must be oopptr");
assert(C->get_alias_index(atype) == alias_idx &&
- atype->is_known_instance_field() && atype->offset() == offset &&
+ atype->is_known_instance_field() && atype->flat_offset() == offset &&
atype->instance_id() == instance_id, "store is correct memory slice");
done = true;
} else if (mem->is_Phi()) {
// try to find a phi's unique input
Node *unique_input = nullptr;
}
}
if (mem != nullptr) {
if (mem == start_mem || mem == alloc_mem) {
// hit a sentinel, return appropriate 0 value
+ Node* default_value = alloc->in(AllocateNode::DefaultValue);
+ if (default_value != nullptr) {
+ return default_value;
+ }
+ assert(alloc->in(AllocateNode::RawDefaultValue) == nullptr, "default value may not be null");
return _igvn.zerocon(ft);
} else if (mem->is_Store()) {
Node* n = mem->in(MemNode::ValueIn);
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
n = bs->step_over_gc_barrier(n);
m = sfpt_mem;
}
return make_arraycopy_load(mem->as_ArrayCopy(), offset, ctl, m, ft, ftype, alloc);
}
}
- // Something go wrong.
+ // Something went wrong.
return nullptr;
}
+ // Search for the last value stored into each of the inline type's fields.
+ Node* PhaseMacroExpand::inline_type_from_mem(Node* mem, Node* ctl, ciInlineKlass* vk, const TypeAryPtr* adr_type, int offset, AllocateNode* alloc) {
+ // Subtract the offset of the first field to account for the missing oop header
+ offset -= vk->first_field_offset();
+ // Create a new InlineTypeNode and retrieve the field values from memory
+ InlineTypeNode* vt = InlineTypeNode::make_uninitialized(_igvn, vk);
+ transform_later(vt);
+ for (int i = 0; i < vk->nof_declared_nonstatic_fields(); ++i) {
+ ciType* field_type = vt->field_type(i);
+ int field_offset = offset + vt->field_offset(i);
+ Node* value = nullptr;
+ if (vt->field_is_flat(i)) {
+ value = inline_type_from_mem(mem, ctl, field_type->as_inline_klass(), adr_type, field_offset, alloc);
+ } else {
+ const Type* ft = Type::get_const_type(field_type);
+ BasicType bt = type2field[field_type->basic_type()];
+ if (UseCompressedOops && !is_java_primitive(bt)) {
+ ft = ft->make_narrowoop();
+ bt = T_NARROWOOP;
+ }
+ // Each inline type field has its own memory slice
+ adr_type = adr_type->with_field_offset(field_offset);
+ value = value_from_mem(mem, ctl, bt, ft, adr_type, alloc);
+ if (value != nullptr && ft->isa_narrowoop()) {
+ assert(UseCompressedOops, "unexpected narrow oop");
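+ // Either strip an existing EncodeP or emit a DecodeN so the field value is an uncompressed oop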
+ if (value->is_EncodeP()) {
+ value = value->in(1);
+ } else {
+ value = transform_later(new DecodeNNode(value, value->get_ptr_type()));
+ }
+ }
+ }
+ if (value != nullptr) {
+ vt->set_field_value(i, value);
+ } else {
+ // We might have reached the TrackedInitializationLimit
+ return nullptr;
+ }
+ }
+ return vt;
+ }
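+ // Example (hypothetical): for a flat field of type Point { int x; int y; } buffered at
+ // offset 16, the loop above loads x at field offset 16 and y at field offset 20, each
+ // through its own memory slice obtained via with_field_offset().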
+
// Check the possibility of scalar replacement.
bool PhaseMacroExpand::can_eliminate_allocation(PhaseIterGVN* igvn, AllocateNode *alloc, GrowableArray <SafePointNode *>* safepoints) {
// Scan the uses of the allocation to check for anything that would
// prevent us from eliminating it.
NOT_PRODUCT( const char* fail_eliminate = nullptr; )
DEBUG_ONLY( Node* disq_node = nullptr; )
bool can_eliminate = true;
bool reduce_merge_precheck = (safepoints == nullptr);
+ Unique_Node_List worklist;
Node* res = alloc->result_cast();
const TypeOopPtr* res_type = nullptr;
if (res == nullptr) {
// All users were eliminated.
} else if (!res->is_CheckCastPP()) {
NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
can_eliminate = false;
} else {
+ worklist.push(res);
res_type = igvn->type(res)->isa_oopptr();
if (res_type == nullptr) {
NOT_PRODUCT(fail_eliminate = "Neither instance or array allocation";)
can_eliminate = false;
} else if (!res_type->klass_is_exact()) {
can_eliminate = false;
}
}
}
- if (can_eliminate && res != nullptr) {
+ while (can_eliminate && worklist.size() > 0) {
BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
- for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
- j < jmax && can_eliminate; j++) {
+ res = worklist.pop();
+ for (DUIterator_Fast jmax, j = res->fast_outs(jmax); j < jmax && can_eliminate; j++) {
Node* use = res->fast_out(j);
if (use->is_AddP()) {
const TypePtr* addp_type = igvn->type(use)->is_ptr();
int offset = addp_type->offset();
if (sfptMem == nullptr || sfptMem->is_top()) {
DEBUG_ONLY(disq_node = use;)
NOT_PRODUCT(fail_eliminate = "null or TOP memory";)
can_eliminate = false;
} else if (!reduce_merge_precheck) {
+ assert(!res->is_Phi() || !res->as_Phi()->can_be_inline_type(), "Inline type allocations should not have safepoint uses");
safepoints->append_if_missing(sfpt);
}
+ } else if (use->is_InlineType() && use->as_InlineType()->get_oop() == res) {
+ // Look at uses
+ for (DUIterator_Fast kmax, k = use->fast_outs(kmax); k < kmax; k++) {
+ Node* u = use->fast_out(k);
+ if (u->is_InlineType()) {
+ // Use in flat field can be eliminated
+ InlineTypeNode* vt = u->as_InlineType();
+ for (uint i = 0; i < vt->field_count(); ++i) {
+ if (vt->field_value(i) == use && !vt->field_is_flat(i)) {
+ can_eliminate = false; // Use in non-flat field
+ break;
+ }
+ }
+ } else {
+ // Add other uses to the worklist to process individually
+ worklist.push(use);
+ }
+ }
+ } else if (use->Opcode() == Op_StoreX && use->in(MemNode::Address) == res) {
+ // Store to mark word of inline type larval buffer
+ assert(res_type->is_inlinetypeptr(), "Unexpected store to mark word");
+ } else if (res_type->is_inlinetypeptr() && (use->Opcode() == Op_MemBarRelease || use->Opcode() == Op_MemBarStoreStore)) {
+ // Inline type buffer allocations are followed by a membar
} else if (reduce_merge_precheck &&
(use->is_Phi() || use->is_EncodeP() ||
use->Opcode() == Op_MemBarRelease ||
(UseStoreStoreForCtor && use->Opcode() == Op_MemBarStoreStore))) {
// Nothing to do
NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
}
DEBUG_ONLY(disq_node = use;)
}
can_eliminate = false;
+ } else {
+ assert(use->Opcode() == Op_CastP2X, "should be");
+ assert(!use->has_out_with(Op_OrL), "should have been removed because oop is never null");
}
}
}
#ifndef PRODUCT
tty->print("Scalar ");
if (res == nullptr)
alloc->dump();
else
res->dump();
- } else if (alloc->_is_scalar_replaceable) {
+ } else {
tty->print("NotScalar (%s)", fail_eliminate);
if (res == nullptr)
alloc->dump();
else
res->dump();
}
_igvn._worklist.push(sfpt_done);
}
}
- SafePointScalarObjectNode* PhaseMacroExpand::create_scalarized_object_description(AllocateNode *alloc, SafePointNode* sfpt) {
+ SafePointScalarObjectNode* PhaseMacroExpand::create_scalarized_object_description(AllocateNode *alloc, SafePointNode* sfpt,
+ Unique_Node_List* value_worklist) {
// Fields of scalar objs are referenced only at the end
// of regular debuginfo at the last (youngest) JVMS.
// Record relative start index.
ciInstanceKlass* iklass = nullptr;
BasicType basic_elem_type = T_ILLEGAL;
assert(nfields >= 0, "must be an array klass.");
basic_elem_type = res_type->is_aryptr()->elem()->array_element_basic_type();
array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
element_size = type2aelembytes(basic_elem_type);
field_type = res_type->is_aryptr()->elem();
+ if (res_type->is_flat()) {
+ // Flat inline type array
+ element_size = res_type->is_aryptr()->flat_elem_size();
+ }
+ }
+
+ if (res->bottom_type()->is_inlinetypeptr()) {
+ // Nullable inline types have an IsInit field which is added to the safepoint when scalarizing them (see
+ // InlineTypeNode::make_scalar_in_safepoint()). With circular inline types, we stop scalarizing at depth 1
+ // to avoid endless recursion. Therefore, we do not have a SafePointScalarObjectNode node here, yet.
+ // We are about to create a SafePointScalarObjectNode as if this is a normal object. Add an additional int input
+ // with value 1 which sets IsInit to true to indicate that the object is always non-null. This input is checked
+ // later in PhaseOutput::FillLocArray() for inline types.
+ sfpt->add_req(_igvn.intcon(1));
}
}
SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(res_type, alloc, first_ind, sfpt->jvms()->depth(), nfields);
sobj->init_req(0, C->root());
if (iklass != nullptr) {
field = iklass->nonstatic_field_at(j);
offset = field->offset_in_bytes();
ciType* elem_type = field->type();
basic_elem_type = field->layout_type();
+ assert(!field->is_flat(), "flat inline type fields should not have safepoint uses");
// The next code is taken from Parse::do_get_xxx().
if (is_reference_type(basic_elem_type)) {
if (!elem_type->is_loaded()) {
field_type = TypeInstPtr::BOTTOM;
}
} else {
offset = array_base + j * (intptr_t)element_size;
}
- const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr();
-
- Node *field_val = value_from_mem(sfpt->memory(), sfpt->control(), basic_elem_type, field_type, field_addr_type, alloc);
+ Node* field_val = nullptr;
+ const TypeOopPtr* field_addr_type = res_type->add_offset(offset)->isa_oopptr();
+ if (res_type->is_flat()) {
+ ciInlineKlass* inline_klass = res_type->is_aryptr()->elem()->inline_klass();
+ assert(inline_klass->flat_in_array(), "must be flat in array");
+ field_val = inline_type_from_mem(sfpt->memory(), sfpt->control(), inline_klass, field_addr_type->isa_aryptr(), 0, alloc);
+ } else {
+ field_val = value_from_mem(sfpt->memory(), sfpt->control(), basic_elem_type, field_type, field_addr_type, alloc);
+ }
// We weren't able to find a value for this field,
// give up on eliminating this allocation.
if (field_val == nullptr) {
uint last = sfpt->req() - 1;
if (UseCompressedOops && field_type->isa_narrowoop()) {
// Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
// to be able scalar replace the allocation.
if (field_val->is_EncodeP()) {
field_val = field_val->in(1);
- } else {
+ } else if (!field_val->is_InlineType()) {
field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
}
}
+
+ // Keep track of inline types to scalarize them later
+ if (field_val->is_InlineType()) {
+ value_worklist->push(field_val);
+ } else if (field_val->is_Phi()) {
+ PhiNode* phi = field_val->as_Phi();
+ // Eagerly replace inline type phis now since we could be removing an inline type allocation where we must
+ // scalarize all its fields in safepoints.
+ field_val = phi->try_push_inline_types_down(&_igvn, true);
+ if (field_val->is_InlineType()) {
+ value_worklist->push(field_val);
+ }
+ }
sfpt->add_req(field_val);
}
sfpt->jvms()->set_endoff(sfpt->req());
// Do scalar replacement.
bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
GrowableArray <SafePointNode *> safepoints_done;
Node* res = alloc->result_cast();
assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");
+ const TypeOopPtr* res_type = nullptr;
+ if (res != nullptr) { // Could be null when there are no users
+ res_type = _igvn.type(res)->isa_oopptr();
+ }
// Process the safepoint uses
+ assert(safepoints.length() == 0 || !res_type->is_inlinetypeptr() || C->has_circular_inline_type(),
+ "Inline type allocations should have been scalarized earlier");
+ Unique_Node_List value_worklist;
while (safepoints.length() > 0) {
SafePointNode* sfpt = safepoints.pop();
- SafePointScalarObjectNode* sobj = create_scalarized_object_description(alloc, sfpt);
+ SafePointScalarObjectNode* sobj = create_scalarized_object_description(alloc, sfpt, &value_worklist);
if (sobj == nullptr) {
undo_previous_scalarizations(safepoints_done, alloc);
return false;
}
_igvn._worklist.push(sfpt);
// keep it for rollback
safepoints_done.append_if_missing(sfpt);
}
-
+ // Scalarize inline types that were added to the safepoint.
+ // Don't allow linking a constant oop (if available) for flat array elements
+ // because Deoptimization::reassign_flat_array_elements needs field values.
+ bool allow_oop = (res_type != nullptr) && !res_type->is_flat();
+ for (uint i = 0; i < value_worklist.size(); ++i) {
+ InlineTypeNode* vt = value_worklist.at(i)->as_InlineType();
+ vt->make_scalar_in_safepoints(&_igvn, allow_oop);
+ }
return true;
}
static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
}
}
// Process users of eliminated allocation.
- void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
+ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc, bool inline_alloc) {
+ Unique_Node_List worklist;
Node* res = alloc->result_cast();
if (res != nullptr) {
+ worklist.push(res);
+ }
+ while (worklist.size() > 0) {
+ res = worklist.pop();
for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
Node *use = res->last_out(j);
uint oc1 = res->outcnt();
if (use->is_AddP()) {
for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
Node *n = use->last_out(k);
uint oc2 = use->outcnt();
if (n->is_Store()) {
- #ifdef ASSERT
- // Verify that there is no dependent MemBarVolatile nodes,
- // they should be removed during IGVN, see MemBarNode::Ideal().
- for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
- p < pmax; p++) {
- Node* mb = n->fast_out(p);
- assert(mb->is_Initialize() || !mb->is_MemBar() ||
- mb->req() <= MemBarNode::Precedent ||
- mb->in(MemBarNode::Precedent) != n,
- "MemBarVolatile should be eliminated for non-escaping object");
+ for (DUIterator_Fast pmax, p = n->fast_outs(pmax); p < pmax; p++) {
+ MemBarNode* mb = n->fast_out(p)->isa_MemBar();
+ if (mb != nullptr && mb->req() <= MemBarNode::Precedent && mb->in(MemBarNode::Precedent) == n) {
+ // MemBarVolatiles should have been removed by MemBarNode::Ideal() for non-inline allocations
+ assert(inline_alloc, "MemBarVolatile should be eliminated for non-escaping object");
+ mb->remove(&_igvn);
+ }
}
- #endif
_igvn.replace_node(n, n->in(MemNode::Memory));
} else {
eliminate_gc_barrier(n);
}
k -= (oc2 - use->outcnt());
}
} else {
assert(ac->is_arraycopy_validated() ||
ac->is_copyof_validated() ||
ac->is_copyofrange_validated(), "unsupported");
- CallProjections callprojs;
- ac->extract_projections(&callprojs, true);
+ CallProjections* callprojs = ac->extract_projections(true);
- _igvn.replace_node(callprojs.fallthrough_ioproj, ac->in(TypeFunc::I_O));
- _igvn.replace_node(callprojs.fallthrough_memproj, ac->in(TypeFunc::Memory));
- _igvn.replace_node(callprojs.fallthrough_catchproj, ac->in(TypeFunc::Control));
+ _igvn.replace_node(callprojs->fallthrough_ioproj, ac->in(TypeFunc::I_O));
+ _igvn.replace_node(callprojs->fallthrough_memproj, ac->in(TypeFunc::Memory));
+ _igvn.replace_node(callprojs->fallthrough_catchproj, ac->in(TypeFunc::Control));
// Set control to top. IGVN will remove the remaining projections
ac->set_req(0, top());
ac->replace_edge(res, top(), &_igvn);
if (src->outcnt() == 0 && !src->is_top()) {
_igvn.remove_dead_node(src);
}
}
_igvn._worklist.push(ac);
+ } else if (use->is_InlineType()) {
+ assert(use->as_InlineType()->get_oop() == res, "unexpected inline type ptr use");
+ // Cut off oop input and remove known instance id from type
+ _igvn.rehash_node_delayed(use);
+ use->as_InlineType()->set_oop(_igvn, _igvn.zerocon(T_OBJECT));
+ const TypeOopPtr* toop = _igvn.type(use)->is_oopptr()->cast_to_instance_id(TypeOopPtr::InstanceBot);
+ _igvn.set_type(use, toop);
+ use->as_InlineType()->set_type(toop);
+ // Process users
+ for (DUIterator_Fast kmax, k = use->fast_outs(kmax); k < kmax; k++) {
+ Node* u = use->fast_out(k);
+ if (!u->is_InlineType()) {
+ worklist.push(u);
+ }
+ }
+ } else if (use->Opcode() == Op_StoreX && use->in(MemNode::Address) == res) {
+ // Store to mark word of inline type larval buffer
+ assert(inline_alloc, "Unexpected store to mark word");
+ _igvn.replace_node(use, use->in(MemNode::Memory));
+ } else if (use->Opcode() == Op_MemBarRelease || use->Opcode() == Op_MemBarStoreStore) {
+ // Inline type buffer allocations are followed by a membar
+ assert(inline_alloc, "Unexpected MemBarRelease");
+ use->as_MemBar()->remove(&_igvn);
} else {
eliminate_gc_barrier(use);
}
j -= (oc1 - res->outcnt());
}
}
//
// Process other users of allocation's projections
//
- if (_callprojs.resproj != nullptr && _callprojs.resproj->outcnt() != 0) {
+ if (_callprojs->resproj[0] != nullptr && _callprojs->resproj[0]->outcnt() != 0) {
// First disconnect stores captured by Initialize node.
// If Initialize node is eliminated first in the following code,
// it will kill such stores and DUIterator_Last will assert.
- for (DUIterator_Fast jmax, j = _callprojs.resproj->fast_outs(jmax); j < jmax; j++) {
- Node* use = _callprojs.resproj->fast_out(j);
+ for (DUIterator_Fast jmax, j = _callprojs->resproj[0]->fast_outs(jmax); j < jmax; j++) {
+ Node* use = _callprojs->resproj[0]->fast_out(j);
if (use->is_AddP()) {
// raw memory addresses used only by the initialization
_igvn.replace_node(use, C->top());
--j; --jmax;
}
}
- for (DUIterator_Last jmin, j = _callprojs.resproj->last_outs(jmin); j >= jmin; ) {
- Node* use = _callprojs.resproj->last_out(j);
- uint oc1 = _callprojs.resproj->outcnt();
+ for (DUIterator_Last jmin, j = _callprojs->resproj[0]->last_outs(jmin); j >= jmin; ) {
+ Node* use = _callprojs->resproj[0]->last_out(j);
+ uint oc1 = _callprojs->resproj[0]->outcnt();
if (use->is_Initialize()) {
// Eliminate Initialize node.
InitializeNode *init = use->as_Initialize();
assert(init->outcnt() <= 2, "only a control and memory projection expected");
Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
if (ctrl_proj != nullptr) {
_igvn.replace_node(ctrl_proj, init->in(TypeFunc::Control));
#ifdef ASSERT
// If the InitializeNode has no memory out, it will die, and tmp will become null
Node* tmp = init->in(TypeFunc::Control);
- assert(tmp == nullptr || tmp == _callprojs.fallthrough_catchproj, "allocation control projection");
+ assert(tmp == nullptr || tmp == _callprojs->fallthrough_catchproj, "allocation control projection");
#endif
}
Node *mem_proj = init->proj_out_or_null(TypeFunc::Memory);
if (mem_proj != nullptr) {
Node *mem = init->in(TypeFunc::Memory);
#ifdef ASSERT
if (mem->is_MergeMem()) {
- assert(mem->in(TypeFunc::Memory) == _callprojs.fallthrough_memproj, "allocation memory projection");
+ assert(mem->in(TypeFunc::Memory) == _callprojs->fallthrough_memproj, "allocation memory projection");
} else {
- assert(mem == _callprojs.fallthrough_memproj, "allocation memory projection");
+ assert(mem == _callprojs->fallthrough_memproj, "allocation memory projection");
}
#endif
_igvn.replace_node(mem_proj, mem);
}
+ } else if (use->Opcode() == Op_MemBarStoreStore) {
+ // Inline type buffer allocations are followed by a membar
+ assert(inline_alloc, "Unexpected MemBarStoreStore");
+ use->as_MemBar()->remove(&_igvn);
} else {
assert(false, "only Initialize or AddP expected");
}
- j -= (oc1 - _callprojs.resproj->outcnt());
+ j -= (oc1 - _callprojs->resproj[0]->outcnt());
}
}
- if (_callprojs.fallthrough_catchproj != nullptr) {
- _igvn.replace_node(_callprojs.fallthrough_catchproj, alloc->in(TypeFunc::Control));
+ if (_callprojs->fallthrough_catchproj != nullptr) {
+ _igvn.replace_node(_callprojs->fallthrough_catchproj, alloc->in(TypeFunc::Control));
}
- if (_callprojs.fallthrough_memproj != nullptr) {
- _igvn.replace_node(_callprojs.fallthrough_memproj, alloc->in(TypeFunc::Memory));
+ if (_callprojs->fallthrough_memproj != nullptr) {
+ _igvn.replace_node(_callprojs->fallthrough_memproj, alloc->in(TypeFunc::Memory));
}
- if (_callprojs.catchall_memproj != nullptr) {
- _igvn.replace_node(_callprojs.catchall_memproj, C->top());
+ if (_callprojs->catchall_memproj != nullptr) {
+ _igvn.replace_node(_callprojs->catchall_memproj, C->top());
}
- if (_callprojs.fallthrough_ioproj != nullptr) {
- _igvn.replace_node(_callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
+ if (_callprojs->fallthrough_ioproj != nullptr) {
+ _igvn.replace_node(_callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
}
- if (_callprojs.catchall_ioproj != nullptr) {
- _igvn.replace_node(_callprojs.catchall_ioproj, C->top());
+ if (_callprojs->catchall_ioproj != nullptr) {
+ _igvn.replace_node(_callprojs->catchall_ioproj, C->top());
}
- if (_callprojs.catchall_catchproj != nullptr) {
- _igvn.replace_node(_callprojs.catchall_catchproj, C->top());
+ if (_callprojs->catchall_catchproj != nullptr) {
+ _igvn.replace_node(_callprojs->catchall_catchproj, C->top());
}
}
bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
// If reallocation fails during deoptimization we'll pop all
// interpreter frames for this compiled frame and that won't play
// nice with JVMTI popframe.
// We avoid this issue by eager reallocation when the popframe request
// is received.
- if (!EliminateAllocations || !alloc->_is_non_escaping) {
+ if (!EliminateAllocations) {
return false;
}
Node* klass = alloc->in(AllocateNode::KlassNode);
const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
- Node* res = alloc->result_cast();
+
+ // Attempt to eliminate inline type buffer allocations
+ // regardless of usage and escape/replaceable status.
+ bool inline_alloc = tklass->isa_instklassptr() &&
+ tklass->is_instklassptr()->instance_klass()->is_inlinetype();
+ if (!alloc->_is_non_escaping && !inline_alloc) {
+ return false;
+ }
// Eliminate boxing allocations which are not used
// regardless scalar replaceable status.
- bool boxing_alloc = C->eliminate_boxing() &&
+ Node* res = alloc->result_cast();
+ bool boxing_alloc = (res == nullptr) && C->eliminate_boxing() &&
tklass->isa_instklassptr() &&
tklass->is_instklassptr()->instance_klass()->is_box_klass();
- if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != nullptr))) {
+ if (!alloc->_is_scalar_replaceable && !boxing_alloc && !inline_alloc) {
return false;
}
- alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
+ _callprojs = alloc->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
GrowableArray <SafePointNode *> safepoints;
if (!can_eliminate_allocation(&_igvn, alloc, &safepoints)) {
return false;
}
if (!alloc->_is_scalar_replaceable) {
- assert(res == nullptr, "sanity");
+ assert(res == nullptr || inline_alloc, "sanity");
// We can only eliminate allocation if all debug info references
// are already replaced with SafePointScalarObject because
// we can't search for a fields value without instance_id.
if (safepoints.length() > 0) {
+ assert(!inline_alloc || C->has_circular_inline_type(),
+ "Inline type allocations should have been scalarized earlier");
return false;
}
}
if (!scalar_replacement(alloc, safepoints)) {
p = p->caller();
}
log->tail("eliminate_allocation");
}
- process_users_of_allocation(alloc);
+ process_users_of_allocation(alloc, inline_alloc);
#ifndef PRODUCT
if (PrintEliminateAllocations) {
if (alloc->is_AllocateArray())
tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
return false;
}
assert(boxing->result_cast() == nullptr, "unexpected boxing node result");
- boxing->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
+ _callprojs = boxing->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
- const TypeTuple* r = boxing->tf()->range();
+ const TypeTuple* r = boxing->tf()->range_sig();
assert(r->cnt() > TypeFunc::Parms, "sanity");
const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
assert(t != nullptr, "sanity");
CompileLog* log = C->log();
// Now make the initial failure test. Usually a too-big test but
// might be a TRUE for finalizers.
IfNode *toobig_iff = new IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
transform_later(toobig_iff);
// Plug the failing-too-big test into the slow-path region
- Node *toobig_true = new IfTrueNode( toobig_iff );
+ Node* toobig_true = new IfTrueNode(toobig_iff);
transform_later(toobig_true);
slow_region ->init_req( too_big_or_final_path, toobig_true );
- toobig_false = new IfFalseNode( toobig_iff );
+ toobig_false = new IfFalseNode(toobig_iff);
transform_later(toobig_false);
} else {
// No initial test, just fall into next case
assert(allocation_has_use || !expand_fast_path, "Should already have been handled");
toobig_false = ctrl;
result_phi_i_o->init_req(slow_result_path, i_o);
// Name successful fast-path variables
Node* fast_oop_ctrl;
Node* fast_oop_rawmem;
+
if (allocation_has_use) {
Node* needgc_ctrl = nullptr;
result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);
intx prefetch_lines = length != nullptr ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
call->init_req(TypeFunc::Parms+0, klass_node);
if (length != nullptr) {
call->init_req(TypeFunc::Parms+1, length);
+ } else {
+ // Let the runtime know if this is a larval allocation
+ call->init_req(TypeFunc::Parms+1, _igvn.intcon(alloc->_larval));
}
// Copy debug information and adjust JVMState information, then replace
// allocate node with the call
call->copy_call_debug_info(&_igvn, alloc);
// Allocate Catch
// ^---Proj(io) <-------+ ^---CatchProj(io)
//
// We are interested in the CatchProj nodes.
//
- call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
+ _callprojs = call->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
// An allocate node has separate memory projections for the uses on
// the control and i_o paths. Replace the control memory projection with
// result_phi_rawmem (unless we are only generating a slow call when
// both memory projections are combined)
- if (expand_fast_path && _callprojs.fallthrough_memproj != nullptr) {
- migrate_outs(_callprojs.fallthrough_memproj, result_phi_rawmem);
+ if (expand_fast_path && _callprojs->fallthrough_memproj != nullptr) {
+ _igvn.replace_in_uses(_callprojs->fallthrough_memproj, result_phi_rawmem);
}
// Now change uses of catchall_memproj to use fallthrough_memproj and delete
// catchall_memproj so we end up with a call that has only 1 memory projection.
- if (_callprojs.catchall_memproj != nullptr ) {
- if (_callprojs.fallthrough_memproj == nullptr) {
- _callprojs.fallthrough_memproj = new ProjNode(call, TypeFunc::Memory);
- transform_later(_callprojs.fallthrough_memproj);
+ if (_callprojs->catchall_memproj != nullptr) {
+ if (_callprojs->fallthrough_memproj == nullptr) {
+ _callprojs->fallthrough_memproj = new ProjNode(call, TypeFunc::Memory);
+ transform_later(_callprojs->fallthrough_memproj);
}
- migrate_outs(_callprojs.catchall_memproj, _callprojs.fallthrough_memproj);
- _igvn.remove_dead_node(_callprojs.catchall_memproj);
+ _igvn.replace_in_uses(_callprojs->catchall_memproj, _callprojs->fallthrough_memproj);
+ _igvn.remove_dead_node(_callprojs->catchall_memproj);
}
// An allocate node has separate i_o projections for the uses on the control
// and i_o paths. Always replace the control i_o projection with result i_o
// otherwise incoming i_o become dead when only a slow call is generated
// (it is different from memory projections where both projections are
// combined in such case).
- if (_callprojs.fallthrough_ioproj != nullptr) {
- migrate_outs(_callprojs.fallthrough_ioproj, result_phi_i_o);
+ if (_callprojs->fallthrough_ioproj != nullptr) {
+ _igvn.replace_in_uses(_callprojs->fallthrough_ioproj, result_phi_i_o);
}
// Now change uses of catchall_ioproj to use fallthrough_ioproj and delete
// catchall_ioproj so we end up with a call that has only 1 i_o projection.
- if (_callprojs.catchall_ioproj != nullptr ) {
- if (_callprojs.fallthrough_ioproj == nullptr) {
- _callprojs.fallthrough_ioproj = new ProjNode(call, TypeFunc::I_O);
- transform_later(_callprojs.fallthrough_ioproj);
+ if (_callprojs->catchall_ioproj != nullptr) {
+ if (_callprojs->fallthrough_ioproj == nullptr) {
+ _callprojs->fallthrough_ioproj = new ProjNode(call, TypeFunc::I_O);
+ transform_later(_callprojs->fallthrough_ioproj);
}
- migrate_outs(_callprojs.catchall_ioproj, _callprojs.fallthrough_ioproj);
- _igvn.remove_dead_node(_callprojs.catchall_ioproj);
+ _igvn.replace_in_uses(_callprojs->catchall_ioproj, _callprojs->fallthrough_ioproj);
+ _igvn.remove_dead_node(_callprojs->catchall_ioproj);
}
// if we generated only a slow call, we are done
if (!expand_fast_path) {
// Now we can unhook i_o.
// Leave i_o attached to this call to avoid problems in preceding graph.
}
return;
}
- if (_callprojs.fallthrough_catchproj != nullptr) {
- ctrl = _callprojs.fallthrough_catchproj->clone();
+ if (_callprojs->fallthrough_catchproj != nullptr) {
+ ctrl = _callprojs->fallthrough_catchproj->clone();
transform_later(ctrl);
- _igvn.replace_node(_callprojs.fallthrough_catchproj, result_region);
+ _igvn.replace_node(_callprojs->fallthrough_catchproj, result_region);
} else {
ctrl = top();
}
Node *slow_result;
- if (_callprojs.resproj == nullptr) {
+ if (_callprojs->resproj[0] == nullptr) {
// no uses of the allocation result
slow_result = top();
} else {
- slow_result = _callprojs.resproj->clone();
+ slow_result = _callprojs->resproj[0]->clone();
transform_later(slow_result);
- _igvn.replace_node(_callprojs.resproj, result_phi_rawoop);
+ _igvn.replace_node(_callprojs->resproj[0], result_phi_rawoop);
}
// Plug slow-path into result merge point
result_region->init_req( slow_result_path, ctrl);
transform_later(result_region);
if (allocation_has_use) {
result_phi_rawoop->init_req(slow_result_path, slow_result);
transform_later(result_phi_rawoop);
}
- result_phi_rawmem->init_req(slow_result_path, _callprojs.fallthrough_memproj);
+ result_phi_rawmem->init_req(slow_result_path, _callprojs->fallthrough_memproj);
transform_later(result_phi_rawmem);
transform_later(result_phi_i_o);
// This completes all paths into the result merge point
}
void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
Node* ctrl = alloc->in(TypeFunc::Control);
Node* mem = alloc->in(TypeFunc::Memory);
Node* i_o = alloc->in(TypeFunc::I_O);
- alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
- if (_callprojs.resproj != nullptr) {
- for (DUIterator_Fast imax, i = _callprojs.resproj->fast_outs(imax); i < imax; i++) {
- Node* use = _callprojs.resproj->fast_out(i);
+ _callprojs = alloc->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
+ if (_callprojs->resproj[0] != nullptr) {
+ for (DUIterator_Fast imax, i = _callprojs->resproj[0]->fast_outs(imax); i < imax; i++) {
+ Node* use = _callprojs->resproj[0]->fast_out(i);
use->isa_MemBar()->remove(&_igvn);
--imax;
--i; // back up iterator
}
- assert(_callprojs.resproj->outcnt() == 0, "all uses must be deleted");
- _igvn.remove_dead_node(_callprojs.resproj);
+ assert(_callprojs->resproj[0]->outcnt() == 0, "all uses must be deleted");
+ _igvn.remove_dead_node(_callprojs->resproj[0]);
}
- if (_callprojs.fallthrough_catchproj != nullptr) {
- migrate_outs(_callprojs.fallthrough_catchproj, ctrl);
- _igvn.remove_dead_node(_callprojs.fallthrough_catchproj);
+ if (_callprojs->fallthrough_catchproj != nullptr) {
+ _igvn.replace_in_uses(_callprojs->fallthrough_catchproj, ctrl);
+ _igvn.remove_dead_node(_callprojs->fallthrough_catchproj);
}
- if (_callprojs.catchall_catchproj != nullptr) {
- _igvn.rehash_node_delayed(_callprojs.catchall_catchproj);
- _callprojs.catchall_catchproj->set_req(0, top());
+ if (_callprojs->catchall_catchproj != nullptr) {
+ _igvn.rehash_node_delayed(_callprojs->catchall_catchproj);
+ _callprojs->catchall_catchproj->set_req(0, top());
}
- if (_callprojs.fallthrough_proj != nullptr) {
- Node* catchnode = _callprojs.fallthrough_proj->unique_ctrl_out();
+ if (_callprojs->fallthrough_proj != nullptr) {
+ Node* catchnode = _callprojs->fallthrough_proj->unique_ctrl_out();
_igvn.remove_dead_node(catchnode);
- _igvn.remove_dead_node(_callprojs.fallthrough_proj);
+ _igvn.remove_dead_node(_callprojs->fallthrough_proj);
}
- if (_callprojs.fallthrough_memproj != nullptr) {
- migrate_outs(_callprojs.fallthrough_memproj, mem);
- _igvn.remove_dead_node(_callprojs.fallthrough_memproj);
+ if (_callprojs->fallthrough_memproj != nullptr) {
+ _igvn.replace_in_uses(_callprojs->fallthrough_memproj, mem);
+ _igvn.remove_dead_node(_callprojs->fallthrough_memproj);
}
- if (_callprojs.fallthrough_ioproj != nullptr) {
- migrate_outs(_callprojs.fallthrough_ioproj, i_o);
- _igvn.remove_dead_node(_callprojs.fallthrough_ioproj);
+ if (_callprojs->fallthrough_ioproj != nullptr) {
+ _igvn.replace_in_uses(_callprojs->fallthrough_ioproj, i_o);
+ _igvn.remove_dead_node(_callprojs->fallthrough_ioproj);
}
- if (_callprojs.catchall_memproj != nullptr) {
- _igvn.rehash_node_delayed(_callprojs.catchall_memproj);
- _callprojs.catchall_memproj->set_req(0, top());
+ if (_callprojs->catchall_memproj != nullptr) {
+ _igvn.rehash_node_delayed(_callprojs->catchall_memproj);
+ _callprojs->catchall_memproj->set_req(0, top());
}
- if (_callprojs.catchall_ioproj != nullptr) {
- _igvn.rehash_node_delayed(_callprojs.catchall_ioproj);
- _callprojs.catchall_ioproj->set_req(0, top());
+ if (_callprojs->catchall_ioproj != nullptr) {
+ _igvn.rehash_node_delayed(_callprojs->catchall_ioproj);
+ _callprojs->catchall_ioproj->set_req(0, top());
}
#ifndef PRODUCT
if (PrintEliminateAllocations) {
if (alloc->is_AllocateArray()) {
tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
}
}
// Helper for PhaseMacroExpand::expand_allocate_common.
// Initializes the newly-allocated storage.
- Node*
- PhaseMacroExpand::initialize_object(AllocateNode* alloc,
- Node* control, Node* rawmem, Node* object,
- Node* klass_node, Node* length,
- Node* size_in_bytes) {
+ Node* PhaseMacroExpand::initialize_object(AllocateNode* alloc,
+ Node* control, Node* rawmem, Node* object,
+ Node* klass_node, Node* length,
+ Node* size_in_bytes) {
InitializeNode* init = alloc->initialization();
// Store the klass & mark bits
- Node* mark_node = alloc->make_ideal_mark(&_igvn, object, control, rawmem);
+ Node* mark_node = alloc->make_ideal_mark(&_igvn, control, rawmem);
if (!mark_node->is_Con()) {
transform_later(mark_node);
}
rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type());
// there can be two Allocates to one Initialize. The answer in all these
// edge cases is safety first. It is always safe to clear immediately
// within an Allocate, and then (maybe or maybe not) clear some more later.
if (!(UseTLAB && ZeroTLAB)) {
rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
+ alloc->in(AllocateNode::DefaultValue),
+ alloc->in(AllocateNode::RawDefaultValue),
header_size, size_in_bytes,
&_igvn);
}
} else {
if (!init->is_complete()) {
Node* mem = alock->in(TypeFunc::Memory);
Node* ctrl = alock->in(TypeFunc::Control);
guarantee(ctrl != nullptr, "missing control projection, cannot replace_node() with null");
- alock->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
+ _callprojs = alock->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
// There are 2 projections from the lock. The lock node will
// be deleted when its last use is subsumed below.
assert(alock->outcnt() == 2 &&
- _callprojs.fallthrough_proj != nullptr &&
- _callprojs.fallthrough_memproj != nullptr,
+ _callprojs->fallthrough_proj != nullptr &&
+ _callprojs->fallthrough_memproj != nullptr,
"Unexpected projections from Lock/Unlock");
- Node* fallthroughproj = _callprojs.fallthrough_proj;
- Node* memproj_fallthrough = _callprojs.fallthrough_memproj;
+ Node* fallthroughproj = _callprojs->fallthrough_proj;
+ Node* memproj_fallthrough = _callprojs->fallthrough_memproj;
// The memory projection from a lock/unlock is RawMem
// The input to a Lock is merged memory, so extract its RawMem input
// (unless the MergeMem has been optimized away.)
if (alock->is_Lock()) {
// Make slow path call
CallNode *call = make_slow_call((CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(),
OptoRuntime::complete_monitor_locking_Java(), nullptr, slow_path,
obj, box, nullptr);
- call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
+ _callprojs = call->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
// Slow path can only throw asynchronous exceptions, which are always
// de-opted. So the compiler thinks the slow-call can never throw an
// exception. If it DOES throw an exception we would need the debug
// info removed first (since if it throws there is no monitor).
- assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr &&
- _callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock");
+ assert(_callprojs->fallthrough_ioproj == nullptr && _callprojs->catchall_ioproj == nullptr &&
+ _callprojs->catchall_memproj == nullptr && _callprojs->catchall_catchproj == nullptr, "Unexpected projection from Lock");
// Capture slow path
// disconnect fall-through projection from call and create a new one
// hook up users of fall-through projection to region
- Node *slow_ctrl = _callprojs.fallthrough_proj->clone();
+ Node *slow_ctrl = _callprojs->fallthrough_proj->clone();
transform_later(slow_ctrl);
- _igvn.hash_delete(_callprojs.fallthrough_proj);
- _callprojs.fallthrough_proj->disconnect_inputs(C);
+ _igvn.hash_delete(_callprojs->fallthrough_proj);
+ _callprojs->fallthrough_proj->disconnect_inputs(C);
region->init_req(1, slow_ctrl);
// region inputs are now complete
transform_later(region);
- _igvn.replace_node(_callprojs.fallthrough_proj, region);
+ _igvn.replace_node(_callprojs->fallthrough_proj, region);
Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
mem_phi->init_req(1, memproj);
transform_later(mem_phi);
- _igvn.replace_node(_callprojs.fallthrough_memproj, mem_phi);
+ _igvn.replace_node(_callprojs->fallthrough_memproj, mem_phi);
}
//------------------------------expand_unlock_node----------------------
void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
CallNode *call = make_slow_call((CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(),
CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C),
"complete_monitor_unlocking_C", slow_path, obj, box, thread);
- call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
- assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr &&
- _callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock");
+ _callprojs = call->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
+ assert(_callprojs->fallthrough_ioproj == nullptr && _callprojs->catchall_ioproj == nullptr &&
+ _callprojs->catchall_memproj == nullptr && _callprojs->catchall_catchproj == nullptr, "Unexpected projection from Lock");
// No exceptions for unlocking
// Capture slow path
// disconnect fall-through projection from call and create a new one
// hook up users of fall-through projection to region
- Node *slow_ctrl = _callprojs.fallthrough_proj->clone();
+ Node *slow_ctrl = _callprojs->fallthrough_proj->clone();
transform_later(slow_ctrl);
- _igvn.hash_delete(_callprojs.fallthrough_proj);
- _callprojs.fallthrough_proj->disconnect_inputs(C);
+ _igvn.hash_delete(_callprojs->fallthrough_proj);
+ _callprojs->fallthrough_proj->disconnect_inputs(C);
region->init_req(1, slow_ctrl);
// region inputs are now complete
transform_later(region);
- _igvn.replace_node(_callprojs.fallthrough_proj, region);
+ _igvn.replace_node(_callprojs->fallthrough_proj, region);
Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory) );
mem_phi->init_req(1, memproj );
mem_phi->init_req(2, mem);
transform_later(mem_phi);
- _igvn.replace_node(_callprojs.fallthrough_memproj, mem_phi);
+ _igvn.replace_node(_callprojs->fallthrough_memproj, mem_phi);
}
+ // An inline type might be returned from the call but we don't know its
+ // type. Either we get a buffered inline type (and nothing needs to be done)
+ // or one of the values being returned is the klass of the inline type
+ // and we need to allocate an inline type instance of that type and
+ // initialize it with the other values being returned. In that case, we
+ // first try a fast path allocation and initialize the value with the
+ // inline klass's pack handler, or we fall back to a runtime call.
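+ // Sketch of the assumed return convention (derived from the checks below):
+ //   (res & 1) == 0: res is a buffered inline type oop, nothing to do
+ //   (res & 1) == 1: res & ~1 is the InlineKlass pointer and the field
+ //                   values are returned in registers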
+ void PhaseMacroExpand::expand_mh_intrinsic_return(CallStaticJavaNode* call) {
+ assert(call->method()->is_method_handle_intrinsic(), "must be a method handle intrinsic call");
+ Node* ret = call->proj_out_or_null(TypeFunc::Parms);
+ if (ret == nullptr) {
+ return;
+ }
+ const TypeFunc* tf = call->_tf;
+ const TypeTuple* domain = OptoRuntime::store_inline_type_fields_Type()->domain_cc();
+ const TypeFunc* new_tf = TypeFunc::make(tf->domain_sig(), tf->domain_cc(), tf->range_sig(), domain);
+ call->_tf = new_tf;
+ // Make sure the change of type is applied before projections are processed by igvn
+ _igvn.set_type(call, call->Value(&_igvn));
+ _igvn.set_type(ret, ret->Value(&_igvn));
+
+ // Before any new projection is added:
+ CallProjections* projs = call->extract_projections(true, true);
+
+ // Create temporary hook nodes that will be replaced below.
+ // Add an input to prevent hook nodes from being dead.
+ Node* ctl = new Node(call);
+ Node* mem = new Node(ctl);
+ Node* io = new Node(ctl);
+ Node* ex_ctl = new Node(ctl);
+ Node* ex_mem = new Node(ctl);
+ Node* ex_io = new Node(ctl);
+ Node* res = new Node(ctl);
+
+ // Allocate a new buffered inline type only if a new one is not returned
+ Node* cast = transform_later(new CastP2XNode(ctl, res));
+ Node* mask = MakeConX(0x1);
+ Node* masked = transform_later(new AndXNode(cast, mask));
+ Node* cmp = transform_later(new CmpXNode(masked, mask));
+ Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq));
+ IfNode* allocation_iff = new IfNode(ctl, bol, PROB_MAX, COUNT_UNKNOWN);
+ transform_later(allocation_iff);
+ Node* allocation_ctl = transform_later(new IfTrueNode(allocation_iff));
+ Node* no_allocation_ctl = transform_later(new IfFalseNode(allocation_iff));
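+ // Tag bit not set: the call already returned a buffered oop, use it directly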
+ Node* no_allocation_res = transform_later(new CheckCastPPNode(no_allocation_ctl, res, TypeInstPtr::BOTTOM));
+
+ // Try to allocate a new buffered inline instance either from TLAB or eden space
+ Node* needgc_ctrl = nullptr; // needgc means slow case, i.e. allocation failed
+ CallLeafNoFPNode* handler_call;
+ const bool alloc_in_place = UseTLAB;
+ if (alloc_in_place) {
+ Node* fast_oop_ctrl = nullptr;
+ Node* fast_oop_rawmem = nullptr;
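+ // Clear the tag bit (-2 == ~0x1) to recover the raw klass pointer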
+ Node* mask2 = MakeConX(-2);
+ Node* masked2 = transform_later(new AndXNode(cast, mask2));
+ Node* rawklassptr = transform_later(new CastX2PNode(masked2));
+ Node* klass_node = transform_later(new CheckCastPPNode(allocation_ctl, rawklassptr, TypeInstKlassPtr::OBJECT_OR_NULL));
+ Node* layout_val = make_load(nullptr, mem, klass_node, in_bytes(Klass::layout_helper_offset()), TypeInt::INT, T_INT);
+ Node* size_in_bytes = ConvI2X(layout_val);
+ BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+ Node* fast_oop = bs->obj_allocate(this, mem, allocation_ctl, size_in_bytes, io, needgc_ctrl,
+ fast_oop_ctrl, fast_oop_rawmem,
+ AllocateInstancePrefetchLines);
+ // Allocation succeeded: initialize the buffered inline instance header first,
+ // then initialize its fields with an inline-klass-specific pack handler
+ Node* mark_node = makecon(TypeRawPtr::make((address)markWord::inline_type_prototype().value()));
+ fast_oop_rawmem = make_store(fast_oop_ctrl, fast_oop_rawmem, fast_oop, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
+ fast_oop_rawmem = make_store(fast_oop_ctrl, fast_oop_rawmem, fast_oop, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
+ if (UseCompressedClassPointers) {
+ fast_oop_rawmem = make_store(fast_oop_ctrl, fast_oop_rawmem, fast_oop, oopDesc::klass_gap_offset_in_bytes(), intcon(0), T_INT);
+ }
+ Node* fixed_block = make_load(fast_oop_ctrl, fast_oop_rawmem, klass_node, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
+ Node* pack_handler = make_load(fast_oop_ctrl, fast_oop_rawmem, fixed_block, in_bytes(InlineKlass::pack_handler_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
+ handler_call = new CallLeafNoFPNode(OptoRuntime::pack_inline_type_Type(),
+ nullptr,
+ "pack handler",
+ TypeRawPtr::BOTTOM);
+ handler_call->init_req(TypeFunc::Control, fast_oop_ctrl);
+ handler_call->init_req(TypeFunc::Memory, fast_oop_rawmem);
+ handler_call->init_req(TypeFunc::I_O, top());
+ handler_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
+ handler_call->init_req(TypeFunc::ReturnAdr, top());
+ handler_call->init_req(TypeFunc::Parms, pack_handler);
+ handler_call->init_req(TypeFunc::Parms+1, fast_oop);
+ } else {
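+ // No TLAB: skip the in-place fast path and go straight to the runtime call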
+ needgc_ctrl = allocation_ctl;
+ }
+
+ // Allocation failed, fall back to a runtime call
+ CallStaticJavaNode* slow_call = new CallStaticJavaNode(OptoRuntime::store_inline_type_fields_Type(),
+ StubRoutines::store_inline_type_fields_to_buf(),
+ "store_inline_type_fields",
+ TypePtr::BOTTOM);
+ slow_call->init_req(TypeFunc::Control, needgc_ctrl);
+ slow_call->init_req(TypeFunc::Memory, mem);
+ slow_call->init_req(TypeFunc::I_O, io);
+ slow_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
+ slow_call->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
+ slow_call->init_req(TypeFunc::Parms, res);
+
+ Node* slow_ctl = transform_later(new ProjNode(slow_call, TypeFunc::Control));
+ Node* slow_mem = transform_later(new ProjNode(slow_call, TypeFunc::Memory));
+ Node* slow_io = transform_later(new ProjNode(slow_call, TypeFunc::I_O));
+ Node* slow_res = transform_later(new ProjNode(slow_call, TypeFunc::Parms));
+ Node* slow_catc = transform_later(new CatchNode(slow_ctl, slow_io, 2));
+ Node* slow_norm = transform_later(new CatchProjNode(slow_catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci));
+ Node* slow_excp = transform_later(new CatchProjNode(slow_catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci));
+
+ Node* ex_r = new RegionNode(3);
+ Node* ex_mem_phi = new PhiNode(ex_r, Type::MEMORY, TypePtr::BOTTOM);
+ Node* ex_io_phi = new PhiNode(ex_r, Type::ABIO);
+ ex_r->init_req(1, slow_excp);
+ ex_mem_phi->init_req(1, slow_mem);
+ ex_io_phi->init_req(1, slow_io);
+ ex_r->init_req(2, ex_ctl);
+ ex_mem_phi->init_req(2, ex_mem);
+ ex_io_phi->init_req(2, ex_io);
+ transform_later(ex_r);
+ transform_later(ex_mem_phi);
+ transform_later(ex_io_phi);
+
+ // We don't know how many values are returned. This assumes the
+ // worst case, that all available registers are used.
+ for (uint i = TypeFunc::Parms+1; i < domain->cnt(); i++) {
+ if (domain->field_at(i) == Type::HALF) {
+ slow_call->init_req(i, top());
+ if (alloc_in_place) {
+ handler_call->init_req(i+1, top());
+ }
+ continue;
+ }
+ Node* proj = transform_later(new ProjNode(call, i));
+ slow_call->init_req(i, proj);
+ if (alloc_in_place) {
+ handler_call->init_req(i+1, proj);
+ }
+ }
+ // We can safepoint at that new call
+ slow_call->copy_call_debug_info(&_igvn, call);
+ transform_later(slow_call);
+ if (alloc_in_place) {
+ transform_later(handler_call);
+ }
+
+ Node* fast_ctl = nullptr;
+ Node* fast_res = nullptr;
+ MergeMemNode* fast_mem = nullptr;
+ if (alloc_in_place) {
+ fast_ctl = transform_later(new ProjNode(handler_call, TypeFunc::Control));
+ Node* rawmem = transform_later(new ProjNode(handler_call, TypeFunc::Memory));
+ fast_res = transform_later(new ProjNode(handler_call, TypeFunc::Parms));
+ fast_mem = MergeMemNode::make(mem);
+ fast_mem->set_memory_at(Compile::AliasIdxRaw, rawmem);
+ transform_later(fast_mem);
+ }
+
+ Node* r = new RegionNode(alloc_in_place ? 4 : 3);
+ Node* mem_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
+ Node* io_phi = new PhiNode(r, Type::ABIO);
+ Node* res_phi = new PhiNode(r, TypeInstPtr::BOTTOM);
+ r->init_req(1, no_allocation_ctl);
+ mem_phi->init_req(1, mem);
+ io_phi->init_req(1, io);
+ res_phi->init_req(1, no_allocation_res);
+ r->init_req(2, slow_norm);
+ mem_phi->init_req(2, slow_mem);
+ io_phi->init_req(2, slow_io);
+ res_phi->init_req(2, slow_res);
+ if (alloc_in_place) {
+ r->init_req(3, fast_ctl);
+ mem_phi->init_req(3, fast_mem);
+ io_phi->init_req(3, io);
+ res_phi->init_req(3, fast_res);
+ }
+ transform_later(r);
+ transform_later(mem_phi);
+ transform_later(io_phi);
+ transform_later(res_phi);
+
+ // Do not let stores that initialize this buffer be reordered with a subsequent
+ // store that would make this buffer accessible by other threads.
+ MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
+ transform_later(mb);
+ mb->init_req(TypeFunc::Memory, mem_phi);
+ mb->init_req(TypeFunc::Control, r);
+ r = new ProjNode(mb, TypeFunc::Control);
+ transform_later(r);
+ mem_phi = new ProjNode(mb, TypeFunc::Memory);
+ transform_later(mem_phi);
+
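+ // Rewire all users of the original call's projections to the merged results.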
+ assert(projs->nb_resproj == 1, "unexpected number of results");
+ _igvn.replace_in_uses(projs->fallthrough_catchproj, r);
+ _igvn.replace_in_uses(projs->fallthrough_memproj, mem_phi);
+ _igvn.replace_in_uses(projs->fallthrough_ioproj, io_phi);
+ _igvn.replace_in_uses(projs->resproj[0], res_phi);
+ _igvn.replace_in_uses(projs->catchall_catchproj, ex_r);
+ _igvn.replace_in_uses(projs->catchall_memproj, ex_mem_phi);
+ _igvn.replace_in_uses(projs->catchall_ioproj, ex_io_phi);
+ // The CatchNode should not use the ex_io_phi. Re-connect it to the catchall_ioproj.
+ Node* cn = projs->fallthrough_catchproj->in(0);
+ _igvn.replace_input_of(cn, 1, projs->catchall_ioproj);
+
+ _igvn.replace_node(ctl, projs->fallthrough_catchproj);
+ _igvn.replace_node(mem, projs->fallthrough_memproj);
+ _igvn.replace_node(io, projs->fallthrough_ioproj);
+ _igvn.replace_node(res, projs->resproj[0]);
+ _igvn.replace_node(ex_ctl, projs->catchall_catchproj);
+ _igvn.replace_node(ex_mem, projs->catchall_memproj);
+ _igvn.replace_node(ex_io, projs->catchall_ioproj);
+ }
+
void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) {
assert(check->in(SubTypeCheckNode::Control) == nullptr, "should be pinned");
Node* bol = check->unique_out();
Node* obj_or_subklass = check->in(SubTypeCheckNode::ObjOrSubKlass);
Node* superklass = check->in(SubTypeCheckNode::SuperKlass);
Node* subklass = nullptr;
if (_igvn.type(obj_or_subklass)->isa_klassptr()) {
subklass = obj_or_subklass;
} else {
Node* k_adr = basic_plus_adr(obj_or_subklass, oopDesc::klass_offset_in_bytes());
- subklass = _igvn.transform(LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), k_adr, TypeInstPtr::KLASS));
+ subklass = _igvn.transform(LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
}
Node* not_subtype_ctrl = Phase::gen_subtype_check(subklass, superklass, &ctrl, nullptr, _igvn, check->method(), check->bci());
_igvn.replace_input_of(iff, 0, C->top());
_igvn.replace_node(iffalse, ctrl);
}
_igvn.replace_node(check, C->top());
}
+
+ // FlatArrayCheckNode (array1 array2 ...) is expanded into:
+ //
+ // long mark = array1.mark | array2.mark | ...;
+ // long locked_bit = markWord::unlocked_value & array1.mark & array2.mark & ...;
+ // if (locked_bit == 0) {
+ // // One array is locked, load prototype header from the klass
+ // mark = array1.klass.proto | array2.klass.proto | ...
+ // }
+ // if ((mark & markWord::flat_array_bit_in_place) == 0) {
+ // ...
+ // }
+ void PhaseMacroExpand::expand_flatarraycheck_node(FlatArrayCheckNode* check) {
+ bool array_inputs = _igvn.type(check->in(FlatArrayCheckNode::ArrayOrKlass))->isa_oopptr() != nullptr;
+ if (array_inputs) {
+ Node* mark = MakeConX(0);
+ Node* locked_bit = MakeConX(markWord::unlocked_value);
+ Node* mem = check->in(FlatArrayCheckNode::Memory);
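+ // OR the mark words of all arrays together; AND the unlocked bit so it
+ // remains set only if every array is unlocked.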
+ for (uint i = FlatArrayCheckNode::ArrayOrKlass; i < check->req(); ++i) {
+ Node* ary = check->in(i);
+ const TypeOopPtr* t = _igvn.type(ary)->isa_oopptr();
+ assert(t != nullptr, "Mixing array and klass inputs");
+ assert(!t->is_flat() && !t->is_not_flat(), "Should have been optimized out");
+ Node* mark_adr = basic_plus_adr(ary, oopDesc::mark_offset_in_bytes());
+ Node* mark_load = _igvn.transform(LoadNode::make(_igvn, nullptr, mem, mark_adr, mark_adr->bottom_type()->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
+ mark = _igvn.transform(new OrXNode(mark, mark_load));
+ locked_bit = _igvn.transform(new AndXNode(locked_bit, mark_load));
+ }
+ assert(!mark->is_Con(), "Should have been optimized out");
+ Node* cmp = _igvn.transform(new CmpXNode(locked_bit, MakeConX(0)));
+ Node* is_unlocked = _igvn.transform(new BoolNode(cmp, BoolTest::ne));
+
+ // The BoolNode might be shared, so replace each If user individually
+ Node* old_bol = check->unique_out();
+ assert(old_bol->is_Bool() && old_bol->as_Bool()->_test._test == BoolTest::ne, "unexpected condition");
+ for (DUIterator_Last imin, i = old_bol->last_outs(imin); i >= imin; --i) {
+ IfNode* old_iff = old_bol->last_out(i)->as_If();
+ Node* ctrl = old_iff->in(0);
+ RegionNode* region = new RegionNode(3);
+ Node* mark_phi = new PhiNode(region, TypeX_X);
+
+ // Check if array is unlocked
+ IfNode* iff = _igvn.transform(new IfNode(ctrl, is_unlocked, PROB_MAX, COUNT_UNKNOWN))->as_If();
+
+ // Unlocked: Use bits from mark word
+ region->init_req(1, _igvn.transform(new IfTrueNode(iff)));
+ mark_phi->init_req(1, mark);
+
+ // Locked: Load prototype header from klass
+ ctrl = _igvn.transform(new IfFalseNode(iff));
+ Node* proto = MakeConX(0);
+ for (uint i = FlatArrayCheckNode::ArrayOrKlass; i < check->req(); ++i) {
+ Node* ary = check->in(i);
+ // Make the loads control dependent to ensure they are only executed if the array is locked
+ Node* klass_adr = basic_plus_adr(ary, oopDesc::klass_offset_in_bytes());
+ Node* klass = _igvn.transform(LoadKlassNode::make(_igvn, ctrl, C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
+ Node* proto_adr = basic_plus_adr(klass, in_bytes(Klass::prototype_header_offset()));
+ Node* proto_load = _igvn.transform(LoadNode::make(_igvn, ctrl, C->immutable_memory(), proto_adr, proto_adr->bottom_type()->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
+ proto = _igvn.transform(new OrXNode(proto, proto_load));
+ }
+ region->init_req(2, ctrl);
+ mark_phi->init_req(2, proto);
+
+ // Check if flat array bits are set
+ Node* mask = MakeConX(markWord::flat_array_bit_in_place);
+ Node* masked = _igvn.transform(new AndXNode(_igvn.transform(mark_phi), mask));
+ cmp = _igvn.transform(new CmpXNode(masked, MakeConX(0)));
+ Node* is_not_flat = _igvn.transform(new BoolNode(cmp, BoolTest::eq));
+
+ ctrl = _igvn.transform(region);
+ iff = _igvn.transform(new IfNode(ctrl, is_not_flat, PROB_MAX, COUNT_UNKNOWN))->as_If();
+ _igvn.replace_node(old_iff, iff);
+ }
+ _igvn.replace_node(check, C->top());
+ } else {
+ // Fall back to layout helper check
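+ // Without guaranteed array inputs there is no mark word to inspect, so test
+ // the flat-array tag bit in each input's layout helper instead.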
+ Node* lhs = intcon(0);
+ for (uint i = FlatArrayCheckNode::ArrayOrKlass; i < check->req(); ++i) {
+ Node* array_or_klass = check->in(i);
+ Node* klass = nullptr;
+ const TypePtr* t = _igvn.type(array_or_klass)->is_ptr();
+ assert(!t->is_flat() && !t->is_not_flat(), "Should have been optimized out");
+ if (t->isa_oopptr() != nullptr) {
+ Node* klass_adr = basic_plus_adr(array_or_klass, oopDesc::klass_offset_in_bytes());
+ klass = transform_later(LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
+ } else {
+ assert(t->isa_klassptr(), "Unexpected input type");
+ klass = array_or_klass;
+ }
+ Node* lh_addr = basic_plus_adr(klass, in_bytes(Klass::layout_helper_offset()));
+ Node* lh_val = _igvn.transform(LoadNode::make(_igvn, nullptr, C->immutable_memory(), lh_addr, lh_addr->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered));
+ lhs = _igvn.transform(new OrINode(lhs, lh_val));
+ }
+ Node* masked = transform_later(new AndINode(lhs, intcon(Klass::_lh_array_tag_flat_value_bit_inplace)));
+ Node* cmp = transform_later(new CmpINode(masked, intcon(0)));
+ Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq));
+ Node* m2b = transform_later(new Conv2BNode(masked));
+ // The matcher expects the input to If nodes to be produced by a Bool(CmpI..)
+ // pattern, but the input to other potential users (e.g. Phi) to be some
+ // other pattern (e.g. a Conv2B node, possibly idealized as a CMoveI).
+ Node* old_bol = check->unique_out();
+ for (DUIterator_Last imin, i = old_bol->last_outs(imin); i >= imin; --i) {
+ Node* user = old_bol->last_out(i);
+ for (uint j = 0; j < user->req(); j++) {
+ Node* n = user->in(j);
+ if (n == old_bol) {
+ _igvn.replace_input_of(user, j, user->is_If() ? bol : m2b);
+ }
+ }
+ }
+ _igvn.replace_node(check, C->top());
+ }
+ }
+
//---------------------------eliminate_macro_nodes----------------------
// Eliminate scalar replaced allocations and associated locks.
void PhaseMacroExpand::eliminate_macro_nodes() {
if (C->macro_count() == 0)
return;
if (success && PrintOptoStatistics) {
Atomic::inc(&PhaseMacroExpand::_objs_scalar_replaced_counter);
}
#endif
break;
- case Node::Class_CallStaticJava:
- success = eliminate_boxing_node(n->as_CallStaticJava());
+ case Node::Class_CallStaticJava: {
+ CallStaticJavaNode* call = n->as_CallStaticJava();
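+ // Method handle intrinsic calls are expanded later by expand_mh_intrinsic_return()
+ // and must stay on the macro list.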
+ if (!call->method()->is_method_handle_intrinsic()) {
+ success = eliminate_boxing_node(call);
+ }
break;
+ }
case Node::Class_Lock:
case Node::Class_Unlock:
assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
_has_locks = true;
break;
break;
case Node::Class_SubTypeCheck:
break;
case Node::Class_Opaque1:
break;
+ case Node::Class_FlatArrayCheck:
+ break;
default:
assert(n->Opcode() == Op_LoopLimit ||
n->is_OpaqueNotNull() ||
n->is_OpaqueInitializedAssertionPredicate() ||
n->Opcode() == Op_MaxL ||
// Remove it from macro list and put on IGVN worklist to optimize.
C->remove_macro_node(n);
_igvn._worklist.push(n);
success = true;
} else if (n->Opcode() == Op_CallStaticJava) {
- // Remove it from macro list and put on IGVN worklist to optimize.
- C->remove_macro_node(n);
- _igvn._worklist.push(n);
- success = true;
+ CallStaticJavaNode* call = n->as_CallStaticJava();
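+ // Keep method handle intrinsics on the macro list; they are expanded
+ // by expand_mh_intrinsic_return().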
+ if (!call->method()->is_method_handle_intrinsic()) {
+ // Remove it from macro list and put on IGVN worklist to optimize.
+ C->remove_macro_node(n);
+ _igvn._worklist.push(n);
+ success = true;
+ }
} else if (n->is_Opaque1()) {
_igvn.replace_node(n, n->in(1));
success = true;
} else if (n->is_OpaqueNotNull()) {
// Tests with OpaqueNotNull nodes are implicitly known to be true. Replace the node with true. In debug builds,
expand_arraycopy_node(n->as_ArrayCopy());
break;
case Node::Class_SubTypeCheck:
expand_subtypecheck_node(n->as_SubTypeCheck());
break;
+ case Node::Class_CallStaticJava:
+ expand_mh_intrinsic_return(n->as_CallStaticJava());
+ C->remove_macro_node(n);
+ break;
+ case Node::Class_FlatArrayCheck:
+ expand_flatarraycheck_node(n->as_FlatArrayCheck());
+ break;
default:
assert(false, "unknown node type in macro list");
}
assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
if (C->failing()) return true;