src/hotspot/share/opto/callnode.cpp
*
*/
#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
+ #include "ci/ciFlatArrayKlass.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/escape.hpp"
+ #include "opto/inlinetypenode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"
+ #include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "code/vmreg.hpp"
// Portions of code courtesy of Clifford Click
return RegMask::Empty;
}
//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
! Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
switch (proj->_con) {
case TypeFunc::Control:
case TypeFunc::I_O:
case TypeFunc::Memory:
return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
return RegMask::Empty;
}
//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
! Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
switch (proj->_con) {
case TypeFunc::Control:
case TypeFunc::I_O:
case TypeFunc::Memory:
return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
}
}
return nullptr;
}
- //------------------------------StartOSRNode----------------------------------
- // The method start node for an on stack replacement adapter
-
- //------------------------------osr_domain-----------------------------
- const TypeTuple *StartOSRNode::osr_domain() {
- const Type **fields = TypeTuple::fields(2);
- fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer
-
- return TypeTuple::make(TypeFunc::Parms+1, fields);
- }
-
//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
"Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};
st->print("[%d]", spobj->n_fields());
int ndim = cik->as_array_klass()->dimension() - 1;
while (ndim-- > 0) {
st->print("[]");
}
+ } else if (cik->is_flat_array_klass()) {
+ ciKlass* cie = cik->as_flat_array_klass()->base_element_klass();
+ cie->print_name_on(st);
+ st->print("[%d]", spobj->n_fields());
+ int ndim = cik->as_array_klass()->dimension() - 1;
+ while (ndim-- > 0) {
+ st->print("[]");
+ }
}
st->print("={");
uint nf = spobj->n_fields();
if (nf > 0) {
uint first_ind = spobj->first_index(mcall->jvms());
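+ // Scalarized inline types carry an extra leading is_init input; print it before the field values.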
+ if (iklass != nullptr && iklass->is_inlinetype()) {
+ Node* init_node = mcall->in(first_ind++);
+ if (!init_node->is_top()) {
+ st->print(" [is_init");
+ format_helper(regalloc, st, init_node, ":", -1, nullptr);
+ }
+ }
Node* fld_node = mcall->in(first_ind);
ciField* cifield;
if (iklass != nullptr) {
st->print(" [");
cifield = iklass->nonstatic_field_at(0);
if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt);
if (jvms() != nullptr) jvms()->dump_spec(st);
}
#endif
! const Type *CallNode::bottom_type() const { return tf()->range(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
! if (phase->type(in(0)) == Type::TOP) return Type::TOP;
! return tf()->range();
}
//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
// Use the standard compiler calling convention
SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}
//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
! Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
! switch (proj->_con) {
! case TypeFunc::Control:
! case TypeFunc::I_O:
! case TypeFunc::Memory:
! return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
!
! case TypeFunc::Parms+1: // For LONG & DOUBLE returns
! assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
! // 2nd half of doubles and longs
! return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
!
! case TypeFunc::Parms: { // Normal returns
! uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
! OptoRegPair regs = Opcode() == Op_CallLeafVector
! ? match->vector_return_value(ideal_reg) // Calls into assembly vector routine
! : is_CallRuntime()
! ? match->c_return_value(ideal_reg) // Calls into C runtime
! : match-> return_value(ideal_reg); // Calls into compiled Java code
! RegMask rm = RegMask(regs.first());
!
! if (Opcode() == Op_CallLeafVector) {
! // If the return is in a vector, compute the appropriate regmask taking the whole range into account
! if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
! if(OptoReg::is_valid(regs.second())) {
! for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
- rm.Insert(r);
}
}
}
}
-
- if( OptoReg::is_valid(regs.second()) )
- rm.Insert( regs.second() );
- return new MachProjNode(this,proj->_con,rm,ideal_reg);
}
case TypeFunc::ReturnAdr:
case TypeFunc::FramePtr:
default:
ShouldNotReachHere();
}
if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt);
if (jvms() != nullptr) jvms()->dump_spec(st);
}
#endif
! const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
! if (!in(0) || phase->type(in(0)) == Type::TOP) {
! return Type::TOP;
+ }
+ return tf()->range_cc();
}
//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
+ if (_entry_point == StubRoutines::store_inline_type_fields_to_buf()) {
+ // The call to that stub is a special case: its inputs are
+ // multiple values returned from a call and so it should follow
+ // the return convention.
+ SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
+ return;
+ }
// Use the standard compiler calling convention
SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}
//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
! Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
! uint con = proj->_con;
! const TypeTuple* range_cc = tf()->range_cc();
! if (con >= TypeFunc::Parms) {
! if (tf()->returns_inline_type_as_fields()) {
! // The call returns multiple values (inline type fields): we
! // create one projection per returned value.
! assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return");
! uint ideal_reg = range_cc->field_at(con)->ideal_reg();
! return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
! } else {
! if (con == TypeFunc::Parms) {
! uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
! OptoRegPair regs = Opcode() == Op_CallLeafVector
! ? match->vector_return_value(ideal_reg) // Calls into assembly vector routine
! : match->c_return_value(ideal_reg);
! RegMask rm = RegMask(regs.first());
!
! if (Opcode() == Op_CallLeafVector) {
! // If the return is in a vector, compute the appropriate regmask taking the whole range into account
! if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
! if(OptoReg::is_valid(regs.second())) {
! for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
! rm.Insert(r);
! }
! }
}
}
+
+ if (OptoReg::is_valid(regs.second())) {
+ rm.Insert(regs.second());
+ }
+ return new MachProjNode(this,con,rm,ideal_reg);
+ } else {
+ assert(con == TypeFunc::Parms+1, "only one return value");
+ assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
+ return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
}
}
}
+ switch (con) {
+ case TypeFunc::Control:
+ case TypeFunc::I_O:
+ case TypeFunc::Memory:
+ return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
+
case TypeFunc::ReturnAdr:
case TypeFunc::FramePtr:
default:
ShouldNotReachHere();
}
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
assert((t_oop != nullptr), "sanity");
if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
! const TypeTuple* args = _tf->domain();
Node* dest = nullptr;
// Stubs that can be called once an ArrayCopyNode is expanded have
// different signatures. Look for the second pointer argument,
// that is the destination of the copy.
for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
assert((t_oop != nullptr), "sanity");
if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
! const TypeTuple* args = _tf->domain_sig();
Node* dest = nullptr;
// Stubs that can be called once an ArrayCopyNode is expanded have
// different signatures. Look for the second pointer argument,
// that is the destination of the copy.
for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
(inst_t->instance_klass() == boxing_klass))) {
return true;
}
}
! const TypeTuple* d = tf()->domain();
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
(inst_t->instance_klass() == boxing_klass))) {
return true;
if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
(inst_t->instance_klass() == boxing_klass))) {
return true;
}
}
! const TypeTuple* d = tf()->domain_cc();
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
(inst_t->instance_klass() == boxing_klass))) {
return true;
}
return true;
}
// Does this call have a direct reference to n other than debug information?
! bool CallNode::has_non_debug_use(Node *n) {
! const TypeTuple * d = tf()->domain();
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
! Node *arg = in(i);
- if (arg == n) {
return true;
}
}
return false;
}
// Returns the unique CheckCastPP of a call
// or 'this' if there are several CheckCastPP or unexpected uses
// or returns null if there is none.
Node *CallNode::result_cast() {
Node *cast = nullptr;
}
return true;
}
// Does this call have a direct reference to n other than debug information?
! bool CallNode::has_non_debug_use(Node* n) {
! const TypeTuple* d = tf()->domain_cc();
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
! if (in(i) == n) {
return true;
}
}
return false;
}
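+ // Does this call reference n through its debug information (JVMState edges)?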
+ bool CallNode::has_debug_use(Node* n) {
+ if (jvms() != nullptr) {
+ for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
+ if (in(i) == n) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
// Returns the unique CheckCastPP of a call
// or 'this' if there are several CheckCastPP or unexpected uses
// or returns null if there is none.
Node *CallNode::result_cast() {
Node *cast = nullptr;
}
return cast;
}
! void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
! projs->fallthrough_proj = nullptr;
! projs->fallthrough_catchproj = nullptr;
! projs->fallthrough_ioproj = nullptr;
! projs->catchall_ioproj = nullptr;
! projs->catchall_catchproj = nullptr;
! projs->fallthrough_memproj = nullptr;
! projs->catchall_memproj = nullptr;
! projs->resproj = nullptr;
! projs->exobj = nullptr;
for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
ProjNode *pn = fast_out(i)->as_Proj();
if (pn->outcnt() == 0) continue;
switch (pn->_con) {
}
return cast;
}
! CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) {
! uint max_res = TypeFunc::Parms-1;
! for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
! ProjNode *pn = fast_out(i)->as_Proj();
! max_res = MAX2(max_res, pn->_con);
! }
!
! assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");
!
! uint projs_size = sizeof(CallProjections);
+ if (max_res > TypeFunc::Parms) {
+ projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
+ }
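+ // Allocate the CallProjections in the resource area; its trailing resproj array is sized to hold
+ // one projection per returned value.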
+ char* projs_storage = resource_allocate_bytes(projs_size);
+ CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);
for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
ProjNode *pn = fast_out(i)->as_Proj();
if (pn->outcnt() == 0) continue;
switch (pn->_con) {
projs->catchall_memproj = pn;
else
projs->fallthrough_memproj = pn;
break;
case TypeFunc::Parms:
! projs->resproj = pn;
break;
default:
! assert(false, "unexpected projection from allocation node.");
}
}
// The resproj may not exist because the result could be ignored
// and the exception object may not exist if an exception handler
// swallows the exception, but all the others must exist and be found.
- assert(projs->fallthrough_proj != nullptr, "must be found");
do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
assert(!do_asserts || projs->fallthrough_memproj != nullptr, "must be found");
assert(!do_asserts || projs->fallthrough_ioproj != nullptr, "must be found");
assert(!do_asserts || projs->catchall_catchproj != nullptr, "must be found");
if (separate_io_proj) {
assert(!do_asserts || projs->catchall_memproj != nullptr, "must be found");
assert(!do_asserts || projs->catchall_ioproj != nullptr, "must be found");
}
}
Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
// Validate attached generator
projs->catchall_memproj = pn;
else
projs->fallthrough_memproj = pn;
break;
case TypeFunc::Parms:
! projs->resproj[0] = pn;
break;
default:
! assert(pn->_con <= max_res, "unexpected projection from allocation node.");
+ projs->resproj[pn->_con-TypeFunc::Parms] = pn;
+ break;
}
}
// The resproj may not exist because the result could be ignored
// and the exception object may not exist if an exception handler
// swallows the exception, but all the others must exist and be found.
do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
+ assert(!do_asserts || projs->fallthrough_proj != nullptr, "must be found");
assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
assert(!do_asserts || projs->fallthrough_memproj != nullptr, "must be found");
assert(!do_asserts || projs->fallthrough_ioproj != nullptr, "must be found");
assert(!do_asserts || projs->catchall_catchproj != nullptr, "must be found");
if (separate_io_proj) {
assert(!do_asserts || projs->catchall_memproj != nullptr, "must be found");
assert(!do_asserts || projs->catchall_ioproj != nullptr, "must be found");
}
+ return projs;
}
Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
// Validate attached generator
_override_symbolic_info == call._override_symbolic_info;
}
void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
// Copy debug information and adjust JVMState information
! uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain()->cnt() : (uint)TypeFunc::Parms+1;
! uint new_dbg_start = tf()->domain()->cnt();
int jvms_adj = new_dbg_start - old_dbg_start;
assert (new_dbg_start == req(), "argument count mismatch");
Compile* C = phase->C;
// SafePointScalarObject node could be referenced several times in debug info.
_override_symbolic_info == call._override_symbolic_info;
}
void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
// Copy debug information and adjust JVMState information
! uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain_sig()->cnt() : (uint)TypeFunc::Parms+1;
! uint new_dbg_start = tf()->domain_sig()->cnt();
int jvms_adj = new_dbg_start - old_dbg_start;
assert (new_dbg_start == req(), "argument count mismatch");
Compile* C = phase->C;
// SafePointScalarObject node could be referenced several times in debug info.
#ifdef ASSERT
bool CallJavaNode::validate_symbolic_info() const {
if (method() == nullptr) {
return true; // call into runtime or uncommon trap
}
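+ // Under Valhalla, if_acmpeq/if_acmpne on value objects may be compiled as an injected
+ // substitutability-test call whose bci is the acmp bytecode, so there is no invoke to validate against.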
+ Bytecodes::Code bc = jvms()->method()->java_code_at_bci(jvms()->bci());
+ if (EnableValhalla && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
+ return true;
+ }
ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
ciMethod* callee = method();
if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
assert(override_symbolic_info(), "should be set");
}
CallStaticJavaNode &call = (CallStaticJavaNode&)n;
return CallJavaNode::cmp(call);
}
Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+ if (can_reshape && uncommon_trap_request() != 0) {
+ PhaseIterGVN* igvn = phase->is_IterGVN();
+ if (remove_unknown_flat_array_load(igvn, in(0), in(TypeFunc::Memory), in(TypeFunc::Parms))) {
+ if (!in(0)->is_Region()) {
+ igvn->replace_input_of(this, 0, phase->C->top());
+ }
+ return this;
+ }
+ }
+
CallGenerator* cg = generator();
if (can_reshape && cg != nullptr) {
assert(IncrementalInlineMH, "required");
assert(cg->call_node() == this, "mismatch");
assert(cg->is_mh_late_inline(), "not virtual");
}
#endif
return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}
+ // Split-if can cause the flat array branch of an array load with unknown type (see
+ // Parse::array_load) to end in an uncommon trap. In that case, the call to
+ // 'load_unknown_inline' is useless. Replace it with an uncommon trap with the same JVMState.
+ bool CallStaticJavaNode::remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg) {
+ if (ctl == nullptr || ctl->is_top() || mem == nullptr || mem->is_top() || !mem->is_MergeMem()) {
+ return false;
+ }
+ if (ctl->is_Region()) {
+ bool res = false;
+ for (uint i = 1; i < ctl->req(); i++) {
+ MergeMemNode* mm = mem->clone()->as_MergeMem();
+ for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
+ Node* m = mms.memory();
+ if (m->is_Phi() && m->in(0) == ctl) {
+ mms.set_memory(m->in(i));
+ }
+ }
+ if (remove_unknown_flat_array_load(igvn, ctl->in(i), mm, unc_arg)) {
+ res = true;
+ if (!ctl->in(i)->is_Region()) {
+ igvn->replace_input_of(ctl, i, igvn->C->top());
+ }
+ }
+ igvn->remove_dead_node(mm);
+ }
+ return res;
+ }
+ // Verify the control flow is ok
+ Node* call = ctl;
+ MemBarNode* membar = nullptr;
+ for (;;) {
+ if (call == nullptr || call->is_top()) {
+ return false;
+ }
+ if (call->is_Proj() || call->is_Catch() || call->is_MemBar()) {
+ call = call->in(0);
+ } else if (call->Opcode() == Op_CallStaticJava && !call->in(0)->is_top() &&
+ call->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
+ assert(call->in(0)->is_Proj() && call->in(0)->in(0)->is_MemBar(), "missing membar");
+ membar = call->in(0)->in(0)->as_MemBar();
+ break;
+ } else {
+ return false;
+ }
+ }
+
+ JVMState* jvms = call->jvms();
+ if (igvn->C->too_many_traps(jvms->method(), jvms->bci(), Deoptimization::trap_request_reason(uncommon_trap_request()))) {
+ return false;
+ }
+
+ Node* call_mem = call->in(TypeFunc::Memory);
+ if (call_mem == nullptr || call_mem->is_top()) {
+ return false;
+ }
+ if (!call_mem->is_MergeMem()) {
+ call_mem = MergeMemNode::make(call_mem);
+ igvn->register_new_node_with_optimizer(call_mem);
+ }
+
+ // Verify that there's no unexpected side effect
+ for (MergeMemStream mms2(mem->as_MergeMem(), call_mem->as_MergeMem()); mms2.next_non_empty2(); ) {
+ Node* m1 = mms2.is_empty() ? mms2.base_memory() : mms2.memory();
+ Node* m2 = mms2.memory2();
+
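+ // Walk up the memory graph from m1 (bounded to guard against pathological graphs) and verify that it
+ // reaches m2 or the load_unknown_inline call without crossing any other side-effecting node.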
+ for (uint i = 0; i < 100; i++) {
+ if (m1 == m2) {
+ break;
+ } else if (m1->is_Proj()) {
+ m1 = m1->in(0);
+ } else if (m1->is_MemBar()) {
+ m1 = m1->in(TypeFunc::Memory);
+ } else if (m1->Opcode() == Op_CallStaticJava &&
+ m1->as_Call()->entry_point() == OptoRuntime::load_unknown_inline_Java()) {
+ if (m1 != call) {
+ return false;
+ }
+ break;
+ } else if (m1->is_MergeMem()) {
+ MergeMemNode* mm = m1->as_MergeMem();
+ int idx = mms2.alias_idx();
+ if (idx == Compile::AliasIdxBot) {
+ m1 = mm->base_memory();
+ } else {
+ m1 = mm->memory_at(idx);
+ }
+ } else {
+ return false;
+ }
+ }
+ }
+ if (call_mem->outcnt() == 0) {
+ igvn->remove_dead_node(call_mem);
+ }
+
+ // Remove membar preceding the call
+ membar->remove(igvn);
+
+ address call_addr = OptoRuntime::uncommon_trap_blob()->entry_point();
+ CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap", nullptr);
+ unc->init_req(TypeFunc::Control, call->in(0));
+ unc->init_req(TypeFunc::I_O, call->in(TypeFunc::I_O));
+ unc->init_req(TypeFunc::Memory, call->in(TypeFunc::Memory));
+ unc->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
+ unc->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
+ unc->init_req(TypeFunc::Parms+0, unc_arg);
+ unc->set_cnt(PROB_UNLIKELY_MAG(4));
+ unc->copy_call_debug_info(igvn, call->as_CallStaticJava());
+
+ // Replace the call with an uncommon trap
+ igvn->replace_input_of(call, 0, igvn->C->top());
+
+ igvn->register_new_node_with_optimizer(unc);
+
+ Node* ctrl = igvn->transform(new ProjNode(unc, TypeFunc::Control));
+ Node* halt = igvn->transform(new HaltNode(ctrl, call->in(TypeFunc::FramePtr), "uncommon trap returned which should never happen"));
+ igvn->add_input_to(igvn->C->root(), halt);
+
+ return true;
+ }
+
+
#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
st->print("# Static ");
if (_name != nullptr) {
st->print("%s", _name);
return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
}
//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
}
void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
#ifdef ASSERT
! assert(tf()->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
"return vector size must match");
! const TypeTuple* d = tf()->domain();
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
Node* arg = in(i);
assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
"vector argument size must match");
}
return CallLeafNode::cmp(call) && _num_bits == call._num_bits;
}
//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
+ if (_entry_point == nullptr) {
+ // A call with a null entry point is a special case: its inputs are
+ // multiple values returned from a call, so it should follow
+ // the return convention.
+ SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
+ return;
+ }
SharedRuntime::c_calling_convention(sig_bt, parm_regs, argcnt);
}
void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
#ifdef ASSERT
! assert(tf()->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
"return vector size must match");
! const TypeTuple* d = tf()->domain_sig();
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
Node* arg = in(i);
assert(arg->bottom_type()->is_vect()->length_in_bytes() * BitsPerByte == _num_bits,
"vector argument size must match");
}
st->print("%s", _name);
CallNode::dump_spec(st);
}
#endif
+ uint CallLeafNoFPNode::match_edge(uint idx) const {
+ // A null entry point is a special case for which the target is in a
+ // register. We need to match that edge.
+ return entry_point() == nullptr && idx == TypeFunc::Parms;
+ }
+
//=============================================================================
void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
assert(verify_jvms(jvms), "jvms must match");
int loc = jvms->locoff() + idx;
//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
! return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node* SafePointNode::Identity(PhaseGVN* phase) {
//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
! if (remove_dead_region(phase, can_reshape)) {
+ return this;
+ }
+ // Scalarize inline types in safepoint debug info.
+ // Delay this until all inlining is over to avoid getting inconsistent debug info.
+ if (phase->C->scalarize_in_safepoints() && can_reshape && jvms() != nullptr) {
+ for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
+ Node* n = in(i)->uncast();
+ if (n->is_InlineType()) {
+ n->as_InlineType()->make_scalar_in_safepoints(phase->is_IterGVN());
+ }
+ }
+ }
+ return nullptr;
}
//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node* SafePointNode::Identity(PhaseGVN* phase) {
_depth(depth),
_n_fields(n_fields),
_alloc(alloc)
{
#ifdef ASSERT
! if (!alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) {
alloc->dump();
assert(false, "unexpected call node");
}
#endif
init_class_id(Class_SafePointScalarObject);
_depth(depth),
_n_fields(n_fields),
_alloc(alloc)
{
#ifdef ASSERT
! if (alloc != nullptr && !alloc->is_Allocate() && !(alloc->Opcode() == Op_VectorBox)) {
alloc->dump();
assert(false, "unexpected call node");
}
#endif
init_class_id(Class_SafePointScalarObject);
//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }
AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
Node *ctrl, Node *mem, Node *abio,
! Node *size, Node *klass_node, Node *initial_test)
: CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
{
init_class_id(Class_Allocate);
init_flags(Flag_is_macro);
_is_scalar_replaceable = false;
_is_non_escaping = false;
_is_allocation_MemBar_redundant = false;
Node *topnode = C->top();
init_req( TypeFunc::Control , ctrl );
init_req( TypeFunc::I_O , abio );
init_req( TypeFunc::Memory , mem );
//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }
AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
Node *ctrl, Node *mem, Node *abio,
! Node *size, Node *klass_node,
+ Node* initial_test,
+ InlineTypeNode* inline_type_node)
: CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
{
init_class_id(Class_Allocate);
init_flags(Flag_is_macro);
_is_scalar_replaceable = false;
_is_non_escaping = false;
_is_allocation_MemBar_redundant = false;
+ _larval = false;
Node *topnode = C->top();
init_req( TypeFunc::Control , ctrl );
init_req( TypeFunc::I_O , abio );
init_req( TypeFunc::Memory , mem );
init_req( AllocSize , size);
init_req( KlassNode , klass_node);
init_req( InitialTest , initial_test);
init_req( ALength , topnode);
init_req( ValidLengthTest , topnode);
C->add_macro_node(this);
}
void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
! assert(initializer != nullptr && initializer->is_object_initializer(),
"unexpected initializer method");
BCEscapeAnalyzer* analyzer = initializer->get_bcea();
if (analyzer == nullptr) {
return;
}
init_req( AllocSize , size);
init_req( KlassNode , klass_node);
init_req( InitialTest , initial_test);
init_req( ALength , topnode);
init_req( ValidLengthTest , topnode);
+ init_req( InlineType , inline_type_node);
+ // DefaultValue defaults to nullptr
+ // RawDefaultValue defaults to nullptr
C->add_macro_node(this);
}
void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
! assert(initializer != nullptr &&
+ (initializer->is_object_constructor() || initializer->is_class_initializer()),
"unexpected initializer method");
BCEscapeAnalyzer* analyzer = initializer->get_bcea();
if (analyzer == nullptr) {
return;
}
// Allocation node is first parameter in its initializer
if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
_is_allocation_MemBar_redundant = true;
}
}
! Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
Node* mark_node = nullptr;
! // For now only enable fast locking for non-array types
! mark_node = phase->MakeConX(markWord::prototype().value());
! return mark_node;
}
// Retrieve the length from the AllocateArrayNode. Narrow the type with a
// CastII, if appropriate. If we are not allowed to create new nodes, and
// a CastII is appropriate, return null.
// Allocation node is first parameter in its initializer
if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
_is_allocation_MemBar_redundant = true;
}
}
!
+ Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
Node* mark_node = nullptr;
! if (EnableValhalla) {
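+ // Under Valhalla the mark word prototype is klass-dependent (e.g. value classes use a different
+ // mark word pattern), so load it from the klass' prototype header instead of using a compile-time constant.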
! Node* klass_node = in(AllocateNode::KlassNode);
! Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
+ mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+ } else {
+ mark_node = phase->MakeConX(markWord::prototype().value());
+ }
+ mark_node = phase->transform(mark_node);
+ // Avoid returning a constant (old node) here because this method is used by LoadNode::Ideal
+ return new OrXNode(mark_node, phase->MakeConX(_larval ? markWord::larval_bit_in_place : 0));
}
// Retrieve the length from the AllocateArrayNode. Narrow the type with a
// CastII, if appropriate. If we are not allowed to create new nodes, and
// a CastII is appropriate, return null.
// Now see if we can optimize away this lock. We don't actually
// remove the locking here, we simply set the _eliminate flag which
// prevents macro expansion from expanding the lock. Since we don't
// modify the graph, the value returned from this function is the
// one computed above.
! if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
//
// If we are locking a non-escaped object, the lock/unlock is unnecessary
//
ConnectionGraph *cgr = phase->C->congraph();
if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
// Now see if we can optimize away this lock. We don't actually
// remove the locking here, we simply set the _eliminate flag which
// prevents macro expansion from expanding the lock. Since we don't
// modify the graph, the value returned from this function is the
// one computed above.
! const Type* obj_type = phase->type(obj_node());
+ if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
//
// If we are locking a non-escaped object, the lock/unlock is unnecessary
//
ConnectionGraph *cgr = phase->C->congraph();
if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
// remove the unlocking here, we simply set the _eliminate flag which
// prevents macro expansion from expanding the unlock. Since we don't
// modify the graph, the value returned from this function is the
// one computed above.
// Escape state is defined after Parse phase.
! if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
//
// If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
//
ConnectionGraph *cgr = phase->C->congraph();
if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
// remove the unlocking here, we simply set the _eliminate flag which
// prevents macro expansion from expanding the unlock. Since we don't
// modify the graph, the value returned from this function is the
// one computed above.
// Escape state is defined after Parse phase.
! const Type* obj_type = phase->type(obj_node());
+ if (can_reshape && EliminateLocks && !is_non_esc_obj() && !obj_type->is_inlinetypeptr()) {
//
// If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
//
ConnectionGraph *cgr = phase->C->congraph();
if (cgr != nullptr && cgr->can_eliminate_lock(this)) {
if (elem == Type::BOTTOM) {
// An array but we don't know what elements are
return true;
}
! dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
uint dest_alias = phase->C->get_alias_index(dest_t);
uint t_oop_alias = phase->C->get_alias_index(t_oop);
return dest_alias == t_oop_alias;
}
if (elem == Type::BOTTOM) {
// An array but we don't know what elements are
return true;
}
! dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
+ t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
uint dest_alias = phase->C->get_alias_index(dest_t);
uint t_oop_alias = phase->C->get_alias_index(t_oop);
return dest_alias == t_oop_alias;
}