src/hotspot/share/opto/graphKit.cpp
* questions.
*
*/
#include "precompiled.hpp"
+ #include "ci/ciFlatArrayKlass.hpp"
+ #include "ci/ciInlineKlass.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.hpp"
#include "ci/ciObjArray.hpp"
#include "asm/register.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
+ #include "opto/inlinetypenode.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
+ #include "opto/narrowptrnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subtypenode.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/growableArray.hpp"
//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
! GraphKit::GraphKit(JVMState* jvms)
: Phase(Phase::Parser),
_env(C->env()),
! _gvn(*C->initial_gvn()),
_barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
_exceptions = jvms->map()->next_exception();
if (_exceptions != nullptr) jvms->map()->set_next_exception(nullptr);
set_jvms(jvms);
}
// Private constructor for parser.
GraphKit::GraphKit()
: Phase(Phase::Parser),
#include "utilities/powerOfTwo.hpp"
#include "utilities/growableArray.hpp"
//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
! GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
: Phase(Phase::Parser),
_env(C->env()),
! _gvn((gvn != nullptr) ? *gvn : *C->initial_gvn()),
_barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
+ assert(gvn == nullptr || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled");
_exceptions = jvms->map()->next_exception();
if (_exceptions != nullptr) jvms->map()->set_next_exception(nullptr);
set_jvms(jvms);
+ #ifdef ASSERT
+ if (_gvn.is_IterGVN() != nullptr) {
+ assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
+ // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
+ _worklist_size = _gvn.C->igvn_worklist()->size();
+ }
+ #endif
}
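// Illustrative caller-side sketch (editor's assumption, not taken from this change):
// a caller that constructs a GraphKit while IGVN is active is expected to enable
// delayed transformation first so that the asserts above hold, e.g.
//   igvn->set_delay_transform(true);   // igvn is a PhaseIterGVN*
//   GraphKit kit(jvms, igvn);          // uses the (JVMState*, PhaseGVN*) constructor above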
// Private constructor for parser.
GraphKit::GraphKit()
: Phase(Phase::Parser),
ciMethod* cur_method = jvms->method();
int cur_bci = jvms->bci();
if (cur_method != nullptr && cur_bci != InvocationEntryBci) {
Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
return Interpreter::bytecode_should_reexecute(code) ||
! (is_anewarray && code == Bytecodes::_multianewarray);
// Reexecute _multianewarray bytecode which was replaced with
// sequence of [a]newarray. See Parse::do_multianewarray().
//
// Note: interpreter should not have it set since this optimization
// is limited by dimensions and guarded by flag so in some cases
ciMethod* cur_method = jvms->method();
int cur_bci = jvms->bci();
if (cur_method != nullptr && cur_bci != InvocationEntryBci) {
Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
return Interpreter::bytecode_should_reexecute(code) ||
! (is_anewarray && (code == Bytecodes::_multianewarray));
// Reexecute _multianewarray bytecode which was replaced with
// sequence of [a]newarray. See Parse::do_multianewarray().
//
// Note: interpreter should not have it set since this optimization
// is limited by dimensions and guarded by flag so in some cases
// Fill pointer walks backwards from "young:" to "root:" in the diagram above:
uint debug_ptr = call->req();
// Loop over the map input edges associated with jvms, add them
// to the call node, & reset all offsets to match call node array.
+
+ JVMState* callee_jvms = nullptr;
for (JVMState* in_jvms = youngest_jvms; in_jvms != nullptr; ) {
uint debug_end = debug_ptr;
uint debug_start = debug_ptr - in_jvms->debug_size();
debug_ptr = debug_start; // back up the ptr
// Add the Locals
k = in_jvms->locoff();
l = in_jvms->loc_size();
out_jvms->set_locoff(p);
if (!can_prune_locals) {
! for (j = 0; j < l; j++)
! call->set_req(p++, in_map->in(k+j));
} else {
p += l; // already set to top above by add_req_batch
}
// Add the Expression Stack
k = in_jvms->stkoff();
l = in_jvms->sp();
out_jvms->set_stkoff(p);
if (!can_prune_locals) {
! for (j = 0; j < l; j++)
! call->set_req(p++, in_map->in(k+j));
} else if (can_prune_locals && stack_slots_not_pruned != 0) {
// Divide stack into {S0,...,S1}, where S0 is set to top.
uint s1 = stack_slots_not_pruned;
stack_slots_not_pruned = 0; // for next iteration
if (s1 > l) s1 = l;
// Add the Locals
k = in_jvms->locoff();
l = in_jvms->loc_size();
out_jvms->set_locoff(p);
if (!can_prune_locals) {
! for (j = 0; j < l; j++) {
! Node* val = in_map->in(k + j);
+ // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state
+ if (callee_jvms != nullptr && val->is_InlineType() && val->as_InlineType()->is_larval() &&
+ callee_jvms->method()->is_object_constructor() && val == in_map->argument(in_jvms, 0) &&
+ val->bottom_type()->is_inlinetypeptr()) {
+ val = callee_jvms->map()->local(callee_jvms, 0); // Receiver
+ }
+ call->set_req(p++, val);
+ }
} else {
p += l; // already set to top above by add_req_batch
}
// Add the Expression Stack
k = in_jvms->stkoff();
l = in_jvms->sp();
out_jvms->set_stkoff(p);
if (!can_prune_locals) {
! for (j = 0; j < l; j++) {
! Node* val = in_map->in(k + j);
+ // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state
+ if (callee_jvms != nullptr && val->is_InlineType() && val->as_InlineType()->is_larval() &&
+ callee_jvms->method()->is_object_constructor() && val == in_map->argument(in_jvms, 0) &&
+ val->bottom_type()->is_inlinetypeptr()) {
+ val = callee_jvms->map()->local(callee_jvms, 0); // Receiver
+ }
+ call->set_req(p++, val);
+ }
} else if (can_prune_locals && stack_slots_not_pruned != 0) {
// Divide stack into {S0,...,S1}, where S0 is set to top.
uint s1 = stack_slots_not_pruned;
stack_slots_not_pruned = 0; // for next iteration
if (s1 > l) s1 = l;
assert(out_jvms->mon_size() == in_jvms->mon_size(), "size must match");
assert(out_jvms->scl_size() == in_jvms->scl_size(), "size must match");
assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");
// Update the two tail pointers in parallel.
+ callee_jvms = out_jvms;
out_jvms = out_jvms->caller();
in_jvms = in_jvms->caller();
}
assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
Node* GraphKit::load_object_klass(Node* obj) {
// Special-case a fresh allocation to avoid building nodes:
Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
if (akls != nullptr) return akls;
Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
! return _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), k_adr, TypeInstPtr::KLASS));
}
//-------------------------load_array_length-----------------------------------
Node* GraphKit::load_array_length(Node* array) {
// Special-case a fresh allocation to avoid building nodes:
Node* GraphKit::load_object_klass(Node* obj) {
// Special-case a fresh allocation to avoid building nodes:
Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
if (akls != nullptr) return akls;
Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
! return _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
}
//-------------------------load_array_length-----------------------------------
Node* GraphKit::load_array_length(Node* array) {
// Special-case a fresh allocation to avoid building nodes:
#endif
Node* GraphKit::null_check_common(Node* value, BasicType type,
// optional arguments for variations:
bool assert_null,
Node* *null_control,
! bool speculative) {
assert(!assert_null || null_control == nullptr, "not both at once");
if (stopped()) return top();
NOT_PRODUCT(explicit_null_checks_inserted++);
// Construct null check
Node *chk = nullptr;
switch(type) {
case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break;
#endif
Node* GraphKit::null_check_common(Node* value, BasicType type,
// optional arguments for variations:
bool assert_null,
Node* *null_control,
! bool speculative,
+ bool is_init_check) {
assert(!assert_null || null_control == nullptr, "not both at once");
if (stopped()) return top();
NOT_PRODUCT(explicit_null_checks_inserted++);
+ if (value->is_InlineType()) {
+ // Null checking a scalarized but nullable inline type. Check the IsInit
+ // input instead of the oop input to avoid keeping buffer allocations alive.
+ InlineTypeNode* vtptr = value->as_InlineType();
+ while (vtptr->get_oop()->is_InlineType()) {
+ vtptr = vtptr->get_oop()->as_InlineType();
+ }
+ null_check_common(vtptr->get_is_init(), T_INT, assert_null, null_control, speculative, true);
+ if (stopped()) {
+ return top();
+ }
+ if (assert_null) {
+ // TODO 8284443 Scalarize here (this currently leads to compilation bailouts)
+ // vtptr = InlineTypeNode::make_null(_gvn, vtptr->type()->inline_klass());
+ // replace_in_map(value, vtptr);
+ // return vtptr;
+ replace_in_map(value, null());
+ return null();
+ }
+ bool do_replace_in_map = (null_control == nullptr || (*null_control) == top());
+ return cast_not_null(value, do_replace_in_map);
+ }
+
// Construct null check
Node *chk = nullptr;
switch(type) {
case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break;
// Branch to failure if null
float ok_prob = PROB_MAX; // a priori estimate: nulls never happen
Deoptimization::DeoptReason reason;
if (assert_null) {
reason = Deoptimization::reason_null_assert(speculative);
! } else if (type == T_OBJECT) {
reason = Deoptimization::reason_null_check(speculative);
} else {
reason = Deoptimization::Reason_div0_check;
}
// %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
// Branch to failure if null
float ok_prob = PROB_MAX; // a priori estimate: nulls never happen
Deoptimization::DeoptReason reason;
if (assert_null) {
reason = Deoptimization::reason_null_assert(speculative);
! } else if (type == T_OBJECT || is_init_check) {
reason = Deoptimization::reason_null_check(speculative);
} else {
reason = Deoptimization::Reason_div0_check;
}
// %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
}
return value;
}
-
//------------------------------cast_not_null----------------------------------
// Cast obj to not-null on this path
Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
const Type *t = _gvn.type(obj);
const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
// Object is already not-null?
if( t == t_not_null ) return obj;
}
return value;
}
//------------------------------cast_not_null----------------------------------
// Cast obj to not-null on this path
Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
+ if (obj->is_InlineType()) {
+ Node* vt = obj->isa_InlineType()->clone_if_required(&gvn(), map(), do_replace_in_map);
+ vt->as_InlineType()->set_is_init(_gvn);
+ vt = _gvn.transform(vt);
+ if (do_replace_in_map) {
+ replace_in_map(obj, vt);
+ }
+ return vt;
+ }
const Type *t = _gvn.type(obj);
const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
// Object is already not-null?
if( t == t_not_null ) return obj;
bool require_atomic_access,
bool unaligned,
bool mismatched,
bool unsafe,
uint8_t barrier_data) {
! assert(adr_idx == C->get_alias_index(_gvn.type(adr)->isa_ptr()), "slice of address and input slice don't match");
assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
const TypePtr* adr_type = nullptr; // debug-mode-only argument
debug_only(adr_type = C->get_adr_type(adr_idx));
Node* mem = memory(adr_idx);
Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
ld = _gvn.transform(ld);
if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
// Improve graph before escape analysis and boxing elimination.
record_for_igvn(ld);
if (ld->is_DecodeN()) {
// Also record the actual load (LoadN) in case ld is DecodeN. In some
bool require_atomic_access,
bool unaligned,
bool mismatched,
bool unsafe,
uint8_t barrier_data) {
! // Fix 8344108 and re-enable the commented assert
+ //assert(adr_idx == C->get_alias_index(_gvn.type(adr)->isa_ptr()), "slice of address and input slice don't match");
assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
const TypePtr* adr_type = nullptr; // debug-mode-only argument
debug_only(adr_type = C->get_adr_type(adr_idx));
Node* mem = memory(adr_idx);
Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
ld = _gvn.transform(ld);
+
if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
// Improve graph before escape analysis and boxing elimination.
record_for_igvn(ld);
if (ld->is_DecodeN()) {
// Also record the actual load (LoadN) in case ld is DecodeN. In some
bool unaligned,
bool mismatched,
bool unsafe,
int barrier_data) {
assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
! assert(adr_idx == C->get_alias_index(_gvn.type(adr)->isa_ptr()), "slice of address and input slice don't match");
const TypePtr* adr_type = nullptr;
debug_only(adr_type = C->get_adr_type(adr_idx));
Node *mem = memory(adr_idx);
Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access);
if (unaligned) {
bool unaligned,
bool mismatched,
bool unsafe,
int barrier_data) {
assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
! // Fix 8344108 and re-enable the commented assert
+ //assert(adr_idx == C->get_alias_index(_gvn.type(adr)->isa_ptr()), "slice of address and input slice don't match");
const TypePtr* adr_type = nullptr;
debug_only(adr_type = C->get_adr_type(adr_idx));
Node *mem = memory(adr_idx);
Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access);
if (unaligned) {
Node* adr,
const TypePtr* adr_type,
Node* val,
const Type* val_type,
BasicType bt,
! DecoratorSet decorators) {
// Transformation of a value which could be null pointer (CastPP #null)
// could be delayed during Parse (for example, in adjust_map_after_if()).
// Execute transformation here to avoid barrier generation in such case.
if (_gvn.type(val) == TypePtr::NULL_PTR) {
val = _gvn.makecon(TypePtr::NULL_PTR);
Node* adr,
const TypePtr* adr_type,
Node* val,
const Type* val_type,
BasicType bt,
! DecoratorSet decorators,
+ bool safe_for_replace) {
// Transformation of a value which could be null pointer (CastPP #null)
// could be delayed during Parse (for example, in adjust_map_after_if()).
// Execute transformation here to avoid barrier generation in such case.
if (_gvn.type(val) == TypePtr::NULL_PTR) {
val = _gvn.makecon(TypePtr::NULL_PTR);
if (stopped()) {
return top(); // Dead path ?
}
assert(val != nullptr, "not dead path");
+ if (val->is_InlineType()) {
+ // Store to non-flat field. Buffer the inline type and make sure
+ // the store is re-executed if the allocation triggers deoptimization.
+ PreserveReexecuteState preexecs(this);
+ jvms()->set_should_reexecute(true);
+ val = val->as_InlineType()->buffer(this, safe_for_replace);
+ }
C2AccessValuePtr addr(adr, adr_type);
C2AccessValue value(val, val_type);
C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
if (access.is_raw()) {
Node* GraphKit::access_load_at(Node* obj, // containing obj
Node* adr, // actual address to store val at
const TypePtr* adr_type,
const Type* val_type,
BasicType bt,
! DecoratorSet decorators) {
if (stopped()) {
return top(); // Dead path ?
}
C2AccessValuePtr addr(adr, adr_type);
! C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr);
if (access.is_raw()) {
return _barrier_set->BarrierSetC2::load_at(access, val_type);
} else {
return _barrier_set->load_at(access, val_type);
}
Node* GraphKit::access_load_at(Node* obj, // containing obj
Node* adr, // actual address to store val at
const TypePtr* adr_type,
const Type* val_type,
BasicType bt,
! DecoratorSet decorators,
+ Node* ctl) {
if (stopped()) {
return top(); // Dead path ?
}
C2AccessValuePtr addr(adr, adr_type);
! C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr, ctl);
if (access.is_raw()) {
return _barrier_set->BarrierSetC2::load_at(access, val_type);
} else {
return _barrier_set->load_at(access, val_type);
}
}
//-------------------------array_element_address-------------------------
Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
const TypeInt* sizetype, Node* ctrl) {
! uint shift = exact_log2(type2aelembytes(elembt));
uint header = arrayOopDesc::base_offset_in_bytes(elembt);
// short-circuit a common case (saves lots of confusing waste motion)
jint idx_con = find_int_con(idx, -1);
if (idx_con >= 0) {
}
//-------------------------array_element_address-------------------------
Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
const TypeInt* sizetype, Node* ctrl) {
! const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
+ uint shift = arytype->is_flat() ? arytype->flat_log_elem_size() : exact_log2(type2aelembytes(elembt));
uint header = arrayOopDesc::base_offset_in_bytes(elembt);
// short-circuit a common case (saves lots of confusing waste motion)
jint idx_con = find_int_con(idx, -1);
if (idx_con >= 0) {
return ld;
}
//-------------------------set_arguments_for_java_call-------------------------
// Arguments (pre-popped from the stack) are taken from the JVMS.
! void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
! // Add the call arguments:
! uint nargs = call->method()->arg_size();
! for (uint i = 0; i < nargs; i++) {
! Node* arg = argument(i);
! call->init_req(i + TypeFunc::Parms, arg);
}
}
//---------------------------set_edges_for_java_call---------------------------
// Connect a newly created call into the current JVMS.
return ld;
}
//-------------------------set_arguments_for_java_call-------------------------
// Arguments (pre-popped from the stack) are taken from the JVMS.
! void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline) {
! PreserveReexecuteState preexecs(this);
! if (EnableValhalla) {
! // Make sure the call is "re-executed" if buffering of inline type arguments triggers deoptimization.
! // At this point, the call hasn't been executed yet, so we will only ever execute the call once.
! jvms()->set_should_reexecute(true);
+ int arg_size = method()->get_declared_signature_at_bci(bci())->arg_size_for_bc(java_bc());
+ inc_sp(arg_size);
+ }
+ // Add the call arguments
+ const TypeTuple* domain = call->tf()->domain_sig();
+ uint nargs = domain->cnt();
+ int arg_num = 0;
+ for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
+ Node* arg = argument(i-TypeFunc::Parms);
+ const Type* t = domain->field_at(i);
+ // TODO 8284443 A static call to a mismatched method should still be scalarized
+ if (t->is_inlinetypeptr() && !call->method()->get_Method()->mismatch() && call->method()->is_scalarized_arg(arg_num)) {
+ // We don't pass inline type arguments by reference but instead pass each field of the inline type
+ if (!arg->is_InlineType()) {
+ assert(_gvn.type(arg)->is_zero_type() && !t->inline_klass()->is_null_free(), "Unexpected argument type");
+ arg = InlineTypeNode::make_from_oop(this, arg, t->inline_klass(), t->inline_klass()->is_null_free());
+ }
+ InlineTypeNode* vt = arg->as_InlineType();
+ vt->pass_fields(this, call, idx, true, !t->maybe_null());
+ // If an inline type argument is passed as fields, attach the Method* to the call site
+ // to be able to access the extended signature later via attached_method_before_pc().
+ // For example, see CompiledMethod::preserve_callee_argument_oops().
+ call->set_override_symbolic_info(true);
+ // Register an evol dependency on the callee method to make sure that this method is deoptimized and
+ // re-compiled with a non-scalarized calling convention if the callee method is later marked as mismatched.
+ C->dependencies()->assert_evol_method(call->method());
+ arg_num++;
+ continue;
+ } else if (arg->is_InlineType()) {
+ // Pass inline type argument via oop to callee
+ InlineTypeNode* inline_type = arg->as_InlineType();
+ const ciMethod* method = call->method();
+ ciInstanceKlass* holder = method->holder();
+ const bool is_receiver = (i == TypeFunc::Parms);
+ const bool is_abstract_or_object_klass_constructor = method->is_object_constructor() &&
+ (holder->is_abstract() || holder->is_java_lang_Object());
+ const bool is_larval_receiver_on_super_constructor = is_receiver && is_abstract_or_object_klass_constructor;
+ bool must_init_buffer = true;
+ // We always need to buffer inline types when they are escaping. However, we can skip the actual initialization
+ // of the buffer if the inline type is a larval because we are going to update the buffer anyway which requires
+ // us to create a new one. But there is one special case where we are still required to initialize the buffer:
+ // When we have a larval receiver invoked on an abstract (value class) constructor or the Object constructor (that
+ // is not going to be inlined). After this call, the larval is completely initialized and thus not a larval anymore.
+ // We therefore need to force an initialization of the buffer to not lose all the field writes so far in case the
+ // buffer needs to be used (e.g. to read from when deoptimizing at runtime) or further updated in abstract super
+ // value class constructors which could have more fields to be initialized. Note that we do not need to
+ // initialize the buffer when invoking another constructor in the same class on a larval receiver because we
+ // have not initialized any fields, yet (this is done completely by the other constructor call).
+ if (inline_type->is_larval() && !is_larval_receiver_on_super_constructor) {
+ must_init_buffer = false;
+ }
+ arg = inline_type->buffer(this, true, must_init_buffer);
+ }
+ if (t != Type::HALF) {
+ arg_num++;
+ }
+ call->init_req(idx++, arg);
}
}
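// Editor's illustrative sketch (assumption, not part of this change): for a callee such as
//   static int test(MyValue v)   // MyValue is an inline type with fields {int x, int y}
// a scalarized call site built above passes v.x and v.y as separate call inputs
// (plus a null marker input when the argument is nullable), while a non-scalarized
// inline type argument occupies a single input holding the buffered oop.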
//---------------------------set_edges_for_java_call---------------------------
// Connect a newly created call into the current JVMS.
}
Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
if (stopped()) return top(); // maybe the call folded up?
- // Capture the return value, if any.
- Node* ret;
- if (call->method() == nullptr ||
- call->method()->return_type()->basic_type() == T_VOID)
- ret = top();
- else ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
-
// Note: Since any out-of-line call can produce an exception,
// we always insert an I_O projection from the call into the result.
make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
// through and exceptional paths, so replace the projections for
// the fall through path.
set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
}
+
+ // Capture the return value, if any.
+ Node* ret;
+ if (call->method() == nullptr || call->method()->return_type()->basic_type() == T_VOID) {
+ ret = top();
+ } else if (call->tf()->returns_inline_type_as_fields()) {
+ // Return of multiple values (inline type fields): we create an
+ // InlineType node, each field is a projection from the call.
+ ciInlineKlass* vk = call->method()->return_type()->as_inline_klass();
+ uint base_input = TypeFunc::Parms;
+ ret = InlineTypeNode::make_from_multi(this, call, vk, base_input, false, false);
+ } else {
+ ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
+ ciType* t = call->method()->return_type();
+ if (t->is_klass()) {
+ const Type* type = TypeOopPtr::make_from_klass(t->as_klass());
+ if (type->is_inlinetypeptr()) {
+ ret = InlineTypeNode::make_from_oop(this, ret, type->inline_klass(), type->inline_klass()->is_null_free());
+ }
+ }
+ }
+
+ // We just called the constructor on a value type receiver. Reload it from the buffer
+ ciMethod* method = call->method();
+ if (method->is_object_constructor() && !method->holder()->is_java_lang_Object()) {
+ InlineTypeNode* inline_type_receiver = call->in(TypeFunc::Parms)->isa_InlineType();
+ if (inline_type_receiver != nullptr) {
+ assert(inline_type_receiver->is_larval(), "must be larval");
+ assert(inline_type_receiver->is_allocated(&gvn()), "larval must be buffered");
+ InlineTypeNode* reloaded = InlineTypeNode::make_from_oop(this, inline_type_receiver->get_oop(),
+ inline_type_receiver->bottom_type()->inline_klass(), true);
+ assert(!reloaded->is_larval(), "should not be larval anymore");
+ replace_in_map(inline_type_receiver, reloaded);
+ }
+ }
+
return ret;
}
//--------------------set_predefined_input_for_runtime_call--------------------
// Reading and setting the memory state is way conservative here.
Node* ex_ctl = top();
SafePointNode* final_state = stop();
// Find all the needed outputs of this call
! CallProjections callprojs;
- call->extract_projections(&callprojs, true, do_asserts);
Unique_Node_List wl;
Node* init_mem = call->in(TypeFunc::Memory);
Node* final_mem = final_state->in(TypeFunc::Memory);
Node* final_ctl = final_state->in(TypeFunc::Control);
Node* final_io = final_state->in(TypeFunc::I_O);
// Replace all the old call edges with the edges from the inlining result
! if (callprojs.fallthrough_catchproj != nullptr) {
! C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
}
! if (callprojs.fallthrough_memproj != nullptr) {
if (final_mem->is_MergeMem()) {
// Parser's exits MergeMem was not transformed but may be optimized
final_mem = _gvn.transform(final_mem);
}
! C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem);
add_mergemem_users_to_worklist(wl, final_mem);
}
! if (callprojs.fallthrough_ioproj != nullptr) {
! C->gvn_replace_by(callprojs.fallthrough_ioproj, final_io);
}
// Replace the result with the new result if it exists and is used
! if (callprojs.resproj != nullptr && result != nullptr) {
! C->gvn_replace_by(callprojs.resproj, result);
}
if (ejvms == nullptr) {
// No exception edges to simply kill off those paths
! if (callprojs.catchall_catchproj != nullptr) {
! C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
}
! if (callprojs.catchall_memproj != nullptr) {
! C->gvn_replace_by(callprojs.catchall_memproj, C->top());
}
! if (callprojs.catchall_ioproj != nullptr) {
! C->gvn_replace_by(callprojs.catchall_ioproj, C->top());
}
// Replace the old exception object with top
! if (callprojs.exobj != nullptr) {
! C->gvn_replace_by(callprojs.exobj, C->top());
}
} else {
GraphKit ekit(ejvms);
// Load my combined exception state into the kit, with all phis transformed:
SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
replaced_nodes_exception = ex_map->replaced_nodes();
Node* ex_oop = ekit.use_exception_state(ex_map);
! if (callprojs.catchall_catchproj != nullptr) {
! C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
ex_ctl = ekit.control();
}
! if (callprojs.catchall_memproj != nullptr) {
Node* ex_mem = ekit.reset_memory();
! C->gvn_replace_by(callprojs.catchall_memproj, ex_mem);
add_mergemem_users_to_worklist(wl, ex_mem);
}
! if (callprojs.catchall_ioproj != nullptr) {
! C->gvn_replace_by(callprojs.catchall_ioproj, ekit.i_o());
}
// Replace the old exception object with the newly created one
! if (callprojs.exobj != nullptr) {
! C->gvn_replace_by(callprojs.exobj, ex_oop);
}
}
// Disconnect the call from the graph
call->disconnect_inputs(C);
Node* ex_ctl = top();
SafePointNode* final_state = stop();
// Find all the needed outputs of this call
! CallProjections* callprojs = call->extract_projections(true, do_asserts);
Unique_Node_List wl;
Node* init_mem = call->in(TypeFunc::Memory);
Node* final_mem = final_state->in(TypeFunc::Memory);
Node* final_ctl = final_state->in(TypeFunc::Control);
Node* final_io = final_state->in(TypeFunc::I_O);
// Replace all the old call edges with the edges from the inlining result
! if (callprojs->fallthrough_catchproj != nullptr) {
! C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl);
}
! if (callprojs->fallthrough_memproj != nullptr) {
if (final_mem->is_MergeMem()) {
// Parser's exits MergeMem was not transformed but may be optimized
final_mem = _gvn.transform(final_mem);
}
! C->gvn_replace_by(callprojs->fallthrough_memproj, final_mem);
add_mergemem_users_to_worklist(wl, final_mem);
}
! if (callprojs->fallthrough_ioproj != nullptr) {
! C->gvn_replace_by(callprojs->fallthrough_ioproj, final_io);
}
// Replace the result with the new result if it exists and is used
! if (callprojs->resproj[0] != nullptr && result != nullptr) {
! // If the inlined code is dead, the result projections for an inline type returned as
+ // fields have not been replaced. They will go away once the call is replaced by TOP below.
+ assert(callprojs->nb_resproj == 1 || (call->tf()->returns_inline_type_as_fields() && stopped()),
+ "unexpected number of results");
+ C->gvn_replace_by(callprojs->resproj[0], result);
}
if (ejvms == nullptr) {
// No exception edges to simply kill off those paths
! if (callprojs->catchall_catchproj != nullptr) {
! C->gvn_replace_by(callprojs->catchall_catchproj, C->top());
}
! if (callprojs->catchall_memproj != nullptr) {
! C->gvn_replace_by(callprojs->catchall_memproj, C->top());
}
! if (callprojs->catchall_ioproj != nullptr) {
! C->gvn_replace_by(callprojs->catchall_ioproj, C->top());
}
// Replace the old exception object with top
! if (callprojs->exobj != nullptr) {
! C->gvn_replace_by(callprojs->exobj, C->top());
}
} else {
GraphKit ekit(ejvms);
// Load my combined exception state into the kit, with all phis transformed:
SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
replaced_nodes_exception = ex_map->replaced_nodes();
Node* ex_oop = ekit.use_exception_state(ex_map);
! if (callprojs->catchall_catchproj != nullptr) {
! C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control());
ex_ctl = ekit.control();
}
! if (callprojs->catchall_memproj != nullptr) {
Node* ex_mem = ekit.reset_memory();
! C->gvn_replace_by(callprojs->catchall_memproj, ex_mem);
add_mergemem_users_to_worklist(wl, ex_mem);
}
! if (callprojs->catchall_ioproj != nullptr) {
! C->gvn_replace_by(callprojs->catchall_ioproj, ekit.i_o());
}
// Replace the old exception object with the newly created one
! if (callprojs->exobj != nullptr) {
! C->gvn_replace_by(callprojs->exobj, ex_oop);
}
}
// Disconnect the call from the graph
call->disconnect_inputs(C);
// optimizer doesn't like that.
while (wl.size() > 0) {
_gvn.transform(wl.pop());
}
! if (callprojs.fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) {
replaced_nodes.apply(C, final_ctl);
}
if (!ex_ctl->is_top() && do_replaced_nodes) {
replaced_nodes_exception.apply(C, ex_ctl);
}
// optimizer doesn't like that.
while (wl.size() > 0) {
_gvn.transform(wl.pop());
}
! if (callprojs->fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) {
replaced_nodes.apply(C, final_ctl);
}
if (!ex_ctl->is_top() && do_replaced_nodes) {
replaced_nodes_exception.apply(C, ex_ctl);
}
const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls, Type::trust_interfaces);
const TypeOopPtr* xtype = tklass->as_instance_type();
assert(xtype->klass_is_exact(), "Should be exact");
// Any reason to believe n is not null (from this profiling or a previous one)?
assert(ptr_kind != ProfileAlwaysNull, "impossible here");
! const TypePtr* ptr = (ptr_kind == ProfileMaybeNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
// record the new speculative type's depth
speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
speculative = speculative->with_inline_depth(jvms()->depth());
} else if (current_type->would_improve_ptr(ptr_kind)) {
// Profiling report that null was never seen so we can change the
const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls, Type::trust_interfaces);
const TypeOopPtr* xtype = tklass->as_instance_type();
assert(xtype->klass_is_exact(), "Should be exact");
// Any reason to believe n is not null (from this profiling or a previous one)?
assert(ptr_kind != ProfileAlwaysNull, "impossible here");
! const TypePtr* ptr = (ptr_kind != ProfileNeverNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
// record the new speculative type's depth
speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
speculative = speculative->with_inline_depth(jvms()->depth());
} else if (current_type->would_improve_ptr(ptr_kind)) {
// Profiling report that null was never seen so we can change the
}
if (speculative != current_type->speculative()) {
// Build a type with a speculative type (what we think we know
// about the type but will need a guard when we use it)
! const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
// We're changing the type, we need a new CheckCast node to carry
// the new type. The new type depends on the control: what
// profiling tells us is only valid from here as far as we can
// tell.
Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
}
if (speculative != current_type->speculative()) {
// Build a type with a speculative type (what we think we know
// about the type but will need a guard when we use it)
! const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative);
// We're changing the type, we need a new CheckCast node to carry
// the new type. The new type depends on the control: what
// profiling tells us is only valid from here as far as we can
// tell.
Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
java_bc() == Bytecodes::_instanceof ||
java_bc() == Bytecodes::_aastore) &&
method()->method_data()->is_mature()) {
ciProfileData* data = method()->method_data()->bci_to_data(bci());
if (data != nullptr) {
! if (!data->as_BitData()->null_seen()) {
! ptr_kind = ProfileNeverNull;
} else {
! assert(data->is_ReceiverTypeData(), "bad profile data type");
! ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
! uint i = 0;
! for (; i < call->row_limit(); i++) {
! ciKlass* receiver = call->receiver(i);
! if (receiver != nullptr) {
! break;
}
}
- ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
}
}
}
return record_profile_for_speculation(n, exact_kls, ptr_kind);
}
java_bc() == Bytecodes::_instanceof ||
java_bc() == Bytecodes::_aastore) &&
method()->method_data()->is_mature()) {
ciProfileData* data = method()->method_data()->bci_to_data(bci());
if (data != nullptr) {
! if (java_bc() == Bytecodes::_aastore) {
! ciKlass* array_type = nullptr;
+ ciKlass* element_type = nullptr;
+ ProfilePtrKind element_ptr = ProfileMaybeNull;
+ bool flat_array = true;
+ bool null_free_array = true;
+ method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
+ exact_kls = element_type;
+ ptr_kind = element_ptr;
} else {
! if (!data->as_BitData()->null_seen()) {
! ptr_kind = ProfileNeverNull;
! } else {
! assert(data->is_ReceiverTypeData(), "bad profile data type");
! ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
! uint i = 0;
! for (; i < call->row_limit(); i++) {
+ ciKlass* receiver = call->receiver(i);
+ if (receiver != nullptr) {
+ break;
+ }
}
+ ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
}
}
}
}
return record_profile_for_speculation(n, exact_kls, ptr_kind);
}
void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
if (!UseTypeSpeculation) {
return;
}
const TypeFunc* tf = TypeFunc::make(dest_method);
! int nargs = tf->domain()->cnt() - TypeFunc::Parms;
int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
! const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
if (is_reference_type(targ->basic_type())) {
ProfilePtrKind ptr_kind = ProfileMaybeNull;
ciKlass* better_type = nullptr;
if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
record_profile_for_speculation(argument(j), better_type, ptr_kind);
void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
if (!UseTypeSpeculation) {
return;
}
const TypeFunc* tf = TypeFunc::make(dest_method);
! int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
! const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
if (is_reference_type(targ->basic_type())) {
ProfilePtrKind ptr_kind = ProfileMaybeNull;
ciKlass* better_type = nullptr;
if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
record_profile_for_speculation(argument(j), better_type, ptr_kind);
void GraphKit::round_double_arguments(ciMethod* dest_method) {
if (Matcher::strict_fp_requires_explicit_rounding) {
// (Note: TypeFunc::make has a cache that makes this fast.)
const TypeFunc* tf = TypeFunc::make(dest_method);
! int nargs = tf->domain()->cnt() - TypeFunc::Parms;
for (int j = 0; j < nargs; j++) {
! const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
if (targ->basic_type() == T_DOUBLE) {
// If any parameters are doubles, they must be rounded before
// the call, dprecision_rounding does gvn.transform
Node *arg = argument(j);
arg = dprecision_rounding(arg);
void GraphKit::round_double_arguments(ciMethod* dest_method) {
if (Matcher::strict_fp_requires_explicit_rounding) {
// (Note: TypeFunc::make has a cache that makes this fast.)
const TypeFunc* tf = TypeFunc::make(dest_method);
! int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
for (int j = 0; j < nargs; j++) {
! const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
if (targ->basic_type() == T_DOUBLE) {
// If any parameters are doubles, they must be rounded before
// the call, dprecision_rounding does gvn.transform
Node *arg = argument(j);
arg = dprecision_rounding(arg);
if (!is_leaf) {
call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
} else if (flags & RC_NO_FP) {
call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
} else if (flags & RC_VECTOR){
! uint num_bits = call_type->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
} else {
call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
}
if (!is_leaf) {
call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
} else if (flags & RC_NO_FP) {
call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
} else if (flags & RC_VECTOR){
! uint num_bits = call_type->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
} else {
call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
}
Node* GraphKit::sign_extend_short(Node* in) {
Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
}
+
//------------------------------merge_memory-----------------------------------
// Merge memory from one path into the current memory state.
void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
Node* old_slice = mms.force_memory();
*ctrl = gvn.transform(r_ok_subtype);
return gvn.transform(r_not_subtype);
}
Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
bool expand_subtype_check = C->post_loop_opts_phase(); // macro node expansion is over
if (expand_subtype_check) {
MergeMemNode* mem = merged_memory();
Node* ctrl = control();
Node* subklass = obj_or_subklass;
! if (!_gvn.type(obj_or_subklass)->isa_klassptr()) {
subklass = load_object_klass(obj_or_subklass);
}
Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn, method(), bci());
set_control(ctrl);
*ctrl = gvn.transform(r_ok_subtype);
return gvn.transform(r_not_subtype);
}
Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
+ const Type* sub_t = _gvn.type(obj_or_subklass);
+ if (sub_t->make_oopptr() != nullptr && sub_t->make_oopptr()->is_inlinetypeptr()) {
+ sub_t = TypeKlassPtr::make(sub_t->inline_klass());
+ obj_or_subklass = makecon(sub_t);
+ }
bool expand_subtype_check = C->post_loop_opts_phase(); // macro node expansion is over
if (expand_subtype_check) {
MergeMemNode* mem = merged_memory();
Node* ctrl = control();
Node* subklass = obj_or_subklass;
! if (!sub_t->isa_klassptr()) {
subklass = load_object_klass(obj_or_subklass);
}
Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn, method(), bci());
set_control(ctrl);
return _gvn.transform(new IfFalseNode(iff));
}
// Profile-driven exact type check:
Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
! float prob,
- Node* *casted_receiver) {
assert(!klass->is_interface(), "no exact type check on interfaces");
!
const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces);
Node* recv_klass = load_object_klass(receiver);
! Node* want_klass = makecon(tklass);
- Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
- Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
- IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
- set_control( _gvn.transform(new IfTrueNode (iff)));
- Node* fail = _gvn.transform(new IfFalseNode(iff));
if (!stopped()) {
const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
! const TypeOopPtr* recvx_type = tklass->as_instance_type();
! assert(recvx_type->klass_is_exact(), "");
! if (!receiver_type->higher_equal(recvx_type)) { // ignore redundant casts
// Subsume downstream occurrences of receiver with a cast to
// recv_xtype, since now we know what the type will be.
! Node* cast = new CheckCastPPNode(control(), receiver, recvx_type);
! (*casted_receiver) = _gvn.transform(cast);
assert(!(*casted_receiver)->is_top(), "that path should be unreachable");
// (User must make the replace_in_map call.)
}
}
return fail;
}
//------------------------------subtype_check_receiver-------------------------
Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
Node** casted_receiver) {
const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces)->try_improve();
Node* want_klass = makecon(tklass);
return _gvn.transform(new IfFalseNode(iff));
}
// Profile-driven exact type check:
Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
! float prob, Node* *casted_receiver) {
assert(!klass->is_interface(), "no exact type check on interfaces");
! Node* fail = top();
+ const Type* rec_t = _gvn.type(receiver);
+ if (rec_t->is_inlinetypeptr()) {
+ if (klass->equals(rec_t->inline_klass())) {
+ (*casted_receiver) = receiver; // Always passes
+ } else {
+ (*casted_receiver) = top(); // Always fails
+ fail = control();
+ set_control(top());
+ }
+ return fail;
+ }
const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces);
Node* recv_klass = load_object_klass(receiver);
! fail = type_check(recv_klass, tklass, prob);
if (!stopped()) {
const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
! const TypeOopPtr* recv_xtype = tklass->as_instance_type();
! assert(recv_xtype->klass_is_exact(), "");
! if (!receiver_type->higher_equal(recv_xtype)) { // ignore redundant casts
// Subsume downstream occurrences of receiver with a cast to
// recv_xtype, since now we know what the type will be.
! Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);
! Node* res = _gvn.transform(cast);
+ if (recv_xtype->is_inlinetypeptr()) {
+ assert(!gvn().type(res)->maybe_null(), "receiver should never be null");
+ res = InlineTypeNode::make_from_oop(this, res, recv_xtype->inline_klass());
+ }
+ (*casted_receiver) = res;
assert(!(*casted_receiver)->is_top(), "that path should be unreachable");
// (User must make the replace_in_map call.)
}
}
return fail;
}
+ Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass,
+ float prob) {
+ Node* want_klass = makecon(tklass);
+ Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
+ Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
+ IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
+ set_control(_gvn.transform(new IfTrueNode (iff)));
+ Node* fail = _gvn.transform(new IfFalseNode(iff));
+ return fail;
+ }
+
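// Illustrative usage sketch of the new helper (editor's assumption, not part of this change):
//   Node* recv_klass = load_object_klass(obj);
//   Node* fail = type_check(recv_klass, TypeKlassPtr::make(klass, Type::trust_interfaces), prob);
//   // On return, control() follows the "klass matches" (IfTrue) path and
//   // 'fail' is the mismatch (IfFalse) path, as used by type_check_receiver() above.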
//------------------------------subtype_check_receiver-------------------------
Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
Node** casted_receiver) {
const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces)->try_improve();
Node* want_klass = makecon(tklass);
// Ignore interface type information until interface types are properly tracked.
if (!stopped() && !klass->is_interface()) {
const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
! if (!receiver_type->higher_equal(recv_type)) { // ignore redundant casts
! Node* cast = new CheckCastPPNode(control(), receiver, recv_type);
! (*casted_receiver) = _gvn.transform(cast);
}
}
return slow_ctl;
}
// Ignore interface type information until interface types are properly tracked.
if (!stopped() && !klass->is_interface()) {
const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
! if (receiver_type != nullptr && !receiver_type->higher_equal(recv_type)) { // ignore redundant casts
! Node* cast = _gvn.transform(new CheckCastPPNode(control(), receiver, recv_type));
! if (recv_type->is_inlinetypeptr()) {
+ cast = InlineTypeNode::make_from_oop(this, cast, recv_type->inline_klass());
+ }
+ (*casted_receiver) = cast;
}
}
return slow_ctl;
}
// (No, this isn't a call, but it's enough like a virtual call
// to use the same ciMethod accessor to get the profile info...)
// If we have a speculative type use it instead of profiling (which
// may not help us)
! ciKlass* exact_kls = spec_klass == nullptr ? profile_has_unique_klass() : spec_klass;
if (exact_kls != nullptr) {// no cast failures here
if (require_klass == nullptr ||
C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) {
// If we narrow the type to match what the type profile sees or
// the speculative type, we can then remove the rest of the
// (No, this isn't a call, but it's enough like a virtual call
// to use the same ciMethod accessor to get the profile info...)
// If we have a speculative type use it instead of profiling (which
// may not help us)
! ciKlass* exact_kls = spec_klass;
+ if (exact_kls == nullptr) {
+ if (java_bc() == Bytecodes::_aastore) {
+ ciKlass* array_type = nullptr;
+ ciKlass* element_type = nullptr;
+ ProfilePtrKind element_ptr = ProfileMaybeNull;
+ bool flat_array = true;
+ bool null_free_array = true;
+ method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
+ exact_kls = element_type;
+ } else {
+ exact_kls = profile_has_unique_klass();
+ }
+ }
if (exact_kls != nullptr) {// no cast failures here
if (require_klass == nullptr ||
C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) {
// If we narrow the type to match what the type profile sees or
// the speculative type, we can then remove the rest of the
// Do we know the type check always succeed?
bool known_statically = false;
if (_gvn.type(superklass)->singleton()) {
const TypeKlassPtr* superk = _gvn.type(superklass)->is_klassptr();
const TypeKlassPtr* subk = _gvn.type(obj)->is_oopptr()->as_klass_type();
! if (subk->is_loaded()) {
int static_res = C->static_subtype_check(superk, subk);
known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
}
}
// Do we know the type check always succeed?
bool known_statically = false;
if (_gvn.type(superklass)->singleton()) {
const TypeKlassPtr* superk = _gvn.type(superklass)->is_klassptr();
const TypeKlassPtr* subk = _gvn.type(obj)->is_oopptr()->as_klass_type();
! if (subk != nullptr && subk->is_loaded()) {
int static_res = C->static_subtype_check(superk, subk);
known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
}
}
// array store bytecode. Stack must be as-if BEFORE doing the bytecode so the
// uncommon-trap paths work. Adjust stack after this call.
// If failure_control is supplied and not null, it is filled in with
// the control edge for the cast failure. Otherwise, an appropriate
// uncommon trap or exception is thrown.
! Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
- Node* *failure_control) {
kill_dead_locals(); // Benefit all the uncommon traps
const TypeKlassPtr* klass_ptr_type = _gvn.type(superklass)->is_klassptr();
const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve();
const TypeOopPtr* toop = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type();
// Fast cutout: Check the case that the cast is vacuously true.
// This detects the common cases where the test will short-circuit
// away completely. We do this before we perform the null check,
// because if the test is going to turn into zero code, we don't
// want a residual null check left around. (Causes a slowdown,
// for example, in some objArray manipulations, such as a[i]=a[j].)
if (improved_klass_ptr_type->singleton()) {
! const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
! if (objtp != nullptr) {
! switch (C->static_subtype_check(improved_klass_ptr_type, objtp->as_klass_type())) {
case Compile::SSC_always_true:
// If we know the type check always succeed then we don't use
// the profiling data at this bytecode. Don't lose it, feed it
// to the type system as a speculative type.
! return record_profiled_receiver_for_speculation(obj);
case Compile::SSC_always_false:
// It needs a null check because a null will *pass* the cast check.
! // A non-null value will always produce an exception.
- if (!objtp->maybe_null()) {
bool is_aastore = (java_bc() == Bytecodes::_aastore);
Deoptimization::DeoptReason reason = is_aastore ?
Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
builtin_throw(reason);
return top();
// array store bytecode. Stack must be as-if BEFORE doing the bytecode so the
// uncommon-trap paths work. Adjust stack after this call.
// If failure_control is supplied and not null, it is filled in with
// the control edge for the cast failure. Otherwise, an appropriate
// uncommon trap or exception is thrown.
! Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_control, bool null_free) {
kill_dead_locals(); // Benefit all the uncommon traps
const TypeKlassPtr* klass_ptr_type = _gvn.type(superklass)->is_klassptr();
const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve();
const TypeOopPtr* toop = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type();
+ bool safe_for_replace = (failure_control == nullptr);
+ assert(!null_free || toop->is_inlinetypeptr(), "must be an inline type pointer");
// Fast cutout: Check the case that the cast is vacuously true.
// This detects the common cases where the test will short-circuit
// away completely. We do this before we perform the null check,
// because if the test is going to turn into zero code, we don't
// want a residual null check left around. (Causes a slowdown,
// for example, in some objArray manipulations, such as a[i]=a[j].)
if (improved_klass_ptr_type->singleton()) {
! const TypeKlassPtr* kptr = nullptr;
! const Type* t = _gvn.type(obj);
! if (t->isa_oop_ptr()) {
+ kptr = t->is_oopptr()->as_klass_type();
+ } else if (obj->is_InlineType()) {
+ ciInlineKlass* vk = t->inline_klass();
+ kptr = TypeInstKlassPtr::make(TypePtr::NotNull, vk, Type::Offset(0));
+ }
+ if (kptr != nullptr) {
+ switch (C->static_subtype_check(improved_klass_ptr_type, kptr)) {
case Compile::SSC_always_true:
// If we know the type check always succeed then we don't use
// the profiling data at this bytecode. Don't lose it, feed it
// to the type system as a speculative type.
! obj = record_profiled_receiver_for_speculation(obj);
+ if (null_free) {
+ assert(safe_for_replace, "must be");
+ obj = null_check(obj);
+ }
+ assert(stopped() || !toop->is_inlinetypeptr() || obj->is_InlineType(), "should have been scalarized");
+ return obj;
case Compile::SSC_always_false:
+ if (null_free) {
+ assert(safe_for_replace, "must be");
+ obj = null_check(obj);
+ }
// It needs a null check because a null will *pass* the cast check.
! if (t->isa_oopptr() != nullptr && !t->is_oopptr()->maybe_null()) {
bool is_aastore = (java_bc() == Bytecodes::_aastore);
Deoptimization::DeoptReason reason = is_aastore ?
Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
builtin_throw(reason);
return top();
}
}
}
ciProfileData* data = nullptr;
- bool safe_for_replace = false;
if (failure_control == nullptr) { // use MDO in regular case only
assert(java_bc() == Bytecodes::_aastore ||
java_bc() == Bytecodes::_checkcast,
"interpreter profiles type checks only for these BCs");
! data = method()->method_data()->bci_to_data(bci());
! safe_for_replace = true;
}
// Make the merge point
enum { _obj_path = 1, _null_path, PATH_LIMIT };
RegionNode* region = new RegionNode(PATH_LIMIT);
Node* phi = new PhiNode(region, toop);
C->set_has_split_ifs(true); // Has chance for split-if optimization
// Use null-cast information if it is available
bool speculative_not_null = false;
bool never_see_null = ((failure_control == nullptr) // regular case only
&& seems_never_null(obj, data, speculative_not_null));
// Null check; get casted pointer; set region slot 3
Node* null_ctl = top();
! Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
// If not_null_obj is dead, only null-path is taken
if (stopped()) { // Doing instance-of on a null?
set_control(null_ctl);
return null();
}
region->init_req(_null_path, null_ctl);
phi ->init_req(_null_path, null()); // Set null path value
if (null_ctl == top()) {
}
}
}
ciProfileData* data = nullptr;
if (failure_control == nullptr) { // use MDO in regular case only
assert(java_bc() == Bytecodes::_aastore ||
java_bc() == Bytecodes::_checkcast,
"interpreter profiles type checks only for these BCs");
! if (method()->method_data()->is_mature()) {
! data = method()->method_data()->bci_to_data(bci());
+ }
}
// Make the merge point
enum { _obj_path = 1, _null_path, PATH_LIMIT };
RegionNode* region = new RegionNode(PATH_LIMIT);
Node* phi = new PhiNode(region, toop);
+ _gvn.set_type(region, Type::CONTROL);
+ _gvn.set_type(phi, toop);
+
C->set_has_split_ifs(true); // Has chance for split-if optimization
// Use null-cast information if it is available
bool speculative_not_null = false;
bool never_see_null = ((failure_control == nullptr) // regular case only
&& seems_never_null(obj, data, speculative_not_null));
+ if (obj->is_InlineType()) {
+ // Re-execute if buffering triggers deoptimization
+ PreserveReexecuteState preexecs(this);
+ jvms()->set_should_reexecute(true);
+ obj = obj->as_InlineType()->buffer(this, safe_for_replace);
+ }
+
// Null check; get casted pointer; set region slot 3
Node* null_ctl = top();
! Node* not_null_obj = nullptr;
+ if (null_free) {
+ assert(safe_for_replace, "must be");
+ not_null_obj = null_check(obj);
+ } else {
+ not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
+ }
// If not_null_obj is dead, only null-path is taken
if (stopped()) { // Doing instance-of on a null?
set_control(null_ctl);
+ if (toop->is_inlinetypeptr()) {
+ return InlineTypeNode::make_null(_gvn, toop->inline_klass());
+ }
return null();
}
region->init_req(_null_path, null_ctl);
phi ->init_req(_null_path, null()); // Set null path value
if (null_ctl == top()) {
if (cast_obj == nullptr) {
// Generate the subtype check
Node* improved_superklass = superklass;
if (improved_klass_ptr_type != klass_ptr_type && improved_klass_ptr_type->singleton()) {
improved_superklass = makecon(improved_klass_ptr_type);
}
Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, improved_superklass);
-
// Plug in success path into the merge
cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
// Failure path ends in uncommon trap (or may be dead - failure impossible)
if (failure_control == nullptr) {
if (not_subtype_ctrl != top()) { // If failure is possible
PreserveJVMState pjvms(this);
set_control(not_subtype_ctrl);
bool is_aastore = (java_bc() == Bytecodes::_aastore);
Deoptimization::DeoptReason reason = is_aastore ?
Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
builtin_throw(reason);
}
if (cast_obj == nullptr) {
// Generate the subtype check
Node* improved_superklass = superklass;
if (improved_klass_ptr_type != klass_ptr_type && improved_klass_ptr_type->singleton()) {
+ // Only improve the superclass for constants, which allows subsequent subtype checks to possibly be
+ // commoned up. The other, non-constant cases cannot be improved with a cast node here since they could
+ // be folded to top. Additionally, the benefit would only be minor in non-constant cases.
improved_superklass = makecon(improved_klass_ptr_type);
}
Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, improved_superklass);
// Plug in success path into the merge
cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
// Failure path ends in uncommon trap (or may be dead - failure impossible)
if (failure_control == nullptr) {
if (not_subtype_ctrl != top()) { // If failure is possible
PreserveJVMState pjvms(this);
set_control(not_subtype_ctrl);
+ Node* obj_klass = nullptr;
+ if (not_null_obj->is_InlineType()) {
+ obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->inline_klass()));
+ } else {
+ obj_klass = load_object_klass(not_null_obj);
+ }
bool is_aastore = (java_bc() == Bytecodes::_aastore);
Deoptimization::DeoptReason reason = is_aastore ?
Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
builtin_throw(reason);
}
// Return final merged results
set_control( _gvn.transform(region) );
record_for_igvn(region);
! return record_profiled_receiver_for_speculation(res);
}
//------------------------------next_monitor-----------------------------------
// What number should be given to the next monitor?
int GraphKit::next_monitor() {
// Return final merged results
set_control( _gvn.transform(region) );
record_for_igvn(region);
! bool not_inline = !toop->can_be_inline_type();
+ bool not_flat_in_array = !UseFlatArray || not_inline || (toop->is_inlinetypeptr() && !toop->inline_klass()->flat_in_array());
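+ // If a successful cast implies the object cannot be an element of a flat array, try to
+ // sharpen the type of the array it was loaded from (if any): mark it as not null-free
+ // when the target cannot be an inline type at all, or at least as not flat.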
+ if (EnableValhalla && not_flat_in_array) {
+ // Check if obj has been loaded from an array
+ obj = obj->isa_DecodeN() ? obj->in(1) : obj;
+ Node* array = nullptr;
+ if (obj->isa_Load()) {
+ Node* address = obj->in(MemNode::Address);
+ if (address->isa_AddP()) {
+ array = address->as_AddP()->in(AddPNode::Base);
+ }
+ } else if (obj->is_Phi()) {
+ Node* region = obj->in(0);
+ // TODO make this more robust (see JDK-8231346)
+ if (region->req() == 3 && region->in(2) != nullptr && region->in(2)->in(0) != nullptr) {
+ IfNode* iff = region->in(2)->in(0)->isa_If();
+ if (iff != nullptr) {
+ iff->is_flat_array_check(&_gvn, &array);
+ }
+ }
+ }
+ if (array != nullptr) {
+ const TypeAryPtr* ary_t = _gvn.type(array)->isa_aryptr();
+ if (ary_t != nullptr && !ary_t->is_flat()) {
+ if (!ary_t->is_not_null_free() && not_inline) {
+ // Casting array element to a non-inline-type, mark array as not null-free.
+ Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_null_free()));
+ replace_in_map(array, cast);
+ } else if (!ary_t->is_not_flat()) {
+ // Casting array element to a non-flat type, mark array as not flat.
+ Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_flat()));
+ replace_in_map(array, cast);
+ }
+ }
+ }
+ }
+
+ if (!stopped() && !res->is_InlineType()) {
+ res = record_profiled_receiver_for_speculation(res);
+ if (toop->is_inlinetypeptr()) {
+ Node* vt = InlineTypeNode::make_from_oop(this, res, toop->inline_klass(), !gvn().type(res)->maybe_null());
+ res = vt;
+ if (safe_for_replace) {
+ replace_in_map(obj, vt);
+ replace_in_map(not_null_obj, vt);
+ replace_in_map(res, vt);
+ }
+ }
+ }
+ return res;
+ }
+
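+ // Test mark word bits of 'obj' against 'mask_val' and return a Bool that is true when the
+ // masked bits equal the mask (if 'eq') or when they differ (if '!eq'). If 'check_lock' is set,
+ // a locked object's original header bits are taken from the prototype header of its klass
+ // since the mark word itself is displaced while locked.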
+ Node* GraphKit::mark_word_test(Node* obj, uintptr_t mask_val, bool eq, bool check_lock) {
+ // Load markword
+ Node* mark_adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
+ Node* mark = make_load(nullptr, mark_adr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+ if (check_lock) {
+ // Check if obj is locked
+ Node* locked_bit = MakeConX(markWord::unlocked_value);
+ locked_bit = _gvn.transform(new AndXNode(locked_bit, mark));
+ Node* cmp = _gvn.transform(new CmpXNode(locked_bit, MakeConX(0)));
+ Node* is_unlocked = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
+ IfNode* iff = new IfNode(control(), is_unlocked, PROB_MAX, COUNT_UNKNOWN);
+ _gvn.transform(iff);
+ Node* locked_region = new RegionNode(3);
+ Node* mark_phi = new PhiNode(locked_region, TypeX_X);
+
+ // Unlocked: Use bits from mark word
+ locked_region->init_req(1, _gvn.transform(new IfTrueNode(iff)));
+ mark_phi->init_req(1, mark);
+
+ // Locked: Load prototype header from klass
+ set_control(_gvn.transform(new IfFalseNode(iff)));
+ // Make loads control dependent to make sure they are only executed if the object is locked
+ Node* klass_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
+ Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, control(), C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
+ Node* proto_adr = basic_plus_adr(klass, in_bytes(Klass::prototype_header_offset()));
+ Node* proto = _gvn.transform(LoadNode::make(_gvn, control(), C->immutable_memory(), proto_adr, proto_adr->bottom_type()->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
+
+ locked_region->init_req(2, control());
+ mark_phi->init_req(2, proto);
+ set_control(_gvn.transform(locked_region));
+ record_for_igvn(locked_region);
+
+ mark = mark_phi;
+ }
+
+ // Now check if mark word bits are set
+ Node* mask = MakeConX(mask_val);
+ Node* masked = _gvn.transform(new AndXNode(_gvn.transform(mark), mask));
+ record_for_igvn(masked); // Give it a chance to be optimized out by IGVN
+ Node* cmp = _gvn.transform(new CmpXNode(masked, mask));
+ return _gvn.transform(new BoolNode(cmp, eq ? BoolTest::eq : BoolTest::ne));
+ }
+
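+ // Test whether 'obj' is an inline type by checking the inline type pattern in its mark word.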
+ Node* GraphKit::inline_type_test(Node* obj, bool is_inline) {
+ return mark_word_test(obj, markWord::inline_type_pattern, is_inline, /* check_lock = */ false);
+ }
+
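+ // Test whether 'array_or_klass' is a flat array (or a flat array klass).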
+ Node* GraphKit::flat_array_test(Node* array_or_klass, bool flat) {
+ // We can't use immutable memory here because the mark word is mutable.
+ // PhaseIdealLoop::move_flat_array_check_out_of_loop will make sure the
+ // check is moved out of loops (mainly to enable loop unswitching).
+ Node* cmp = _gvn.transform(new FlatArrayCheckNode(C, memory(Compile::AliasIdxRaw), array_or_klass));
+ record_for_igvn(cmp); // Give it a chance to be optimized out by IGVN
+ return _gvn.transform(new BoolNode(cmp, flat ? BoolTest::eq : BoolTest::ne));
+ }
+
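+ // Test the null-free bit in the mark word of 'array'.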
+ Node* GraphKit::null_free_array_test(Node* array, bool null_free) {
+ return mark_word_test(array, markWord::null_free_array_bit_in_place, null_free);
+ }
+
+ // Deoptimize if 'ary' is a null-free inline type array and 'val' is null
+ Node* GraphKit::inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace) {
+ RegionNode* region = new RegionNode(3);
+ Node* null_ctl = top();
+ null_check_oop(val, &null_ctl);
+ if (null_ctl != top()) {
+ PreserveJVMState pjvms(this);
+ set_control(null_ctl);
+ {
+ // Deoptimize if null-free array
+ BuildCutout unless(this, null_free_array_test(ary, /* null_free = */ false), PROB_MAX);
+ inc_sp(nargs);
+ uncommon_trap(Deoptimization::Reason_null_check,
+ Deoptimization::Action_none);
+ }
+ region->init_req(1, control());
+ }
+ region->init_req(2, control());
+ set_control(_gvn.transform(region));
+ record_for_igvn(region);
+ if (_gvn.type(val) == TypePtr::NULL_PTR) {
+ // Since we just successfully stored null, the array cannot be null-free.
+ const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
+ ary_t = ary_t->cast_to_not_null_free();
+ Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
+ if (safe_for_replace) {
+ replace_in_map(ary, cast);
+ }
+ ary = cast;
+ }
+ return ary;
}
//------------------------------next_monitor-----------------------------------
// What number should be given to the next monitor?
int GraphKit::next_monitor() {
// %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
assert(SynchronizationEntryBCI == InvocationEntryBci, "");
if( !GenerateSynchronizationCode )
return nullptr; // Not locking things?
+
if (stopped()) // Dead monitor?
return nullptr;
assert(dead_locals_are_killed(), "should kill locals before sync. point");
return;
if (stopped()) { // Dead monitor?
map()->pop_monitor(); // Kill monitor from debug info
return;
}
+ assert(!obj->is_InlineType(), "should not unlock on inline type");
// Memory barrier to avoid floating things down past the locked region
insert_mem_bar(Op_MemBarReleaseLock);
const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
// almost always feature constant types.
Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr();
if (!StressReflectiveCode && klass_t != nullptr) {
bool xklass = klass_t->klass_is_exact();
! if (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM)) {
jint lhelper;
! if (klass_t->isa_aryklassptr()) {
! BasicType elem = klass_t->as_instance_type()->isa_aryptr()->elem()->array_element_basic_type();
if (is_reference_type(elem, true)) {
elem = T_OBJECT;
}
lhelper = Klass::array_layout_helper(elem);
} else {
// almost always feature constant types.
Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr();
if (!StressReflectiveCode && klass_t != nullptr) {
bool xklass = klass_t->klass_is_exact();
! bool can_be_flat = false;
+ const TypeAryPtr* ary_type = klass_t->as_instance_type()->isa_aryptr();
+ if (UseFlatArray && !xklass && ary_type != nullptr && !ary_type->is_null_free()) {
+ // Don't constant fold if the runtime type might be a flat array but the static type is not.
+ const TypeOopPtr* elem = ary_type->elem()->make_oopptr();
+ can_be_flat = ary_type->can_be_inline_array() && (!elem->is_inlinetypeptr() || elem->inline_klass()->flat_in_array());
+ }
+ if (!can_be_flat && (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM))) {
jint lhelper;
! if (klass_t->is_flat()) {
! lhelper = ary_type->flat_layout_helper();
+ } else if (klass_t->isa_aryklassptr()) {
+ BasicType elem = ary_type->elem()->array_element_basic_type();
if (is_reference_type(elem, true)) {
elem = T_OBJECT;
}
lhelper = Klass::array_layout_helper(elem);
} else {
DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
Node* prevmem = kit.memory(alias_idx);
init_in_merge->set_memory_at(alias_idx, prevmem);
! kit.set_memory(init_out_raw, alias_idx);
}
//---------------------------set_output_for_allocation-------------------------
Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
const TypeOopPtr* oop_type,
DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
Node* prevmem = kit.memory(alias_idx);
init_in_merge->set_memory_at(alias_idx, prevmem);
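+ // Callers may pass nullptr for init_out_raw to only hook up the input slice
+ // (see the flat array handling in set_output_for_allocation).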
! if (init_out_raw != nullptr) {
+ kit.set_memory(init_out_raw, alias_idx);
+ }
}
//---------------------------set_output_for_allocation-------------------------
Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
const TypeOopPtr* oop_type,
// and link them properly (as a group) to the InitializeNode.
assert(init->in(InitializeNode::Memory) == malloc, "");
MergeMemNode* minit_in = MergeMemNode::make(malloc);
init->set_req(InitializeNode::Memory, minit_in);
record_for_igvn(minit_in); // fold it up later, if possible
Node* minit_out = memory(rawidx);
assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
// Add an edge in the MergeMem for the header fields so an access
// to one of those has correct memory state
set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
if (oop_type->isa_aryptr()) {
! const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
! int elemidx = C->get_alias_index(telemref);
! hook_memory_on_init(*this, elemidx, minit_in, minit_out);
} else if (oop_type->isa_instptr()) {
ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
ciField* field = ik->nonstatic_field_at(i);
if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
continue; // do not bother to track really large numbers of fields
// and link them properly (as a group) to the InitializeNode.
assert(init->in(InitializeNode::Memory) == malloc, "");
MergeMemNode* minit_in = MergeMemNode::make(malloc);
init->set_req(InitializeNode::Memory, minit_in);
record_for_igvn(minit_in); // fold it up later, if possible
+ _gvn.set_type(minit_in, Type::MEMORY);
Node* minit_out = memory(rawidx);
assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
// Add an edge in the MergeMem for the header fields so an access
// to one of those has correct memory state
set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
if (oop_type->isa_aryptr()) {
! const TypeAryPtr* arytype = oop_type->is_aryptr();
! if (arytype->is_flat()) {
! // Initially all flat array accesses share a single slice
+ // but that changes after parsing. Prepare the memory graph so
+ // it can optimize flat array accesses properly once they
+ // don't share a single slice.
+ assert(C->flat_accesses_share_alias(), "should be set at parse time");
+ C->set_flat_accesses_share_alias(false);
+ ciInlineKlass* vk = arytype->elem()->inline_klass();
+ for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
+ ciField* field = vk->nonstatic_field_at(i);
+ if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
+ continue; // do not bother to track really large numbers of fields
+ int off_in_vt = field->offset_in_bytes() - vk->first_field_offset();
+ const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
+ int fieldidx = C->get_alias_index(adr_type, true);
+ // Pass nullptr for init_out. Having per-field memory edges of the flat array elements as uses of the
+ // Initialize node can result in per-field Phis being created, which confuses the logic of
+ // Compile::adjust_flat_array_access_aliases().
+ hook_memory_on_init(*this, fieldidx, minit_in, nullptr);
+ }
+ C->set_flat_accesses_share_alias(true);
+ hook_memory_on_init(*this, C->get_alias_index(TypeAryPtr::INLINES), minit_in, minit_out);
+ } else {
+ const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
+ int elemidx = C->get_alias_index(telemref);
+ hook_memory_on_init(*this, elemidx, minit_in, minit_out);
+ }
} else if (oop_type->isa_instptr()) {
+ set_memory(minit_out, C->get_alias_index(oop_type)); // mark word
ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
ciField* field = ik->nonstatic_field_at(i);
if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
continue; // do not bother to track really large numbers of fields
// - If 'return_size_val', report the total object size to the caller.
// - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
Node* GraphKit::new_instance(Node* klass_node,
Node* extra_slow_test,
Node* *return_size_val,
! bool deoptimize_on_exception) {
// Compute size in doublewords
// The size is always an integral number of doublewords, represented
// as a positive bytewise size stored in the klass's layout_helper.
// The layout_helper also encodes (in a low bit) the need for a slow path.
jint layout_con = Klass::_lh_neutral_value;
Node* layout_val = get_layout_helper(klass_node, layout_con);
! int layout_is_con = (layout_val == nullptr);
if (extra_slow_test == nullptr) extra_slow_test = intcon(0);
// Generate the initial go-slow test. It's either ALWAYS (return a
// Node for 1) or NEVER (return a null) or perhaps (in the reflective
// case) a computed value derived from the layout_helper.
// - If 'return_size_val', report the total object size to the caller.
// - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
Node* GraphKit::new_instance(Node* klass_node,
Node* extra_slow_test,
Node* *return_size_val,
! bool deoptimize_on_exception,
+ InlineTypeNode* inline_type_node) {
// Compute size in doublewords
// The size is always an integral number of doublewords, represented
// as a positive bytewise size stored in the klass's layout_helper.
// The layout_helper also encodes (in a low bit) the need for a slow path.
jint layout_con = Klass::_lh_neutral_value;
Node* layout_val = get_layout_helper(klass_node, layout_con);
! bool layout_is_con = (layout_val == nullptr);
if (extra_slow_test == nullptr) extra_slow_test = intcon(0);
// Generate the initial go-slow test. It's either ALWAYS (return a
// Node for 1) or NEVER (return a null) or perhaps (in the reflective
// case) a computed value derived from the layout_helper.
const TypeOopPtr* oop_type = tklass->as_instance_type();
// Now generate allocation code
// The entire memory state is needed for slow path of the allocation
! // since GC and deoptimization can happened.
Node *mem = reset_memory();
set_all_memory(mem); // Create new memory state
AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
control(), mem, i_o(),
size, klass_node,
! initial_slow_test);
return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
}
//-------------------------------new_array-------------------------------------
! // helper for both newarray and anewarray
// The 'length' parameter is (obviously) the length of the array.
// The optional arguments are for specialized use by intrinsics:
// - If 'return_size_val', report the non-padded array size (sum of header size
// and array body) to the caller.
// - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
const TypeOopPtr* oop_type = tklass->as_instance_type();
// Now generate allocation code
// The entire memory state is needed for slow path of the allocation
! // since GC and deoptimization can happen.
Node *mem = reset_memory();
set_all_memory(mem); // Create new memory state
AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
control(), mem, i_o(),
size, klass_node,
! initial_slow_test, inline_type_node);
return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
}
//-------------------------------new_array-------------------------------------
! // helper for newarray and anewarray
// The 'length' parameter is (obviously) the length of the array.
// The optional arguments are for specialized use by intrinsics:
// - If 'return_size_val', report the non-padded array size (sum of header size
// and array body) to the caller.
// - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
int nargs, // number of arguments to push back for uncommon trap
Node* *return_size_val,
bool deoptimize_on_exception) {
jint layout_con = Klass::_lh_neutral_value;
Node* layout_val = get_layout_helper(klass_node, layout_con);
! int layout_is_con = (layout_val == nullptr);
if (!layout_is_con && !StressReflectiveCode &&
!too_many_traps(Deoptimization::Reason_class_check)) {
// This is a reflective array creation site.
// Optimistically assume that it is a subtype of Object[],
int nargs, // number of arguments to push back for uncommon trap
Node* *return_size_val,
bool deoptimize_on_exception) {
jint layout_con = Klass::_lh_neutral_value;
Node* layout_val = get_layout_helper(klass_node, layout_con);
! bool layout_is_con = (layout_val == nullptr);
if (!layout_is_con && !StressReflectiveCode &&
!too_many_traps(Deoptimization::Reason_class_check)) {
// This is a reflective array creation site.
// Optimistically assume that it is a subtype of Object[],
int fast_size_limit = FastAllocateSizeLimit;
if (layout_is_con) {
assert(!StressReflectiveCode, "stress mode does not use these paths");
// Increase the size limit if we have exact knowledge of array type.
int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
! fast_size_limit <<= (LogBytesPerLong - log2_esize);
}
Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
int fast_size_limit = FastAllocateSizeLimit;
if (layout_is_con) {
assert(!StressReflectiveCode, "stress mode does not use these paths");
// Increase the size limit if we have exact knowledge of array type.
int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
! fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0);
}
Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
Node* header_size = nullptr;
// (T_BYTE has the weakest alignment and size restrictions...)
if (layout_is_con) {
int hsize = Klass::layout_helper_header_size(layout_con);
int eshift = Klass::layout_helper_log2_element_size(layout_con);
if ((round_mask & ~right_n_bits(eshift)) == 0)
round_mask = 0; // strength-reduce it if it goes away completely
! assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
assert(header_size_min <= hsize, "generic minimum is smallest");
header_size = intcon(hsize);
} else {
Node* hss = intcon(Klass::_lh_header_size_shift);
Node* header_size = nullptr;
// (T_BYTE has the weakest alignment and size restrictions...)
if (layout_is_con) {
int hsize = Klass::layout_helper_header_size(layout_con);
int eshift = Klass::layout_helper_log2_element_size(layout_con);
+ bool is_flat_array = Klass::layout_helper_is_flatArray(layout_con);
if ((round_mask & ~right_n_bits(eshift)) == 0)
round_mask = 0; // strength-reduce it if it goes away completely
! assert(is_flat_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
assert(header_size_min <= hsize, "generic minimum is smallest");
header_size = intcon(hsize);
} else {
Node* hss = intcon(Klass::_lh_header_size_shift);
// else if round_mask == 0, the size computation is self-rounding
// Now generate allocation code
// The entire memory state is needed for slow path of the allocation
! // since GC and deoptimization can happened.
Node *mem = reset_memory();
set_all_memory(mem); // Create new memory state
if (initial_slow_test->is_Bool()) {
// Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
}
! const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
Node* valid_length_test = _gvn.intcon(1);
if (ary_type->isa_aryptr()) {
BasicType bt = ary_type->isa_aryptr()->elem()->array_element_basic_type();
jint max = TypeAryPtr::max_array_length(bt);
Node* valid_length_cmp = _gvn.transform(new CmpUNode(length, intcon(max)));
// else if round_mask == 0, the size computation is self-rounding
// Now generate allocation code
// The entire memory state is needed for slow path of the allocation
! // since GC and deoptimization can happen.
Node *mem = reset_memory();
set_all_memory(mem); // Create new memory state
if (initial_slow_test->is_Bool()) {
// Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
}
! const TypeKlassPtr* ary_klass = _gvn.type(klass_node)->isa_klassptr();
+ const TypeOopPtr* ary_type = ary_klass->as_instance_type();
+ const TypeAryPtr* ary_ptr = ary_type->isa_aryptr();
+
+ // Inline type array variants:
+ // - null-ok: ciObjArrayKlass with is_elem_null_free() = false
+ // - null-free: ciObjArrayKlass with is_elem_null_free() = true
+ // - null-free, flat: ciFlatArrayKlass with is_elem_null_free() = true
+ // Check if array is a null-free, non-flat inline type array
+ // that needs to be initialized with the default inline type.
+ Node* default_value = nullptr;
+ Node* raw_default_value = nullptr;
+ if (ary_ptr != nullptr && ary_ptr->klass_is_exact()) {
+ // Array type is known
+ if (ary_ptr->is_null_free() && !ary_ptr->is_flat()) {
+ ciInlineKlass* vk = ary_ptr->elem()->inline_klass();
+ default_value = InlineTypeNode::default_oop(gvn(), vk);
+ if (UseCompressedOops) {
+ // With compressed oops, the 64-bit init value is built from two 32-bit compressed oops
+ default_value = _gvn.transform(new EncodePNode(default_value, default_value->bottom_type()->make_narrowoop()));
+ Node* lower = _gvn.transform(new CastP2XNode(control(), default_value));
+ Node* upper = _gvn.transform(new LShiftLNode(lower, intcon(32)));
+ raw_default_value = _gvn.transform(new OrLNode(lower, upper));
+ } else {
+ raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value));
+ }
+ }
+ }
+
Node* valid_length_test = _gvn.intcon(1);
if (ary_type->isa_aryptr()) {
BasicType bt = ary_type->isa_aryptr()->elem()->array_element_basic_type();
jint max = TypeAryPtr::max_array_length(bt);
Node* valid_length_cmp = _gvn.transform(new CmpUNode(length, intcon(max)));
AllocateArrayNode* alloc
= new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
control(), mem, i_o(),
size, klass_node,
initial_slow_test,
! length, valid_length_test);
!
// Cast to correct type. Note that the klass_node may be constant or not,
// and in the latter case the actual array type will be inexact also.
// (This happens via a non-constant argument to inline_native_newArray.)
// In any case, the value of klass_node provides the desired array type.
const TypeInt* length_type = _gvn.find_int_type(length);
AllocateArrayNode* alloc
= new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
control(), mem, i_o(),
size, klass_node,
initial_slow_test,
! length, valid_length_test,
! default_value, raw_default_value);
// Cast to correct type. Note that the klass_node may be constant or not,
// and in the latter case the actual array type will be inexact also.
// (This happens via a non-constant argument to inline_native_newArray.)
// In any case, the value of klass_node provides the desired array type.
const TypeInt* length_type = _gvn.find_int_type(length);
}
Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
int value_offset = java_lang_String::value_offset();
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
! false, nullptr, 0);
const TypePtr* value_field_type = string_type->add_offset(value_offset);
const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
! TypeAry::make(TypeInt::BYTE, TypeInt::POS),
! ciTypeArrayKlass::make(T_BYTE), true, 0);
Node* p = basic_plus_adr(str, str, value_offset);
Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
return load;
}
}
Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
int value_offset = java_lang_String::value_offset();
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
! false, nullptr, Type::Offset(0));
const TypePtr* value_field_type = string_type->add_offset(value_offset);
const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
! TypeAry::make(TypeInt::BYTE, TypeInt::POS, false, false, true, true),
! ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0));
Node* p = basic_plus_adr(str, str, value_offset);
Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
return load;
}
if (!CompactStrings) {
return intcon(java_lang_String::CODER_UTF16);
}
int coder_offset = java_lang_String::coder_offset();
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
! false, nullptr, 0);
const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
Node* p = basic_plus_adr(str, str, coder_offset);
Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
if (!CompactStrings) {
return intcon(java_lang_String::CODER_UTF16);
}
int coder_offset = java_lang_String::coder_offset();
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
! false, nullptr, Type::Offset(0));
const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
Node* p = basic_plus_adr(str, str, coder_offset);
Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
}
void GraphKit::store_String_value(Node* str, Node* value) {
int value_offset = java_lang_String::value_offset();
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
! false, nullptr, 0);
const TypePtr* value_field_type = string_type->add_offset(value_offset);
access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
}
void GraphKit::store_String_coder(Node* str, Node* value) {
int coder_offset = java_lang_String::coder_offset();
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
! false, nullptr, 0);
const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
}
}
void GraphKit::store_String_value(Node* str, Node* value) {
int value_offset = java_lang_String::value_offset();
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
! false, nullptr, Type::Offset(0));
const TypePtr* value_field_type = string_type->add_offset(value_offset);
access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
}
void GraphKit::store_String_coder(Node* str, Node* value) {
int coder_offset = java_lang_String::coder_offset();
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
! false, nullptr, Type::Offset(0));
const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
}
}
}
const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
/*is_unsigned_load=*/false);
if (con_type != nullptr) {
! return makecon(con_type);
}
return nullptr;
}
Node* GraphKit::maybe_narrow_object_type(Node* obj, ciKlass* type) {
! const TypeOopPtr* obj_type = obj->bottom_type()->isa_oopptr();
const TypeOopPtr* sig_type = TypeOopPtr::make_from_klass(type);
! if (obj_type != nullptr && sig_type->is_loaded() && !obj_type->higher_equal(sig_type)) {
const Type* narrow_obj_type = obj_type->filter_speculative(sig_type); // keep speculative part
Node* casted_obj = gvn().transform(new CheckCastPPNode(control(), obj, narrow_obj_type));
! return casted_obj;
}
return obj;
}
}
}
const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
/*is_unsigned_load=*/false);
if (con_type != nullptr) {
! Node* con = makecon(con_type);
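+ // Constant inline type field values are wrapped and returned in scalarized (InlineTypeNode) form.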
+ if (field->type()->is_inlinetype()) {
+ con = InlineTypeNode::make_from_oop(this, con, field->type()->as_inline_klass(), field->is_null_free());
+ } else if (con_type->is_inlinetypeptr()) {
+ con = InlineTypeNode::make_from_oop(this, con, con_type->inline_klass(), field->is_null_free());
+ }
+ return con;
}
return nullptr;
}
+ //---------------------------load_mirror_from_klass----------------------------
+ // Given a klass oop, load its java mirror (a java.lang.Class oop).
+ Node* GraphKit::load_mirror_from_klass(Node* klass) {
+ Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
+ Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
+ // mirror = ((OopHandle)mirror)->resolve();
+ return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
+ }
+
Node* GraphKit::maybe_narrow_object_type(Node* obj, ciKlass* type) {
! const Type* obj_type = obj->bottom_type();
const TypeOopPtr* sig_type = TypeOopPtr::make_from_klass(type);
! if (obj_type->isa_oopptr() && sig_type->is_loaded() && !obj_type->higher_equal(sig_type)) {
const Type* narrow_obj_type = obj_type->filter_speculative(sig_type); // keep speculative part
Node* casted_obj = gvn().transform(new CheckCastPPNode(control(), obj, narrow_obj_type));
! obj = casted_obj;
+ }
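+ // If the signature type is an inline type, return the value in scalarized (InlineTypeNode) form.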
+ if (sig_type->is_inlinetypeptr()) {
+ obj = InlineTypeNode::make_from_oop(this, obj, sig_type->inline_klass(), !gvn().type(obj)->maybe_null());
}
return obj;
}