src/hotspot/share/opto/library_call.cpp
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
+ #include "ci/ciFlatArrayKlass.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "classfile/vmIntrinsics.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
case vmIntrinsics::_compressStringC:
case vmIntrinsics::_compressStringB: return inline_string_copy( is_compress);
case vmIntrinsics::_inflateStringC:
case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);
+ case vmIntrinsics::_makePrivateBuffer: return inline_unsafe_make_private_buffer();
+ case vmIntrinsics::_finishPrivateBuffer: return inline_unsafe_finish_private_buffer();
case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
+ case vmIntrinsics::_getValue: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false, true);
case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);
+ case vmIntrinsics::_putValue: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false, true);
case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
case vmIntrinsics::_writeback0: return inline_unsafe_writeback0();
case vmIntrinsics::_writebackPreSync0: return inline_unsafe_writebackSync0(true);
case vmIntrinsics::_writebackPostSync0: return inline_unsafe_writebackSync0(false);
case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
+ case vmIntrinsics::_isFlatArray: return inline_unsafe_isFlatArray();
case vmIntrinsics::_setMemory: return inline_unsafe_setMemory();
case vmIntrinsics::_getLength: return inline_native_getLength();
case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
case vmIntrinsics::_equalsB: return inline_array_equals(StrIntrinsicNode::LL);
case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());
case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
case vmIntrinsics::_newArray: return inline_unsafe_newArray(false);
+ case vmIntrinsics::_newNullRestrictedArray: return inline_newNullRestrictedArray();
case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();
case vmIntrinsics::_isInstance:
case vmIntrinsics::_getModifiers:
//----------------------------inline_unsafe_access----------------------------
const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
// Attempt to infer a sharper value type from the offset and base type.
ciKlass* sharpened_klass = nullptr;
+ bool null_free = false;
// See if it is an instance field, with an object type.
if (alias_type->field() != nullptr) {
if (alias_type->field()->type()->is_klass()) {
sharpened_klass = alias_type->field()->type()->as_klass();
+ null_free = alias_type->field()->is_null_free();
}
}
const TypeOopPtr* result = nullptr;
// See if it is a narrow oop array.
if (adr_type->isa_aryptr()) {
if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
+ null_free = adr_type->is_aryptr()->is_null_free();
if (elem_type != nullptr && elem_type->is_loaded()) {
// Sharpen the value type.
result = elem_type;
}
}
// The sharpened class might be unloaded if there is no class loader
// constraint in place.
if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
// Sharpen the value type.
result = TypeOopPtr::make_from_klass(sharpened_klass);
+ if (null_free) {
+ result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
+ }
}
if (result != nullptr) {
#ifndef PRODUCT
if (C->print_intrinsics() || C->print_inlining()) {
tty->print(" from base type: "); adr_type->dump(); tty->cr();
ShouldNotReachHere();
return 0;
}
}
! bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
if (callee()->is_static()) return false; // caller must have the capability!
DecoratorSet decorators = C2_UNSAFE_ACCESS;
guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
ShouldNotReachHere();
return 0;
}
}
! bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned, const bool is_flat) {
if (callee()->is_static()) return false; // caller must have the capability!
DecoratorSet decorators = C2_UNSAFE_ACCESS;
guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
#ifdef ASSERT
if (!is_store) {
// Object getReference(Object base, int/long offset), etc.
BasicType rtype = sig->return_type()->basic_type();
assert(rtype == type, "getter must return the expected value");
! assert(sig->count() == 2, "oop getter has 2 arguments");
assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
} else {
// void putReference(Object base, int/long offset, Object x), etc.
assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
! assert(sig->count() == 3, "oop putter has 3 arguments");
assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
assert(vtype == type, "putter must accept the expected value");
}
#ifdef ASSERT
if (!is_store) {
// Object getReference(Object base, int/long offset), etc.
BasicType rtype = sig->return_type()->basic_type();
assert(rtype == type, "getter must return the expected value");
! assert(sig->count() == 2 || (is_flat && sig->count() == 3), "oop getter has 2 or 3 arguments");
assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
} else {
// void putReference(Object base, int/long offset, Object x), etc.
assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
! assert(sig->count() == 3 || (is_flat && sig->count() == 4), "oop putter has 3 arguments");
assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
assert(vtype == type, "putter must accept the expected value");
}
// We currently rely on the cookies produced by Unsafe.xxxFieldOffset
// to be plain byte offsets, which are also the same as those accepted
// by oopDesc::field_addr.
assert(Unsafe_field_offset_to_byte_offset(11) == 11,
"fieldOffset must be byte-scaled");
+
+ ciInlineKlass* inline_klass = nullptr;
+ if (is_flat) {
+ const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
+ if (cls == nullptr || cls->const_oop() == nullptr) {
+ return false;
+ }
+ ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
+ if (!mirror_type->is_inlinetype()) {
+ return false;
+ }
+ inline_klass = mirror_type->as_inline_klass();
+ }
+
+ if (base->is_InlineType()) {
+ InlineTypeNode* vt = base->as_InlineType();
+ if (is_store) {
+ if (!vt->is_allocated(&_gvn)) {
+ return false;
+ }
+ base = vt->get_oop();
+ } else {
+ if (offset->is_Con()) {
+ long off = find_long_con(offset, 0);
+ ciInlineKlass* vk = vt->type()->inline_klass();
+ if ((long)(int)off != off || !vk->contains_field_offset(off)) {
+ return false;
+ }
+
+ ciField* field = vk->get_non_flat_field_by_offset(off);
+ if (field != nullptr) {
+ BasicType bt = type2field[field->type()->basic_type()];
+ if (bt == T_ARRAY || bt == T_NARROWOOP) {
+ bt = T_OBJECT;
+ }
+ if (bt == type && (!field->is_flat() || field->type() == inline_klass)) {
+ Node* value = vt->field_value_by_offset(off, false);
+ if (value->is_InlineType()) {
+ value = value->as_InlineType()->adjust_scalarization_depth(this);
+ }
+ set_result(value);
+ return true;
+ }
+ }
+ }
+ {
+ // Re-execute the unsafe access if allocation triggers deoptimization.
+ PreserveReexecuteState preexecs(this);
+ jvms()->set_should_reexecute(true);
+ vt = vt->buffer(this);
+ }
+ base = vt->get_oop();
+ }
+ }
+
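
The constant-offset fast path above only applies when the 64-bit unsafe offset losslessly round-trips through a jint, which is what the (long)(int)off != off test checks. A minimal standalone sketch of that check (plain C++, not HotSpot code):

    #include <cassert>
    #include <cstdint>

    // True iff 'off' survives a round-trip through a 32-bit int,
    // i.e. the narrowing cast loses no information.
    static bool fits_in_jint(int64_t off) {
      return static_cast<int64_t>(static_cast<int32_t>(off)) == off;
    }

    int main() {
      assert(fits_in_jint(16));
      assert(!fits_in_jint(int64_t(1) << 40));
      return 0;
    }
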
// 32-bit machines ignore the high half!
offset = ConvL2X(offset);
// Save state and restore on bailout
uint old_sp = sp();
Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
! if (type != T_OBJECT) {
decorators |= IN_NATIVE; // off-heap primitive access
} else {
set_map(old_map);
set_sp(old_sp);
return false; // off-heap oop accesses are not supported
Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
assert(!stopped(), "Inlining of unsafe access failed: address construction stopped unexpectedly");
if (_gvn.type(base->uncast())->isa_ptr() == TypePtr::NULL_PTR) {
! if (type != T_OBJECT && (inline_klass == nullptr || !inline_klass->has_object_fields())) {
decorators |= IN_NATIVE; // off-heap primitive access
} else {
set_map(old_map);
set_sp(old_sp);
return false; // off-heap oop accesses are not supported
if (!can_access_non_heap) {
decorators |= IN_HEAP;
}
! Node* val = is_store ? argument(4) : nullptr;
const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
if (adr_type == TypePtr::NULL_PTR) {
set_map(old_map);
set_sp(old_sp);
if (!can_access_non_heap) {
decorators |= IN_HEAP;
}
! Node* val = is_store ? argument(4 + (is_flat ? 1 : 0)) : nullptr;
const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
if (adr_type == TypePtr::NULL_PTR) {
set_map(old_map);
set_sp(old_sp);
set_sp(old_sp);
return false; // not supported
}
bool mismatched = false;
! BasicType bt = alias_type->basic_type();
if (bt != T_ILLEGAL) {
assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
if (bt == T_BYTE && adr_type->isa_aryptr()) {
// Alias type doesn't differentiate between byte[] and boolean[].
// Use address type to get the element type.
set_sp(old_sp);
return false; // not supported
}
bool mismatched = false;
! BasicType bt = T_ILLEGAL;
+ ciField* field = nullptr;
+ if (adr_type->isa_instptr()) {
+ const TypeInstPtr* instptr = adr_type->is_instptr();
+ ciInstanceKlass* k = instptr->instance_klass();
+ int off = instptr->offset();
+ if (instptr->const_oop() != nullptr &&
+ k == ciEnv::current()->Class_klass() &&
+ instptr->offset() >= (k->size_helper() * wordSize)) {
+ k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
+ field = k->get_field_by_offset(off, true);
+ } else {
+ field = k->get_non_flat_field_by_offset(off);
+ }
+ if (field != nullptr) {
+ bt = type2field[field->type()->basic_type()];
+ }
+ assert(bt == alias_type->basic_type() || is_flat, "should match");
+ } else {
+ bt = alias_type->basic_type();
+ }
+
if (bt != T_ILLEGAL) {
assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
if (bt == T_BYTE && adr_type->isa_aryptr()) {
// Alias type doesn't differentiate between byte[] and boolean[].
// Use address type to get the element type.
mismatched = (bt != type);
} else if (alias_type->adr_type()->isa_oopptr()) {
mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
}
destruct_map_clone(old_map);
! assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
if (mismatched) {
decorators |= C2_MISMATCHED;
}
mismatched = (bt != type);
} else if (alias_type->adr_type()->isa_oopptr()) {
mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
}
+ if (is_flat) {
+ if (adr_type->isa_instptr()) {
+ if (field == nullptr || field->type() != inline_klass) {
+ mismatched = true;
+ }
+ } else if (adr_type->isa_aryptr()) {
+ const Type* elem = adr_type->is_aryptr()->elem();
+ if (!adr_type->is_flat() || elem->inline_klass() != inline_klass) {
+ mismatched = true;
+ }
+ } else {
+ mismatched = true;
+ }
+ if (is_store) {
+ const Type* val_t = _gvn.type(val);
+ if (!val_t->is_inlinetypeptr() || val_t->inline_klass() != inline_klass) {
+ set_map(old_map);
+ set_sp(old_sp);
+ return false;
+ }
+ }
+ }
+
destruct_map_clone(old_map);
! assert(!mismatched || is_flat || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
if (mismatched) {
decorators |= C2_MISMATCHED;
}
const Type *value_type = Type::get_const_basic_type(type);
// Figure out the memory ordering.
decorators |= mo_decorator_for_access_kind(kind);
! if (!is_store && type == T_OBJECT) {
! const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
! if (tjp != nullptr) {
! value_type = tjp;
}
}
receiver = null_check(receiver);
if (stopped()) {
const Type *value_type = Type::get_const_basic_type(type);
// Figure out the memory ordering.
decorators |= mo_decorator_for_access_kind(kind);
! if (!is_store) {
! if (type == T_OBJECT && !is_flat) {
! const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
! if (tjp != nullptr) {
+ value_type = tjp;
+ }
}
}
receiver = null_check(receiver);
if (stopped()) {
// from intended ones in this API.
if (!is_store) {
Node* p = nullptr;
// Try to constant fold a load from a constant field
! ciField* field = alias_type->field();
! if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
// final or stable field
p = make_constant_from_field(field, heap_base_oop);
}
if (p == nullptr) { // Could not constant fold the load
! p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
// Normalize the value returned by getBoolean in the following cases
if (type == T_BOOLEAN &&
(mismatched ||
heap_base_oop == top() || // - heap_base_oop is null or
(can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
// from intended ones in this API.
if (!is_store) {
Node* p = nullptr;
// Try to constant fold a load from a constant field
!
! if (heap_base_oop != top() && field != nullptr && field->is_constant() && !field->is_flat() && !mismatched) {
// final or stable field
p = make_constant_from_field(field, heap_base_oop);
}
if (p == nullptr) { // Could not constant fold the load
! if (is_flat) {
+ if (adr_type->isa_instptr() && !mismatched) {
+ ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
+ int offset = adr_type->is_instptr()->offset();
+ p = InlineTypeNode::make_from_flat(this, inline_klass, base, base, holder, offset, decorators);
+ } else {
+ p = InlineTypeNode::make_from_flat(this, inline_klass, base, adr, nullptr, 0, decorators);
+ }
+ } else {
+ p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
+ const TypeOopPtr* ptr = value_type->make_oopptr();
+ if (ptr != nullptr && ptr->is_inlinetypeptr()) {
+ // Load a non-flattened inline type from memory
+ p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass(), !ptr->maybe_null());
+ }
+ }
// Normalize the value returned by getBoolean in the following cases
if (type == T_BOOLEAN &&
(mismatched ||
heap_base_oop == top() || // - heap_base_oop is null or
(can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
if (bt == T_ADDRESS) {
// Repackage the long as a pointer.
val = ConvL2X(val);
val = gvn().transform(new CastX2PNode(val));
}
! access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
}
return true;
}
//----------------------------inline_unsafe_load_store----------------------------
// This method serves a couple of different customers (depending on LoadStoreKind):
//
// LS_cmp_swap:
//
if (bt == T_ADDRESS) {
// Repackage the long as a pointer.
val = ConvL2X(val);
val = gvn().transform(new CastX2PNode(val));
}
! if (is_flat) {
+ if (adr_type->isa_instptr() && !mismatched) {
+ ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
+ int offset = adr_type->is_instptr()->offset();
+ val->as_InlineType()->store_flat(this, base, base, holder, offset, decorators);
+ } else {
+ val->as_InlineType()->store_flat(this, base, adr, nullptr, 0, decorators);
+ }
+ } else {
+ access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
+ }
+ }
+
+ if (argument(1)->is_InlineType() && is_store) {
+ InlineTypeNode* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(argument(1))->inline_klass());
+ value = value->make_larval(this, false);
+ replace_in_map(argument(1), value);
}
return true;
}
+ bool LibraryCallKit::inline_unsafe_make_private_buffer() {
+ Node* receiver = argument(0);
+ Node* value = argument(1);
+ if (!value->is_InlineType()) {
+ return false;
+ }
+
+ receiver = null_check(receiver);
+ if (stopped()) {
+ return true;
+ }
+
+ set_result(value->as_InlineType()->make_larval(this, true));
+ return true;
+ }
+
+ bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
+ Node* receiver = argument(0);
+ Node* buffer = argument(1);
+ if (!buffer->is_InlineType()) {
+ return false;
+ }
+ InlineTypeNode* vt = buffer->as_InlineType();
+ if (!vt->is_allocated(&_gvn)) {
+ return false;
+ }
+ // TODO 8239003 Why is this needed?
+ if (AllocateNode::Ideal_allocation(vt->get_oop()) == nullptr) {
+ return false;
+ }
+
+ receiver = null_check(receiver);
+ if (stopped()) {
+ return true;
+ }
+
+ set_result(vt->finish_larval(this));
+ return true;
+ }
+
//----------------------------inline_unsafe_load_store----------------------------
// This method serves a couple of different customers (depending on LoadStoreKind):
//
// LS_cmp_swap:
//
int alias_idx = C->get_alias_index(adr_type);
if (is_reference_type(type)) {
decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
+ if (oldval != nullptr && oldval->is_InlineType()) {
+ // Re-execute the unsafe access if allocation triggers deoptimization.
+ PreserveReexecuteState preexecs(this);
+ jvms()->set_should_reexecute(true);
+ oldval = oldval->as_InlineType()->buffer(this)->get_oop();
+ }
+ if (newval != nullptr && newval->is_InlineType()) {
+ // Re-execute the unsafe access if allocation triggers deoptimization.
+ PreserveReexecuteState preexecs(this);
+ jvms()->set_should_reexecute(true);
+ newval = newval->as_InlineType()->buffer(this)->get_oop();
+ }
+
// Transformation of a value which could be null pointer (CastPP #null)
// could be delayed during Parse (for example, in adjust_map_after_if()).
// Execute transformation here to avoid barrier generation in such case.
if (_gvn.type(newval) == TypePtr::NULL_PTR)
newval = _gvn.makecon(TypePtr::NULL_PTR);
Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
Node* bits = intcon(InstanceKlass::fully_initialized);
test = _gvn.transform(new SubINode(inst, bits));
// The 'test' is non-zero if we need to take a slow path.
}
!
! Node* obj = new_instance(kls, test);
set_result(obj);
return true;
}
//------------------------inline_native_time_funcs--------------
Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
Node* bits = intcon(InstanceKlass::fully_initialized);
test = _gvn.transform(new SubINode(inst, bits));
// The 'test' is non-zero if we need to take a slow path.
}
! Node* obj = nullptr;
! const TypeInstKlassPtr* tkls = _gvn.type(kls)->isa_instklassptr();
+ if (tkls != nullptr && tkls->instance_klass()->is_inlinetype()) {
+ obj = InlineTypeNode::make_default(_gvn, tkls->instance_klass()->as_inline_klass())->buffer(this);
+ } else {
+ obj = new_instance(kls, test);
+ }
set_result(obj);
return true;
}
//------------------------inline_native_time_funcs--------------
}
const Type* LibraryCallKit::scopedValueCache_type() {
ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
! const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
// Because we create the scopedValue cache lazily we have to make the
// type of the result BotPTR.
bool xk = etype->klass_is_exact();
! const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
return objects_type;
}
Node* LibraryCallKit::scopedValueCache_helper() {
Node* thread = _gvn.transform(new ThreadLocalNode());
}
const Type* LibraryCallKit::scopedValueCache_type() {
ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
! const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS, /* stable= */ false, /* flat= */ false, /* not_flat= */ true, /* not_null_free= */ true);
// Because we create the scopedValue cache lazily we have to make the
// type of the result BotPTR.
bool xk = etype->klass_is_exact();
! const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
return objects_type;
}
Node* LibraryCallKit::scopedValueCache_helper() {
Node* thread = _gvn.transform(new ThreadLocalNode());
set_all_memory(_gvn.transform(result_mem));
return true;
}
- //---------------------------load_mirror_from_klass----------------------------
- // Given a klass oop, load its java mirror (a java.lang.Class oop).
- Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
- Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
- Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
- // mirror = ((OopHandle)mirror)->resolve();
- return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
- }
-
//-----------------------load_klass_from_mirror_common-------------------------
// Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
// Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
// and branch to the given path on the region.
// If never_see_null, take an uncommon trap on null, so we can optimistically
Node* mbit = _gvn.transform(new AndINode(mods, mask));
Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
return generate_fair_guard(bol, region);
}
+
Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
return generate_klass_flags_guard(kls, JVM_ACC_INTERFACE, 0, region,
Klass::access_flags_offset(), TypeInt::INT, T_INT);
}
C->set_has_split_ifs(true); // Has chance for split-if optimization
set_result(region, phi);
return true;
}
+
//-------------------------inline_Class_cast-------------------
bool LibraryCallKit::inline_Class_cast() {
Node* mirror = argument(0); // Class
Node* obj = argument(1);
const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
}
const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
// First, see if Class.cast() can be folded statically.
// java_mirror_type() returns non-null for compile-time Class constants.
! ciType* tm = mirror_con->java_mirror_type();
if (tm != nullptr && tm->is_klass() &&
tp != nullptr) {
if (!tp->is_loaded()) {
// Don't use intrinsic when class is not loaded.
return false;
} else {
! int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
if (static_res == Compile::SSC_always_true) {
// isInstance() is true - fold the code.
set_result(obj);
return true;
} else if (static_res == Compile::SSC_always_false) {
}
const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
// First, see if Class.cast() can be folded statically.
// java_mirror_type() returns non-null for compile-time Class constants.
! bool is_null_free_array = false;
+ ciType* tm = mirror_con->java_mirror_type(&is_null_free_array);
if (tm != nullptr && tm->is_klass() &&
tp != nullptr) {
if (!tp->is_loaded()) {
// Don't use intrinsic when class is not loaded.
return false;
} else {
! const TypeKlassPtr* tklass = TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces);
+ if (is_null_free_array) {
+ tklass = tklass->is_aryklassptr()->cast_to_null_free();
+ }
+ int static_res = C->static_subtype_check(tklass, tp->as_klass_type());
if (static_res == Compile::SSC_always_true) {
// isInstance() is true - fold the code.
set_result(obj);
return true;
} else if (static_res == Compile::SSC_always_false) {
// If mirror is dead, only null-path is taken.
if (stopped()) {
return true;
}
! // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
! enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
RegionNode* region = new RegionNode(PATH_LIMIT);
record_for_igvn(region);
// Now load the mirror's klass metaobject, and null-check it.
// If kls is null, we have a primitive mirror and
// nothing is an instance of a primitive type.
Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
Node* res = top();
if (!stopped()) {
Node* bad_type_ctrl = top();
// Do checkcast optimizations.
res = gen_checkcast(obj, kls, &bad_type_ctrl);
region->init_req(_bad_type_path, bad_type_ctrl);
}
if (region->in(_prim_path) != top() ||
! region->in(_bad_type_path) != top()) {
// Let Interpreter throw ClassCastException.
PreserveJVMState pjvms(this);
set_control(_gvn.transform(region));
uncommon_trap(Deoptimization::Reason_intrinsic,
Deoptimization::Action_maybe_recompile);
}
if (!stopped()) {
set_result(res);
// If mirror is dead, only null-path is taken.
if (stopped()) {
return true;
}
! // Not-subtype or the mirror's klass ptr is nullptr (in case it is a primitive).
! enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
RegionNode* region = new RegionNode(PATH_LIMIT);
record_for_igvn(region);
// Now load the mirror's klass metaobject, and null-check it.
// If kls is null, we have a primitive mirror and
// nothing is an instance of a primitive type.
Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
Node* res = top();
+ Node* io = i_o();
+ Node* mem = merged_memory();
if (!stopped()) {
+
Node* bad_type_ctrl = top();
// Do checkcast optimizations.
res = gen_checkcast(obj, kls, &bad_type_ctrl);
region->init_req(_bad_type_path, bad_type_ctrl);
}
if (region->in(_prim_path) != top() ||
! region->in(_bad_type_path) != top() ||
+ region->in(_npe_path) != top()) {
// Let Interpreter throw ClassCastException.
PreserveJVMState pjvms(this);
set_control(_gvn.transform(region));
+ // Set IO and memory because gen_checkcast may override them when buffering inline types
+ set_i_o(io);
+ set_all_memory(mem);
uncommon_trap(Deoptimization::Reason_intrinsic,
Deoptimization::Action_maybe_recompile);
}
if (!stopped()) {
set_result(res);
_both_ref_path, // {N,N} & subtype check loses => false
PATH_LIMIT
};
RegionNode* region = new RegionNode(PATH_LIMIT);
+ RegionNode* prim_region = new RegionNode(2);
Node* phi = new PhiNode(region, TypeInt::BOOL);
record_for_igvn(region);
+ record_for_igvn(prim_region);
const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
int class_klass_offset = java_lang_Class::klass_offset();
bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
for (which_arg = 0; which_arg <= 1; which_arg++) {
Node* kls = klasses[which_arg];
Node* null_ctl = top();
kls = null_check_oop(kls, &null_ctl, never_see_null);
! int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
! region->init_req(prim_path, null_ctl);
if (stopped()) break;
klasses[which_arg] = kls;
}
if (!stopped()) {
// now we have two reference types, in klasses[0..1]
Node* subk = klasses[1]; // the argument to isAssignableFrom
Node* superk = klasses[0]; // the receiver
region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
- // now we have a successful reference subtype check
region->set_req(_ref_subtype_path, control());
}
// If both operands are primitive (both klasses null), then
// we must return true when they are identical primitives.
// It is convenient to test this after the first null klass check.
! set_control(region->in(_prim_0_path)); // go back to first null check
if (!stopped()) {
// Since superc is primitive, make a guard for the superc==subc case.
Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
! generate_guard(bol_eq, region, PROB_FAIR);
if (region->req() == PATH_LIMIT+1) {
// A guard was added. If the added guard is taken, superc==subc.
region->swap_edges(PATH_LIMIT, _prim_same_path);
region->del_req(PATH_LIMIT);
}
bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
for (which_arg = 0; which_arg <= 1; which_arg++) {
Node* kls = klasses[which_arg];
Node* null_ctl = top();
kls = null_check_oop(kls, &null_ctl, never_see_null);
! if (which_arg == 0) {
! prim_region->init_req(1, null_ctl);
+ } else {
+ region->init_req(_prim_1_path, null_ctl);
+ }
if (stopped()) break;
klasses[which_arg] = kls;
}
if (!stopped()) {
// now we have two reference types, in klasses[0..1]
Node* subk = klasses[1]; // the argument to isAssignableFrom
Node* superk = klasses[0]; // the receiver
region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
region->set_req(_ref_subtype_path, control());
}
// If both operands are primitive (both klasses null), then
// we must return true when they are identical primitives.
// It is convenient to test this after the first null klass check.
! // This path is also used if superc is a value mirror.
+ set_control(_gvn.transform(prim_region));
if (!stopped()) {
// Since superc is primitive, make a guard for the superc==subc case.
Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
! generate_fair_guard(bol_eq, region);
if (region->req() == PATH_LIMIT+1) {
// A guard was added. If the added guard is taken, superc==subc.
region->swap_edges(PATH_LIMIT, _prim_same_path);
region->del_req(PATH_LIMIT);
}
set_result(_gvn.transform(phi));
return true;
}
//---------------------generate_array_guard_common------------------------
! Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
- bool obj_array, bool not_array) {
if (stopped()) {
return nullptr;
}
- // If obj_array/non_array==false/false:
- // Branch around if the given klass is in fact an array (either obj or prim).
- // If obj_array/non_array==false/true:
- // Branch around if the given klass is not an array klass of any kind.
- // If obj_array/non_array==true/true:
- // Branch around if the kls is not an oop array (kls is int[], String, etc.)
- // If obj_array/non_array==true/false:
- // Branch around if the kls is an oop array (Object[] or subtype)
- //
// Like generate_guard, adds a new path onto the region.
jint layout_con = 0;
Node* layout_val = get_layout_helper(kls, layout_con);
if (layout_val == nullptr) {
! bool query = (obj_array
! ? Klass::layout_helper_is_objArray(layout_con)
! : Klass::layout_helper_is_array(layout_con));
! if (query == not_array) {
return nullptr; // never a branch
} else { // always a branch
Node* always_branch = control();
if (region != nullptr)
region->add_req(always_branch);
set_control(top());
return always_branch;
}
}
// Now test the correct condition.
! jint nval = (obj_array
- ? (jint)(Klass::_lh_array_tag_type_value
- << Klass::_lh_array_tag_shift)
- : Klass::_lh_neutral_value);
Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
- BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array
- // invert the test if we are looking for a non-array
- if (not_array) btest = BoolTest(btest).negate();
Node* bol = _gvn.transform(new BoolNode(cmp, btest));
return generate_fair_guard(bol, region);
}
//-----------------------inline_native_newArray--------------------------
! // private static native Object java.lang.reflect.newArray(Class<?> componentType, int length);
// private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
Node* mirror;
Node* count_val;
if (uninitialized) {
set_result(_gvn.transform(phi));
return true;
}
//---------------------generate_array_guard_common------------------------
! Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {
if (stopped()) {
return nullptr;
}
// Like generate_guard, adds a new path onto the region.
jint layout_con = 0;
Node* layout_val = get_layout_helper(kls, layout_con);
if (layout_val == nullptr) {
! bool query = false;
! switch(kind) {
! case ObjectArray: query = Klass::layout_helper_is_objArray(layout_con); break;
! case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
+ case TypeArray: query = Klass::layout_helper_is_typeArray(layout_con); break;
+ case AnyArray: query = Klass::layout_helper_is_array(layout_con); break;
+ case NonArray: query = !Klass::layout_helper_is_array(layout_con); break;
+ default:
+ ShouldNotReachHere();
+ }
+ if (!query) {
return nullptr; // never a branch
} else { // always a branch
Node* always_branch = control();
if (region != nullptr)
region->add_req(always_branch);
set_control(top());
return always_branch;
}
}
+ unsigned int value = 0;
+ BoolTest::mask btest = BoolTest::illegal;
+ switch(kind) {
+ case ObjectArray:
+ case NonObjectArray: {
+ value = Klass::_lh_array_tag_obj_value;
+ layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
+ btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
+ break;
+ }
+ case TypeArray: {
+ value = Klass::_lh_array_tag_type_value;
+ layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
+ btest = BoolTest::eq;
+ break;
+ }
+ case AnyArray: value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
+ case NonArray: value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
+ default:
+ ShouldNotReachHere();
+ }
// Now test the correct condition.
! jint nval = (jint)value;
Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
Node* bol = _gvn.transform(new BoolNode(cmp, btest));
return generate_fair_guard(bol, region);
}
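
For reference, a standalone sketch (not HotSpot code) of the predicate that the rewritten generate_array_guard_common() materializes as IR: the klass layout helper is negative for arrays and carries an array tag in its top bits. The concrete shift and tag numbers below follow the usual Klass::layout_helper conventions and are illustrative only; the patch itself relies on the symbolic Klass::_lh_* constants.

    #include <cstdint>

    enum ArrayKind { ObjectArray, NonObjectArray, TypeArray, AnyArray, NonArray };

    static bool array_kind_matches(int32_t layout_helper, ArrayKind kind) {
      const int     tag_shift = 30;      // Klass::_lh_array_tag_shift (illustrative)
      const int32_t obj_tag   = ~0x01;   // Klass::_lh_array_tag_obj_value (illustrative)
      const int32_t type_tag  = ~0x00;   // Klass::_lh_array_tag_type_value (illustrative)
      const int32_t neutral   = 0;       // Klass::_lh_neutral_value

      // RShiftI in the IR is an arithmetic shift, so the sign of an array
      // layout helper is preserved in the extracted tag.
      int32_t tag = layout_helper >> tag_shift;
      switch (kind) {
        case ObjectArray:    return tag == obj_tag;
        case NonObjectArray: return tag != obj_tag;
        case TypeArray:      return tag == type_tag;
        case AnyArray:       return layout_helper <  neutral;  // all arrays have lh < 0
        case NonArray:       return layout_helper >  neutral;  // instances have lh > 0
      }
      return false;
    }
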
+ //-----------------------inline_newNullRestrictedArray--------------------------
+ // public static native Object[] newNullRestrictedArray(Class<?> componentType, int length);
+ bool LibraryCallKit::inline_newNullRestrictedArray() {
+ Node* componentType = argument(0);
+ Node* length = argument(1);
+
+ const TypeInstPtr* tp = _gvn.type(componentType)->isa_instptr();
+ if (tp != nullptr) {
+ ciInstanceKlass* ik = tp->instance_klass();
+ if (ik == C->env()->Class_klass()) {
+ ciType* t = tp->java_mirror_type();
+ if (t != nullptr && t->is_inlinetype()) {
+ ciArrayKlass* array_klass = ciArrayKlass::make(t, true);
+ if (array_klass->is_loaded() && array_klass->element_klass()->as_inline_klass()->is_initialized()) {
+ const TypeAryKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass, Type::trust_interfaces)->is_aryklassptr();
+ array_klass_type = array_klass_type->cast_to_null_free();
+ Node* obj = new_array(makecon(array_klass_type), length, 0, nullptr, false); // no arguments to push
+ set_result(obj);
+ assert(gvn().type(obj)->is_aryptr()->is_null_free(), "must be null-free");
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+ }
//-----------------------inline_native_newArray--------------------------
! // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
// private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
Node* mirror;
Node* count_val;
if (uninitialized) {
RegionNode* bailout = new RegionNode(1);
record_for_igvn(bailout);
// Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
// Bail out if that is so.
! Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
if (not_objArray != nullptr) {
// Improve the klass node's type from the new optimistic assumption:
ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
! const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
Node* cast = new CastPPNode(control(), klass_node, akls);
klass_node = _gvn.transform(cast);
}
// Bail out if either start or end is negative.
RegionNode* bailout = new RegionNode(1);
record_for_igvn(bailout);
// Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
// Bail out if that is so.
! // Inline type array may have object field that would require a
+ // write barrier. Conservatively, go to slow path.
+ // TODO 8251971: Optimize for the case when flat src/dst are later found
+ // to not contain oops (i.e., move this check to the macro expansion phase).
+ BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+ const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
+ const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
+ bool exclude_flat = UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
+ // Can src array be flat and contain oops?
+ (orig_t == nullptr || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
+ // Can dest array be flat and contain oops?
+ tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
+ Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
if (not_objArray != nullptr) {
// Improve the klass node's type from the new optimistic assumption:
ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
! const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
Node* cast = new CastPPNode(control(), klass_node, akls);
klass_node = _gvn.transform(cast);
}
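
The exclude_flat condition above packs several questions into one expression. A standalone sketch (not the patch's code) that factors it into named parts, with all inputs assumed to be precomputed by the caller exactly as in the hunk above:

    // Conservatively route the copy to the slow path when a flat source or
    // destination could contain oops and the copy would need GC barriers.
    static bool exclude_flat_arrays(bool use_flat_array,             // UseFlatArray
                                    bool copy_needs_gc_barriers,     // array_copy_requires_gc_barriers(...)
                                    bool src_may_be_flat_with_oops,  // derived from orig_t
                                    bool dst_may_be_flat_with_oops)  // derived from tklass
    {
      return use_flat_array && copy_needs_gc_barriers &&
             src_may_be_flat_with_oops && dst_may_be_flat_with_oops;
    }
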
// Bail out if either start or end is negative.
// Without this the new_array would throw
// NegativeArraySizeException but IllegalArgumentException is what
// should be thrown
generate_negative_guard(length, bailout, &length);
+ // Handle inline type arrays
+ bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
+ if (!stopped()) {
+ // TODO JDK-8329224
+ if (!orig_t->is_null_free()) {
+ // Not statically known to be null free, add a check
+ generate_fair_guard(null_free_array_test(original), bailout);
+ }
+ orig_t = _gvn.type(original)->isa_aryptr();
+ if (orig_t != nullptr && orig_t->is_flat()) {
+ // Src is flat, check that dest is flat as well
+ if (exclude_flat) {
+ // Dest can't be flat, bail out
+ bailout->add_req(control());
+ set_control(top());
+ } else {
+ generate_fair_guard(flat_array_test(klass_node, /* flat = */ false), bailout);
+ }
+ } else if (UseFlatArray && (orig_t == nullptr || !orig_t->is_not_flat()) &&
+ // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
+ ((!tklass->is_flat() && tklass->can_be_inline_array()) || !can_validate)) {
+ // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
+ // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
+ generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
+ if (orig_t != nullptr) {
+ orig_t = orig_t->cast_to_not_flat();
+ original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
+ }
+ }
+ if (!can_validate) {
+ // No validation. The subtype check emitted at macro expansion time will not go to the slow
+ // path but call checkcast_arraycopy which can not handle flat/null-free inline type arrays.
+ // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
+ generate_fair_guard(flat_array_test(klass_node), bailout);
+ generate_fair_guard(null_free_array_test(original), bailout);
+ }
+ }
+
// Bail out if start is larger than the original length
Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
generate_negative_guard(orig_tail, bailout, &orig_tail);
if (bailout->req() > 1) {
}
bool validated = false;
// Reason_class_check rather than Reason_intrinsic because we
// want to intrinsify even if this traps.
! if (!too_many_traps(Deoptimization::Reason_class_check)) {
Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
if (not_subtype_ctrl != top()) {
PreserveJVMState pjvms(this);
set_control(not_subtype_ctrl);
}
bool validated = false;
// Reason_class_check rather than Reason_intrinsic because we
// want to intrinsify even if this traps.
! if (can_validate) {
Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
if (not_subtype_ctrl != top()) {
PreserveJVMState pjvms(this);
set_control(not_subtype_ctrl);
guarantee(method_id == method->intrinsic_id(), "must match");
const TypeFunc* tf = TypeFunc::make(method);
if (res_not_null) {
assert(tf->return_type() == T_OBJECT, "");
! const TypeTuple* range = tf->range();
const Type** fields = TypeTuple::fields(range->cnt());
fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
! tf = TypeFunc::make(tf->domain(), new_range);
}
CallJavaNode* slow_call;
if (is_static) {
assert(!is_virtual, "");
slow_call = new CallStaticJavaNode(C, tf,
guarantee(method_id == method->intrinsic_id(), "must match");
const TypeFunc* tf = TypeFunc::make(method);
if (res_not_null) {
assert(tf->return_type() == T_OBJECT, "");
! const TypeTuple* range = tf->range_cc();
const Type** fields = TypeTuple::fields(range->cnt());
fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
! tf = TypeFunc::make(tf->domain_cc(), new_range);
}
CallJavaNode* slow_call;
if (is_static) {
assert(!is_virtual, "");
slow_call = new CallStaticJavaNode(C, tf,
RegionNode* result_reg = new RegionNode(PATH_LIMIT);
PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
! Node* obj = nullptr;
if (!is_static) {
// Check for hashing null object
obj = null_check_receiver();
if (stopped()) return true; // unconditionally null
result_reg->init_req(_null_path, top());
result_val->init_req(_null_path, top());
} else {
// Do a null check, and return zero if null.
// System.identityHashCode(null) == 0
- obj = argument(0);
Node* null_ctl = top();
obj = null_check_oop(obj, &null_ctl);
result_reg->init_req(_null_path, null_ctl);
result_val->init_req(_null_path, _gvn.intcon(0));
}
RegionNode* result_reg = new RegionNode(PATH_LIMIT);
PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
! Node* obj = argument(0);
+
+ // Don't intrinsify hashcode on inline types for now.
+ // The "is locked" runtime check below also serves as inline type check and goes to the slow path.
+ if (gvn().type(obj)->is_inlinetypeptr()) {
+ return false;
+ }
+
if (!is_static) {
// Check for hashing null object
obj = null_check_receiver();
if (stopped()) return true; // unconditionally null
result_reg->init_req(_null_path, top());
result_val->init_req(_null_path, top());
} else {
// Do a null check, and return zero if null.
// System.identityHashCode(null) == 0
Node* null_ctl = top();
obj = null_check_oop(obj, &null_ctl);
result_reg->init_req(_null_path, null_ctl);
result_val->init_req(_null_path, _gvn.intcon(0));
}
Node* no_ctrl = nullptr;
Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
if (!UseObjectMonitorTable) {
// Test the header to see if it is safe to read w.r.t. locking.
! Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place);
Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
if (LockingMode == LM_LIGHTWEIGHT) {
Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
Node* no_ctrl = nullptr;
Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
if (!UseObjectMonitorTable) {
// Test the header to see if it is safe to read w.r.t. locking.
! // This also serves as guard against inline types
+ Node *lock_mask = _gvn.MakeConX(markWord::inline_type_mask_in_place);
Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
if (LockingMode == LM_LIGHTWEIGHT) {
Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
//---------------------------inline_native_getClass----------------------------
// public final native Class<?> java.lang.Object.getClass();
//
// Build special case code for calls to getClass on an object.
bool LibraryCallKit::inline_native_getClass() {
! Node* obj = null_check_receiver();
if (stopped()) return true;
set_result(load_mirror_from_klass(load_object_klass(obj)));
return true;
}
//---------------------------inline_native_getClass----------------------------
// public final native Class<?> java.lang.Object.getClass();
//
// Build special case code for calls to getClass on an object.
bool LibraryCallKit::inline_native_getClass() {
! Node* obj = argument(0);
+ if (obj->is_InlineType()) {
+ const Type* t = _gvn.type(obj);
+ if (t->maybe_null()) {
+ null_check(obj);
+ }
+ set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
+ return true;
+ }
+ obj = null_check_receiver();
if (stopped()) return true;
set_result(load_mirror_from_klass(load_object_klass(obj)));
return true;
}
return true;
}
#undef XTOP
+ //----------------------inline_unsafe_isFlatArray------------------------
+ // public native boolean Unsafe.isFlatArray(Class<?> arrayClass);
+ // This intrinsic exploits assumptions made by the native implementation
+ // (arrayClass is neither null nor primitive) to avoid unnecessary null checks.
+ bool LibraryCallKit::inline_unsafe_isFlatArray() {
+ Node* cls = argument(1);
+ Node* p = basic_plus_adr(cls, java_lang_Class::klass_offset());
+ Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p,
+ TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT));
+ Node* result = flat_array_test(kls);
+ set_result(result);
+ return true;
+ }
+
//------------------------clone_coping-----------------------------------
// Helper function for inline_native_clone.
void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
assert(obj_size != nullptr, "");
Node* raw_obj = alloc_obj->in(1);
// Set the reexecute bit for the interpreter to reexecute
// the bytecode that invokes Object.clone if deoptimization happens.
{ PreserveReexecuteState preexecs(this);
jvms()->set_should_reexecute(true);
! Node* obj = null_check_receiver();
if (stopped()) return true;
const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
// If we are going to clone an instance, we need its exact type to
// know the number and types of fields to convert the clone to
// loads/stores. Maybe a speculative type can help us.
if (!obj_type->klass_is_exact() &&
obj_type->speculative_type() != nullptr &&
! obj_type->speculative_type()->is_instance_klass()) {
ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
!spec_ik->has_injected_fields()) {
if (!obj_type->isa_instptr() ||
obj_type->is_instptr()->instance_klass()->has_subklass()) {
// Set the reexecute bit for the interpreter to reexecute
// the bytecode that invokes Object.clone if deoptimization happens.
{ PreserveReexecuteState preexecs(this);
jvms()->set_should_reexecute(true);
! Node* obj = argument(0);
+ obj = null_check_receiver();
if (stopped()) return true;
const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
+ if (obj_type->is_inlinetypeptr()) {
+ // If the object to clone is an inline type, we can simply return it (i.e. a nop) since inline types have
+ // no identity.
+ set_result(obj);
+ return true;
+ }
// If we are going to clone an instance, we need its exact type to
// know the number and types of fields to convert the clone to
// loads/stores. Maybe a speculative type can help us.
if (!obj_type->klass_is_exact() &&
obj_type->speculative_type() != nullptr &&
! obj_type->speculative_type()->is_instance_klass() &&
+ !obj_type->speculative_type()->is_inlinetype()) {
ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
!spec_ik->has_injected_fields()) {
if (!obj_type->isa_instptr() ||
obj_type->is_instptr()->instance_klass()->has_subklass()) {
PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
record_for_igvn(result_reg);
Node* obj_klass = load_object_klass(obj);
Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
if (array_ctl != nullptr) {
// It's an array.
PreserveJVMState pjvms(this);
set_control(array_ctl);
- Node* obj_length = load_array_length(obj);
- Node* array_size = nullptr; // Size of the array without object alignment padding.
- Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
! if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
! // If it is an oop array, it requires very special treatment,
! // because gc barriers are required when accessing the array.
! Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
! if (is_obja != nullptr) {
! PreserveJVMState pjvms2(this);
! set_control(is_obja);
- // Generate a direct call to the right arraycopy function(s).
- // Clones are always tightly coupled.
- ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
- ac->set_clone_oop_array();
- Node* n = _gvn.transform(ac);
- assert(n == ac, "cannot disappear");
- ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
-
- result_reg->init_req(_objArray_path, control());
- result_val->init_req(_objArray_path, alloc_obj);
- result_i_o ->set_req(_objArray_path, i_o());
- result_mem ->set_req(_objArray_path, reset_memory());
- }
}
- // Otherwise, there are no barriers to worry about.
- // (We can dispense with card marks if we know the allocation
- // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
- // causes the non-eden paths to take compensating steps to
- // simulate a fresh allocation, so that no further
- // card marks are required in compiled code to initialize
- // the object.)
if (!stopped()) {
! copy_to_clone(obj, alloc_obj, array_size, true);
!
! // Present the results of the copy.
! result_reg->init_req(_array_path, control());
! result_val->init_req(_array_path, alloc_obj);
! result_i_o ->set_req(_array_path, i_o());
! result_mem ->set_req(_array_path, reset_memory());
}
}
- // We only go to the instance fast case code if we pass a number of guards.
- // The paths which do not pass are accumulated in the slow_region.
- RegionNode* slow_region = new RegionNode(1);
- record_for_igvn(slow_region);
if (!stopped()) {
// It's an instance (we did array above). Make the slow-path tests.
// If this is a virtual call, we generate a funny guard. We grab
// the vtable entry corresponding to clone() from the target object.
// If the target method which we are calling happens to be the
PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
record_for_igvn(result_reg);
Node* obj_klass = load_object_klass(obj);
+ // We only go to the fast case code if we pass a number of guards.
+ // The paths which do not pass are accumulated in the slow_region.
+ RegionNode* slow_region = new RegionNode(1);
+ record_for_igvn(slow_region);
+
Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
if (array_ctl != nullptr) {
// It's an array.
PreserveJVMState pjvms(this);
set_control(array_ctl);
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
! const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
! if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
! obj_type->can_be_inline_array() &&
! (ary_ptr == nullptr || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
! // Flat inline type array may have object field that would require a
! // write barrier. Conservatively, go to slow path.
! generate_fair_guard(flat_array_test(obj_klass), slow_region);
}
if (!stopped()) {
! Node* obj_length = load_array_length(obj);
! Node* array_size = nullptr; // Size of the array without object alignment padding.
! Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
!
! BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
! if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
! // If it is an oop array, it requires very special treatment,
+ // because gc barriers are required when accessing the array.
+ Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
+ if (is_obja != nullptr) {
+ PreserveJVMState pjvms2(this);
+ set_control(is_obja);
+ // Generate a direct call to the right arraycopy function(s).
+ // Clones are always tightly coupled.
+ ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
+ ac->set_clone_oop_array();
+ Node* n = _gvn.transform(ac);
+ assert(n == ac, "cannot disappear");
+ ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
+
+ result_reg->init_req(_objArray_path, control());
+ result_val->init_req(_objArray_path, alloc_obj);
+ result_i_o ->set_req(_objArray_path, i_o());
+ result_mem ->set_req(_objArray_path, reset_memory());
+ }
+ }
+ // Otherwise, there are no barriers to worry about.
+ // (We can dispense with card marks if we know the allocation
+ // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
+ // causes the non-eden paths to take compensating steps to
+ // simulate a fresh allocation, so that no further
+ // card marks are required in compiled code to initialize
+ // the object.)
+
+ if (!stopped()) {
+ copy_to_clone(obj, alloc_obj, array_size, true);
+
+ // Present the results of the copy.
+ result_reg->init_req(_array_path, control());
+ result_val->init_req(_array_path, alloc_obj);
+ result_i_o ->set_req(_array_path, i_o());
+ result_mem ->set_req(_array_path, reset_memory());
+ }
}
}
if (!stopped()) {
// It's an instance (we did array above). Make the slow-path tests.
// If this is a virtual call, we generate a funny guard. We grab
// the vtable entry corresponding to clone() from the target object.
// If the target method which we are calling happens to be the
SafePointNode* sfpt = new SafePointNode(size, old_jvms);
old_jvms->set_map(sfpt);
for (uint i = 0; i < size; i++) {
sfpt->init_req(i, alloc->in(i));
}
// re-push array length for deoptimization
! sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
! old_jvms->set_sp(old_jvms->sp()+1);
! old_jvms->set_monoff(old_jvms->monoff()+1);
! old_jvms->set_scloff(old_jvms->scloff()+1);
! old_jvms->set_endoff(old_jvms->endoff()+1);
old_jvms->set_should_reexecute(true);
sfpt->set_i_o(map()->i_o());
sfpt->set_memory(map()->memory());
sfpt->set_control(map()->control());
SafePointNode* sfpt = new SafePointNode(size, old_jvms);
old_jvms->set_map(sfpt);
for (uint i = 0; i < size; i++) {
sfpt->init_req(i, alloc->in(i));
}
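// 'adjustment' counts the stack slots pushed for re-execution: the array length,
// plus the component mirror for a null-free array allocation.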
+ int adjustment = 1;
+ const TypeAryKlassPtr* ary_klass_ptr = alloc->in(AllocateNode::KlassNode)->bottom_type()->is_aryklassptr();
+ if (ary_klass_ptr->is_null_free()) {
+ // A null-free, tightly coupled array allocation can only come from LibraryCallKit::inline_newNullRestrictedArray
+ // which requires both the component type and the array length on stack for re-execution. Re-create and push
+ // the component type.
+ ciArrayKlass* klass = ary_klass_ptr->exact_klass()->as_array_klass();
+ ciInstance* instance = klass->component_mirror_instance();
+ const TypeInstPtr* t_instance = TypeInstPtr::make(instance);
+ sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), makecon(t_instance));
+ adjustment++;
+ }
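// Together with the array length pushed below, the re-execution stack then holds
// [..., component mirror, length] - exactly the operands that
// inline_newNullRestrictedArray pops when the allocation is re-executed.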
// re-push array length for deoptimization
! sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment - 1, alloc->in(AllocateNode::ALength));
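// The JVMState offsets are indices into one flat input array, so everything above
// the expression stack (monitors, scalar replaced objects, end of state) shifts
// up by the same adjustment as the stack grows.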
! old_jvms->set_sp(old_jvms->sp() + adjustment);
! old_jvms->set_monoff(old_jvms->monoff() + adjustment);
! old_jvms->set_scloff(old_jvms->scloff() + adjustment);
! old_jvms->set_endoff(old_jvms->endoff() + adjustment);
old_jvms->set_should_reexecute(true);
sfpt->set_i_o(map()->i_o());
sfpt->set_memory(map()->memory());
sfpt->set_control(map()->control());
map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
set_jvms(saved_jvms_before_guards);
_reexecute_sp = saved_reexecute_sp;
// Remove the allocation from above the guards
! CallProjections callprojs;
- alloc->extract_projections(&callprojs, true);
InitializeNode* init = alloc->initialization();
Node* alloc_mem = alloc->in(TypeFunc::Memory);
! C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
// The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
// the allocation (i.e. is only valid if the allocation succeeds):
// 1) replace CastIINode with AllocateArrayNode's length here
map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
set_jvms(saved_jvms_before_guards);
_reexecute_sp = saved_reexecute_sp;
// Remove the allocation from above the guards
! CallProjections* callprojs = alloc->extract_projections(true);
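// The projections of the original allocation are collected so its I/O and memory
// uses can be redirected while the allocation itself is moved below the guards.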
InitializeNode* init = alloc->initialization();
Node* alloc_mem = alloc->in(TypeFunc::Memory);
! C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
// The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
// the allocation (i.e. is only valid if the allocation succeeds):
// 1) replace CastIINode with AllocateArrayNode's length here
alloc->set_req(TypeFunc::I_O, i_o());
Node *mem = reset_memory();
set_all_memory(mem);
alloc->set_req(TypeFunc::Memory, mem);
set_control(init->proj_out_or_null(TypeFunc::Control));
! set_i_o(callprojs.fallthrough_ioproj);
// Update memory as done in GraphKit::set_output_for_allocation()
const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
if (ary_type->isa_aryptr() && length_type != nullptr) {
alloc->set_req(TypeFunc::I_O, i_o());
Node *mem = reset_memory();
set_all_memory(mem);
alloc->set_req(TypeFunc::Memory, mem);
set_control(init->proj_out_or_null(TypeFunc::Control));
! set_i_o(callprojs->fallthrough_ioproj);
// Update memory as done in GraphKit::set_output_for_allocation()
const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
if (ary_type->isa_aryptr() && length_type != nullptr) {
BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
! if (src_elem == dest_elem && src_elem == T_OBJECT) {
// If both arrays are object arrays then having the exact types
// for both will remove the need for a subtype check at runtime
// before the call and may make it possible to pick a faster copy
// routine (without a subtype check on every element)
// Do we have the exact type of src?
BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
! if (src_elem == dest_elem && top_src->is_flat() == top_dest->is_flat() && src_elem == T_OBJECT) {
// If both arrays are object arrays then having the exact types
// for both will remove the need for a subtype check at runtime
// before the call and may make it possible to pick a faster copy
// routine (without a subtype check on every element)
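// With inline type arrays the shortcut additionally requires src and dest to
// agree on flatness; a flat <-> non-flat pair keeps the general path.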
// Do we have the exact type of src?
}
if (could_have_src && could_have_dest) {
// If we can have both exact types, emit the missing guards
if (could_have_src && !src_spec) {
src = maybe_cast_profiled_obj(src, src_k, true);
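// The cast may have sharpened src's type; refresh src_type/top_src (and likewise
// for dest below) so the flat-array handling further down sees the improved types.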
+ src_type = _gvn.type(src);
+ top_src = src_type->isa_aryptr();
}
if (could_have_dest && !dest_spec) {
dest = maybe_cast_profiled_obj(dest, dest_k, true);
+ dest_type = _gvn.type(dest);
+ top_dest = dest_type->isa_aryptr();
}
}
}
}
}
bool negative_length_guard_generated = false;
if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
! can_emit_guards &&
- !src->is_top() && !dest->is_top()) {
// validate arguments: enables transformation of the ArrayCopyNode
validated = true;
RegionNode* slow_region = new RegionNode(1);
record_for_igvn(slow_region);
}
bool negative_length_guard_generated = false;
if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
! can_emit_guards && !src->is_top() && !dest->is_top()) {
// validate arguments: enables transformation of the ArrayCopyNode
validated = true;
RegionNode* slow_region = new RegionNode(1);
record_for_igvn(slow_region);
// (9) each element of an oop array must be assignable
Node* dest_klass = load_object_klass(dest);
if (src != dest) {
Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
! if (not_subtype_ctrl != top()) {
! PreserveJVMState pjvms(this);
! set_control(not_subtype_ctrl);
! uncommon_trap(Deoptimization::Reason_intrinsic,
! Deoptimization::Action_make_not_entrant);
! assert(stopped(), "Should be stopped");
}
}
{
PreserveJVMState pjvms(this);
set_control(_gvn.transform(slow_region));
uncommon_trap(Deoptimization::Reason_intrinsic,
Deoptimization::Action_make_not_entrant);
assert(stopped(), "Should be stopped");
}
-
- const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
- const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
- src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
}
if (stopped()) {
return true;
// (9) each element of an oop array must be assignable
Node* dest_klass = load_object_klass(dest);
if (src != dest) {
Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
+ slow_region->add_req(not_subtype_ctrl);
+ }
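// A failing subtype check joins slow_region instead of trapping right away, so it
// shares a single uncommon trap with the flat-array guards generated below.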
! const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
! const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
! src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
! src_type = _gvn.type(src);
! top_src = src_type->isa_aryptr();
!
+ // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
+ if (!stopped() && UseFlatArray) {
+ // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
+ assert(top_dest == nullptr || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
+ if (top_src != nullptr && top_src->is_flat()) {
+ // Src is flat, check that dest is flat as well
+ if (top_dest != nullptr && !top_dest->is_flat()) {
+ generate_fair_guard(flat_array_test(dest_klass, /* flat = */ false), slow_region);
+ // Since dest is flat and src <: dest, dest must have the same type as src.
+ top_dest = top_src->cast_to_exactness(false);
+ assert(top_dest->is_flat(), "dest must be flat");
+ dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
+ }
+ } else if (top_src == nullptr || !top_src->is_not_flat()) {
+ // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
+ // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
+ assert(top_dest == nullptr || !top_dest->is_flat(), "dest array must not be flat");
+ generate_fair_guard(flat_array_test(src), slow_region);
+ if (top_src != nullptr) {
+ top_src = top_src->cast_to_not_flat();
+ src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
+ }
}
}
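// Flat-array dispatch summary:
//   src statically flat, dest statically flat      -> no extra guard
//   src statically flat, dest not statically flat  -> runtime guard: dest must be flat, else slow_region
//   src possibly flat (not known to be non-flat)   -> runtime guard: src must not be flat, else slow_region
//   src statically non-flat                        -> no extra guard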
+
{
PreserveJVMState pjvms(this);
set_control(_gvn.transform(slow_region));
uncommon_trap(Deoptimization::Reason_intrinsic,
Deoptimization::Action_make_not_entrant);
assert(stopped(), "Should be stopped");
}
arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
}
if (stopped()) {
return true;