< prev index next > src/hotspot/share/opto/subnode.cpp
Print this page
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
+ #include "opto/inlinetypenode.hpp"
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
}
}
return nullptr; // No change
}
! Node *CmpLNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
const TypeLong *t2 = phase->type(in(2))->isa_long();
if (Opcode() == Op_CmpL && in(1)->Opcode() == Op_ConvI2L && t2 && t2->is_con()) {
const jlong con = t2->get_con();
if (con >= min_jint && con <= max_jint) {
return new CmpINode(in(1)->in(1), phase->intcon((jint)con));
}
}
return nullptr;
}
//=============================================================================
// Simplify a CmpL (compare 2 longs) node, based on local information.
// If both inputs are constants, compare them.
const Type *CmpLNode::sub( const Type *t1, const Type *t2 ) const {
const TypeLong *r0 = t1->is_long(); // Handy access
}
}
return nullptr; // No change
}
! //------------------------------Ideal------------------------------------------
+ Node* CmpLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+ Node* a = nullptr;
+ Node* b = nullptr;
+ if (is_double_null_check(phase, a, b) && (phase->type(a)->is_zero_type() || phase->type(b)->is_zero_type())) {
+ // Degraded to a simple null check, use old acmp
+ return new CmpPNode(a, b);
+ }
const TypeLong *t2 = phase->type(in(2))->isa_long();
if (Opcode() == Op_CmpL && in(1)->Opcode() == Op_ConvI2L && t2 && t2->is_con()) {
const jlong con = t2->get_con();
if (con >= min_jint && con <= max_jint) {
return new CmpINode(in(1)->in(1), phase->intcon((jint)con));
}
}
return nullptr;
}
+ // Match double null check emitted by Compile::optimize_acmp()
+ bool CmpLNode::is_double_null_check(PhaseGVN* phase, Node*& a, Node*& b) const {
+ if (in(1)->Opcode() == Op_OrL &&
+ in(1)->in(1)->Opcode() == Op_CastP2X &&
+ in(1)->in(2)->Opcode() == Op_CastP2X &&
+ in(2)->bottom_type()->is_zero_type()) {
+ assert(EnableValhalla, "unexpected double null check");
+ a = in(1)->in(1)->in(1);
+ b = in(1)->in(2)->in(1);
+ return true;
+ }
+ return false;
+ }
+
+ //------------------------------Value------------------------------------------
+ const Type* CmpLNode::Value(PhaseGVN* phase) const {
+ Node* a = nullptr;
+ Node* b = nullptr;
+ if (is_double_null_check(phase, a, b) && (!phase->type(a)->maybe_null() || !phase->type(b)->maybe_null())) {
+ // One operand is never nullptr, emit constant false
+ return TypeInt::CC_GT;
+ }
+ return SubNode::Value(phase);
+ }
+
//=============================================================================
// Simplify a CmpL (compare 2 longs) node, based on local information.
// If both inputs are constants, compare them.
const Type *CmpLNode::sub( const Type *t1, const Type *t2 ) const {
const TypeLong *r0 = t1->is_long(); // Handy access
unrelated_classes = xklass1;
} else if ((p0 && !p0->maybe_java_subtype_of(p1)) ||
(k0 && !k0->maybe_java_subtype_of(k1))) {
unrelated_classes = xklass0;
}
!
if (unrelated_classes) {
// The oops classes are known to be unrelated. If the joined PTRs of
// two oops is not Null and not Bottom, then we are sure that one
// of the two oops is non-null, and the comparison will always fail.
TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
unrelated_classes = xklass1;
} else if ((p0 && !p0->maybe_java_subtype_of(p1)) ||
(k0 && !k0->maybe_java_subtype_of(k1))) {
unrelated_classes = xklass0;
}
! if (!unrelated_classes) {
+ // Handle inline type arrays
+ if ((r0->flat_in_array() && r1->not_flat_in_array()) ||
+ (r1->flat_in_array() && r0->not_flat_in_array())) {
+ // One type is in flat arrays but the other type is not. Must be unrelated.
+ unrelated_classes = true;
+ } else if ((r0->is_not_flat() && r1->is_flat()) ||
+ (r1->is_not_flat() && r0->is_flat())) {
+ // One type is a non-flat array and the other type is a flat array. Must be unrelated.
+ unrelated_classes = true;
+ } else if ((r0->is_not_null_free() && r1->is_null_free()) ||
+ (r1->is_not_null_free() && r0->is_null_free())) {
+ // One type is a nullable array and the other type is a null-free array. Must be unrelated.
+ unrelated_classes = true;
+ }
+ }
if (unrelated_classes) {
// The oops classes are known to be unrelated. If the joined PTRs of
// two oops is not Null and not Bottom, then we are sure that one
// of the two oops is non-null, and the comparison will always fail.
TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? TypeInt::CC_GT : TypeInt::CC;
} else
return TypeInt::CC;
}
! static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) {
// Return the klass node for (indirect load from OopHandle)
// LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
// or null if not matching.
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
n = bs->step_over_gc_barrier(n);
return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? TypeInt::CC_GT : TypeInt::CC;
} else
return TypeInt::CC;
}
! static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n, bool& might_be_an_array) {
// Return the klass node for (indirect load from OopHandle)
// LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
// or null if not matching.
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
n = bs->step_over_gc_barrier(n);
intptr_t off = 0;
Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
if (k == nullptr) return nullptr;
const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr();
if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return nullptr;
// We've found the klass node of a Java mirror load.
return k;
}
! static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n) {
// for ConP(Foo.class) return ConP(Foo.klass)
// otherwise return null
if (!n->is_Con()) return nullptr;
const TypeInstPtr* tp = phase->type(n)->isa_instptr();
intptr_t off = 0;
Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
if (k == nullptr) return nullptr;
const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr();
if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return nullptr;
+ might_be_an_array |= tkp->isa_aryklassptr() || tkp->is_instklassptr()->might_be_an_array();
// We've found the klass node of a Java mirror load.
return k;
}
! static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n, bool& might_be_an_array) {
// for ConP(Foo.class) return ConP(Foo.klass)
// otherwise return null
if (!n->is_Con()) return nullptr;
const TypeInstPtr* tp = phase->type(n)->isa_instptr();
if (mirror_type->is_classless()) {
return phase->makecon(TypePtr::NULL_PTR);
}
// return the ConP(Foo.klass)
! assert(mirror_type->is_klass(), "mirror_type should represent a Klass*");
! return phase->makecon(TypeKlassPtr::make(mirror_type->as_klass(), Type::trust_interfaces));
}
//------------------------------Ideal------------------------------------------
// Normalize comparisons between Java mirror loads to compare the klass instead.
//
// Also check for the case of comparing an unknown klass loaded from the primary
// super-type array vs a known klass with no subtypes. This amounts to
// checking to see an unknown klass subtypes a known klass with no subtypes;
// this only happens on an exact match. We can shorten this test by 1 load.
! Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
// Normalize comparisons between Java mirrors into comparisons of the low-
// level klass, where a dependent load could be shortened.
//
// The new pattern has a nice effect of matching the same pattern used in the
// fast path of instanceof/checkcast/Class.isInstance(), which allows
if (mirror_type->is_classless()) {
return phase->makecon(TypePtr::NULL_PTR);
}
// return the ConP(Foo.klass)
! ciKlass* mirror_klass = mirror_type->as_klass();
!
+ if (mirror_klass->is_array_klass()) {
+ if (!mirror_klass->can_be_inline_array_klass()) {
+ // Special case for non-value arrays: They only have one (default) refined class, use it
+ return phase->makecon(TypeAryKlassPtr::make(mirror_klass, Type::trust_interfaces, true));
+ }
+ might_be_an_array |= true;
+ }
+
+ return phase->makecon(TypeKlassPtr::make(mirror_klass, Type::trust_interfaces));
}
//------------------------------Ideal------------------------------------------
// Normalize comparisons between Java mirror loads to compare the klass instead.
//
// Also check for the case of comparing an unknown klass loaded from the primary
// super-type array vs a known klass with no subtypes. This amounts to
// checking to see an unknown klass subtypes a known klass with no subtypes;
// this only happens on an exact match. We can shorten this test by 1 load.
! Node* CmpPNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+ // TODO 8284443 in(1) could be cast?
+ if (in(1)->is_InlineType() && phase->type(in(2))->is_zero_type()) {
+ // Null checking a scalarized but nullable inline type. Check the null marker
+ // input instead of the oop input to avoid keeping buffer allocations alive.
+ return new CmpINode(in(1)->as_InlineType()->get_null_marker(), phase->intcon(0));
+ }
+
// Normalize comparisons between Java mirrors into comparisons of the low-
// level klass, where a dependent load could be shortened.
//
// The new pattern has a nice effect of matching the same pattern used in the
// fast path of instanceof/checkcast/Class.isInstance(), which allows
// Foo foo = (Foo) x;
// // ... use a ...
// }
// a CmpPNode could be shared between if_acmpne and checkcast
{
! Node* k1 = isa_java_mirror_load(phase, in(1));
! Node* k2 = isa_java_mirror_load(phase, in(2));
! Node* conk2 = isa_const_java_mirror(phase, in(2));
if (k1 && (k2 || conk2)) {
Node* lhs = k1;
Node* rhs = (k2 != nullptr) ? k2 : conk2;
set_req_X(1, lhs, phase);
// Foo foo = (Foo) x;
// // ... use a ...
// }
// a CmpPNode could be shared between if_acmpne and checkcast
{
! bool might_be_an_array1 = false;
! bool might_be_an_array2 = false;
! Node* k1 = isa_java_mirror_load(phase, in(1), might_be_an_array1);
+ Node* k2 = isa_java_mirror_load(phase, in(2), might_be_an_array2);
+ Node* conk2 = isa_const_java_mirror(phase, in(2), might_be_an_array2);
+ if (might_be_an_array1 && might_be_an_array2) {
+ // Don't optimize if both sides might be an array because arrays with
+ // the same Java mirror can have different refined array klasses.
+ k1 = k2 = nullptr;
+ }
if (k1 && (k2 || conk2)) {
Node* lhs = k1;
Node* rhs = (k2 != nullptr) ? k2 : conk2;
set_req_X(1, lhs, phase);
if (!ik->is_final()) {
phase->C->dependencies()->assert_leaf_type(ik);
}
}
+ // Do not fold the subtype check to an array klass pointer comparison for
+ // value class arrays because they can have multiple refined array klasses.
+ superklass = t2->exact_klass();
+ assert(!superklass->is_flat_array_klass(), "Unexpected flat array klass");
+ if (superklass->is_obj_array_klass()) {
+ if (!superklass->as_array_klass()->is_elem_null_free() &&
+ superklass->as_array_klass()->element_klass()->is_inlinetype()) {
+ return nullptr;
+ } else {
+ // Special case for non-value arrays: They only have one (default) refined class, use it
+ set_req_X(2, phase->makecon(t2->is_aryklassptr()->refined_array_klass_ptr()), phase);
+ }
+ }
+
// Bypass the dependent load, and compare directly
this->set_req_X(1, ldk2, phase);
return this;
}
// Testing value required the precision of a double
}
return nullptr; // No change
}
+ //=============================================================================
+ //------------------------------Value------------------------------------------
+ const Type* FlatArrayCheckNode::Value(PhaseGVN* phase) const {
+ bool all_not_flat = true;
+ for (uint i = ArrayOrKlass; i < req(); ++i) {
+ const Type* t = phase->type(in(i));
+ if (t == Type::TOP) {
+ return Type::TOP;
+ }
+ if (t->is_ptr()->is_flat()) {
+ // One of the input arrays is flat, check always passes
+ return TypeInt::CC_EQ;
+ } else if (!t->is_ptr()->is_not_flat()) {
+ // One of the input arrays might be flat
+ all_not_flat = false;
+ }
+ }
+ if (all_not_flat) {
+ // None of the input arrays can be flat, check always fails
+ return TypeInt::CC_GT;
+ }
+ return TypeInt::CC;
+ }
+
+ //------------------------------Ideal------------------------------------------
+ Node* FlatArrayCheckNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+ bool changed = false;
+ // Remove inputs that are known to be non-flat
+ for (uint i = ArrayOrKlass; i < req(); ++i) {
+ const Type* t = phase->type(in(i));
+ if (t->isa_ptr() && t->is_ptr()->is_not_flat()) {
+ del_req(i--);
+ changed = true;
+ }
+ }
+ return changed ? this : nullptr;
+ }
//=============================================================================
//------------------------------cc2logical-------------------------------------
// Convert a condition code type to a logical type
const Type *BoolTest::cc2logical( const Type *CC ) const {
< prev index next >