5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "compiler/compileLog.hpp"
26 #include "gc/shared/barrierSet.hpp"
27 #include "gc/shared/c2/barrierSetC2.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/cfgnode.hpp"
32 #include "opto/loopnode.hpp"
33 #include "opto/matcher.hpp"
34 #include "opto/movenode.hpp"
35 #include "opto/mulnode.hpp"
36 #include "opto/opaquenode.hpp"
37 #include "opto/opcodes.hpp"
38 #include "opto/phaseX.hpp"
39 #include "opto/subnode.hpp"
40 #include "runtime/sharedRuntime.hpp"
41 #include "utilities/reverse_bits.hpp"
42
43 // Portions of code courtesy of Clifford Click
44
45 // Optimization - Graph Style
46
47 #include "math.h"
48
49 //=============================================================================
50 //------------------------------Identity---------------------------------------
51 // If right input is a constant 0, return the left input.
52 Node* SubNode::Identity(PhaseGVN* phase) {
53 assert(in(1) != this, "Must already have called Value");
54 assert(in(2) != this, "Must already have called Value");
55
56 const Type* zero = add_id();
57
58 // Remove double negation if it is not a floating point number since negation
59 // is not the same as subtraction for floating point numbers
872 switch (in(1)->Opcode()) {
873 case Op_CmpU3: // Collapse a CmpU3/CmpI into a CmpU
874 return new CmpUNode(in(1)->in(1),in(1)->in(2));
875 case Op_CmpL3: // Collapse a CmpL3/CmpI into a CmpL
876 return new CmpLNode(in(1)->in(1),in(1)->in(2));
877 case Op_CmpUL3: // Collapse a CmpUL3/CmpI into a CmpUL
878 return new CmpULNode(in(1)->in(1),in(1)->in(2));
879 case Op_CmpF3: // Collapse a CmpF3/CmpI into a CmpF
880 return new CmpFNode(in(1)->in(1),in(1)->in(2));
881 case Op_CmpD3: // Collapse a CmpD3/CmpI into a CmpD
882 return new CmpDNode(in(1)->in(1),in(1)->in(2));
883 //case Op_SubI:
884 // If (x - y) cannot overflow, then ((x - y) <?> 0)
885 // can be turned into (x <?> y).
886 // This is handled (with more general cases) by Ideal_sub_algebra.
887 }
888 }
889 return nullptr; // No change
890 }
891
892 Node *CmpLNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
893 const TypeLong *t2 = phase->type(in(2))->isa_long();
894 if (Opcode() == Op_CmpL && in(1)->Opcode() == Op_ConvI2L && t2 && t2->is_con()) {
895 const jlong con = t2->get_con();
896 if (con >= min_jint && con <= max_jint) {
897 return new CmpINode(in(1)->in(1), phase->intcon((jint)con));
898 }
899 }
900 return nullptr;
901 }
902
903 //=============================================================================
904 // Simplify a CmpL (compare 2 longs ) node, based on local information.
905 // If both inputs are constants, compare them.
906 const Type *CmpLNode::sub( const Type *t1, const Type *t2 ) const {
907 const TypeLong *r0 = t1->is_long(); // Handy access
908 const TypeLong *r1 = t2->is_long();
909
910 if( r0->_hi < r1->_lo ) // Range is always low?
911 return TypeInt::CC_LT;
912 else if( r0->_lo > r1->_hi ) // Range is always high?
989 if (MemNode::detect_ptr_independence(in1, alloc1, in2, alloc2, nullptr)) {
990 return TypeInt::CC_GT; // different pointers
991 }
992 }
993 bool xklass0 = p0 ? p0->klass_is_exact() : k0->klass_is_exact();
994 bool xklass1 = p1 ? p1->klass_is_exact() : k1->klass_is_exact();
995 bool unrelated_classes = false;
996
997 if ((p0 && p0->is_same_java_type_as(p1)) ||
998 (k0 && k0->is_same_java_type_as(k1))) {
999 } else if ((p0 && !p1->maybe_java_subtype_of(p0) && !p0->maybe_java_subtype_of(p1)) ||
1000 (k0 && !k1->maybe_java_subtype_of(k0) && !k0->maybe_java_subtype_of(k1))) {
1001 unrelated_classes = true;
1002 } else if ((p0 && !p1->maybe_java_subtype_of(p0)) ||
1003 (k0 && !k1->maybe_java_subtype_of(k0))) {
1004 unrelated_classes = xklass1;
1005 } else if ((p0 && !p0->maybe_java_subtype_of(p1)) ||
1006 (k0 && !k0->maybe_java_subtype_of(k1))) {
1007 unrelated_classes = xklass0;
1008 }
1009
1010 if (unrelated_classes) {
1011 // The oops classes are known to be unrelated. If the joined PTRs of
1012 // two oops is not Null and not Bottom, then we are sure that one
1013 // of the two oops is non-null, and the comparison will always fail.
1014 TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
1015 if (jp != TypePtr::Null && jp != TypePtr::BotPTR) {
1016 return TypeInt::CC_GT;
1017 }
1018 }
1019 }
1020
1021 // Known constants can be compared exactly
1022 // Null can be distinguished from any NotNull pointers
1023 // Unknown inputs makes an unknown result
1024 if( r0->singleton() ) {
1025 intptr_t bits0 = r0->get_con();
1026 if( r1->singleton() )
1027 return bits0 == r1->get_con() ? TypeInt::CC_EQ : TypeInt::CC_GT;
1028 return ( r1->_ptr == TypePtr::NotNull && bits0==0 ) ? TypeInt::CC_GT : TypeInt::CC;
1029 } else if( r1->singleton() ) {
1030 intptr_t bits1 = r1->get_con();
1031 return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? TypeInt::CC_GT : TypeInt::CC;
1032 } else
1033 return TypeInt::CC;
1034 }
1035
static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) {
  // Return the klass node for (indirect load from OopHandle)
  //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
  // or null if not matching.
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  // Peel off any GC access barrier the collector wrapped around the load.
  n = bs->step_over_gc_barrier(n);

  if (n->Opcode() != Op_LoadP) return nullptr;

  // The outer load must yield a java.lang.Class instance (the mirror oop).
  const TypeInstPtr* tp = phase->type(n)->isa_instptr();
  if (!tp || tp->instance_klass() != phase->C->env()->Class_klass()) return nullptr;

  Node* adr = n->in(MemNode::Address);
  // First load from OopHandle: ((OopHandle)mirror)->resolve(); may need barrier.
  if (adr->Opcode() != Op_LoadP || !phase->type(adr)->isa_rawptr()) return nullptr;
  adr = adr->in(MemNode::Address);

  // The handle address must decompose to klass_base + Klass::java_mirror_offset.
  intptr_t off = 0;
  Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
  if (k == nullptr) return nullptr;
  const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr();
  if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return nullptr;

  // We've found the klass node of a Java mirror load.
  return k;
}
1062
1063 static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n) {
1064 // for ConP(Foo.class) return ConP(Foo.klass)
1065 // otherwise return null
1066 if (!n->is_Con()) return nullptr;
1067
1068 const TypeInstPtr* tp = phase->type(n)->isa_instptr();
1069 if (!tp) return nullptr;
1070
1071 ciType* mirror_type = tp->java_mirror_type();
1072 // TypeInstPtr::java_mirror_type() returns non-null for compile-
1073 // time Class constants only.
1074 if (!mirror_type) return nullptr;
1075
1076 // x.getClass() == int.class can never be true (for all primitive types)
1077 // Return a ConP(null) node for this case.
1078 if (mirror_type->is_classless()) {
1079 return phase->makecon(TypePtr::NULL_PTR);
1080 }
1081
1082 // return the ConP(Foo.klass)
1083 assert(mirror_type->is_klass(), "mirror_type should represent a Klass*");
1084 return phase->makecon(TypeKlassPtr::make(mirror_type->as_klass(), Type::trust_interfaces));
1085 }
1086
1087 //------------------------------Ideal------------------------------------------
1088 // Normalize comparisons between Java mirror loads to compare the klass instead.
1089 //
1090 // Also check for the case of comparing an unknown klass loaded from the primary
1091 // super-type array vs a known klass with no subtypes. This amounts to
1092 // checking to see an unknown klass subtypes a known klass with no subtypes;
1093 // this only happens on an exact match. We can shorten this test by 1 load.
1094 Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
1095 // Normalize comparisons between Java mirrors into comparisons of the low-
1096 // level klass, where a dependent load could be shortened.
1097 //
1098 // The new pattern has a nice effect of matching the same pattern used in the
1099 // fast path of instanceof/checkcast/Class.isInstance(), which allows
1100 // redundant exact type check be optimized away by GVN.
1101 // For example, in
1102 // if (x.getClass() == Foo.class) {
1103 // Foo foo = (Foo) x;
1104 // // ... use a ...
1105 // }
1106 // a CmpPNode could be shared between if_acmpne and checkcast
1107 {
1108 Node* k1 = isa_java_mirror_load(phase, in(1));
1109 Node* k2 = isa_java_mirror_load(phase, in(2));
1110 Node* conk2 = isa_const_java_mirror(phase, in(2));
1111
1112 if (k1 && (k2 || conk2)) {
1113 Node* lhs = k1;
1114 Node* rhs = (k2 != nullptr) ? k2 : conk2;
1115 set_req_X(1, lhs, phase);
1116 set_req_X(2, rhs, phase);
1117 return this;
1118 }
1119 }
1120
1121 // Constant pointer on right?
1122 const TypeKlassPtr* t2 = phase->type(in(2))->isa_klassptr();
1123 if (t2 == nullptr || !t2->klass_is_exact())
1124 return nullptr;
1125 // Get the constant klass we are comparing to.
1126 ciKlass* superklass = t2->exact_klass();
1127
1128 // Now check for LoadKlass on left.
1129 Node* ldk1 = in(1);
1130 if (ldk1->is_DecodeNKlass()) {
1169 //
1170 // We could be more liberal here, and allow the optimization on interfaces
1171 // which have a single implementor. This would require us to increase the
1172 // expressiveness of the add_dependency() mechanism.
1173 // %%% Do this after we fix TypeOopPtr: Deps are expressive enough now.
1174
1175 // Object arrays must have their base element have no subtypes
1176 while (superklass->is_obj_array_klass()) {
1177 ciType* elem = superklass->as_obj_array_klass()->element_type();
1178 superklass = elem->as_klass();
1179 }
1180 if (superklass->is_instance_klass()) {
1181 ciInstanceKlass* ik = superklass->as_instance_klass();
1182 if (ik->has_subklass() || ik->is_interface()) return nullptr;
1183 // Add a dependency if there is a chance that a subclass will be added later.
1184 if (!ik->is_final()) {
1185 phase->C->dependencies()->assert_leaf_type(ik);
1186 }
1187 }
1188
1189 // Bypass the dependent load, and compare directly
1190 this->set_req_X(1, ldk2, phase);
1191
1192 return this;
1193 }
1194
1195 //=============================================================================
1196 //------------------------------sub--------------------------------------------
1197 // Simplify an CmpN (compare 2 pointers) node, based on local information.
1198 // If both inputs are constants, compare them.
1199 const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
1200 ShouldNotReachHere();
1201 return bottom_type();
1202 }
1203
1204 //------------------------------Ideal------------------------------------------
1205 Node *CmpNNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
1206 return nullptr;
1207 }
1208
1289 if( t2_value_as_double == (double)t2_value_as_float ) {
1290 // Test value can be represented as a float
1291 // Eliminate the conversion to double and create new comparison
1292 Node *new_in1 = in(idx_f2d)->in(1);
1293 Node *new_in2 = phase->makecon( TypeF::make(t2_value_as_float) );
1294 if( idx_f2d != 1 ) { // Must flip args to match original order
1295 Node *tmp = new_in1;
1296 new_in1 = new_in2;
1297 new_in2 = tmp;
1298 }
1299 CmpFNode *new_cmp = (Opcode() == Op_CmpD3)
1300 ? new CmpF3Node( new_in1, new_in2 )
1301 : new CmpFNode ( new_in1, new_in2 ) ;
1302 return new_cmp; // Changed to CmpFNode
1303 }
1304 // Testing value required the precision of a double
1305 }
1306 return nullptr; // No change
1307 }
1308
1309
1310 //=============================================================================
1311 //------------------------------cc2logical-------------------------------------
1312 // Convert a condition code type to a logical type
1313 const Type *BoolTest::cc2logical( const Type *CC ) const {
1314 if( CC == Type::TOP ) return Type::TOP;
1315 if( CC->base() != Type::Int ) return TypeInt::BOOL; // Bottom or worse
1316 const TypeInt *ti = CC->is_int();
1317 if( ti->is_con() ) { // Only 1 kind of condition codes set?
1318 // Match low order 2 bits
1319 int tmp = ((ti->get_con()&3) == (_test&3)) ? 1 : 0;
1320 if( _test & 4 ) tmp = 1-tmp; // Optionally complement result
1321 return TypeInt::make(tmp); // Boolean result
1322 }
1323
1324 if( CC == TypeInt::CC_GE ) {
1325 if( _test == ge ) return TypeInt::ONE;
1326 if( _test == lt ) return TypeInt::ZERO;
1327 }
1328 if( CC == TypeInt::CC_LE ) {
|
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciObjArrayKlass.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "gc/shared/barrierSet.hpp"
28 #include "gc/shared/c2/barrierSetC2.hpp"
29 #include "memory/allocation.inline.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/callnode.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/cfgnode.hpp"
34 #include "opto/convertnode.hpp"
35 #include "opto/inlinetypenode.hpp"
36 #include "opto/loopnode.hpp"
37 #include "opto/matcher.hpp"
38 #include "opto/movenode.hpp"
39 #include "opto/mulnode.hpp"
40 #include "opto/opaquenode.hpp"
41 #include "opto/opcodes.hpp"
42 #include "opto/phaseX.hpp"
43 #include "opto/subnode.hpp"
44 #include "runtime/arguments.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "utilities/reverse_bits.hpp"
47
48 // Portions of code courtesy of Clifford Click
49
50 // Optimization - Graph Style
51
52 #include "math.h"
53
54 //=============================================================================
55 //------------------------------Identity---------------------------------------
56 // If right input is a constant 0, return the left input.
57 Node* SubNode::Identity(PhaseGVN* phase) {
58 assert(in(1) != this, "Must already have called Value");
59 assert(in(2) != this, "Must already have called Value");
60
61 const Type* zero = add_id();
62
63 // Remove double negation if it is not a floating point number since negation
64 // is not the same as subtraction for floating point numbers
877 switch (in(1)->Opcode()) {
878 case Op_CmpU3: // Collapse a CmpU3/CmpI into a CmpU
879 return new CmpUNode(in(1)->in(1),in(1)->in(2));
880 case Op_CmpL3: // Collapse a CmpL3/CmpI into a CmpL
881 return new CmpLNode(in(1)->in(1),in(1)->in(2));
882 case Op_CmpUL3: // Collapse a CmpUL3/CmpI into a CmpUL
883 return new CmpULNode(in(1)->in(1),in(1)->in(2));
884 case Op_CmpF3: // Collapse a CmpF3/CmpI into a CmpF
885 return new CmpFNode(in(1)->in(1),in(1)->in(2));
886 case Op_CmpD3: // Collapse a CmpD3/CmpI into a CmpD
887 return new CmpDNode(in(1)->in(1),in(1)->in(2));
888 //case Op_SubI:
889 // If (x - y) cannot overflow, then ((x - y) <?> 0)
890 // can be turned into (x <?> y).
891 // This is handled (with more general cases) by Ideal_sub_algebra.
892 }
893 }
894 return nullptr; // No change
895 }
896
//------------------------------Ideal------------------------------------------
Node* CmpLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  // Optimize expressions like
  //   CmpL(OrL(CastP2X(..), CastP2X(..)), 0L)
  // that are used by acmp to implement a "both operands are null" check.
  // See also the corresponding code in CmpPNode::Ideal.
  if (can_reshape && in(1)->Opcode() == Op_OrL &&
      in(2)->bottom_type()->is_zero_type()) {
    // Examine both inputs of the OrL for a CastP2X we can simplify.
    for (int i = 1; i <= 2; ++i) {
      Node* orIn = in(1)->in(i);
      if (orIn->Opcode() == Op_CastP2X) {
        Node* castIn = orIn->in(1);
        if (castIn->is_InlineType()) {
          // Replace the CastP2X by the null marker, so the buffer oop is no
          // longer needed and its allocation can potentially be removed.
          InlineTypeNode* vt = castIn->as_InlineType();
          Node* nm = phase->transform(new ConvI2LNode(vt->get_null_marker()));
          phase->is_IterGVN()->replace_input_of(in(1), i, nm);
          // Return 'this' to signal IGVN that the node changed.
          return this;
        } else if (!phase->type(castIn)->maybe_null()) {
          // Never null. Replace the CastP2X by constant 1L, which makes the
          // OrL (and hence the whole "both null" test) constant-foldable.
          phase->is_IterGVN()->replace_input_of(in(1), i, phase->longcon(1));
          return this;
        }
      }
    }
  }
  // Narrow CmpL(ConvI2L(x), con) to CmpI(x, (jint)con) when the constant
  // fits in an int: sign extension preserves the comparison outcome.
  const TypeLong *t2 = phase->type(in(2))->isa_long();
  if (Opcode() == Op_CmpL && in(1)->Opcode() == Op_ConvI2L && t2 && t2->is_con()) {
    const jlong con = t2->get_con();
    if (con >= min_jint && con <= max_jint) {
      return new CmpINode(in(1)->in(1), phase->intcon((jint)con));
    }
  }
  return nullptr;
}
932
933 //=============================================================================
934 // Simplify a CmpL (compare 2 longs ) node, based on local information.
935 // If both inputs are constants, compare them.
936 const Type *CmpLNode::sub( const Type *t1, const Type *t2 ) const {
937 const TypeLong *r0 = t1->is_long(); // Handy access
938 const TypeLong *r1 = t2->is_long();
939
940 if( r0->_hi < r1->_lo ) // Range is always low?
941 return TypeInt::CC_LT;
942 else if( r0->_lo > r1->_hi ) // Range is always high?
1019 if (MemNode::detect_ptr_independence(in1, alloc1, in2, alloc2, nullptr)) {
1020 return TypeInt::CC_GT; // different pointers
1021 }
1022 }
1023 bool xklass0 = p0 ? p0->klass_is_exact() : k0->klass_is_exact();
1024 bool xklass1 = p1 ? p1->klass_is_exact() : k1->klass_is_exact();
1025 bool unrelated_classes = false;
1026
1027 if ((p0 && p0->is_same_java_type_as(p1)) ||
1028 (k0 && k0->is_same_java_type_as(k1))) {
1029 } else if ((p0 && !p1->maybe_java_subtype_of(p0) && !p0->maybe_java_subtype_of(p1)) ||
1030 (k0 && !k1->maybe_java_subtype_of(k0) && !k0->maybe_java_subtype_of(k1))) {
1031 unrelated_classes = true;
1032 } else if ((p0 && !p1->maybe_java_subtype_of(p0)) ||
1033 (k0 && !k1->maybe_java_subtype_of(k0))) {
1034 unrelated_classes = xklass1;
1035 } else if ((p0 && !p0->maybe_java_subtype_of(p1)) ||
1036 (k0 && !k0->maybe_java_subtype_of(k1))) {
1037 unrelated_classes = xklass0;
1038 }
1039 if (!unrelated_classes) {
1040 // Handle inline type arrays
1041 if ((r0->is_flat_in_array() && r1->is_not_flat_in_array()) ||
1042 (r1->is_flat_in_array() && r0->is_not_flat_in_array())) {
1043 // One type is in flat arrays but the other type is not. Must be unrelated.
1044 unrelated_classes = true;
1045 } else if ((r0->is_not_flat() && r1->is_flat()) ||
1046 (r1->is_not_flat() && r0->is_flat())) {
1047 // One type is a non-flat array and the other type is a flat array. Must be unrelated.
1048 unrelated_classes = true;
1049 } else if ((r0->is_not_null_free() && r1->is_null_free()) ||
1050 (r1->is_not_null_free() && r0->is_null_free())) {
1051 // One type is a nullable array and the other type is a null-free array. Must be unrelated.
1052 unrelated_classes = true;
1053 }
1054 }
1055 if (unrelated_classes) {
1056 // The oops classes are known to be unrelated. If the joined PTRs of
1057 // two oops is not Null and not Bottom, then we are sure that one
1058 // of the two oops is non-null, and the comparison will always fail.
1059 TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
1060 if (jp != TypePtr::Null && jp != TypePtr::BotPTR) {
1061 return TypeInt::CC_GT;
1062 }
1063 }
1064 }
1065
1066 // Known constants can be compared exactly
1067 // Null can be distinguished from any NotNull pointers
1068 // Unknown inputs makes an unknown result
1069 if( r0->singleton() ) {
1070 intptr_t bits0 = r0->get_con();
1071 if( r1->singleton() )
1072 return bits0 == r1->get_con() ? TypeInt::CC_EQ : TypeInt::CC_GT;
1073 return ( r1->_ptr == TypePtr::NotNull && bits0==0 ) ? TypeInt::CC_GT : TypeInt::CC;
1074 } else if( r1->singleton() ) {
1075 intptr_t bits1 = r1->get_con();
1076 return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? TypeInt::CC_GT : TypeInt::CC;
1077 } else
1078 return TypeInt::CC;
1079 }
1080
static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n, bool& might_be_an_array) {
  // Return the klass node for (indirect load from OopHandle)
  //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
  // or null if not matching. Sets 'might_be_an_array' (OR-accumulated) when
  // the matched klass could be an array klass.
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  // Peel off any GC access barrier the collector wrapped around the load.
  n = bs->step_over_gc_barrier(n);

  if (n->Opcode() != Op_LoadP) return nullptr;

  // The outer load must yield a java.lang.Class instance (the mirror oop).
  const TypeInstPtr* tp = phase->type(n)->isa_instptr();
  if (!tp || tp->instance_klass() != phase->C->env()->Class_klass()) return nullptr;

  Node* adr = n->in(MemNode::Address);
  // First load from OopHandle: ((OopHandle)mirror)->resolve(); may need barrier.
  if (adr->Opcode() != Op_LoadP || !phase->type(adr)->isa_rawptr()) return nullptr;
  adr = adr->in(MemNode::Address);

  // The handle address must decompose to klass_base + Klass::java_mirror_offset.
  intptr_t off = 0;
  Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
  if (k == nullptr) return nullptr;
  const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr();
  if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return nullptr;
  // Record whether this klass could be an array klass; callers refuse to fold
  // mirror compares when both sides might be arrays.
  might_be_an_array |= tkp->isa_aryklassptr() || tkp->is_instklassptr()->might_be_an_array();

  // We've found the klass node of a Java mirror load.
  return k;
}
1108
static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n, bool& might_be_an_array) {
  // For ConP(Foo.class) return ConP(Foo.klass), otherwise return null.
  // Sets 'might_be_an_array' (OR-accumulated) when the constant mirror could
  // denote an array klass with multiple refined klasses.
  if (!n->is_Con()) return nullptr;

  const TypeInstPtr* tp = phase->type(n)->isa_instptr();
  if (!tp) return nullptr;

  ciType* mirror_type = tp->java_mirror_type();
  // TypeInstPtr::java_mirror_type() returns non-null for compile-
  // time Class constants only.
  if (!mirror_type) return nullptr;

  // x.getClass() == int.class can never be true (for all primitive types)
  // Return a ConP(null) node for this case.
  if (mirror_type->is_classless()) {
    return phase->makecon(TypePtr::NULL_PTR);
  }

  // return the ConP(Foo.klass)
  ciKlass* mirror_klass = mirror_type->as_klass();

  // Non-primitive array mirrors need extra care under Valhalla.
  if (mirror_klass->is_array_klass() && !mirror_klass->is_type_array_klass()) {
    if (!mirror_klass->can_be_inline_array_klass()) {
      // Special case for non-value arrays: They only have one (default) refined class, use it
      ciArrayKlass* refined_mirror_klass = ciObjArrayKlass::make(mirror_klass->as_array_klass()->element_klass(), true);
      return phase->makecon(TypeAryKlassPtr::make(refined_mirror_klass, Type::trust_interfaces));
    }
    // A value-capable array mirror may map to several refined array klasses;
    // tell the caller so it can refuse to fold the comparison.
    might_be_an_array |= true;
  }

  return phase->makecon(TypeKlassPtr::make(mirror_klass, Type::trust_interfaces));
}
1142
1143 //------------------------------Ideal------------------------------------------
1144 // Normalize comparisons between Java mirror loads to compare the klass instead.
1145 //
1146 // Also check for the case of comparing an unknown klass loaded from the primary
1147 // super-type array vs a known klass with no subtypes. This amounts to
1148 // checking to see an unknown klass subtypes a known klass with no subtypes;
1149 // this only happens on an exact match. We can shorten this test by 1 load.
1150 Node* CmpPNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1151 // TODO 8284443 in(1) could be cast?
1152 if (in(1)->is_InlineType() && phase->type(in(2))->is_zero_type()) {
1153 // Null checking a scalarized but nullable inline type. Check the null marker
1154 // input instead of the oop input to avoid keeping buffer allocations alive.
1155 return new CmpINode(in(1)->as_InlineType()->get_null_marker(), phase->intcon(0));
1156 }
1157 if (in(1)->is_InlineType() || in(2)->is_InlineType()) {
1158 // In C2 IR, CmpP on value objects is a pointer comparison, not a value comparison.
1159 // For non-null operands it cannot reliably be true, since their buffer oops are not
1160 // guaranteed to be identical. Therefore, the comparison can only be true when both
1161 // operands are null. Convert expressions like this to a "both operands are null" check:
1162 // CmpL(OrL(CastP2X(..), CastP2X(..)), 0L)
1163 // CmpLNode::Ideal might optimize this further to avoid keeping buffer allocations alive.
1164 Node* input[2];
1165 for (int i = 1; i <= 2; ++i) {
1166 if (in(i)->is_InlineType()) {
1167 input[i-1] = phase->transform(new ConvI2LNode(in(i)->as_InlineType()->get_null_marker()));
1168 } else {
1169 input[i-1] = phase->transform(new CastP2XNode(nullptr, in(i)));
1170 }
1171 }
1172 Node* orL = phase->transform(new OrXNode(input[0], input[1]));
1173 return new CmpXNode(orL, phase->MakeConX(0));
1174 }
1175
1176 // Normalize comparisons between Java mirrors into comparisons of the low-
1177 // level klass, where a dependent load could be shortened.
1178 //
1179 // The new pattern has a nice effect of matching the same pattern used in the
1180 // fast path of instanceof/checkcast/Class.isInstance(), which allows
1181 // redundant exact type check be optimized away by GVN.
1182 // For example, in
1183 // if (x.getClass() == Foo.class) {
1184 // Foo foo = (Foo) x;
1185 // // ... use a ...
1186 // }
1187 // a CmpPNode could be shared between if_acmpne and checkcast
1188 {
1189 bool might_be_an_array1 = false;
1190 bool might_be_an_array2 = false;
1191 Node* k1 = isa_java_mirror_load(phase, in(1), might_be_an_array1);
1192 Node* k2 = isa_java_mirror_load(phase, in(2), might_be_an_array2);
1193 Node* conk2 = isa_const_java_mirror(phase, in(2), might_be_an_array2);
1194 if (might_be_an_array1 && might_be_an_array2) {
1195 // Don't optimize if both sides might be an array because arrays with
1196 // the same Java mirror can have different refined array klasses.
1197 k1 = k2 = nullptr;
1198 }
1199
1200 if (k1 && (k2 || conk2)) {
1201 Node* lhs = k1;
1202 Node* rhs = (k2 != nullptr) ? k2 : conk2;
1203 set_req_X(1, lhs, phase);
1204 set_req_X(2, rhs, phase);
1205 return this;
1206 }
1207 }
1208
1209 // Constant pointer on right?
1210 const TypeKlassPtr* t2 = phase->type(in(2))->isa_klassptr();
1211 if (t2 == nullptr || !t2->klass_is_exact())
1212 return nullptr;
1213 // Get the constant klass we are comparing to.
1214 ciKlass* superklass = t2->exact_klass();
1215
1216 // Now check for LoadKlass on left.
1217 Node* ldk1 = in(1);
1218 if (ldk1->is_DecodeNKlass()) {
1257 //
1258 // We could be more liberal here, and allow the optimization on interfaces
1259 // which have a single implementor. This would require us to increase the
1260 // expressiveness of the add_dependency() mechanism.
1261 // %%% Do this after we fix TypeOopPtr: Deps are expressive enough now.
1262
1263 // Object arrays must have their base element have no subtypes
1264 while (superklass->is_obj_array_klass()) {
1265 ciType* elem = superklass->as_obj_array_klass()->element_type();
1266 superklass = elem->as_klass();
1267 }
1268 if (superklass->is_instance_klass()) {
1269 ciInstanceKlass* ik = superklass->as_instance_klass();
1270 if (ik->has_subklass() || ik->is_interface()) return nullptr;
1271 // Add a dependency if there is a chance that a subclass will be added later.
1272 if (!ik->is_final()) {
1273 phase->C->dependencies()->assert_leaf_type(ik);
1274 }
1275 }
1276
1277 // Do not fold the subtype check to an array klass pointer comparison for
1278 // value class arrays because they can have multiple refined array klasses.
1279 superklass = t2->exact_klass();
1280 assert(!superklass->is_flat_array_klass(), "Unexpected flat array klass");
1281 if (superklass->is_obj_array_klass()) {
1282 if (superklass->as_array_klass()->element_klass()->is_inlinetype() && !superklass->as_array_klass()->is_refined()) {
1283 return nullptr;
1284 } else {
1285 // Special case for non-value arrays: They only have one (default) refined class, use it
1286 set_req_X(2, phase->makecon(t2->is_aryklassptr()->cast_to_refined_array_klass_ptr()), phase);
1287 }
1288 }
1289
1290 // Bypass the dependent load, and compare directly
1291 this->set_req_X(1, ldk2, phase);
1292
1293 return this;
1294 }
1295
1296 //=============================================================================
1297 //------------------------------sub--------------------------------------------
1298 // Simplify an CmpN (compare 2 pointers) node, based on local information.
1299 // If both inputs are constants, compare them.
1300 const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
1301 ShouldNotReachHere();
1302 return bottom_type();
1303 }
1304
1305 //------------------------------Ideal------------------------------------------
1306 Node *CmpNNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
1307 return nullptr;
1308 }
1309
1390 if( t2_value_as_double == (double)t2_value_as_float ) {
1391 // Test value can be represented as a float
1392 // Eliminate the conversion to double and create new comparison
1393 Node *new_in1 = in(idx_f2d)->in(1);
1394 Node *new_in2 = phase->makecon( TypeF::make(t2_value_as_float) );
1395 if( idx_f2d != 1 ) { // Must flip args to match original order
1396 Node *tmp = new_in1;
1397 new_in1 = new_in2;
1398 new_in2 = tmp;
1399 }
1400 CmpFNode *new_cmp = (Opcode() == Op_CmpD3)
1401 ? new CmpF3Node( new_in1, new_in2 )
1402 : new CmpFNode ( new_in1, new_in2 ) ;
1403 return new_cmp; // Changed to CmpFNode
1404 }
1405 // Testing value required the precision of a double
1406 }
1407 return nullptr; // No change
1408 }
1409
1410 //=============================================================================
1411 //------------------------------Value------------------------------------------
1412 const Type* FlatArrayCheckNode::Value(PhaseGVN* phase) const {
1413 bool all_not_flat = true;
1414 for (uint i = ArrayOrKlass; i < req(); ++i) {
1415 const Type* t = phase->type(in(i));
1416 if (t == Type::TOP) {
1417 return Type::TOP;
1418 }
1419 if (t->is_ptr()->is_flat()) {
1420 // One of the input arrays is flat, check always passes
1421 return TypeInt::CC_EQ;
1422 } else if (!t->is_ptr()->is_not_flat()) {
1423 // One of the input arrays might be flat
1424 all_not_flat = false;
1425 }
1426 }
1427 if (all_not_flat) {
1428 // None of the input arrays can be flat, check always fails
1429 return TypeInt::CC_GT;
1430 }
1431 return TypeInt::CC;
1432 }
1433
1434 //------------------------------Ideal------------------------------------------
1435 Node* FlatArrayCheckNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1436 bool changed = false;
1437 // Remove inputs that are known to be non-flat
1438 for (uint i = ArrayOrKlass; i < req(); ++i) {
1439 const Type* t = phase->type(in(i));
1440 if (t->isa_ptr() && t->is_ptr()->is_not_flat()) {
1441 del_req(i--);
1442 changed = true;
1443 }
1444 }
1445 return changed ? this : nullptr;
1446 }
1447
1448 //=============================================================================
1449 //------------------------------cc2logical-------------------------------------
1450 // Convert a condition code type to a logical type
1451 const Type *BoolTest::cc2logical( const Type *CC ) const {
1452 if( CC == Type::TOP ) return Type::TOP;
1453 if( CC->base() != Type::Int ) return TypeInt::BOOL; // Bottom or worse
1454 const TypeInt *ti = CC->is_int();
1455 if( ti->is_con() ) { // Only 1 kind of condition codes set?
1456 // Match low order 2 bits
1457 int tmp = ((ti->get_con()&3) == (_test&3)) ? 1 : 0;
1458 if( _test & 4 ) tmp = 1-tmp; // Optionally complement result
1459 return TypeInt::make(tmp); // Boolean result
1460 }
1461
1462 if( CC == TypeInt::CC_GE ) {
1463 if( _test == ge ) return TypeInt::ONE;
1464 if( _test == lt ) return TypeInt::ZERO;
1465 }
1466 if( CC == TypeInt::CC_LE ) {
|