5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "compiler/compileLog.hpp"
26 #include "gc/shared/barrierSet.hpp"
27 #include "gc/shared/c2/barrierSetC2.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/cfgnode.hpp"
32 #include "opto/loopnode.hpp"
33 #include "opto/matcher.hpp"
34 #include "opto/movenode.hpp"
35 #include "opto/mulnode.hpp"
36 #include "opto/opaquenode.hpp"
37 #include "opto/opcodes.hpp"
38 #include "opto/phaseX.hpp"
39 #include "opto/subnode.hpp"
40 #include "runtime/sharedRuntime.hpp"
41 #include "utilities/reverse_bits.hpp"
42
43 // Portions of code courtesy of Clifford Click
44
45 // Optimization - Graph Style
46
47 #include "math.h"
48
49 //=============================================================================
50 //------------------------------Identity---------------------------------------
51 // If right input is a constant 0, return the left input.
52 Node* SubNode::Identity(PhaseGVN* phase) {
53 assert(in(1) != this, "Must already have called Value");
54 assert(in(2) != this, "Must already have called Value");
55
56 const Type* zero = add_id();
57
58 // Remove double negation if it is not a floating point number since negation
59 // is not the same as subtraction for floating point numbers
903 switch (in(1)->Opcode()) {
904 case Op_CmpU3: // Collapse a CmpU3/CmpI into a CmpU
905 return new CmpUNode(in(1)->in(1),in(1)->in(2));
906 case Op_CmpL3: // Collapse a CmpL3/CmpI into a CmpL
907 return new CmpLNode(in(1)->in(1),in(1)->in(2));
908 case Op_CmpUL3: // Collapse a CmpUL3/CmpI into a CmpUL
909 return new CmpULNode(in(1)->in(1),in(1)->in(2));
910 case Op_CmpF3: // Collapse a CmpF3/CmpI into a CmpF
911 return new CmpFNode(in(1)->in(1),in(1)->in(2));
912 case Op_CmpD3: // Collapse a CmpD3/CmpI into a CmpD
913 return new CmpDNode(in(1)->in(1),in(1)->in(2));
914 //case Op_SubI:
915 // If (x - y) cannot overflow, then ((x - y) <?> 0)
916 // can be turned into (x <?> y).
917 // This is handled (with more general cases) by Ideal_sub_algebra.
918 }
919 }
920 return nullptr; // No change
921 }
922
923 Node *CmpLNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
924 const TypeLong *t2 = phase->type(in(2))->isa_long();
925 if (Opcode() == Op_CmpL && in(1)->Opcode() == Op_ConvI2L && t2 && t2->is_con()) {
926 const jlong con = t2->get_con();
927 if (con >= min_jint && con <= max_jint) {
928 return new CmpINode(in(1)->in(1), phase->intcon((jint)con));
929 }
930 }
931 return nullptr;
932 }
933
934 //=============================================================================
935 // Simplify a CmpL (compare 2 longs ) node, based on local information.
936 // If both inputs are constants, compare them.
937 const Type *CmpLNode::sub( const Type *t1, const Type *t2 ) const {
938 const TypeLong *r0 = t1->is_long(); // Handy access
939 const TypeLong *r1 = t2->is_long();
940
941 if( r0->_hi < r1->_lo ) // Range is always low?
942 return TypeInt::CC_LT;
943 else if( r0->_lo > r1->_hi ) // Range is always high?
1050 if (MemNode::detect_ptr_independence(in1, alloc1, in2, alloc2, nullptr)) {
1051 return TypeInt::CC_GT; // different pointers
1052 }
1053 }
1054 bool xklass0 = p0 ? p0->klass_is_exact() : k0->klass_is_exact();
1055 bool xklass1 = p1 ? p1->klass_is_exact() : k1->klass_is_exact();
1056 bool unrelated_classes = false;
1057
1058 if ((p0 && p0->is_same_java_type_as(p1)) ||
1059 (k0 && k0->is_same_java_type_as(k1))) {
1060 } else if ((p0 && !p1->maybe_java_subtype_of(p0) && !p0->maybe_java_subtype_of(p1)) ||
1061 (k0 && !k1->maybe_java_subtype_of(k0) && !k0->maybe_java_subtype_of(k1))) {
1062 unrelated_classes = true;
1063 } else if ((p0 && !p1->maybe_java_subtype_of(p0)) ||
1064 (k0 && !k1->maybe_java_subtype_of(k0))) {
1065 unrelated_classes = xklass1;
1066 } else if ((p0 && !p0->maybe_java_subtype_of(p1)) ||
1067 (k0 && !k0->maybe_java_subtype_of(k1))) {
1068 unrelated_classes = xklass0;
1069 }
1070
1071 if (unrelated_classes) {
1072 // The oops classes are known to be unrelated. If the joined PTRs of
1073 // two oops is not Null and not Bottom, then we are sure that one
1074 // of the two oops is non-null, and the comparison will always fail.
1075 TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
1076 if (jp != TypePtr::Null && jp != TypePtr::BotPTR) {
1077 return TypeInt::CC_GT;
1078 }
1079 }
1080 }
1081
1082 // Known constants can be compared exactly
1083 // Null can be distinguished from any NotNull pointers
1084 // Unknown inputs makes an unknown result
1085 if( r0->singleton() ) {
1086 intptr_t bits0 = r0->get_con();
1087 if( r1->singleton() )
1088 return bits0 == r1->get_con() ? TypeInt::CC_EQ : TypeInt::CC_GT;
1089 return ( r1->_ptr == TypePtr::NotNull && bits0==0 ) ? TypeInt::CC_GT : TypeInt::CC;
1090 } else if( r1->singleton() ) {
1091 intptr_t bits1 = r1->get_con();
1092 return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? TypeInt::CC_GT : TypeInt::CC;
1093 } else
1094 return TypeInt::CC;
1095 }
1096
1097 static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) {
1098 // Return the klass node for (indirect load from OopHandle)
1099 // LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1100 // or null if not matching.
1101 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1102 n = bs->step_over_gc_barrier(n);
1103
1104 if (n->Opcode() != Op_LoadP) return nullptr;
1105
1106 const TypeInstPtr* tp = phase->type(n)->isa_instptr();
1107 if (!tp || tp->instance_klass() != phase->C->env()->Class_klass()) return nullptr;
1108
1109 Node* adr = n->in(MemNode::Address);
1110 // First load from OopHandle: ((OopHandle)mirror)->resolve(); may need barrier.
1111 if (adr->Opcode() != Op_LoadP || !phase->type(adr)->isa_rawptr()) return nullptr;
1112 adr = adr->in(MemNode::Address);
1113
1114 intptr_t off = 0;
1115 Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
1116 if (k == nullptr) return nullptr;
1117 const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr();
1118 if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return nullptr;
1119
1120 // We've found the klass node of a Java mirror load.
1121 return k;
1122 }
1123
1124 static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n) {
1125 // for ConP(Foo.class) return ConP(Foo.klass)
1126 // otherwise return null
1127 if (!n->is_Con()) return nullptr;
1128
1129 const TypeInstPtr* tp = phase->type(n)->isa_instptr();
1130 if (!tp) return nullptr;
1131
1132 ciType* mirror_type = tp->java_mirror_type();
1133 // TypeInstPtr::java_mirror_type() returns non-null for compile-
1134 // time Class constants only.
1135 if (!mirror_type) return nullptr;
1136
1137 // x.getClass() == int.class can never be true (for all primitive types)
1138 // Return a ConP(null) node for this case.
1139 if (mirror_type->is_classless()) {
1140 return phase->makecon(TypePtr::NULL_PTR);
1141 }
1142
1143 // return the ConP(Foo.klass)
1144 assert(mirror_type->is_klass(), "mirror_type should represent a Klass*");
1145 return phase->makecon(TypeKlassPtr::make(mirror_type->as_klass(), Type::trust_interfaces));
1146 }
1147
1148 //------------------------------Ideal------------------------------------------
1149 // Normalize comparisons between Java mirror loads to compare the klass instead.
1150 //
1151 // Also check for the case of comparing an unknown klass loaded from the primary
1152 // super-type array vs a known klass with no subtypes. This amounts to
1153 // checking to see an unknown klass subtypes a known klass with no subtypes;
1154 // this only happens on an exact match. We can shorten this test by 1 load.
1155 Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
1156 // Normalize comparisons between Java mirrors into comparisons of the low-
1157 // level klass, where a dependent load could be shortened.
1158 //
1159 // The new pattern has a nice effect of matching the same pattern used in the
1160 // fast path of instanceof/checkcast/Class.isInstance(), which allows
1161 // redundant exact type check be optimized away by GVN.
1162 // For example, in
1163 // if (x.getClass() == Foo.class) {
1164 // Foo foo = (Foo) x;
1165 // // ... use a ...
1166 // }
1167 // a CmpPNode could be shared between if_acmpne and checkcast
1168 {
1169 Node* k1 = isa_java_mirror_load(phase, in(1));
1170 Node* k2 = isa_java_mirror_load(phase, in(2));
1171 Node* conk2 = isa_const_java_mirror(phase, in(2));
1172
1173 if (k1 && (k2 || conk2)) {
1174 Node* lhs = k1;
1175 Node* rhs = (k2 != nullptr) ? k2 : conk2;
1176 set_req_X(1, lhs, phase);
1177 set_req_X(2, rhs, phase);
1178 return this;
1179 }
1180 }
1181
1182 // Constant pointer on right?
1183 const TypeKlassPtr* t2 = phase->type(in(2))->isa_klassptr();
1184 if (t2 == nullptr || !t2->klass_is_exact())
1185 return nullptr;
1186 // Get the constant klass we are comparing to.
1187 ciKlass* superklass = t2->exact_klass();
1188
1189 // Now check for LoadKlass on left.
1190 Node* ldk1 = in(1);
1191 if (ldk1->is_DecodeNKlass()) {
1230 //
1231 // We could be more liberal here, and allow the optimization on interfaces
1232 // which have a single implementor. This would require us to increase the
1233 // expressiveness of the add_dependency() mechanism.
1234 // %%% Do this after we fix TypeOopPtr: Deps are expressive enough now.
1235
1236 // Object arrays must have their base element have no subtypes
1237 while (superklass->is_obj_array_klass()) {
1238 ciType* elem = superklass->as_obj_array_klass()->element_type();
1239 superklass = elem->as_klass();
1240 }
1241 if (superklass->is_instance_klass()) {
1242 ciInstanceKlass* ik = superklass->as_instance_klass();
1243 if (ik->has_subklass() || ik->is_interface()) return nullptr;
1244 // Add a dependency if there is a chance that a subclass will be added later.
1245 if (!ik->is_final()) {
1246 phase->C->dependencies()->assert_leaf_type(ik);
1247 }
1248 }
1249
1250 // Bypass the dependent load, and compare directly
1251 this->set_req_X(1, ldk2, phase);
1252
1253 return this;
1254 }
1255
1256 //=============================================================================
1257 //------------------------------sub--------------------------------------------
1258 // Simplify an CmpN (compare 2 pointers) node, based on local information.
1259 // If both inputs are constants, compare them.
1260 const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
1261 ShouldNotReachHere();
1262 return bottom_type();
1263 }
1264
1265 //------------------------------Ideal------------------------------------------
1266 Node *CmpNNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
1267 return nullptr;
1268 }
1269
1350 if( t2_value_as_double == (double)t2_value_as_float ) {
1351 // Test value can be represented as a float
1352 // Eliminate the conversion to double and create new comparison
1353 Node *new_in1 = in(idx_f2d)->in(1);
1354 Node *new_in2 = phase->makecon( TypeF::make(t2_value_as_float) );
1355 if( idx_f2d != 1 ) { // Must flip args to match original order
1356 Node *tmp = new_in1;
1357 new_in1 = new_in2;
1358 new_in2 = tmp;
1359 }
1360 CmpFNode *new_cmp = (Opcode() == Op_CmpD3)
1361 ? new CmpF3Node( new_in1, new_in2 )
1362 : new CmpFNode ( new_in1, new_in2 ) ;
1363 return new_cmp; // Changed to CmpFNode
1364 }
1365 // Testing value required the precision of a double
1366 }
1367 return nullptr; // No change
1368 }
1369
1370
1371 //=============================================================================
1372 //------------------------------cc2logical-------------------------------------
1373 // Convert a condition code type to a logical type
1374 const Type *BoolTest::cc2logical( const Type *CC ) const {
1375 if( CC == Type::TOP ) return Type::TOP;
1376 if( CC->base() != Type::Int ) return TypeInt::BOOL; // Bottom or worse
1377 const TypeInt *ti = CC->is_int();
1378 if( ti->is_con() ) { // Only 1 kind of condition codes set?
1379 // Match low order 2 bits
1380 int tmp = ((ti->get_con()&3) == (_test&3)) ? 1 : 0;
1381 if( _test & 4 ) tmp = 1-tmp; // Optionally complement result
1382 return TypeInt::make(tmp); // Boolean result
1383 }
1384
1385 if( CC == TypeInt::CC_GE ) {
1386 if( _test == ge ) return TypeInt::ONE;
1387 if( _test == lt ) return TypeInt::ZERO;
1388 }
1389 if( CC == TypeInt::CC_LE ) {
|
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciObjArrayKlass.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "gc/shared/barrierSet.hpp"
28 #include "gc/shared/c2/barrierSetC2.hpp"
29 #include "memory/allocation.inline.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/callnode.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/cfgnode.hpp"
34 #include "opto/convertnode.hpp"
35 #include "opto/inlinetypenode.hpp"
36 #include "opto/loopnode.hpp"
37 #include "opto/matcher.hpp"
38 #include "opto/movenode.hpp"
39 #include "opto/mulnode.hpp"
40 #include "opto/opaquenode.hpp"
41 #include "opto/opcodes.hpp"
42 #include "opto/phaseX.hpp"
43 #include "opto/subnode.hpp"
44 #include "runtime/arguments.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "utilities/reverse_bits.hpp"
47
48 // Portions of code courtesy of Clifford Click
49
50 // Optimization - Graph Style
51
52 #include "math.h"
53
54 //=============================================================================
55 //------------------------------Identity---------------------------------------
56 // If right input is a constant 0, return the left input.
57 Node* SubNode::Identity(PhaseGVN* phase) {
58 assert(in(1) != this, "Must already have called Value");
59 assert(in(2) != this, "Must already have called Value");
60
61 const Type* zero = add_id();
62
63 // Remove double negation if it is not a floating point number since negation
64 // is not the same as subtraction for floating point numbers
908 switch (in(1)->Opcode()) {
909 case Op_CmpU3: // Collapse a CmpU3/CmpI into a CmpU
910 return new CmpUNode(in(1)->in(1),in(1)->in(2));
911 case Op_CmpL3: // Collapse a CmpL3/CmpI into a CmpL
912 return new CmpLNode(in(1)->in(1),in(1)->in(2));
913 case Op_CmpUL3: // Collapse a CmpUL3/CmpI into a CmpUL
914 return new CmpULNode(in(1)->in(1),in(1)->in(2));
915 case Op_CmpF3: // Collapse a CmpF3/CmpI into a CmpF
916 return new CmpFNode(in(1)->in(1),in(1)->in(2));
917 case Op_CmpD3: // Collapse a CmpD3/CmpI into a CmpD
918 return new CmpDNode(in(1)->in(1),in(1)->in(2));
919 //case Op_SubI:
920 // If (x - y) cannot overflow, then ((x - y) <?> 0)
921 // can be turned into (x <?> y).
922 // This is handled (with more general cases) by Ideal_sub_algebra.
923 }
924 }
925 return nullptr; // No change
926 }
927
//------------------------------Ideal------------------------------------------
// Idealize a CmpL. Two transformations:
// (1) Simplify the "both operands are null" check emitted by acmp on
//     (possibly scalarized) inline types.
// (2) Narrow a comparison of a sign-extended int against an int-range
//     constant into a CmpI.
Node* CmpLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  // Optimize expressions like
  // CmpL(OrL(CastP2X(..), CastP2X(..)), 0L)
  // that are used by acmp to implement a "both operands are null" check.
  // See also the corresponding code in CmpPNode::Ideal.
  if (can_reshape && in(1)->Opcode() == Op_OrL &&
      in(2)->bottom_type()->is_zero_type()) {
    // Examine both inputs of the OrL.
    for (int i = 1; i <= 2; ++i) {
      Node* orIn = in(1)->in(i);
      if (orIn->Opcode() == Op_CastP2X) {
        Node* castIn = orIn->in(1);
        if (castIn->is_InlineType()) {
          // Replace the CastP2X by the null marker
          InlineTypeNode* vt = castIn->as_InlineType();
          Node* nm = phase->transform(new ConvI2LNode(vt->get_null_marker()));
          phase->is_IterGVN()->replace_input_of(in(1), i, nm);
          return this; // Progress; return self so IGVN revisits this node.
        } else if (!phase->type(castIn)->maybe_null()) {
          // Never null. Replace the CastP2X by constant 1L.
          phase->is_IterGVN()->replace_input_of(in(1), i, phase->longcon(1));
          return this; // Progress; return self so IGVN revisits this node.
        }
      }
    }
  }
  // CmpL(ConvI2L(x), #con) where con fits in an int  =>  CmpI(x, (int)con)
  const TypeLong *t2 = phase->type(in(2))->isa_long();
  if (Opcode() == Op_CmpL && in(1)->Opcode() == Op_ConvI2L && t2 && t2->is_con()) {
    const jlong con = t2->get_con();
    if (con >= min_jint && con <= max_jint) {
      return new CmpINode(in(1)->in(1), phase->intcon((jint)con));
    }
  }
  return nullptr;
}
963
964 //=============================================================================
965 // Simplify a CmpL (compare 2 longs ) node, based on local information.
966 // If both inputs are constants, compare them.
967 const Type *CmpLNode::sub( const Type *t1, const Type *t2 ) const {
968 const TypeLong *r0 = t1->is_long(); // Handy access
969 const TypeLong *r1 = t2->is_long();
970
971 if( r0->_hi < r1->_lo ) // Range is always low?
972 return TypeInt::CC_LT;
973 else if( r0->_lo > r1->_hi ) // Range is always high?
1080 if (MemNode::detect_ptr_independence(in1, alloc1, in2, alloc2, nullptr)) {
1081 return TypeInt::CC_GT; // different pointers
1082 }
1083 }
1084 bool xklass0 = p0 ? p0->klass_is_exact() : k0->klass_is_exact();
1085 bool xklass1 = p1 ? p1->klass_is_exact() : k1->klass_is_exact();
1086 bool unrelated_classes = false;
1087
1088 if ((p0 && p0->is_same_java_type_as(p1)) ||
1089 (k0 && k0->is_same_java_type_as(k1))) {
1090 } else if ((p0 && !p1->maybe_java_subtype_of(p0) && !p0->maybe_java_subtype_of(p1)) ||
1091 (k0 && !k1->maybe_java_subtype_of(k0) && !k0->maybe_java_subtype_of(k1))) {
1092 unrelated_classes = true;
1093 } else if ((p0 && !p1->maybe_java_subtype_of(p0)) ||
1094 (k0 && !k1->maybe_java_subtype_of(k0))) {
1095 unrelated_classes = xklass1;
1096 } else if ((p0 && !p0->maybe_java_subtype_of(p1)) ||
1097 (k0 && !k0->maybe_java_subtype_of(k1))) {
1098 unrelated_classes = xklass0;
1099 }
1100 if (!unrelated_classes) {
1101 // Handle inline type arrays
1102 if ((r0->is_flat_in_array() && r1->is_not_flat_in_array()) ||
1103 (r1->is_flat_in_array() && r0->is_not_flat_in_array())) {
1104 // One type is in flat arrays but the other type is not. Must be unrelated.
1105 unrelated_classes = true;
1106 } else if ((r0->is_not_flat() && r1->is_flat()) ||
1107 (r1->is_not_flat() && r0->is_flat())) {
1108 // One type is a non-flat array and the other type is a flat array. Must be unrelated.
1109 unrelated_classes = true;
1110 } else if ((r0->is_not_null_free() && r1->is_null_free()) ||
1111 (r1->is_not_null_free() && r0->is_null_free())) {
1112 // One type is a nullable array and the other type is a null-free array. Must be unrelated.
1113 unrelated_classes = true;
1114 }
1115 }
1116 if (unrelated_classes) {
1117 // The oops classes are known to be unrelated. If the joined PTRs of
1118 // two oops is not Null and not Bottom, then we are sure that one
1119 // of the two oops is non-null, and the comparison will always fail.
1120 TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
1121 if (jp != TypePtr::Null && jp != TypePtr::BotPTR) {
1122 return TypeInt::CC_GT;
1123 }
1124 }
1125 }
1126
1127 // Known constants can be compared exactly
1128 // Null can be distinguished from any NotNull pointers
1129 // Unknown inputs makes an unknown result
1130 if( r0->singleton() ) {
1131 intptr_t bits0 = r0->get_con();
1132 if( r1->singleton() )
1133 return bits0 == r1->get_con() ? TypeInt::CC_EQ : TypeInt::CC_GT;
1134 return ( r1->_ptr == TypePtr::NotNull && bits0==0 ) ? TypeInt::CC_GT : TypeInt::CC;
1135 } else if( r1->singleton() ) {
1136 intptr_t bits1 = r1->get_con();
1137 return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? TypeInt::CC_GT : TypeInt::CC;
1138 } else
1139 return TypeInt::CC;
1140 }
1141
1142 static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n, bool& might_be_an_array) {
1143 // Return the klass node for (indirect load from OopHandle)
1144 // LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1145 // or null if not matching.
1146 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1147 n = bs->step_over_gc_barrier(n);
1148
1149 if (n->Opcode() != Op_LoadP) return nullptr;
1150
1151 const TypeInstPtr* tp = phase->type(n)->isa_instptr();
1152 if (!tp || tp->instance_klass() != phase->C->env()->Class_klass()) return nullptr;
1153
1154 Node* adr = n->in(MemNode::Address);
1155 // First load from OopHandle: ((OopHandle)mirror)->resolve(); may need barrier.
1156 if (adr->Opcode() != Op_LoadP || !phase->type(adr)->isa_rawptr()) return nullptr;
1157 adr = adr->in(MemNode::Address);
1158
1159 intptr_t off = 0;
1160 Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
1161 if (k == nullptr) return nullptr;
1162 const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr();
1163 if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return nullptr;
1164 might_be_an_array |= tkp->isa_aryklassptr() || tkp->is_instklassptr()->might_be_an_array();
1165
1166 // We've found the klass node of a Java mirror load.
1167 return k;
1168 }
1169
1170 static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n, bool& might_be_an_array) {
1171 // for ConP(Foo.class) return ConP(Foo.klass)
1172 // otherwise return null
1173 if (!n->is_Con()) return nullptr;
1174
1175 const TypeInstPtr* tp = phase->type(n)->isa_instptr();
1176 if (!tp) return nullptr;
1177
1178 ciType* mirror_type = tp->java_mirror_type();
1179 // TypeInstPtr::java_mirror_type() returns non-null for compile-
1180 // time Class constants only.
1181 if (!mirror_type) return nullptr;
1182
1183 // x.getClass() == int.class can never be true (for all primitive types)
1184 // Return a ConP(null) node for this case.
1185 if (mirror_type->is_classless()) {
1186 return phase->makecon(TypePtr::NULL_PTR);
1187 }
1188
1189 // return the ConP(Foo.klass)
1190 ciKlass* mirror_klass = mirror_type->as_klass();
1191
1192 if (mirror_klass->is_array_klass() && !mirror_klass->is_type_array_klass()) {
1193 if (!mirror_klass->can_be_inline_array_klass()) {
1194 // Special case for non-value arrays: They only have one (default) refined class, use it
1195 ciArrayKlass* refined_mirror_klass = ciObjArrayKlass::make(mirror_klass->as_array_klass()->element_klass(), true);
1196 return phase->makecon(TypeAryKlassPtr::make(refined_mirror_klass, Type::trust_interfaces));
1197 }
1198 might_be_an_array |= true;
1199 }
1200
1201 return phase->makecon(TypeKlassPtr::make(mirror_klass, Type::trust_interfaces));
1202 }
1203
1204 //------------------------------Ideal------------------------------------------
1205 // Normalize comparisons between Java mirror loads to compare the klass instead.
1206 //
1207 // Also check for the case of comparing an unknown klass loaded from the primary
1208 // super-type array vs a known klass with no subtypes. This amounts to
1209 // checking to see an unknown klass subtypes a known klass with no subtypes;
1210 // this only happens on an exact match. We can shorten this test by 1 load.
1211 Node* CmpPNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1212 // TODO 8284443 in(1) could be cast?
1213 if (in(1)->is_InlineType() && phase->type(in(2))->is_zero_type()) {
1214 // Null checking a scalarized but nullable inline type. Check the null marker
1215 // input instead of the oop input to avoid keeping buffer allocations alive.
1216 return new CmpINode(in(1)->as_InlineType()->get_null_marker(), phase->intcon(0));
1217 }
1218 if (in(1)->is_InlineType() || in(2)->is_InlineType()) {
1219 // In C2 IR, CmpP on value objects is a pointer comparison, not a value comparison.
1220 // For non-null operands it cannot reliably be true, since their buffer oops are not
1221 // guaranteed to be identical. Therefore, the comparison can only be true when both
1222 // operands are null. Convert expressions like this to a "both operands are null" check:
1223 // CmpL(OrL(CastP2X(..), CastP2X(..)), 0L)
1224 // CmpLNode::Ideal might optimize this further to avoid keeping buffer allocations alive.
1225 Node* input[2];
1226 for (int i = 1; i <= 2; ++i) {
1227 if (in(i)->is_InlineType()) {
1228 input[i-1] = phase->transform(new ConvI2LNode(in(i)->as_InlineType()->get_null_marker()));
1229 } else {
1230 input[i-1] = phase->transform(new CastP2XNode(nullptr, in(i)));
1231 }
1232 }
1233 Node* orL = phase->transform(new OrXNode(input[0], input[1]));
1234 return new CmpXNode(orL, phase->MakeConX(0));
1235 }
1236
1237 // Normalize comparisons between Java mirrors into comparisons of the low-
1238 // level klass, where a dependent load could be shortened.
1239 //
1240 // The new pattern has a nice effect of matching the same pattern used in the
1241 // fast path of instanceof/checkcast/Class.isInstance(), which allows
1242 // redundant exact type check be optimized away by GVN.
1243 // For example, in
1244 // if (x.getClass() == Foo.class) {
1245 // Foo foo = (Foo) x;
1246 // // ... use a ...
1247 // }
1248 // a CmpPNode could be shared between if_acmpne and checkcast
1249 {
1250 bool might_be_an_array1 = false;
1251 bool might_be_an_array2 = false;
1252 Node* k1 = isa_java_mirror_load(phase, in(1), might_be_an_array1);
1253 Node* k2 = isa_java_mirror_load(phase, in(2), might_be_an_array2);
1254 Node* conk2 = isa_const_java_mirror(phase, in(2), might_be_an_array2);
1255 if (might_be_an_array1 && might_be_an_array2) {
1256 // Don't optimize if both sides might be an array because arrays with
1257 // the same Java mirror can have different refined array klasses.
1258 k1 = k2 = nullptr;
1259 }
1260
1261 if (k1 && (k2 || conk2)) {
1262 Node* lhs = k1;
1263 Node* rhs = (k2 != nullptr) ? k2 : conk2;
1264 set_req_X(1, lhs, phase);
1265 set_req_X(2, rhs, phase);
1266 return this;
1267 }
1268 }
1269
1270 // Constant pointer on right?
1271 const TypeKlassPtr* t2 = phase->type(in(2))->isa_klassptr();
1272 if (t2 == nullptr || !t2->klass_is_exact())
1273 return nullptr;
1274 // Get the constant klass we are comparing to.
1275 ciKlass* superklass = t2->exact_klass();
1276
1277 // Now check for LoadKlass on left.
1278 Node* ldk1 = in(1);
1279 if (ldk1->is_DecodeNKlass()) {
1318 //
1319 // We could be more liberal here, and allow the optimization on interfaces
1320 // which have a single implementor. This would require us to increase the
1321 // expressiveness of the add_dependency() mechanism.
1322 // %%% Do this after we fix TypeOopPtr: Deps are expressive enough now.
1323
1324 // Object arrays must have their base element have no subtypes
1325 while (superklass->is_obj_array_klass()) {
1326 ciType* elem = superklass->as_obj_array_klass()->element_type();
1327 superklass = elem->as_klass();
1328 }
1329 if (superklass->is_instance_klass()) {
1330 ciInstanceKlass* ik = superklass->as_instance_klass();
1331 if (ik->has_subklass() || ik->is_interface()) return nullptr;
1332 // Add a dependency if there is a chance that a subclass will be added later.
1333 if (!ik->is_final()) {
1334 phase->C->dependencies()->assert_leaf_type(ik);
1335 }
1336 }
1337
1338 // Do not fold the subtype check to an array klass pointer comparison for
1339 // value class arrays because they can have multiple refined array klasses.
1340 superklass = t2->exact_klass();
1341 assert(!superklass->is_flat_array_klass(), "Unexpected flat array klass");
1342 if (superklass->is_obj_array_klass()) {
1343 if (superklass->as_array_klass()->element_klass()->is_inlinetype() && !superklass->as_array_klass()->is_refined()) {
1344 return nullptr;
1345 } else {
1346 // Special case for non-value arrays: They only have one (default) refined class, use it
1347 set_req_X(2, phase->makecon(t2->is_aryklassptr()->cast_to_refined_array_klass_ptr()), phase);
1348 }
1349 }
1350
1351 // Bypass the dependent load, and compare directly
1352 this->set_req_X(1, ldk2, phase);
1353
1354 return this;
1355 }
1356
1357 //=============================================================================
1358 //------------------------------sub--------------------------------------------
1359 // Simplify an CmpN (compare 2 pointers) node, based on local information.
1360 // If both inputs are constants, compare them.
1361 const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
1362 ShouldNotReachHere();
1363 return bottom_type();
1364 }
1365
1366 //------------------------------Ideal------------------------------------------
1367 Node *CmpNNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
1368 return nullptr;
1369 }
1370
1451 if( t2_value_as_double == (double)t2_value_as_float ) {
1452 // Test value can be represented as a float
1453 // Eliminate the conversion to double and create new comparison
1454 Node *new_in1 = in(idx_f2d)->in(1);
1455 Node *new_in2 = phase->makecon( TypeF::make(t2_value_as_float) );
1456 if( idx_f2d != 1 ) { // Must flip args to match original order
1457 Node *tmp = new_in1;
1458 new_in1 = new_in2;
1459 new_in2 = tmp;
1460 }
1461 CmpFNode *new_cmp = (Opcode() == Op_CmpD3)
1462 ? new CmpF3Node( new_in1, new_in2 )
1463 : new CmpFNode ( new_in1, new_in2 ) ;
1464 return new_cmp; // Changed to CmpFNode
1465 }
1466 // Testing value required the precision of a double
1467 }
1468 return nullptr; // No change
1469 }
1470
1471 //=============================================================================
1472 //------------------------------Value------------------------------------------
1473 const Type* FlatArrayCheckNode::Value(PhaseGVN* phase) const {
1474 bool all_not_flat = true;
1475 for (uint i = ArrayOrKlass; i < req(); ++i) {
1476 const Type* t = phase->type(in(i));
1477 if (t == Type::TOP) {
1478 return Type::TOP;
1479 }
1480 if (t->is_ptr()->is_flat()) {
1481 // One of the input arrays is flat, check always passes
1482 return TypeInt::CC_EQ;
1483 } else if (!t->is_ptr()->is_not_flat()) {
1484 // One of the input arrays might be flat
1485 all_not_flat = false;
1486 }
1487 }
1488 if (all_not_flat) {
1489 // None of the input arrays can be flat, check always fails
1490 return TypeInt::CC_GT;
1491 }
1492 return TypeInt::CC;
1493 }
1494
1495 //------------------------------Ideal------------------------------------------
1496 Node* FlatArrayCheckNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1497 bool changed = false;
1498 // Remove inputs that are known to be non-flat
1499 for (uint i = ArrayOrKlass; i < req(); ++i) {
1500 const Type* t = phase->type(in(i));
1501 if (t->isa_ptr() && t->is_ptr()->is_not_flat()) {
1502 del_req(i--);
1503 changed = true;
1504 }
1505 }
1506 return changed ? this : nullptr;
1507 }
1508
1509 //=============================================================================
1510 //------------------------------cc2logical-------------------------------------
1511 // Convert a condition code type to a logical type
1512 const Type *BoolTest::cc2logical( const Type *CC ) const {
1513 if( CC == Type::TOP ) return Type::TOP;
1514 if( CC->base() != Type::Int ) return TypeInt::BOOL; // Bottom or worse
1515 const TypeInt *ti = CC->is_int();
1516 if( ti->is_con() ) { // Only 1 kind of condition codes set?
1517 // Match low order 2 bits
1518 int tmp = ((ti->get_con()&3) == (_test&3)) ? 1 : 0;
1519 if( _test & 4 ) tmp = 1-tmp; // Optionally complement result
1520 return TypeInt::make(tmp); // Boolean result
1521 }
1522
1523 if( CC == TypeInt::CC_GE ) {
1524 if( _test == ge ) return TypeInt::ONE;
1525 if( _test == lt ) return TypeInt::ZERO;
1526 }
1527 if( CC == TypeInt::CC_LE ) {
|