src/hotspot/share/opto/subnode.cpp

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "compiler/compileLog.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/cfgnode.hpp"

  32 #include "opto/loopnode.hpp"
  33 #include "opto/matcher.hpp"
  34 #include "opto/movenode.hpp"
  35 #include "opto/mulnode.hpp"
  36 #include "opto/opaquenode.hpp"
  37 #include "opto/opcodes.hpp"
  38 #include "opto/phaseX.hpp"
  39 #include "opto/subnode.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "utilities/reverse_bits.hpp"
  42 
  43 // Portions of code courtesy of Clifford Click
  44 
  45 // Optimization - Graph Style
  46 
  47 #include "math.h"
  48 
  49 //=============================================================================
  50 //------------------------------Identity---------------------------------------
  51 // If right input is a constant 0, return the left input.

 903     switch (in(1)->Opcode()) {
 904     case Op_CmpU3:              // Collapse a CmpU3/CmpI into a CmpU
 905       return new CmpUNode(in(1)->in(1),in(1)->in(2));
 906     case Op_CmpL3:              // Collapse a CmpL3/CmpI into a CmpL
 907       return new CmpLNode(in(1)->in(1),in(1)->in(2));
 908     case Op_CmpUL3:             // Collapse a CmpUL3/CmpI into a CmpUL
 909       return new CmpULNode(in(1)->in(1),in(1)->in(2));
 910     case Op_CmpF3:              // Collapse a CmpF3/CmpI into a CmpF
 911       return new CmpFNode(in(1)->in(1),in(1)->in(2));
 912     case Op_CmpD3:              // Collapse a CmpD3/CmpI into a CmpD
 913       return new CmpDNode(in(1)->in(1),in(1)->in(2));
 914     //case Op_SubI:
 915       // If (x - y) cannot overflow, then ((x - y) <?> 0)
 916       // can be turned into (x <?> y).
 917       // This is handled (with more general cases) by Ideal_sub_algebra.
 918     }
 919   }
 920   return nullptr;                  // No change
 921 }
 922 
 923 Node *CmpLNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
 924   const TypeLong *t2 = phase->type(in(2))->isa_long();
 925   if (Opcode() == Op_CmpL && in(1)->Opcode() == Op_ConvI2L && t2 && t2->is_con()) {
 926     const jlong con = t2->get_con();
 927     if (con >= min_jint && con <= max_jint) {
 928       return new CmpINode(in(1)->in(1), phase->intcon((jint)con));
 929     }
 930   }
 931   return nullptr;
 932 }
 933
 934 //=============================================================================
 935 // Simplify a CmpL (compare 2 longs ) node, based on local information.
 936 // If both inputs are constants, compare them.
 937 const Type *CmpLNode::sub( const Type *t1, const Type *t2 ) const {
 938   const TypeLong *r0 = t1->is_long(); // Handy access
 939   const TypeLong *r1 = t2->is_long();
 940 
 941   if( r0->_hi < r1->_lo )       // Range is always low?
 942     return TypeInt::CC_LT;
 943   else if( r0->_lo > r1->_hi )  // Range is always high?
 944     return TypeInt::CC_GT;
 945 
 946   else if( r0->is_con() && r1->is_con() ) { // comparing constants?
 947     assert(r0->get_con() == r1->get_con(), "must be equal");
 948     return TypeInt::CC_EQ;      // Equal results.
 949   } else if( r0->_hi == r1->_lo ) // Range is never high?
 950     return TypeInt::CC_LE;
 951   else if( r0->_lo == r1->_hi ) // Range is never low?
 952     return TypeInt::CC_GE;
 953 

1050       if (MemNode::detect_ptr_independence(in1, alloc1, in2, alloc2, nullptr)) {
1051         return TypeInt::CC_GT;  // different pointers
1052       }
1053     }
1054     bool    xklass0 = p0 ? p0->klass_is_exact() : k0->klass_is_exact();
1055     bool    xklass1 = p1 ? p1->klass_is_exact() : k1->klass_is_exact();
1056     bool unrelated_classes = false;
1057 
1058     if ((p0 && p0->is_same_java_type_as(p1)) ||
1059         (k0 && k0->is_same_java_type_as(k1))) {
1060     } else if ((p0 && !p1->maybe_java_subtype_of(p0) && !p0->maybe_java_subtype_of(p1)) ||
1061                (k0 && !k1->maybe_java_subtype_of(k0) && !k0->maybe_java_subtype_of(k1))) {
1062       unrelated_classes = true;
1063     } else if ((p0 && !p1->maybe_java_subtype_of(p0)) ||
1064                (k0 && !k1->maybe_java_subtype_of(k0))) {
1065       unrelated_classes = xklass1;
1066     } else if ((p0 && !p0->maybe_java_subtype_of(p1)) ||
1067                (k0 && !k0->maybe_java_subtype_of(k1))) {
1068       unrelated_classes = xklass0;
1069     }
1070
1071     if (unrelated_classes) {
1072       // The oops' classes are known to be unrelated. If the join of the
1073       // two oops' PTRs is neither Null nor Bottom, then we are sure that one
1074       // of the two oops is non-null, and the comparison will always fail.
1075       TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
1076       if (jp != TypePtr::Null && jp != TypePtr::BotPTR) {
1077         return TypeInt::CC_GT;
1078       }
1079     }
1080   }
1081 
1082   // Known constants can be compared exactly
1083   // Null can be distinguished from any NotNull pointers
1084   // Unknown inputs make an unknown result
1085   if( r0->singleton() ) {
1086     intptr_t bits0 = r0->get_con();
1087     if( r1->singleton() )
1088       return bits0 == r1->get_con() ? TypeInt::CC_EQ : TypeInt::CC_GT;
1089     return ( r1->_ptr == TypePtr::NotNull && bits0==0 ) ? TypeInt::CC_GT : TypeInt::CC;
1090   } else if( r1->singleton() ) {
1091     intptr_t bits1 = r1->get_con();
1092     return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? TypeInt::CC_GT : TypeInt::CC;
1093   } else
1094     return TypeInt::CC;
1095 }
1096 
1097 static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) {
1098   // Return the klass node for (indirect load from OopHandle)
1099   //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1100   //   or null if not matching.
1101   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1102   n = bs->step_over_gc_barrier(n);
1103 
1104   if (n->Opcode() != Op_LoadP) return nullptr;
1105 
1106   const TypeInstPtr* tp = phase->type(n)->isa_instptr();
1107   if (!tp || tp->instance_klass() != phase->C->env()->Class_klass()) return nullptr;
1108 
1109   Node* adr = n->in(MemNode::Address);
1110   // First load from OopHandle: ((OopHandle)mirror)->resolve(); may need barrier.
1111   if (adr->Opcode() != Op_LoadP || !phase->type(adr)->isa_rawptr()) return nullptr;
1112   adr = adr->in(MemNode::Address);
1113 
1114   intptr_t off = 0;
1115   Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
1116   if (k == nullptr)  return nullptr;
1117   const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr();
1118   if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return nullptr;

1119 
1120   // We've found the klass node of a Java mirror load.
1121   return k;
1122 }
1123 
1124 static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n) {
1125   // for ConP(Foo.class) return ConP(Foo.klass)
1126   // otherwise return null
1127   if (!n->is_Con()) return nullptr;
1128 
1129   const TypeInstPtr* tp = phase->type(n)->isa_instptr();
1130   if (!tp) return nullptr;
1131 
1132   ciType* mirror_type = tp->java_mirror_type();
1133   // TypeInstPtr::java_mirror_type() returns non-null for compile-
1134   // time Class constants only.
1135   if (!mirror_type) return nullptr;
1136 
1137   // x.getClass() == int.class can never be true (for all primitive types)
1138   // Return a ConP(null) node for this case.
1139   if (mirror_type->is_classless()) {
1140     return phase->makecon(TypePtr::NULL_PTR);
1141   }
1142 
1143   // return the ConP(Foo.klass)
1144   assert(mirror_type->is_klass(), "mirror_type should represent a Klass*");
1145   return phase->makecon(TypeKlassPtr::make(mirror_type->as_klass(), Type::trust_interfaces));
1146 }
1147 
1148 //------------------------------Ideal------------------------------------------
1149 // Normalize comparisons between Java mirror loads to compare the klass instead.
1150 //
1151 // Also check for the case of comparing an unknown klass loaded from the primary
1152 // super-type array vs a known klass with no subtypes.  This amounts to
1153 // checking to see if an unknown klass subtypes a known klass with no subtypes;
1154 // this only happens on an exact match.  We can shorten this test by 1 load.
1155 Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
1156   // Normalize comparisons between Java mirrors into comparisons of the low-
1157   // level klass, where a dependent load could be shortened.
1158   //
1159   // The new pattern has a nice effect of matching the same pattern used in the
1160   // fast path of instanceof/checkcast/Class.isInstance(), which allows
1161   // a redundant exact type check to be optimized away by GVN.
1162   // For example, in
1163   //   if (x.getClass() == Foo.class) {
1164   //     Foo foo = (Foo) x;
1165   //     // ... use foo ...
1166   //   }
1167   // a CmpPNode could be shared between if_acmpne and checkcast
1168   {
1169     Node* k1 = isa_java_mirror_load(phase, in(1));
1170     Node* k2 = isa_java_mirror_load(phase, in(2));
1171     Node* conk2 = isa_const_java_mirror(phase, in(2));
1172 
1173     if (k1 && (k2 || conk2)) {
1174       Node* lhs = k1;
1175       Node* rhs = (k2 != nullptr) ? k2 : conk2;
1176       set_req_X(1, lhs, phase);
1177       set_req_X(2, rhs, phase);
1178       return this;
1179     }
1180   }
1181 
1182   // Constant pointer on right?
1183   const TypeKlassPtr* t2 = phase->type(in(2))->isa_klassptr();
1184   if (t2 == nullptr || !t2->klass_is_exact())
1185     return nullptr;
1186   // Get the constant klass we are comparing to.
1187   ciKlass* superklass = t2->exact_klass();
1188 
1189   // Now check for LoadKlass on left.
1190   Node* ldk1 = in(1);
1191   if (ldk1->is_DecodeNKlass()) {

1230   //
1231   // We could be more liberal here, and allow the optimization on interfaces
1232   // which have a single implementor.  This would require us to increase the
1233   // expressiveness of the add_dependency() mechanism.
1234   // %%% Do this after we fix TypeOopPtr:  Deps are expressive enough now.
1235 
1236   // Object arrays must have their base element have no subtypes
1237   while (superklass->is_obj_array_klass()) {
1238     ciType* elem = superklass->as_obj_array_klass()->element_type();
1239     superklass = elem->as_klass();
1240   }
1241   if (superklass->is_instance_klass()) {
1242     ciInstanceKlass* ik = superklass->as_instance_klass();
1243     if (ik->has_subklass() || ik->is_interface())  return nullptr;
1244     // Add a dependency if there is a chance that a subclass will be added later.
1245     if (!ik->is_final()) {
1246       phase->C->dependencies()->assert_leaf_type(ik);
1247     }
1248   }
1249
1250   // Bypass the dependent load, and compare directly
1251   this->set_req_X(1, ldk2, phase);
1252 
1253   return this;
1254 }
1255 
1256 //=============================================================================
1257 //------------------------------sub--------------------------------------------
1258 // Simplify a CmpN (compare 2 pointers) node, based on local information.
1259 // If both inputs are constants, compare them.
1260 const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
1261   ShouldNotReachHere();
1262   return bottom_type();
1263 }
1264 
1265 //------------------------------Ideal------------------------------------------
1266 Node *CmpNNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
1267   return nullptr;
1268 }
1269 

1350     if( t2_value_as_double == (double)t2_value_as_float ) {
1351       // Test value can be represented as a float
1352       // Eliminate the conversion to double and create new comparison
1353       Node *new_in1 = in(idx_f2d)->in(1);
1354       Node *new_in2 = phase->makecon( TypeF::make(t2_value_as_float) );
1355       if( idx_f2d != 1 ) {      // Must flip args to match original order
1356         Node *tmp = new_in1;
1357         new_in1 = new_in2;
1358         new_in2 = tmp;
1359       }
1360       CmpFNode *new_cmp = (Opcode() == Op_CmpD3)
1361         ? new CmpF3Node( new_in1, new_in2 )
1362         : new CmpFNode ( new_in1, new_in2 ) ;
1363       return new_cmp;           // Changed to CmpFNode
1364     }
1365     // Testing value required the precision of a double
1366   }
1367   return nullptr;                  // No change
1368 }
1369
1370 
1371 //=============================================================================
1372 //------------------------------cc2logical-------------------------------------
1373 // Convert a condition code type to a logical type
1374 const Type *BoolTest::cc2logical( const Type *CC ) const {
1375   if( CC == Type::TOP ) return Type::TOP;
1376   if( CC->base() != Type::Int ) return TypeInt::BOOL; // Bottom or worse
1377   const TypeInt *ti = CC->is_int();
1378   if( ti->is_con() ) {          // Only 1 kind of condition codes set?
1379     // Match low order 2 bits
1380     int tmp = ((ti->get_con()&3) == (_test&3)) ? 1 : 0;
1381     if( _test & 4 ) tmp = 1-tmp;     // Optionally complement result
1382     return TypeInt::make(tmp);       // Boolean result
1383   }
1384 
1385   if( CC == TypeInt::CC_GE ) {
1386     if( _test == ge ) return TypeInt::ONE;
1387     if( _test == lt ) return TypeInt::ZERO;
1388   }
1389   if( CC == TypeInt::CC_LE ) {

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "compiler/compileLog.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/cfgnode.hpp"
  32 #include "opto/inlinetypenode.hpp"
  33 #include "opto/loopnode.hpp"
  34 #include "opto/matcher.hpp"
  35 #include "opto/movenode.hpp"
  36 #include "opto/mulnode.hpp"
  37 #include "opto/opaquenode.hpp"
  38 #include "opto/opcodes.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/subnode.hpp"
  41 #include "runtime/sharedRuntime.hpp"
  42 #include "utilities/reverse_bits.hpp"
  43 
  44 // Portions of code courtesy of Clifford Click
  45 
  46 // Optimization - Graph Style
  47 
  48 #include "math.h"
  49 
  50 //=============================================================================
  51 //------------------------------Identity---------------------------------------
  52 // If right input is a constant 0, return the left input.

 904     switch (in(1)->Opcode()) {
 905     case Op_CmpU3:              // Collapse a CmpU3/CmpI into a CmpU
 906       return new CmpUNode(in(1)->in(1),in(1)->in(2));
 907     case Op_CmpL3:              // Collapse a CmpL3/CmpI into a CmpL
 908       return new CmpLNode(in(1)->in(1),in(1)->in(2));
 909     case Op_CmpUL3:             // Collapse a CmpUL3/CmpI into a CmpUL
 910       return new CmpULNode(in(1)->in(1),in(1)->in(2));
 911     case Op_CmpF3:              // Collapse a CmpF3/CmpI into a CmpF
 912       return new CmpFNode(in(1)->in(1),in(1)->in(2));
 913     case Op_CmpD3:              // Collapse a CmpD3/CmpI into a CmpD
 914       return new CmpDNode(in(1)->in(1),in(1)->in(2));
 915     //case Op_SubI:
 916       // If (x - y) cannot overflow, then ((x - y) <?> 0)
 917       // can be turned into (x <?> y).
 918       // This is handled (with more general cases) by Ideal_sub_algebra.
 919     }
 920   }
 921   return nullptr;                  // No change
 922 }
 923 
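The switch above collapses a three-way compare (CmpL3, CmpF3, ...) feeding an int compare into the direct two-operand compare. The underlying fact is that testing a three-way compare result against zero answers the same question as comparing the operands directly. A minimal standalone sketch, not part of the webrev (three_way_cmp is an illustrative stand-in):

    #include <cassert>

    // Three-way compare returning -1, 0 or 1, mirroring what CmpL3 produces.
    static int three_way_cmp(long long x, long long y) {
      return (x < y) ? -1 : (x > y) ? 1 : 0;
    }

    int main() {
      long long samples[] = { -5, 0, 7, 1LL << 40 };
      for (long long x : samples) {
        for (long long y : samples) {
          // (three_way_cmp(x, y) <?> 0) agrees with (x <?> y), which is why
          // CmpI(CmpL3(x, y), 0) can be collapsed to CmpL(x, y).
          assert((three_way_cmp(x, y) <  0) == (x <  y));
          assert((three_way_cmp(x, y) == 0) == (x == y));
          assert((three_way_cmp(x, y) >  0) == (x >  y));
        }
      }
      return 0;
    }
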
 924 //------------------------------Ideal------------------------------------------
 925 Node* CmpLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
 926   Node* a = nullptr;
 927   Node* b = nullptr;
 928   if (is_double_null_check(phase, a, b) && (phase->type(a)->is_zero_type() || phase->type(b)->is_zero_type())) {
 929     // Degraded to a simple null check, use old acmp
 930     return new CmpPNode(a, b);
 931   }
 932   const TypeLong *t2 = phase->type(in(2))->isa_long();
 933   if (Opcode() == Op_CmpL && in(1)->Opcode() == Op_ConvI2L && t2 && t2->is_con()) {
 934     const jlong con = t2->get_con();
 935     if (con >= min_jint && con <= max_jint) {
 936       return new CmpINode(in(1)->in(1), phase->intcon((jint)con));
 937     }
 938   }
 939   return nullptr;
 940 }
 941 
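The ConvI2L narrowing in CmpLNode::Ideal above is sound because sign extension preserves the int's value, so comparing the widened value against a constant that itself fits in the int range gives the same answer as the 32-bit compare. A small standalone sketch of that argument (not part of the patch):

    #include <cassert>
    #include <climits>

    int main() {
      const long long con = -42;                  // must lie in [min_jint, max_jint]
      int xs[] = { INT_MIN, -43, -42, -41, 0, INT_MAX };
      for (int x : xs) {
        long long widened = (long long) x;        // ConvI2L
        // CmpL(ConvI2L(x), con) agrees with CmpI(x, (int) con)
        assert((widened <  con) == (x <  (int) con));
        assert((widened == con) == (x == (int) con));
        assert((widened >  con) == (x >  (int) con));
      }
      return 0;
    }
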
 942 // Match double null check emitted by Compile::optimize_acmp()
 943 bool CmpLNode::is_double_null_check(PhaseGVN* phase, Node*& a, Node*& b) const {
 944   if (in(1)->Opcode() == Op_OrL &&
 945       in(1)->in(1)->Opcode() == Op_CastP2X &&
 946       in(1)->in(2)->Opcode() == Op_CastP2X &&
 947       in(2)->bottom_type()->is_zero_type()) {
 948     assert(EnableValhalla, "unexpected double null check");
 949     a = in(1)->in(1)->in(1);
 950     b = in(1)->in(2)->in(1);
 951     return true;
 952   }
 953   return false;
 954 }
 955 
 956 //------------------------------Value------------------------------------------
 957 const Type* CmpLNode::Value(PhaseGVN* phase) const {
 958   Node* a = nullptr;
 959   Node* b = nullptr;
 960   if (is_double_null_check(phase, a, b) && (!phase->type(a)->maybe_null() || !phase->type(b)->maybe_null())) {
 961     // One operand is never nullptr, emit constant false
 962     return TypeInt::CC_GT;
 963   }
 964   return SubNode::Value(phase);
 965 }
 966 
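For intuition, the double null check matched by is_double_null_check() tests whether both references are null by OR-ing their raw bits (CastP2X): the OR is zero exactly when both pointers are null. That is why Value() folds to a constant "not equal" (CC_GT) when either operand is known non-null, and why Ideal() degrades it to a plain null check when one operand is the constant null. A rough sketch with ordinary pointers standing in for oops (illustrative only):

    #include <cassert>
    #include <cstdint>

    static bool both_null(const void* a, const void* b) {
      // Mirrors CmpL(OrL(CastP2X(a), CastP2X(b)), 0): the OR of the two bit
      // patterns is zero iff both pointers are null.
      return ((uintptr_t) a | (uintptr_t) b) == 0;
    }

    int main() {
      int x = 0;
      assert( both_null(nullptr, nullptr));
      assert(!both_null(&x, nullptr));   // one side known non-null => always false
      assert(!both_null(nullptr, &x));
      assert(!both_null(&x, &x));
      return 0;
    }
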
 967 //=============================================================================
 968 // Simplify a CmpL (compare 2 longs ) node, based on local information.
 969 // If both inputs are constants, compare them.
 970 const Type *CmpLNode::sub( const Type *t1, const Type *t2 ) const {
 971   const TypeLong *r0 = t1->is_long(); // Handy access
 972   const TypeLong *r1 = t2->is_long();
 973 
 974   if( r0->_hi < r1->_lo )       // Range is always low?
 975     return TypeInt::CC_LT;
 976   else if( r0->_lo > r1->_hi )  // Range is always high?
 977     return TypeInt::CC_GT;
 978 
 979   else if( r0->is_con() && r1->is_con() ) { // comparing constants?
 980     assert(r0->get_con() == r1->get_con(), "must be equal");
 981     return TypeInt::CC_EQ;      // Equal results.
 982   } else if( r0->_hi == r1->_lo ) // Range is never high?
 983     return TypeInt::CC_LE;
 984   else if( r0->_lo == r1->_hi ) // Range is never low?
 985     return TypeInt::CC_GE;
 986 
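The range reasoning above can be restated on plain intervals: disjoint ranges fix the outcome, and ranges that only touch at one endpoint rule out one outcome. A standalone sketch with illustrative names (LongRange and CmpResult are not HotSpot types):

    #include <cassert>

    struct LongRange { long long lo, hi; };       // stand-in for TypeLong [lo, hi]

    enum CmpResult { LT, GT, LE, GE, EQ, UNKNOWN };

    static CmpResult cmp_ranges(LongRange r0, LongRange r1) {
      if (r0.hi < r1.lo) return LT;               // range is always low
      if (r0.lo > r1.hi) return GT;               // range is always high
      // Two constants reaching this point must be equal: unequal constants
      // were already caught by the disjoint-range checks above.
      if (r0.lo == r0.hi && r1.lo == r1.hi) return EQ;
      if (r0.hi == r1.lo) return LE;              // never high
      if (r0.lo == r1.hi) return GE;              // never low
      return UNKNOWN;
    }

    int main() {
      assert(cmp_ranges({0, 5}, {6, 9}) == LT);
      assert(cmp_ranges({7, 9}, {0, 5}) == GT);
      assert(cmp_ranges({0, 5}, {5, 9}) == LE);   // may be equal, never greater
      assert(cmp_ranges({5, 9}, {0, 5}) == GE);   // may be equal, never less
      assert(cmp_ranges({3, 3}, {3, 3}) == EQ);
      assert(cmp_ranges({0, 9}, {4, 5}) == UNKNOWN);
      return 0;
    }
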

1083       if (MemNode::detect_ptr_independence(in1, alloc1, in2, alloc2, nullptr)) {
1084         return TypeInt::CC_GT;  // different pointers
1085       }
1086     }
1087     bool    xklass0 = p0 ? p0->klass_is_exact() : k0->klass_is_exact();
1088     bool    xklass1 = p1 ? p1->klass_is_exact() : k1->klass_is_exact();
1089     bool unrelated_classes = false;
1090 
1091     if ((p0 && p0->is_same_java_type_as(p1)) ||
1092         (k0 && k0->is_same_java_type_as(k1))) {
1093     } else if ((p0 && !p1->maybe_java_subtype_of(p0) && !p0->maybe_java_subtype_of(p1)) ||
1094                (k0 && !k1->maybe_java_subtype_of(k0) && !k0->maybe_java_subtype_of(k1))) {
1095       unrelated_classes = true;
1096     } else if ((p0 && !p1->maybe_java_subtype_of(p0)) ||
1097                (k0 && !k1->maybe_java_subtype_of(k0))) {
1098       unrelated_classes = xklass1;
1099     } else if ((p0 && !p0->maybe_java_subtype_of(p1)) ||
1100                (k0 && !k0->maybe_java_subtype_of(k1))) {
1101       unrelated_classes = xklass0;
1102     }
1103     if (!unrelated_classes) {
1104       // Handle inline type arrays
1105       if ((r0->flat_in_array() && r1->not_flat_in_array()) ||
1106           (r1->flat_in_array() && r0->not_flat_in_array())) {
1107         // One type is in flat arrays but the other type is not. Must be unrelated.
1108         unrelated_classes = true;
1109       } else if ((r0->is_not_flat() && r1->is_flat()) ||
1110                  (r1->is_not_flat() && r0->is_flat())) {
1111         // One type is a non-flat array and the other type is a flat array. Must be unrelated.
1112         unrelated_classes = true;
1113       } else if ((r0->is_not_null_free() && r1->is_null_free()) ||
1114                  (r1->is_not_null_free() && r0->is_null_free())) {
1115         // One type is a nullable array and the other type is a null-free array. Must be unrelated.
1116         unrelated_classes = true;
1117       }
1118     }
1119     if (unrelated_classes) {
1120       // The oops' classes are known to be unrelated. If the join of the
1121       // two oops' PTRs is neither Null nor Bottom, then we are sure that one
1122       // of the two oops is non-null, and the comparison will always fail.
1123       TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
1124       if (jp != TypePtr::Null && jp != TypePtr::BotPTR) {
1125         return TypeInt::CC_GT;
1126       }
1127     }
1128   }
1129 
1130   // Known constants can be compared exactly
1131   // Null can be distinguished from any NotNull pointers
1132   // Unknown inputs make an unknown result
1133   if( r0->singleton() ) {
1134     intptr_t bits0 = r0->get_con();
1135     if( r1->singleton() )
1136       return bits0 == r1->get_con() ? TypeInt::CC_EQ : TypeInt::CC_GT;
1137     return ( r1->_ptr == TypePtr::NotNull && bits0==0 ) ? TypeInt::CC_GT : TypeInt::CC;
1138   } else if( r1->singleton() ) {
1139     intptr_t bits1 = r1->get_con();
1140     return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? TypeInt::CC_GT : TypeInt::CC;
1141   } else
1142     return TypeInt::CC;
1143 }
1144 
1145 static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n, bool& might_be_an_array) {
1146   // Return the klass node for (indirect load from OopHandle)
1147   //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1148   //   or null if not matching.
1149   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1150   n = bs->step_over_gc_barrier(n);
1151 
1152   if (n->Opcode() != Op_LoadP) return nullptr;
1153 
1154   const TypeInstPtr* tp = phase->type(n)->isa_instptr();
1155   if (!tp || tp->instance_klass() != phase->C->env()->Class_klass()) return nullptr;
1156 
1157   Node* adr = n->in(MemNode::Address);
1158   // First load from OopHandle: ((OopHandle)mirror)->resolve(); may need barrier.
1159   if (adr->Opcode() != Op_LoadP || !phase->type(adr)->isa_rawptr()) return nullptr;
1160   adr = adr->in(MemNode::Address);
1161 
1162   intptr_t off = 0;
1163   Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
1164   if (k == nullptr)  return nullptr;
1165   const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr();
1166   if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return nullptr;
1167   might_be_an_array |= tkp->isa_aryklassptr() || tkp->is_instklassptr()->might_be_an_array();
1168 
1169   // We've found the klass node of a Java mirror load.
1170   return k;
1171 }
1172 
1173 static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n, bool& might_be_an_array) {
1174   // for ConP(Foo.class) return ConP(Foo.klass)
1175   // otherwise return null
1176   if (!n->is_Con()) return nullptr;
1177 
1178   const TypeInstPtr* tp = phase->type(n)->isa_instptr();
1179   if (!tp) return nullptr;
1180 
1181   ciType* mirror_type = tp->java_mirror_type();
1182   // TypeInstPtr::java_mirror_type() returns non-null for compile-
1183   // time Class constants only.
1184   if (!mirror_type) return nullptr;
1185 
1186   // x.getClass() == int.class can never be true (for all primitive types)
1187   // Return a ConP(null) node for this case.
1188   if (mirror_type->is_classless()) {
1189     return phase->makecon(TypePtr::NULL_PTR);
1190   }
1191 
1192   // return the ConP(Foo.klass)
1193   ciKlass* mirror_klass = mirror_type->as_klass();
1194 
1195   if (mirror_klass->is_array_klass()) {
1196     if (!mirror_klass->can_be_inline_array_klass()) {
1197       // Special case for non-value arrays: They only have one (default) refined class, use it
1198       return phase->makecon(TypeAryKlassPtr::make(mirror_klass, Type::trust_interfaces, true));
1199     }
1200     might_be_an_array |= true;
1201   }
1202 
1203   return phase->makecon(TypeKlassPtr::make(mirror_klass, Type::trust_interfaces));
1204 }
1205 
1206 //------------------------------Ideal------------------------------------------
1207 // Normalize comparisons between Java mirror loads to compare the klass instead.
1208 //
1209 // Also check for the case of comparing an unknown klass loaded from the primary
1210 // super-type array vs a known klass with no subtypes.  This amounts to
1211 // checking to see if an unknown klass subtypes a known klass with no subtypes;
1212 // this only happens on an exact match.  We can shorten this test by 1 load.
1213 Node* CmpPNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1214   // TODO 8284443 in(1) could be cast?
1215   if (in(1)->is_InlineType() && phase->type(in(2))->is_zero_type()) {
1216     // Null checking a scalarized but nullable inline type. Check the null marker
1217     // input instead of the oop input to avoid keeping buffer allocations alive.
1218     return new CmpINode(in(1)->as_InlineType()->get_null_marker(), phase->intcon(0));
1219   }
1220 
1221   // Normalize comparisons between Java mirrors into comparisons of the low-
1222   // level klass, where a dependent load could be shortened.
1223   //
1224   // The new pattern has a nice effect of matching the same pattern used in the
1225   // fast path of instanceof/checkcast/Class.isInstance(), which allows
1226   // a redundant exact type check to be optimized away by GVN.
1227   // For example, in
1228   //   if (x.getClass() == Foo.class) {
1229   //     Foo foo = (Foo) x;
1230   //     // ... use foo ...
1231   //   }
1232   // a CmpPNode could be shared between if_acmpne and checkcast
1233   {
1234     bool might_be_an_array1 = false;
1235     bool might_be_an_array2 = false;
1236     Node* k1 = isa_java_mirror_load(phase, in(1), might_be_an_array1);
1237     Node* k2 = isa_java_mirror_load(phase, in(2), might_be_an_array2);
1238     Node* conk2 = isa_const_java_mirror(phase, in(2), might_be_an_array2);
1239     if (might_be_an_array1 && might_be_an_array2) {
1240       // Don't optimize if both sides might be an array because arrays with
1241       // the same Java mirror can have different refined array klasses.
1242       k1 = k2 = nullptr;
1243     }
1244 
1245     if (k1 && (k2 || conk2)) {
1246       Node* lhs = k1;
1247       Node* rhs = (k2 != nullptr) ? k2 : conk2;
1248       set_req_X(1, lhs, phase);
1249       set_req_X(2, rhs, phase);
1250       return this;
1251     }
1252   }
1253 
1254   // Constant pointer on right?
1255   const TypeKlassPtr* t2 = phase->type(in(2))->isa_klassptr();
1256   if (t2 == nullptr || !t2->klass_is_exact())
1257     return nullptr;
1258   // Get the constant klass we are comparing to.
1259   ciKlass* superklass = t2->exact_klass();
1260 
1261   // Now check for LoadKlass on left.
1262   Node* ldk1 = in(1);
1263   if (ldk1->is_DecodeNKlass()) {

1302   //
1303   // We could be more liberal here, and allow the optimization on interfaces
1304   // which have a single implementor.  This would require us to increase the
1305   // expressiveness of the add_dependency() mechanism.
1306   // %%% Do this after we fix TypeOopPtr:  Deps are expressive enough now.
1307 
1308   // Object arrays must have their base element have no subtypes
1309   while (superklass->is_obj_array_klass()) {
1310     ciType* elem = superklass->as_obj_array_klass()->element_type();
1311     superklass = elem->as_klass();
1312   }
1313   if (superklass->is_instance_klass()) {
1314     ciInstanceKlass* ik = superklass->as_instance_klass();
1315     if (ik->has_subklass() || ik->is_interface())  return nullptr;
1316     // Add a dependency if there is a chance that a subclass will be added later.
1317     if (!ik->is_final()) {
1318       phase->C->dependencies()->assert_leaf_type(ik);
1319     }
1320   }
1321 
1322   // Do not fold the subtype check to an array klass pointer comparison for
1323   // value class arrays because they can have multiple refined array klasses.
1324   superklass = t2->exact_klass();
1325   assert(!superklass->is_flat_array_klass(), "Unexpected flat array klass");
1326   if (superklass->is_obj_array_klass()) {
1327     if (!superklass->as_array_klass()->is_elem_null_free() &&
1328          superklass->as_array_klass()->element_klass()->is_inlinetype()) {
1329       return nullptr;
1330     } else {
1331       // Special case for non-value arrays: They only have one (default) refined class, use it
1332       set_req_X(2, phase->makecon(t2->is_aryklassptr()->cast_to_refined_array_klass_ptr()), phase);
1333     }
1334   }
1335 
1336   // Bypass the dependent load, and compare directly
1337   this->set_req_X(1, ldk2, phase);
1338 
1339   return this;
1340 }
1341 
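As a loose analogy for the normalization in CmpPNode::Ideal above: a mirror compare such as x.getClass() == Foo.class is rewritten into a compare of the underlying klass pointers, which is one dependent load shorter and matches the pattern instanceof/checkcast already emit. The sketch below assumes a strict one-to-one mapping between klass and mirror; the code above deliberately skips the rewrite when both sides might be arrays, because arrays sharing a Java mirror can have different refined array klasses, which is exactly where that assumption breaks. The types here are illustrative stand-ins, not HotSpot classes:

    #include <cassert>
    #include <string>

    struct Mirror { std::string name; };          // stands in for java.lang.Class
    struct Klass  { Mirror mirror; };             // klass -> java_mirror, assumed 1:1

    struct Object { const Klass* klass; };        // every object points at its klass

    static const Mirror* get_class(const Object& o) {
      return &o.klass->mirror;                    // x.getClass(): load klass, then mirror
    }

    int main() {
      Klass foo_klass{ Mirror{"Foo"} };
      Klass bar_klass{ Mirror{"Bar"} };
      Object a{ &foo_klass }, b{ &foo_klass }, c{ &bar_klass };

      // Comparing mirrors (two dependent loads each) agrees with comparing the
      // klass pointers directly (one load each) while the mapping is 1:1.
      assert((get_class(a) == get_class(b)) == (a.klass == b.klass));
      assert((get_class(a) == get_class(c)) == (a.klass == c.klass));
      return 0;
    }
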
1342 //=============================================================================
1343 //------------------------------sub--------------------------------------------
1344 // Simplify a CmpN (compare 2 pointers) node, based on local information.
1345 // If both inputs are constants, compare them.
1346 const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
1347   ShouldNotReachHere();
1348   return bottom_type();
1349 }
1350 
1351 //------------------------------Ideal------------------------------------------
1352 Node *CmpNNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
1353   return nullptr;
1354 }
1355 

1436     if( t2_value_as_double == (double)t2_value_as_float ) {
1437       // Test value can be represented as a float
1438       // Eliminate the conversion to double and create new comparison
1439       Node *new_in1 = in(idx_f2d)->in(1);
1440       Node *new_in2 = phase->makecon( TypeF::make(t2_value_as_float) );
1441       if( idx_f2d != 1 ) {      // Must flip args to match original order
1442         Node *tmp = new_in1;
1443         new_in1 = new_in2;
1444         new_in2 = tmp;
1445       }
1446       CmpFNode *new_cmp = (Opcode() == Op_CmpD3)
1447         ? new CmpF3Node( new_in1, new_in2 )
1448         : new CmpFNode ( new_in1, new_in2 ) ;
1449       return new_cmp;           // Changed to CmpFNode
1450     }
1451     // Testing value required the precision of a double
1452   }
1453   return nullptr;                  // No change
1454 }
1455 
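The double-to-float narrowing above only fires when the double constant survives a round trip through float; otherwise comparing at float precision could change the answer. A minimal standalone check of that guard:

    #include <cassert>

    static bool representable_as_float(double d) {
      // Same guard as above: the value is unchanged by a double -> float -> double trip.
      return (double)(float) d == d;
    }

    int main() {
      assert( representable_as_float(0.5));        // exact in float and double
      assert( representable_as_float(-3.0));
      assert(!representable_as_float(0.1));        // rounds differently as a float
      assert(!representable_as_float(1.0 / 3.0));  // needs double precision
      return 0;
    }
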
1456 //=============================================================================
1457 //------------------------------Value------------------------------------------
1458 const Type* FlatArrayCheckNode::Value(PhaseGVN* phase) const {
1459   bool all_not_flat = true;
1460   for (uint i = ArrayOrKlass; i < req(); ++i) {
1461     const Type* t = phase->type(in(i));
1462     if (t == Type::TOP) {
1463       return Type::TOP;
1464     }
1465     if (t->is_ptr()->is_flat()) {
1466       // One of the input arrays is flat, check always passes
1467       return TypeInt::CC_EQ;
1468     } else if (!t->is_ptr()->is_not_flat()) {
1469       // One of the input arrays might be flat
1470       all_not_flat = false;
1471     }
1472   }
1473   if (all_not_flat) {
1474     // None of the input arrays can be flat, check always fails
1475     return TypeInt::CC_GT;
1476   }
1477   return TypeInt::CC;
1478 }
1479 
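FlatArrayCheckNode::Value above is a three-valued fold over the array inputs: any input known to be flat makes the check constantly pass, all inputs known to be non-flat make it constantly fail, and anything else leaves it undecided. A standalone sketch with illustrative names (not HotSpot types):

    #include <cassert>
    #include <vector>

    enum Flatness { FLAT, NOT_FLAT, MAYBE_FLAT };  // what is known about one input
    enum Outcome  { ALWAYS_PASSES, ALWAYS_FAILS, UNDECIDED };

    static Outcome flat_array_check(const std::vector<Flatness>& inputs) {
      bool all_not_flat = true;
      for (Flatness f : inputs) {
        if (f == FLAT)     return ALWAYS_PASSES;   // like returning CC_EQ
        if (f != NOT_FLAT) all_not_flat = false;   // this input might still be flat
      }
      return all_not_flat ? ALWAYS_FAILS : UNDECIDED;  // CC_GT vs. CC
    }

    int main() {
      assert(flat_array_check({NOT_FLAT, FLAT})       == ALWAYS_PASSES);
      assert(flat_array_check({NOT_FLAT, NOT_FLAT})   == ALWAYS_FAILS);
      assert(flat_array_check({NOT_FLAT, MAYBE_FLAT}) == UNDECIDED);
      return 0;
    }
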
1480 //------------------------------Ideal------------------------------------------
1481 Node* FlatArrayCheckNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1482   bool changed = false;
1483   // Remove inputs that are known to be non-flat
1484   for (uint i = ArrayOrKlass; i < req(); ++i) {
1485     const Type* t = phase->type(in(i));
1486     if (t->isa_ptr() && t->is_ptr()->is_not_flat()) {
1487       del_req(i--);
1488       changed = true;
1489     }
1490   }
1491   return changed ? this : nullptr;
1492 }
1493 
1494 //=============================================================================
1495 //------------------------------cc2logical-------------------------------------
1496 // Convert a condition code type to a logical type
1497 const Type *BoolTest::cc2logical( const Type *CC ) const {
1498   if( CC == Type::TOP ) return Type::TOP;
1499   if( CC->base() != Type::Int ) return TypeInt::BOOL; // Bottom or worse
1500   const TypeInt *ti = CC->is_int();
1501   if( ti->is_con() ) {          // Only 1 kind of condition codes set?
1502     // Match low order 2 bits
1503     int tmp = ((ti->get_con()&3) == (_test&3)) ? 1 : 0;
1504     if( _test & 4 ) tmp = 1-tmp;     // Optionally complement result
1505     return TypeInt::make(tmp);       // Boolean result
1506   }
1507 
1508   if( CC == TypeInt::CC_GE ) {
1509     if( _test == ge ) return TypeInt::ONE;
1510     if( _test == lt ) return TypeInt::ZERO;
1511   }
1512   if( CC == TypeInt::CC_LE ) {