src/hotspot/share/opto/subnode.cpp

*** 20,25 ***
--- 20,30 ---
   * or visit www.oracle.com if you need additional information or have any
   * questions.
   *
   */
  
+ #include "ci/ciObjArrayKlass.hpp"
  #include "compiler/compileLog.hpp"
  #include "gc/shared/barrierSet.hpp"
  #include "gc/shared/c2/barrierSetC2.hpp"
  #include "memory/allocation.inline.hpp"
  #include "opto/addnode.hpp"
  #include "opto/callnode.hpp"
+ #include "opto/castnode.hpp"
  #include "opto/cfgnode.hpp"
+ #include "opto/convertnode.hpp"
+ #include "opto/inlinetypenode.hpp"
  #include "opto/loopnode.hpp"
  #include "opto/matcher.hpp"
  #include "opto/movenode.hpp"
  #include "opto/mulnode.hpp"
  #include "opto/opaquenode.hpp"
  #include "opto/opcodes.hpp"
  #include "opto/phaseX.hpp"
  #include "opto/subnode.hpp"
+ #include "runtime/arguments.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "utilities/reverse_bits.hpp"
  
  // Portions of code courtesy of Clifford Click
  

*** 918,11 ***
      }
    }
    return nullptr;                  // No change
  }
  
! Node *CmpLNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
    const TypeLong *t2 = phase->type(in(2))->isa_long();
    if (Opcode() == Op_CmpL && in(1)->Opcode() == Op_ConvI2L && t2 && t2->is_con()) {
      const jlong con = t2->get_con();
      if (con >= min_jint && con <= max_jint) {
        return new CmpINode(in(1)->in(1), phase->intcon((jint)con));
--- 923,36 ---
      }
    }
    return nullptr;                  // No change
  }
  
! //------------------------------Ideal------------------------------------------
+ Node* CmpLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+   // Optimize expressions like
+   //   CmpL(OrL(CastP2X(..), CastP2X(..)), 0L)
+   // that are used by acmp to implement a "both operands are null" check.
+   // See also the corresponding code in CmpPNode::Ideal.
+   if (can_reshape && in(1)->Opcode() == Op_OrL &&
+       in(2)->bottom_type()->is_zero_type()) {
+     for (int i = 1; i <= 2; ++i) {
+       Node* orIn = in(1)->in(i);
+       if (orIn->Opcode() == Op_CastP2X) {
+         Node* castIn = orIn->in(1);
+         if (castIn->is_InlineType()) {
+           // Replace the CastP2X by the null marker
+           InlineTypeNode* vt = castIn->as_InlineType();
+           Node* nm = phase->transform(new ConvI2LNode(vt->get_null_marker()));
+           phase->is_IterGVN()->replace_input_of(in(1), i, nm);
+           return this;
+         } else if (!phase->type(castIn)->maybe_null()) {
+           // Never null. Replace the CastP2X by constant 1L.
+           phase->is_IterGVN()->replace_input_of(in(1), i, phase->longcon(1));
+           return this;
+         }
+       }
+     }
+   }
    const TypeLong *t2 = phase->type(in(2))->isa_long();
    if (Opcode() == Op_CmpL && in(1)->Opcode() == Op_ConvI2L && t2 && t2->is_con()) {
      const jlong con = t2->get_con();
      if (con >= min_jint && con <= max_jint) {
        return new CmpINode(in(1)->in(1), phase->intcon((jint)con));

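As a standalone sketch in plain C++ (not HotSpot code), the rewrite above relies on the fact that the bitwise OR of two pointer-sized words is zero exactly when both words are zero, and that an operand proven non-null can stand in as any nonzero constant:

#include <cassert>
#include <cstdint>

// Sketch of the bit trick behind CmpL(OrL(CastP2X(..), CastP2X(..)), 0L):
// the OR of two pointer-sized words is zero iff both words are zero. The
// words stand in for the CastP2X values of the two acmp operands.
static bool both_null(uintptr_t p0, uintptr_t p1) {
  return (p0 | p1) == 0;
}

int main() {
  assert(both_null(0, 0));         // both operands null: the acmp check passes
  assert(!both_null(0x1000, 0));   // one operand is non-null: the check fails
  // An operand that is provably never null can be modeled by any nonzero
  // constant (the 1L used above), which turns the whole check into false.
  assert(!both_null(1, 0));
  return 0;
}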
*** 1065,11 ***
        unrelated_classes = xklass1;
      } else if ((p0 && !p0->maybe_java_subtype_of(p1)) ||
                 (k0 && !k0->maybe_java_subtype_of(k1))) {
        unrelated_classes = xklass0;
      }
! 
      if (unrelated_classes) {
        // The oops classes are known to be unrelated. If the joined PTRs of
        // two oops is not Null and not Bottom, then we are sure that one
        // of the two oops is non-null, and the comparison will always fail.
        TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
--- 1095,26 ---
        unrelated_classes = xklass1;
      } else if ((p0 && !p0->maybe_java_subtype_of(p1)) ||
                 (k0 && !k0->maybe_java_subtype_of(k1))) {
        unrelated_classes = xklass0;
      }
!     if (!unrelated_classes) {
+       // Handle inline type arrays
+       if ((r0->is_flat_in_array() && r1->is_not_flat_in_array()) ||
+           (r1->is_flat_in_array() && r0->is_not_flat_in_array())) {
+         // One type is in flat arrays but the other type is not. Must be unrelated.
+         unrelated_classes = true;
+       } else if ((r0->is_not_flat() && r1->is_flat()) ||
+                  (r1->is_not_flat() && r0->is_flat())) {
+         // One type is a non-flat array and the other type is a flat array. Must be unrelated.
+         unrelated_classes = true;
+       } else if ((r0->is_not_null_free() && r1->is_null_free()) ||
+                  (r1->is_not_null_free() && r0->is_null_free())) {
+         // One type is a nullable array and the other type is a null-free array. Must be unrelated.
+         unrelated_classes = true;
+       }
+     }
      if (unrelated_classes) {
        // The oops classes are known to be unrelated. If the joined PTRs of
        // two oops is not Null and not Bottom, then we are sure that one
        // of the two oops is non-null, and the comparison will always fail.
        TypePtr::PTR jp = r0->join_ptr(r1->_ptr);

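As a standalone sketch with hypothetical names (the real checks are the TypePtr queries above), the added rules reduce to pairwise exclusions: two array types cannot describe the same object if one is known to have a property that the other is known to lack:

#include <cassert>

// Hypothetical boolean properties standing in for the TypePtr queries used
// above (is_flat(), is_not_flat(), is_null_free(), is_not_null_free(),
// is_flat_in_array(), is_not_flat_in_array()).
struct ArrayProps {
  bool flat_in_array, not_flat_in_array;
  bool flat, not_flat;
  bool null_free, not_null_free;
};

// Two array types cannot refer to the same object if one is known to have a
// property that the other is known not to have.
static bool provably_unrelated(const ArrayProps& a, const ArrayProps& b) {
  return (a.flat_in_array && b.not_flat_in_array) || (b.flat_in_array && a.not_flat_in_array) ||
         (a.flat && b.not_flat)                   || (b.flat && a.not_flat)                   ||
         (a.null_free && b.not_null_free)         || (b.null_free && a.not_null_free);
}

int main() {
  ArrayProps flat_ary     = {false, false, true,  false, true,  false};
  ArrayProps non_flat_ary = {false, false, false, true,  false, true};
  ArrayProps unknown_ary  = {};  // nothing is known, so nothing can be proven
  assert(provably_unrelated(flat_ary, non_flat_ary));
  assert(!provably_unrelated(flat_ary, unknown_ary));
  return 0;
}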
*** 1092,11 ***
      return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? TypeInt::CC_GT : TypeInt::CC;
    } else
      return TypeInt::CC;
  }
  
! static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) {
    // Return the klass node for (indirect load from OopHandle)
    //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
    //   or null if not matching.
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      n = bs->step_over_gc_barrier(n);
--- 1137,11 ---
      return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? TypeInt::CC_GT : TypeInt::CC;
    } else
      return TypeInt::CC;
  }
  
! static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n, bool& might_be_an_array) {
    // Return the klass node for (indirect load from OopHandle)
    //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
    //   or null if not matching.
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      n = bs->step_over_gc_barrier(n);

*** 1114,16 ***
    intptr_t off = 0;
    Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
    if (k == nullptr)  return nullptr;
    const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr();
    if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return nullptr;
  
    // We've found the klass node of a Java mirror load.
    return k;
  }
  
! static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n) {
    // for ConP(Foo.class) return ConP(Foo.klass)
    // otherwise return null
    if (!n->is_Con()) return nullptr;
  
    const TypeInstPtr* tp = phase->type(n)->isa_instptr();
--- 1159,17 ---
    intptr_t off = 0;
    Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
    if (k == nullptr)  return nullptr;
    const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr();
    if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return nullptr;
+   might_be_an_array |= tkp->isa_aryklassptr() || tkp->is_instklassptr()->might_be_an_array();
  
    // We've found the klass node of a Java mirror load.
    return k;
  }
  
! static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n, bool& might_be_an_array) {
    // for ConP(Foo.class) return ConP(Foo.klass)
    // otherwise return null
    if (!n->is_Con()) return nullptr;
  
    const TypeInstPtr* tp = phase->type(n)->isa_instptr();

*** 1139,22 ***
    if (mirror_type->is_classless()) {
      return phase->makecon(TypePtr::NULL_PTR);
    }
  
    // return the ConP(Foo.klass)
!   assert(mirror_type->is_klass(), "mirror_type should represent a Klass*");
!   return phase->makecon(TypeKlassPtr::make(mirror_type->as_klass(), Type::trust_interfaces));
  }
  
  //------------------------------Ideal------------------------------------------
  // Normalize comparisons between Java mirror loads to compare the klass instead.
  //
  // Also check for the case of comparing an unknown klass loaded from the primary
  // super-type array vs a known klass with no subtypes.  This amounts to
  // checking to see an unknown klass subtypes a known klass with no subtypes;
  // this only happens on an exact match.  We can shorten this test by 1 load.
! Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
    // Normalize comparisons between Java mirrors into comparisons of the low-
    // level klass, where a dependent load could be shortened.
    //
    // The new pattern has a nice effect of matching the same pattern used in the
    // fast path of instanceof/checkcast/Class.isInstance(), which allows
--- 1185,57 ---
    if (mirror_type->is_classless()) {
      return phase->makecon(TypePtr::NULL_PTR);
    }
  
    // return the ConP(Foo.klass)
!   ciKlass* mirror_klass = mirror_type->as_klass();
! 
+   if (mirror_klass->is_array_klass() && !mirror_klass->is_type_array_klass()) {
+     if (!mirror_klass->can_be_inline_array_klass()) {
+       // Special case for non-value arrays: they only have one (default) refined array klass, so use it
+       ciArrayKlass* refined_mirror_klass = ciObjArrayKlass::make(mirror_klass->as_array_klass()->element_klass(), true);
+       return phase->makecon(TypeAryKlassPtr::make(refined_mirror_klass, Type::trust_interfaces));
+     }
+     might_be_an_array |= true;
+   }
+ 
+   return phase->makecon(TypeKlassPtr::make(mirror_klass, Type::trust_interfaces));
  }
  
  //------------------------------Ideal------------------------------------------
  // Normalize comparisons between Java mirror loads to compare the klass instead.
  //
  // Also check for the case of comparing an unknown klass loaded from the primary
  // super-type array vs a known klass with no subtypes.  This amounts to
  // checking to see an unknown klass subtypes a known klass with no subtypes;
  // this only happens on an exact match.  We can shorten this test by 1 load.
! Node* CmpPNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+   // TODO 8284443 in(1) could be cast?
+   if (in(1)->is_InlineType() && phase->type(in(2))->is_zero_type()) {
+     // Null checking a scalarized but nullable inline type. Check the null marker
+     // input instead of the oop input to avoid keeping buffer allocations alive.
+     return new CmpINode(in(1)->as_InlineType()->get_null_marker(), phase->intcon(0));
+   }
+   if (in(1)->is_InlineType() || in(2)->is_InlineType()) {
+     // In C2 IR, CmpP on value objects is a pointer comparison, not a value comparison.
+     // For non-null operands it cannot reliably be true, since their buffer oops are not
+     // guaranteed to be identical. Therefore, the comparison can only be true when both
+     // operands are null. Convert expressions like this to a "both operands are null" check:
+     //   CmpL(OrL(CastP2X(..), CastP2X(..)), 0L)
+     // CmpLNode::Ideal might optimize this further to avoid keeping buffer allocations alive.
+     Node* input[2];
+     for (int i = 1; i <= 2; ++i) {
+       if (in(i)->is_InlineType()) {
+         input[i-1] = phase->transform(new ConvI2LNode(in(i)->as_InlineType()->get_null_marker()));
+       } else {
+         input[i-1] = phase->transform(new CastP2XNode(nullptr, in(i)));
+       }
+     }
+     Node* orL = phase->transform(new OrXNode(input[0], input[1]));
+     return new CmpXNode(orL, phase->MakeConX(0));
+   }
+ 
    // Normalize comparisons between Java mirrors into comparisons of the low-
    // level klass, where a dependent load could be shortened.
    //
    // The new pattern has a nice effect of matching the same pattern used in the
    // fast path of instanceof/checkcast/Class.isInstance(), which allows

*** 1164,13 ***
    //     Foo foo = (Foo) x;
    //     // ... use a ...
    //   }
    // a CmpPNode could be shared between if_acmpne and checkcast
    {
!     Node* k1 = isa_java_mirror_load(phase, in(1));
!     Node* k2 = isa_java_mirror_load(phase, in(2));
!     Node* conk2 = isa_const_java_mirror(phase, in(2));
  
      if (k1 && (k2 || conk2)) {
        Node* lhs = k1;
        Node* rhs = (k2 != nullptr) ? k2 : conk2;
        set_req_X(1, lhs, phase);
--- 1245,20 ---
    //     Foo foo = (Foo) x;
    //     // ... use a ...
    //   }
    // a CmpPNode could be shared between if_acmpne and checkcast
    {
!     bool might_be_an_array1 = false;
!     bool might_be_an_array2 = false;
!     Node* k1 = isa_java_mirror_load(phase, in(1), might_be_an_array1);
+     Node* k2 = isa_java_mirror_load(phase, in(2), might_be_an_array2);
+     Node* conk2 = isa_const_java_mirror(phase, in(2), might_be_an_array2);
+     if (might_be_an_array1 && might_be_an_array2) {
+       // Don't optimize if both sides might be an array because arrays with
+       // the same Java mirror can have different refined array klasses.
+       k1 = k2 = nullptr;
+     }
  
      if (k1 && (k2 || conk2)) {
        Node* lhs = k1;
        Node* rhs = (k2 != nullptr) ? k2 : conk2;
        set_req_X(1, lhs, phase);

*** 1245,10 ***
--- 1333,23 ---
      if (!ik->is_final()) {
        phase->C->dependencies()->assert_leaf_type(ik);
      }
    }
  
+   // Do not fold the subtype check to an array klass pointer comparison for
+   // value class arrays because they can have multiple refined array klasses.
+   superklass = t2->exact_klass();
+   assert(!superklass->is_flat_array_klass(), "Unexpected flat array klass");
+   if (superklass->is_obj_array_klass()) {
+     if (superklass->as_array_klass()->element_klass()->is_inlinetype() && !superklass->as_array_klass()->is_refined()) {
+       return nullptr;
+     } else {
+       // Special case for non-value arrays: they only have one (default) refined array klass, so use it
+       set_req_X(2, phase->makecon(t2->is_aryklassptr()->cast_to_refined_array_klass_ptr()), phase);
+     }
+   }
+ 
    // Bypass the dependent load, and compare directly
    this->set_req_X(1, ldk2, phase);
  
    return this;
  }

*** 1365,10 ***
--- 1466,47 ---
      // Testing value required the precision of a double
    }
    return nullptr;                  // No change
  }
  
+ //=============================================================================
+ //------------------------------Value------------------------------------------
+ const Type* FlatArrayCheckNode::Value(PhaseGVN* phase) const {
+   bool all_not_flat = true;
+   for (uint i = ArrayOrKlass; i < req(); ++i) {
+     const Type* t = phase->type(in(i));
+     if (t == Type::TOP) {
+       return Type::TOP;
+     }
+     if (t->is_ptr()->is_flat()) {
+       // One of the input arrays is flat, check always passes
+       return TypeInt::CC_EQ;
+     } else if (!t->is_ptr()->is_not_flat()) {
+       // One of the input arrays might be flat
+       all_not_flat = false;
+     }
+   }
+   if (all_not_flat) {
+     // None of the input arrays can be flat, check always fails
+     return TypeInt::CC_GT;
+   }
+   return TypeInt::CC;
+ }
+ 
+ //------------------------------Ideal------------------------------------------
+ Node* FlatArrayCheckNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+   bool changed = false;
+   // Remove inputs that are known to be non-flat
+   for (uint i = ArrayOrKlass; i < req(); ++i) {
+     const Type* t = phase->type(in(i));
+     if (t->isa_ptr() && t->is_ptr()->is_not_flat()) {
+       del_req(i--);
+       changed = true;
+     }
+   }
+   return changed ? this : nullptr;
+ }
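As a standalone sketch with assumed enum names (not HotSpot code), FlatArrayCheckNode::Value performs a three-way fold: any input known to be flat makes the check constant-pass, all inputs known to be non-flat make it constant-fail, and otherwise it remains a runtime check:

#include <cassert>
#include <vector>

// Assumed enums standing in for the TypePtr flatness queries used above.
enum class Flatness { Flat, NotFlat, Unknown };
enum class CheckResult { AlwaysPasses, AlwaysFails, Runtime };

static CheckResult fold_flat_array_check(const std::vector<Flatness>& inputs) {
  bool all_not_flat = true;
  for (Flatness f : inputs) {
    if (f == Flatness::Flat) {
      return CheckResult::AlwaysPasses;  // one input is flat: check always passes
    }
    if (f == Flatness::Unknown) {
      all_not_flat = false;              // this input might still be flat
    }
  }
  // No input is known to be flat; fail only if all are known to be non-flat.
  return all_not_flat ? CheckResult::AlwaysFails : CheckResult::Runtime;
}

int main() {
  assert(fold_flat_array_check({Flatness::Unknown, Flatness::Flat})    == CheckResult::AlwaysPasses);
  assert(fold_flat_array_check({Flatness::NotFlat, Flatness::NotFlat}) == CheckResult::AlwaysFails);
  assert(fold_flat_array_check({Flatness::NotFlat, Flatness::Unknown}) == CheckResult::Runtime);
  return 0;
}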
  
  //=============================================================================
  //------------------------------cc2logical-------------------------------------
  // Convert a condition code type to a logical type
  const Type *BoolTest::cc2logical( const Type *CC ) const {