
src/hotspot/share/opto/parse2.cpp


@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -34,16 +34,18 @@
 #include "opto/addnode.hpp"
 #include "opto/castnode.hpp"
 #include "opto/convertnode.hpp"
 #include "opto/divnode.hpp"
 #include "opto/idealGraphPrinter.hpp"
+#include "opto/idealKit.hpp"
 #include "opto/matcher.hpp"
 #include "opto/memnode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/opaquenode.hpp"
 #include "opto/parse.hpp"
 #include "opto/runtime.hpp"
+#include "opto/valuetypenode.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/sharedRuntime.hpp"
 
 #ifndef PRODUCT
 extern int explicit_null_checks_inserted,

@@ -51,62 +53,308 @@
 #endif
 
 //---------------------------------array_load----------------------------------
 void Parse::array_load(BasicType bt) {
   const Type* elemtype = Type::TOP;
-  bool big_val = bt == T_DOUBLE || bt == T_LONG;
   Node* adr = array_addressing(bt, 0, &elemtype);
   if (stopped())  return;     // guaranteed null or range check
 
-  pop();                      // index (already used)
-  Node* array = pop();        // the array itself
+  Node* idx = pop();
+  Node* ary = pop();
+
+  // Handle value type arrays
+  const TypeOopPtr* elemptr = elemtype->make_oopptr();
+  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
+  if (elemtype->isa_valuetype() != NULL) {
+    C->set_flattened_accesses();
+    // Load from flattened value type array
+    Node* vt = ValueTypeNode::make_from_flattened(this, elemtype->value_klass(), ary, adr);
+    push(vt);
+    return;
+  } else if (elemptr != NULL && elemptr->is_valuetypeptr() && !elemptr->maybe_null()) {
+    // Load from non-flattened but flattenable value type array (elements can never be null)
+    bt = T_VALUETYPE;
+  } else if (!ary_t->is_not_flat()) {
+    // Cannot statically determine if array is flattened, emit runtime check
+    assert(ValueArrayFlatten && elemptr->can_be_value_type() && !ary_t->klass_is_exact() && !ary_t->is_not_null_free() &&
+           (!elemptr->is_valuetypeptr() || elemptr->value_klass()->flatten_array()), "array can't be flattened");
+    Node* ctl = control();
+    IdealKit ideal(this);
+    IdealVariable res(ideal);
+    ideal.declarations_done();
+    Node* kls = load_object_klass(ary);
+    Node* tag = load_lh_array_tag(kls);
+    ideal.if_then(tag, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
+      // non-flattened
+      sync_kit(ideal);
+      const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
+      Node* ld = access_load_at(ary, adr, adr_type, elemptr, bt,
+                                IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD, ctl);
+      ideal.sync_kit(this);
+      ideal.set(res, ld);
+    } ideal.else_(); {
+      // flattened
+      sync_kit(ideal);
+      if (elemptr->is_valuetypeptr()) {
+        // Element type is known, cast and load from flattened representation
+        ciValueKlass* vk = elemptr->value_klass();
+        assert(vk->flatten_array() && elemptr->maybe_null(), "must be a flattenable and nullable array");
+        ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* never_null */ true);
+        const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
+        Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
+        adr = array_element_address(cast, idx, T_VALUETYPE, ary_t->size(), control());
+        Node* vt = ValueTypeNode::make_from_flattened(this, vk, cast, adr)->allocate(this, false, false)->get_oop();
+        ideal.set(res, vt);
+        ideal.sync_kit(this);
+      } else {
+        // Element type is unknown, emit runtime call
+        Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset()));
+        Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
+        Node* obj_size  = NULL;
+        kill_dead_locals();
+        inc_sp(2);
+        Node* alloc_obj = new_instance(elem_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
+        dec_sp(2);
+
+        AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
+        assert(alloc->maybe_set_complete(&_gvn), "");
+        alloc->initialization()->set_complete_with_arraycopy();
+
+        // This membar keeps this access to an unknown flattened array
+        // correctly ordered with other unknown and known flattened
+        // array accesses.
+        insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
+
+        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+        // Unknown value type might contain reference fields
+        if (!bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing)) {
+          int base_off = sizeof(instanceOopDesc);
+          Node* dst_base = basic_plus_adr(alloc_obj, base_off);
+          Node* countx = obj_size;
+          countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
+          countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong)));
+
+          assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
+          Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
+          Node* elem_shift = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
+          uint header = arrayOopDesc::base_offset_in_bytes(T_VALUETYPE);
+          Node* base  = basic_plus_adr(ary, header);
+          idx = Compile::conv_I2X_index(&_gvn, idx, TypeInt::POS, control());
+          Node* scale = _gvn.transform(new LShiftXNode(idx, elem_shift));
+          Node* adr = basic_plus_adr(ary, base, scale);
+
+          access_clone(adr, dst_base, countx, false);
+        } else {
+          ideal.sync_kit(this);
+          ideal.make_leaf_call(OptoRuntime::load_unknown_value_Type(),
+                               CAST_FROM_FN_PTR(address, OptoRuntime::load_unknown_value),
+                               "load_unknown_value",
+                               ary, idx, alloc_obj);
+          sync_kit(ideal);
+        }
+
+        // This makes sure no other thread sees a partially initialized buffered value
+        insert_mem_bar_volatile(Op_MemBarStoreStore, Compile::AliasIdxRaw, alloc->proj_out_or_null(AllocateNode::RawAddress));
+
+        // Same as the MemBarCPUOrder above: keep this unknown flattened
+        // array access correctly ordered with other flattened array
+        // accesses.
+        insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
+
+        // Prevent any use of the newly allocated value before it is
+        // fully initialized
+        alloc_obj = new CastPPNode(alloc_obj, _gvn.type(alloc_obj), true);
+        alloc_obj->set_req(0, control());
+        alloc_obj = _gvn.transform(alloc_obj);
+
+        ideal.sync_kit(this);
+
+        ideal.set(res, alloc_obj);
+      }
+    } ideal.end_if();
+    sync_kit(ideal);
+    push_node(bt, _gvn.transform(ideal.value(res)));
+    return;
+  }
 
   if (elemtype == TypeInt::BOOL) {
     bt = T_BOOLEAN;
   } else if (bt == T_OBJECT) {
-    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
+    elemtype = ary_t->elem()->make_oopptr();
   }
 
   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
-
-  Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
+  Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
                             IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
-  if (big_val) {
-    push_pair(ld);
-  } else {
-    push(ld);
+  if (bt == T_VALUETYPE) {
+    // Loading a non-flattened (but flattenable) value type from an array
+    assert(!gvn().type(ld)->maybe_null(), "value type array elements should never be null");
+    if (elemptr->value_klass()->is_scalarizable()) {
+      ld = ValueTypeNode::make_from_oop(this, ld, elemptr->value_klass());
+    }
   }
+
+  push_node(bt, ld);
 }
 
 
 //--------------------------------array_store----------------------------------
 void Parse::array_store(BasicType bt) {
   const Type* elemtype = Type::TOP;
-  bool big_val = bt == T_DOUBLE || bt == T_LONG;
-  Node* adr = array_addressing(bt, big_val ? 2 : 1, &elemtype);
+  Node* adr = array_addressing(bt, type2size[bt], &elemtype);
   if (stopped())  return;     // guaranteed null or range check
+  Node* cast_val = NULL;
   if (bt == T_OBJECT) {
-    array_store_check();
+    cast_val = array_store_check();
+    if (stopped()) return;
   }
-  Node* val;                  // Oop to store
-  if (big_val) {
-    val = pop_pair();
-  } else {
-    val = pop();
-  }
-  pop();                      // index (already used)
-  Node* array = pop();        // the array itself
+  Node* val = pop_node(bt); // Value to store
+  Node* idx = pop();        // Index in the array
+  Node* ary = pop();        // The array itself
+
+  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
+  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 
   if (elemtype == TypeInt::BOOL) {
     bt = T_BOOLEAN;
   } else if (bt == T_OBJECT) {
-    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
-  }
+    elemtype = elemtype->make_oopptr();
+    const Type* tval = _gvn.type(cast_val);
+    // We may have lost type information for 'val' here due to the casts
+    // emitted by the array_store_check code (see JDK-6312651).
+    // TODO Remove this code once JDK-6312651 is fixed.
+    const Type* tval_init = _gvn.type(val);
+    bool can_be_value_type = tval->isa_valuetype() || (tval != TypePtr::NULL_PTR && tval_init->is_oopptr()->can_be_value_type() && tval->is_oopptr()->can_be_value_type());
+    bool not_flattenable = !can_be_value_type || ((tval_init->is_valuetypeptr() || tval_init->isa_valuetype()) && !tval_init->value_klass()->flatten_array());
+
+    if (!ary_t->is_not_null_free() && !can_be_value_type && (!tval->maybe_null() || !tval_init->maybe_null())) {
+      // Storing a non-value-type, mark array as not null-free.
+      // This is only legal for non-null stores because the array_store_check passes for null.
+      ary_t = ary_t->cast_to_not_null_free();
+      Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
+      replace_in_map(ary, cast);
+      ary = cast;
+    } else if (!ary_t->is_not_flat() && not_flattenable) {
+      // Storing a non-flattenable value, mark array as not flat.
+      ary_t = ary_t->cast_to_not_flat();
+      if (tval != TypePtr::NULL_PTR) {
+        // For NULL, this transformation is only valid after the null guard below
+        Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
+        replace_in_map(ary, cast);
+        ary = cast;
+      }
+    }
 
-  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
+    if (ary_t->elem()->isa_valuetype() != NULL) {
+      // Store to flattened value type array
+      C->set_flattened_accesses();
+      if (!cast_val->is_ValueType()) {
+        inc_sp(3);
+        cast_val = null_check(cast_val);
+        if (stopped()) return;
+        dec_sp(3);
+        cast_val = ValueTypeNode::make_from_oop(this, cast_val, ary_t->elem()->value_klass());
+      }
+      cast_val->as_ValueType()->store_flattened(this, ary, adr);
+      return;
+    } else if (elemtype->is_valuetypeptr() && !elemtype->maybe_null()) {
+      // Store to non-flattened but flattenable value type array (elements can never be null)
+      if (!cast_val->is_ValueType() && tval->maybe_null()) {
+        inc_sp(3);
+        cast_val = null_check(cast_val);
+        if (stopped()) return;
+        dec_sp(3);
+      }
+    } else if (!ary_t->is_not_flat()) {
+      // Array might be flattened, emit runtime checks
+      assert(ValueArrayFlatten && !not_flattenable && elemtype->is_oopptr()->can_be_value_type() &&
+             !ary_t->klass_is_exact() && !ary_t->is_not_null_free(), "array can't be flattened");
+      IdealKit ideal(this);
+      Node* kls = load_object_klass(ary);
+      Node* layout_val = load_lh_array_tag(kls);
+      ideal.if_then(layout_val, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value));
+      {
+        // non-flattened
+        sync_kit(ideal);
+        gen_value_array_null_guard(ary, cast_val, 3);
+        access_store_at(ary, adr, adr_type, cast_val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false, false);
+        ideal.sync_kit(this);
+      }
+      ideal.else_();
+      {
+        // flattened
+        if (!cast_val->is_ValueType() && tval->maybe_null()) {
+          // Add null check
+          sync_kit(ideal);
+          Node* null_ctl = top();
+          cast_val = null_check_oop(cast_val, &null_ctl);
+          if (null_ctl != top()) {
+            PreserveJVMState pjvms(this);
+            inc_sp(3);
+            set_control(null_ctl);
+            uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
+            dec_sp(3);
+          }
+          ideal.sync_kit(this);
+        }
+        // Try to determine the value klass
+        ciValueKlass* vk = NULL;
+        if (tval->isa_valuetype() || tval->is_valuetypeptr()) {
+          vk = tval->value_klass();
+        } else if (tval_init->isa_valuetype() || tval_init->is_valuetypeptr()) {
+          vk = tval_init->value_klass();
+        } else if (elemtype->is_valuetypeptr()) {
+          vk = elemtype->value_klass();
+        }
+        if (vk != NULL && !stopped()) {
+          // Element type is known, cast and store to flattened representation
+          sync_kit(ideal);
+          assert(vk->flatten_array() && elemtype->maybe_null(), "must be a flattenable and nullable array");
+          ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* never_null */ true);
+          const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
+          ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
+          adr = array_element_address(ary, idx, T_OBJECT, arytype->size(), control());
+          if (!cast_val->is_ValueType()) {
+            assert(!gvn().type(cast_val)->maybe_null(), "value type array elements should never be null");
+            cast_val = ValueTypeNode::make_from_oop(this, cast_val, vk);
+          }
+          cast_val->as_ValueType()->store_flattened(this, ary, adr);
+          ideal.sync_kit(this);
+        } else if (!ideal.ctrl()->is_top()) {
+          // Element type is unknown, emit runtime call
+          sync_kit(ideal);
+
+          // This membar keeps this access to an unknown flattened
+          // array correctly ordered with other unknown and known
+          // flattened array accesses.
+          insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
+          ideal.sync_kit(this);
+
+          ideal.make_leaf_call(OptoRuntime::store_unknown_value_Type(),
+                               CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_value),
+                               "store_unknown_value",
+                               cast_val, ary, idx);
+
+          sync_kit(ideal);
+          // Same as the MemBarCPUOrder above: keep this unknown
+          // flattened array access correctly ordered with other
+          // flattened array accesses.
+          insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
+          ideal.sync_kit(this);
+        }
+      }
+      ideal.end_if();
+      sync_kit(ideal);
+      return;
+    } else if (!ary_t->is_not_null_free()) {
+      // Array is not flattened but may be null free
+      assert(elemtype->is_oopptr()->can_be_value_type() && !ary_t->klass_is_exact(), "array can't be null free");
+      ary = gen_value_array_null_guard(ary, cast_val, 3, true);
+    }
+  }
 
-  access_store_at(array, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
+  access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 }
 
 
 //------------------------------array_addressing-------------------------------
 // Pull array and index from the stack.  Compute pointer-to-element.

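Note: the array_load/array_store changes above dispatch at runtime on whether the array is flattened, using C2's IdealKit to build the diamond while the actual emission (loads, allocations, runtime calls) goes through the parser's GraphKit. The subtle part is the state handshake: sync_kit(ideal) moves control/memory from the IdealKit into the GraphKit so the normal emission helpers work, and ideal.sync_kit(this) hands the updated state back before the next IdealKit construct. The MemBarCPUOrder barriers on the TypeAryPtr::VALUES alias class bracket accesses to arrays of unknown flattened-ness so they cannot reorder with known flattened-array accesses on the same memory slices. A minimal sketch of the handshake pattern from array_load, using only names that appear in the change (illustrative, not part of the webrev):

    Node* ctl = control();                    // control before the diamond
    IdealKit ideal(this);
    IdealVariable res(ideal);
    ideal.declarations_done();
    Node* kls = load_object_klass(ary);
    Node* tag = load_lh_array_tag(kls);       // layout-helper array tag
    ideal.if_then(tag, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
      sync_kit(ideal);                        // IdealKit state -> GraphKit
      const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
      Node* ld = access_load_at(ary, adr, adr_type, elemptr, bt,
                                IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD, ctl);
      ideal.sync_kit(this);                   // GraphKit state -> IdealKit
      ideal.set(res, ld);
    } ideal.else_(); {
      sync_kit(ideal);
      // ... flattened path: buffer the value, then ideal.set(res, oop) ...
      ideal.sync_kit(this);
    } ideal.end_if();
    sync_kit(ideal);
    push_node(bt, _gvn.transform(ideal.value(res)));
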
@@ -202,10 +450,24 @@
     }
   }
   // Check for always knowing you are throwing a range-check exception
   if (stopped())  return top();
 
+  // Speculate on the array not being null-free
+  if (!arytype->is_not_null_free() && arytype->speculative() != NULL && arytype->speculative()->isa_aryptr() != NULL &&
+      arytype->speculative()->is_aryptr()->is_not_null_free() &&
+      !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
+    Node* tst = gen_null_free_array_check(ary);
+    {
+      BuildCutout unless(this, tst, PROB_ALWAYS);
+      uncommon_trap(Deoptimization::Reason_speculate_class_check,
+                    Deoptimization::Action_maybe_recompile);
+    }
+    Node* cast = new CheckCastPPNode(control(), ary, arytype->cast_to_not_null_free());
+    replace_in_map(ary, _gvn.transform(cast));
+  }
+
   // Make array address computation control dependent to prevent it
   // from floating above the range check during loop optimizations.
   Node* ptr = array_element_address(ary, idx, type, sizetype, control());
 
   if (result2 != NULL)  *result2 = elemtype;

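Note: the speculation added to array_addressing follows the standard BuildCutout idiom: the profile-derived speculative type claims the array is not null-free, the generated test is expected to pass (PROB_ALWAYS), and the failing projection ends in an uncommon trap so the main path can be strengthened with a CheckCastPP. A sketch of the idiom, with names as in the change above (illustrative, not part of the webrev):

    Node* tst = gen_null_free_array_check(ary);
    {
      // Inside the cutout scope, control is the failing projection.
      BuildCutout unless(this, tst, PROB_ALWAYS);
      uncommon_trap(Deoptimization::Reason_speculate_class_check,
                    Deoptimization::Action_maybe_recompile);
    }
    // Scope exit restores control to the passing projection: the array
    // is now known not null-free, so narrow its type in the JVM state.
    Node* cast = new CheckCastPPNode(control(), ary, arytype->cast_to_not_null_free());
    replace_in_map(ary, _gvn.transform(cast));
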
@@ -1493,11 +1755,11 @@
         branch_block->next_path_num();
       }
     } else {                    // Path is live.
       // Update method data
       profile_taken_branch(target_bci);
-      adjust_map_after_if(btest, c, prob, branch_block, next_block);
+      adjust_map_after_if(btest, c, prob, branch_block);
       if (!stopped()) {
         merge(target_bci);
       }
     }
   }

@@ -1513,17 +1775,16 @@
       next_block->next_path_num();
     }
   } else  {                     // Path is live.
     // Update method data
     profile_not_taken_branch();
-    adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
-                        next_block, branch_block);
+    adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
   }
 }
 
 //------------------------------------do_if------------------------------------
-void Parse::do_if(BoolTest::mask btest, Node* c) {
+void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) {
   int target_bci = iter().get_dest();
 
   Block* branch_block = successor_for_bci(target_bci);
   Block* next_block   = successor_for_bci(iter().next_bci());
 

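Note: do_if() grows two optional parameters for the new do_acmp(). With new_path=true the taken branch is merged into the target block via merge_new_path(), i.e. on a fresh path number, so it does not consume one of the block's pre-counted predecessor slots. With ctrl_taken != NULL the taken branch is not merged at all; its control is handed back for manual wiring. A sketch of the second mode as do_acmp() uses it below (illustrative, not part of the webrev):

    Node* is_not_equal = NULL;
    RegionNode* eq_region = new RegionNode(3);
    {
      PreserveJVMState pjvms(this);
      // Taken control is captured in is_not_equal instead of being merged.
      do_if(BoolTest::ne, cmp, /* new_path */ false, &is_not_equal);
      if (!stopped()) {
        eq_region->init_req(1, control());    // untaken path: pointers equal
      }
    }
    if (is_not_equal != NULL && !is_not_equal->is_top()) {
      set_control(is_not_equal);              // continue on the taken path
    }
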
@@ -1608,38 +1869,250 @@
   { PreserveJVMState pjvms(this);
     taken_branch = _gvn.transform(taken_branch);
     set_control(taken_branch);
 
     if (stopped()) {
-      if (C->eliminate_boxing()) {
-        // Mark the successor block as parsed
+      if (C->eliminate_boxing() && !new_path) {
+        // Mark the successor block as parsed (if we haven't created a new path)
         branch_block->next_path_num();
       }
     } else {
       // Update method data
       profile_taken_branch(target_bci);
-      adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
+      adjust_map_after_if(taken_btest, c, prob, branch_block);
       if (!stopped()) {
-        merge(target_bci);
+        if (new_path) {
+          // Merge by using a new path
+          merge_new_path(target_bci);
+        } else if (ctrl_taken != NULL) {
+          // Don't merge but save taken branch to be wired by caller
+          *ctrl_taken = control();
+        } else {
+          merge(target_bci);
+        }
       }
     }
   }
 
   untaken_branch = _gvn.transform(untaken_branch);
   set_control(untaken_branch);
 
   // Branch not taken.
-  if (stopped()) {
+  if (stopped() && ctrl_taken == NULL) {
     if (C->eliminate_boxing()) {
-      // Mark the successor block as parsed
+      // Mark the successor block as parsed (if the caller does not re-wire control flow)
       next_block->next_path_num();
     }
   } else {
     // Update method data
     profile_not_taken_branch();
-    adjust_map_after_if(untaken_btest, c, untaken_prob,
-                        next_block, branch_block);
+    adjust_map_after_if(untaken_btest, c, untaken_prob, next_block);
+  }
+}
+
+void Parse::do_acmp(BoolTest::mask btest, Node* a, Node* b) {
+  ciMethod* subst_method = ciEnv::current()->ValueBootstrapMethods_klass()->find_method(ciSymbol::isSubstitutable_name(), ciSymbol::object_object_boolean_signature());
+  // If the current method is ValueBootstrapMethods::isSubstitutable(),
+  // compile the acmp as a regular pointer comparison. Otherwise we
+  // would end up calling ValueBootstrapMethods::isSubstitutable()
+  // recursively.
+  if (!EnableValhalla || (method() == subst_method)) {
+    Node* cmp = CmpP(a, b);
+    cmp = optimize_cmp_with_klass(cmp);
+    do_if(btest, cmp);
+    return;
+  }
+
+  // Substitutability test
+  if (a->is_ValueType()) {
+    inc_sp(2);
+    a = a->as_ValueType()->allocate(this, true)->get_oop();
+    dec_sp(2);
+  }
+  if (b->is_ValueType()) {
+    inc_sp(2);
+    b = b->as_ValueType()->allocate(this, true)->get_oop();
+    dec_sp(2);
+  }
+
+  const TypeOopPtr* ta = _gvn.type(a)->isa_oopptr();
+  const TypeOopPtr* tb = _gvn.type(b)->isa_oopptr();
+
+  if (ta == NULL || !ta->can_be_value_type_raw() ||
+      tb == NULL || !tb->can_be_value_type_raw()) {
+    Node* cmp = CmpP(a, b);
+    cmp = optimize_cmp_with_klass(cmp);
+    do_if(btest, cmp);
+    return;
+  }
+
+  Node* cmp = CmpP(a, b);
+  cmp = optimize_cmp_with_klass(cmp);
+  Node* eq_region = NULL;
+  if (btest == BoolTest::eq) {
+    do_if(btest, cmp, true);
+    if (stopped()) {
+      return;
+    }
+  } else {
+    assert(btest == BoolTest::ne, "only eq or ne");
+    Node* is_not_equal = NULL;
+    eq_region = new RegionNode(3);
+    {
+      PreserveJVMState pjvms(this);
+      do_if(btest, cmp, false, &is_not_equal);
+      if (!stopped()) {
+        eq_region->init_req(1, control());
+      }
+    }
+    if (is_not_equal == NULL || is_not_equal->is_top()) {
+      record_for_igvn(eq_region);
+      set_control(_gvn.transform(eq_region));
+      return;
+    }
+    set_control(is_not_equal);
+  }
+  // Pointers not equal, check for values
+  Node* ne_region = new RegionNode(6);
+  inc_sp(2);
+  Node* null_ctl = top();
+  Node* not_null_a = null_check_oop(a, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false);
+  dec_sp(2);
+  ne_region->init_req(1, null_ctl);
+  if (stopped()) {
+    record_for_igvn(ne_region);
+    set_control(_gvn.transform(ne_region));
+    if (btest == BoolTest::ne) {
+      {
+        PreserveJVMState pjvms(this);
+        int target_bci = iter().get_dest();
+        merge(target_bci);
+      }
+      record_for_igvn(eq_region);
+      set_control(_gvn.transform(eq_region));
+    }
+    return;
+  }
+
+  Node* is_value = is_always_locked(not_null_a);
+  Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern);
+  Node* is_value_cmp = _gvn.transform(new CmpXNode(is_value, value_mask));
+  Node* is_value_bol = _gvn.transform(new BoolNode(is_value_cmp, BoolTest::ne));
+  IfNode* is_value_iff = create_and_map_if(control(), is_value_bol, PROB_FAIR, COUNT_UNKNOWN);
+  Node* not_value = _gvn.transform(new IfTrueNode(is_value_iff));
+  set_control(_gvn.transform(new IfFalseNode(is_value_iff)));
+  ne_region->init_req(2, not_value);
+
+  // One of the two pointers refers to a value, check if both are of
+  // the same class
+  inc_sp(2);
+  null_ctl = top();
+  Node* not_null_b = null_check_oop(b, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false);
+  dec_sp(2);
+  ne_region->init_req(3, null_ctl);
+  if (stopped()) {
+    record_for_igvn(ne_region);
+    set_control(_gvn.transform(ne_region));
+    if (btest == BoolTest::ne) {
+      {
+        PreserveJVMState pjvms(this);
+        int target_bci = iter().get_dest();
+        merge(target_bci);
+      }
+      record_for_igvn(eq_region);
+      set_control(_gvn.transform(eq_region));
+    }
+    return;
+  }
+  Node* kls_a = load_object_klass(not_null_a);
+  Node* kls_b = load_object_klass(not_null_b);
+  Node* kls_cmp = CmpP(kls_a, kls_b);
+  Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
+  IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
+  Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
+  set_control(_gvn.transform(new IfFalseNode(kls_iff)));
+  ne_region->init_req(4, kls_ne);
+
+  if (stopped()) {
+    record_for_igvn(ne_region);
+    set_control(_gvn.transform(ne_region));
+    if (btest == BoolTest::ne) {
+      {
+        PreserveJVMState pjvms(this);
+        int target_bci = iter().get_dest();
+        merge(target_bci);
+      }
+      record_for_igvn(eq_region);
+      set_control(_gvn.transform(eq_region));
+    }
+    return;
+  }
+  // Both are values of the same class, so we need to perform a
+  // substitutability test. Delegate to
+  // ValueBootstrapMethods::isSubstitutable().
+
+  Node* ne_io_phi = PhiNode::make(ne_region, i_o());
+  Node* mem = reset_memory();
+  Node* ne_mem_phi = PhiNode::make(ne_region, mem);
+
+  Node* eq_io_phi = NULL;
+  Node* eq_mem_phi = NULL;
+  if (eq_region != NULL) {
+    eq_io_phi = PhiNode::make(eq_region, i_o());
+    eq_mem_phi = PhiNode::make(eq_region, mem);
+  }
+
+  set_all_memory(mem);
+
+  kill_dead_locals();
+  CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method, bci());
+  call->set_override_symbolic_info(true);
+  call->init_req(TypeFunc::Parms, not_null_a);
+  call->init_req(TypeFunc::Parms+1, not_null_b);
+  inc_sp(2);
+  set_edges_for_java_call(call, false, false);
+  Node* ret = set_results_for_java_call(call, false, true);
+  dec_sp(2);
+
+  // Test the return value of ValueBootstrapMethods::isSubstitutable()
+  Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
+  Node* ctl = C->top();
+  if (btest == BoolTest::eq) {
+    PreserveJVMState pjvms(this);
+    do_if(btest, subst_cmp);
+    if (!stopped()) {
+      ctl = control();
+    }
+  } else {
+    assert(btest == BoolTest::ne, "only eq or ne");
+    PreserveJVMState pjvms(this);
+    do_if(btest, subst_cmp, false, &ctl);
+    if (!stopped()) {
+      eq_region->init_req(2, control());
+      eq_io_phi->init_req(2, i_o());
+      eq_mem_phi->init_req(2, reset_memory());
+    }
+  }
+  ne_region->init_req(5, ctl);
+  ne_io_phi->init_req(5, i_o());
+  ne_mem_phi->init_req(5, reset_memory());
+
+  record_for_igvn(ne_region);
+  set_control(_gvn.transform(ne_region));
+  set_i_o(_gvn.transform(ne_io_phi));
+  set_all_memory(_gvn.transform(ne_mem_phi));
+
+  if (btest == BoolTest::ne) {
+    {
+      PreserveJVMState pjvms(this);
+      int target_bci = iter().get_dest();
+      merge(target_bci);
+    }
+
+    record_for_igvn(eq_region);
+    set_control(_gvn.transform(eq_region));
+    set_i_o(_gvn.transform(eq_io_phi));
+    set_all_memory(_gvn.transform(eq_mem_phi));
   }
 }
 
 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
   // Don't want to speculate on uncommon traps when running with -Xcomp

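Note: do_acmp() implements the Valhalla substitutability semantics for acmp by building a ladder of cheap tests, each of which settles the comparison without the runtime call, feeding ne_region (and, for acmpne, eq_region): (1) pointer comparison; (2) a == null, never equal to a value; (3) the mark word of a does not carry markOopDesc::always_locked_pattern, so a is a plain reference and the pointer result stands; (4) b == null; (5) klass(a) != klass(b). Only when both operands are non-null values of the same class does the parser emit the CallStaticJavaNode to ValueBootstrapMethods::isSubstitutable(). A sketch of the mark-word test (step 3), with names as in the change above (illustrative, not part of the webrev):

    // Value objects carry the "always locked" bit pattern in their mark
    // word; a mismatch means 'a' is an ordinary reference.
    Node* mark = is_always_locked(not_null_a);
    Node* pat  = _gvn.MakeConX(markOopDesc::always_locked_pattern);
    Node* cmp  = _gvn.transform(new CmpXNode(mark, pat));
    Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
    IfNode* iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
    Node* not_value = _gvn.transform(new IfTrueNode(iff));   // plain reference
    set_control(_gvn.transform(new IfFalseNode(iff)));       // value object
    ne_region->init_req(2, not_value);
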
@@ -1665,12 +2138,11 @@
 // Adjust the JVM state to reflect the result of taking this path.
 // Basically, it means inspecting the CmpNode controlling this
 // branch, seeing how it constrains a tested value, and then
 // deciding if it's worth our while to encode this constraint
 // as graph nodes in the current abstract interpretation map.
-void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
-                                Block* path, Block* other_path) {
+void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) {
   if (!c->is_Cmp()) {
     maybe_add_predicate_after_if(path);
     return;
   }
 

@@ -1876,10 +2348,14 @@
       if (obj_type->speculative_type_not_null() != NULL) {
         ciKlass* k = obj_type->speculative_type();
         inc_sp(2);
         obj = maybe_cast_profiled_obj(obj, k);
         dec_sp(2);
+        if (obj->is_ValueType()) {
+          assert(obj->as_ValueType()->is_allocated(&_gvn), "must be allocated");
+          obj = obj->as_ValueType()->get_oop();
+        }
         // Make the CmpP use the casted obj
         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
         load_klass = load_klass->clone();
         load_klass->set_req(2, addp);
         load_klass = _gvn.transform(load_klass);

@@ -2723,37 +3199,40 @@
   handle_if_null:
     // If this is a backwards branch in the bytecodes, add Safepoint
     maybe_add_safepoint(iter().get_dest());
     a = null();
     b = pop();
-    if (!_gvn.type(b)->speculative_maybe_null() &&
-        !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
-      inc_sp(1);
-      Node* null_ctl = top();
-      b = null_check_oop(b, &null_ctl, true, true, true);
-      assert(null_ctl->is_top(), "no null control here");
-      dec_sp(1);
-    } else if (_gvn.type(b)->speculative_always_null() &&
-               !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
-      inc_sp(1);
-      b = null_assert(b);
-      dec_sp(1);
+    if (b->is_ValueType()) {
+      // 'b' is a value type and can never be null, so the null
+      // comparison folds to a constant (any value other than CC_EQ)
+      c = _gvn.makecon(TypeInt::CC_GT);
+    } else {
+      if (!_gvn.type(b)->speculative_maybe_null() &&
+          !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
+        inc_sp(1);
+        Node* null_ctl = top();
+        b = null_check_oop(b, &null_ctl, true, true, true);
+        assert(null_ctl->is_top(), "no null control here");
+        dec_sp(1);
+      } else if (_gvn.type(b)->speculative_always_null() &&
+                 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
+        inc_sp(1);
+        b = null_assert(b);
+        dec_sp(1);
+      }
+      c = _gvn.transform( new CmpPNode(b, a) );
     }
-    c = _gvn.transform( new CmpPNode(b, a) );
     do_ifnull(btest, c);
     break;
 
   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
   handle_if_acmp:
     // If this is a backwards branch in the bytecodes, add Safepoint
     maybe_add_safepoint(iter().get_dest());
     a = access_resolve(pop(), 0);
     b = access_resolve(pop(), 0);
-    c = _gvn.transform( new CmpPNode(b, a) );
-    c = optimize_cmp_with_klass(c);
-    do_if(btest, c);
+    do_acmp(btest, a, b);
     break;
 
   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;

@@ -2804,21 +3283,27 @@
     break;
   case Bytecodes::_instanceof:
     do_instanceof();
     break;
   case Bytecodes::_anewarray:
-    do_anewarray();
+    do_newarray();
     break;
   case Bytecodes::_newarray:
     do_newarray((BasicType)iter().get_index());
     break;
   case Bytecodes::_multianewarray:
     do_multianewarray();
     break;
   case Bytecodes::_new:
     do_new();
     break;
+  case Bytecodes::_defaultvalue:
+    do_defaultvalue();
+    break;
+  case Bytecodes::_withfield:
+    do_withfield();
+    break;
 
   case Bytecodes::_jsr:
   case Bytecodes::_jsr_w:
     do_jsr();
     break;
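
Note: in the if_null handler above, when the operand is a ValueTypeNode the null comparison is decided at parse time: value types are never null, so the code materializes a condition-code constant other than CC_EQ (here CC_GT) and lets the normal do_ifnull() machinery prune the dead branch. A sketch of why this folds, assuming standard PhaseGVN constant folding (illustrative, not part of the webrev):

    Node* c   = _gvn.makecon(TypeInt::CC_GT);  // compare result, never "equal"
    Node* bol = _gvn.transform(new BoolNode(c, BoolTest::eq));
    // Bool(ConI(CC_GT), eq) folds to intcon(0) during GVN, so the
    // branch testing "b == null" is statically untaken.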