< prev index next >

src/hotspot/share/opto/compile.cpp

Print this page
@@ -50,10 +50,11 @@
  #include "opto/connode.hpp"
  #include "opto/convertnode.hpp"
  #include "opto/divnode.hpp"
  #include "opto/escape.hpp"
  #include "opto/idealGraphPrinter.hpp"
+ #include "opto/inlinetypenode.hpp"
  #include "opto/loopnode.hpp"
  #include "opto/machnode.hpp"
  #include "opto/macro.hpp"
  #include "opto/matcher.hpp"
  #include "opto/mathexactnode.hpp"

@@ -387,10 +388,13 @@
      remove_skeleton_predicate_opaq(dead);
    }
    if (dead->for_post_loop_opts_igvn()) {
      remove_from_post_loop_opts_igvn(dead);
    }
+   if (dead->is_InlineTypeBase()) {
+     remove_inline_type(dead);
+   }
    if (dead->is_Call()) {
      remove_useless_late_inlines(                &_late_inlines, dead);
      remove_useless_late_inlines(         &_string_late_inlines, dead);
      remove_useless_late_inlines(         &_boxing_late_inlines, dead);
      remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);

@@ -398,11 +402,11 @@
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    bs->unregister_potential_barrier_node(dead);
  }
  
  // Disconnect all useless nodes by disconnecting those at the boundary.
- void Compile::remove_useless_nodes(Unique_Node_List &useful) {
+ void Compile::disconnect_useless_nodes(Unique_Node_List &useful, Unique_Node_List* worklist) {
    uint next = 0;
    while (next < useful.size()) {
      Node *n = useful.at(next++);
      if (n->is_SafePoint()) {
        // We're done with a parsing phase. Replaced nodes are not valid

@@ -421,19 +425,28 @@
          --j;
          --max;
        }
      }
      if (n->outcnt() == 1 && n->has_special_unique_user()) {
-       record_for_igvn(n->unique_out());
+       worklist->push(n->unique_out());
+     }
+     if (n->outcnt() == 0) {
+       worklist->push(n);
      }
    }
  
    remove_useless_nodes(_macro_nodes,        useful); // remove useless macro nodes
    remove_useless_nodes(_predicate_opaqs,    useful); // remove useless predicate opaque nodes
    remove_useless_nodes(_skeleton_predicate_opaqs, useful);
    remove_useless_nodes(_expensive_nodes,    useful); // remove useless expensive nodes
    remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
+   remove_useless_nodes(_inline_type_nodes,  useful); // remove useless inline type nodes
+ #ifdef ASSERT
+   if (_modified_nodes != NULL) {
+     _modified_nodes->remove_useless_nodes(useful.member_set());
+   }
+ #endif
    remove_useless_coarsened_locks(useful);            // remove useless coarsened locks nodes
  
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    bs->eliminate_useless_gc_barriers(useful, this);
    // clean up the late inline lists

@@ -576,10 +589,11 @@
                    _macro_nodes       (comp_arena(), 8, 0, NULL),
                    _predicate_opaqs   (comp_arena(), 8, 0, NULL),
                    _skeleton_predicate_opaqs (comp_arena(), 8, 0, NULL),
                    _expensive_nodes   (comp_arena(), 8, 0, NULL),
                    _for_post_loop_igvn(comp_arena(), 8, 0, NULL),
+                   _inline_type_nodes (comp_arena(), 8, 0, NULL),
                    _coarsened_locks   (comp_arena(), 8, 0, NULL),
                    _congraph(NULL),
                    NOT_PRODUCT(_printer(NULL) COMMA)
                    _dead_node_list(comp_arena()),
                    _dead_node_count(0),

@@ -680,21 +694,19 @@
      initial_gvn()->transform_no_reclaim(top());
  
      // Set up tf(), start(), and find a CallGenerator.
      CallGenerator* cg = NULL;
      if (is_osr_compilation()) {
-       const TypeTuple *domain = StartOSRNode::osr_domain();
-       const TypeTuple *range = TypeTuple::make_range(method()->signature());
-       init_tf(TypeFunc::make(domain, range));
-       StartNode* s = new StartOSRNode(root(), domain);
+       init_tf(TypeFunc::make(method(), /* is_osr_compilation = */ true));
+       StartNode* s = new StartOSRNode(root(), tf()->domain_sig());
        initial_gvn()->set_type_bottom(s);
        init_start(s);
        cg = CallGenerator::for_osr(method(), entry_bci());
      } else {
        // Normal case.
        init_tf(TypeFunc::make(method()));
-       StartNode* s = new StartNode(root(), tf()->domain());
+       StartNode* s = new StartNode(root(), tf()->domain_cc());
        initial_gvn()->set_type_bottom(s);
        init_start(s);
        if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
          // With java.lang.ref.reference.get() we must go through the
          // intrinsic - even when get() is the root

@@ -821,10 +833,14 @@
    }
  
    // Now that we know the size of all the monitors we can add a fixed slot
    // for the original deopt pc.
    int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
+   if (needs_stack_repair()) {
+     // One extra slot for the special stack increment value
+     next_slot += 2;
+   }
    set_fixed_slots(next_slot);
  
    // Compute when to use implicit null checks. Used by matching trap based
    // nodes and NullCheck optimization.
    set_allowed_deopt_reasons();

@@ -977,10 +993,14 @@
    Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
    set_decompile_count(0);
  
    set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
    _loop_opts_cnt = LoopOptsCount;
+   _has_flattened_accesses = false;
+   _flattened_accesses_share_alias = true;
+   _scalarize_in_safepoints = false;
+ 
    set_do_inlining(Inline);
    set_max_inline_size(MaxInlineSize);
    set_freq_inline_size(FreqInlineSize);
    set_do_scheduling(OptoScheduling);
  

@@ -1279,11 +1299,12 @@
    bool is_known_inst = tj->isa_oopptr() != NULL &&
                         tj->is_oopptr()->is_known_instance();
  
    // Process weird unsafe references.
    if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
-     assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");
+     bool default_value_load = EnableValhalla && tj->is_instptr()->klass() == ciEnv::current()->Class_klass();
+     assert(InlineUnsafeOps || default_value_load, "indeterminate pointers come only from unsafe ops");
      assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
      tj = TypeOopPtr::BOTTOM;
      ptr = tj->ptr();
      offset = tj->offset();
    }

@@ -1292,24 +1313,35 @@
    const TypeAryPtr *ta = tj->isa_aryptr();
    if (ta && ta->is_stable()) {
      // Erase stability property for alias analysis.
      tj = ta = ta->cast_to_stable(false);
    }
+   if (ta && ta->is_not_flat()) {
+     // Erase not flat property for alias analysis.
+     tj = ta = ta->cast_to_not_flat(false);
+   }
+   if (ta && ta->is_not_null_free()) {
+     // Erase not null free property for alias analysis.
+     tj = ta = ta->cast_to_not_null_free(false);
+   }
+ 
    if( ta && is_known_inst ) {
      if ( offset != Type::OffsetBot &&
           offset > arrayOopDesc::length_offset_in_bytes() ) {
        offset = Type::OffsetBot; // Flatten constant access into array body only
-       tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
+       tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, Type::Offset(offset), ta->field_offset(), ta->instance_id());
      }
    } else if( ta && _AliasLevel >= 2 ) {
      // For arrays indexed by constant indices, we flatten the alias
      // space to include all of the array body.  Only the header, klass
      // and array length can be accessed un-aliased.
+     // For flattened inline type array, each field has its own slice so
+     // we must include the field offset.
      if( offset != Type::OffsetBot ) {
        if( ta->const_oop() ) { // MethodData* or Method*
          offset = Type::OffsetBot;   // Flatten constant access into array body
-         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
+         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
        } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
          // range is OK as-is.
          tj = ta = TypeAryPtr::RANGE;
        } else if( offset == oopDesc::klass_offset_in_bytes() ) {
          tj = TypeInstPtr::KLASS; // all klass loads look alike

@@ -1319,39 +1351,44 @@
          tj = TypeInstPtr::MARK;
          ta = TypeAryPtr::RANGE; // generic ignored junk
          ptr = TypePtr::BotPTR;
        } else {                  // Random constant offset into array body
          offset = Type::OffsetBot;   // Flatten constant access into array body
-         tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
+         tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
        }
      }
      // Arrays of fixed size alias with arrays of unknown size.
      if (ta->size() != TypeInt::POS) {
        const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
-       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
+       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,Type::Offset(offset), ta->field_offset());
      }
      // Arrays of known objects become arrays of unknown objects.
      if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
        const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
-       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
+       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
      }
      if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
        const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
-       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
+       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
+     }
+     // Initially all flattened array accesses share a single slice
+     if (ta->is_flat() && ta->elem() != TypeInlineType::BOTTOM && _flattened_accesses_share_alias) {
+       const TypeAry *tary = TypeAry::make(TypeInlineType::BOTTOM, ta->size());
+       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), Type::Offset(Type::OffsetBot));
      }
      // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
      // cannot be distinguished by bytecode alone.
      if (ta->elem() == TypeInt::BOOL) {
        const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
        ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
-       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
+       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,Type::Offset(offset), ta->field_offset());
      }
      // During the 2nd round of IterGVN, NotNull castings are removed.
      // Make sure the Bottom and NotNull variants alias the same.
      // Also, make sure exact and non-exact variants alias the same.
      if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
-       tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
+       tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
      }
    }
  
    // Oop pointers need some flattening
    const TypeInstPtr *to = tj->isa_instptr();

@@ -1361,29 +1398,29 @@
        if (to->klass() != ciEnv::current()->Class_klass() ||
            offset < k->layout_helper_size_in_bytes()) {
          // No constant oop pointers (such as Strings); they alias with
          // unknown strings.
          assert(!is_known_inst, "not scalarizable allocation");
-         tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
+         tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
        }
      } else if( is_known_inst ) {
        tj = to; // Keep NotNull and klass_is_exact for instance type
      } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
        // During the 2nd round of IterGVN, NotNull castings are removed.
        // Make sure the Bottom and NotNull variants alias the same.
        // Also, make sure exact and non-exact variants alias the same.
-       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
+       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
      }
      if (to->speculative() != NULL) {
-       tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
+       tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),Type::Offset(to->offset()), to->klass()->flatten_array(), to->instance_id());
      }
      // Canonicalize the holder of this field
      if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
        // First handle header references such as a LoadKlassNode, even if the
        // object's klass is unloaded at compile time (4965979).
        if (!is_known_inst) { // Do it only for non-instance types
-         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
+         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, Type::Offset(offset));
        }
      } else if (offset < 0 || offset >= k->layout_helper_size_in_bytes()) {
        // Static fields are in the space above the normal instance
        // fields in the java.lang.Class instance.
        if (to->klass() != ciEnv::current()->Class_klass()) {

@@ -1394,13 +1431,13 @@
      } else {
        ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
        assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
        if (!k->equals(canonical_holder) || tj->offset() != offset) {
          if( is_known_inst ) {
-           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
+           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, Type::Offset(offset), canonical_holder->flatten_array(), to->instance_id());
          } else {
-           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
+           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, Type::Offset(offset));
          }
        }
      }
    }
  

@@ -1413,19 +1450,19 @@
      // use NotNull as the PTR.
      if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
  
        tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
                                     TypeInstKlassPtr::OBJECT->klass(),
-                                    offset);
+                                    Type::Offset(offset));
      }
  
      ciKlass* klass = tk->klass();
-     if( klass->is_obj_array_klass() ) {
+     if (klass != NULL && klass->is_obj_array_klass()) {
        ciKlass* k = TypeAryPtr::OOPS->klass();
        if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
          k = TypeInstPtr::BOTTOM->klass();
-       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
+       tj = tk = TypeKlassPtr::make(TypePtr::NotNull, k, Type::Offset(offset));
      }
  
      // Check for precise loads from the primary supertype array and force them
      // to the supertype cache alias index.  Check for generic array loads from
      // the primary supertype array and also force them to the supertype cache

@@ -1437,11 +1474,11 @@
      if (offset == Type::OffsetBot ||
          (offset >= primary_supers_offset &&
           offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
          offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
        offset = in_bytes(Klass::secondary_super_cache_offset());
-       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
+       tj = tk = TypeKlassPtr::make(TypePtr::NotNull, tk->klass(), Type::Offset(offset));
      }
    }
  
    // Flatten all Raw pointers together.
    if (tj->base() == Type::RawPtr)

@@ -1578,17 +1615,20 @@
    for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
  }
  
  
  //--------------------------------find_alias_type------------------------------
- Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
+ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field, bool uncached) {
    if (_AliasLevel == 0)
      return alias_type(AliasIdxBot);
  
-   AliasCacheEntry* ace = probe_alias_cache(adr_type);
-   if (ace->_adr_type == adr_type) {
-     return alias_type(ace->_index);
+   AliasCacheEntry* ace = NULL;
+   if (!uncached) {
+     ace = probe_alias_cache(adr_type);
+     if (ace->_adr_type == adr_type) {
+       return alias_type(ace->_index);
+     }
    }
  
    // Handle special cases.
    if (adr_type == NULL)             return alias_type(AliasIdxTop);
    if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);

@@ -1634,18 +1674,27 @@
      if (flat->isa_instptr()) {
        if (flat->offset() == java_lang_Class::klass_offset()
            && flat->is_instptr()->klass() == env()->Class_klass())
          alias_type(idx)->set_rewritable(false);
      }
+     ciField* field = NULL;
      if (flat->isa_aryptr()) {
  #ifdef ASSERT
        const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
        // (T_BYTE has the weakest alignment and size restrictions...)
        assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
  #endif
+       const Type* elemtype = flat->is_aryptr()->elem();
        if (flat->offset() == TypePtr::OffsetBot) {
-         alias_type(idx)->set_element(flat->is_aryptr()->elem());
+         alias_type(idx)->set_element(elemtype);
+       }
+       int field_offset = flat->is_aryptr()->field_offset().get();
+       if (elemtype->isa_inlinetype() &&
+           field_offset != Type::OffsetBot) {
+         ciInlineKlass* vk = elemtype->inline_klass();
+         field_offset += vk->first_field_offset();
+         field = vk->get_field_by_offset(field_offset, false);
        }
      }
      if (flat->isa_klassptr()) {
        if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
          alias_type(idx)->set_rewritable(false);

@@ -1653,52 +1702,66 @@
          alias_type(idx)->set_rewritable(false);
        if (flat->offset() == in_bytes(Klass::access_flags_offset()))
          alias_type(idx)->set_rewritable(false);
        if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
          alias_type(idx)->set_rewritable(false);
+       if (flat->offset() == in_bytes(Klass::layout_helper_offset()))
+         alias_type(idx)->set_rewritable(false);
        if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
          alias_type(idx)->set_rewritable(false);
      }
      // %%% (We would like to finalize JavaThread::threadObj_offset(),
      // but the base pointer type is not distinctive enough to identify
      // references into JavaThread.)
  
      // Check for final fields.
      const TypeInstPtr* tinst = flat->isa_instptr();
      if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
-       ciField* field;
        if (tinst->const_oop() != NULL &&
            tinst->klass() == ciEnv::current()->Class_klass() &&
            tinst->offset() >= (tinst->klass()->as_instance_klass()->layout_helper_size_in_bytes())) {
          // static field
          ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
          field = k->get_field_by_offset(tinst->offset(), true);
+       } else if (tinst->klass()->is_inlinetype()) {
+         // Inline type field
+         ciInlineKlass* vk = tinst->inline_klass();
+         field = vk->get_field_by_offset(tinst->offset(), false);
        } else {
-         ciInstanceKlass *k = tinst->klass()->as_instance_klass();
+         ciInstanceKlass* k = tinst->klass()->as_instance_klass();
          field = k->get_field_by_offset(tinst->offset(), false);
        }
-       assert(field == NULL ||
-              original_field == NULL ||
-              (field->holder() == original_field->holder() &&
-               field->offset() == original_field->offset() &&
-               field->is_static() == original_field->is_static()), "wrong field?");
-       // Set field() and is_rewritable() attributes.
-       if (field != NULL)  alias_type(idx)->set_field(field);
+     }
+     assert(field == NULL ||
+            original_field == NULL ||
+            (field->holder() == original_field->holder() &&
+             field->offset() == original_field->offset() &&
+             field->is_static() == original_field->is_static()), "wrong field?");
+     // Set field() and is_rewritable() attributes.
+     if (field != NULL) {
+       alias_type(idx)->set_field(field);
+       if (flat->isa_aryptr()) {
+         // Fields of flat arrays are rewritable although they are declared final
+         assert(flat->is_aryptr()->is_flat(), "must be a flat array");
+         alias_type(idx)->set_rewritable(true);
+       }
      }
    }
  
    // Fill the cache for next time.
-   ace->_adr_type = adr_type;
-   ace->_index    = idx;
-   assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");
+   if (!uncached) {
+     ace->_adr_type = adr_type;
+     ace->_index    = idx;
+     assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");
  
-   // Might as well try to fill the cache for the flattened version, too.
-   AliasCacheEntry* face = probe_alias_cache(flat);
-   if (face->_adr_type == NULL) {
-     face->_adr_type = flat;
-     face->_index    = idx;
-     assert(alias_type(flat) == alias_type(idx), "flat type must work too");
+     // Might as well try to fill the cache for the flattened version, too.
+     AliasCacheEntry* face = probe_alias_cache(flat);
+     if (face->_adr_type == NULL) {
+       face->_adr_type = flat;
+       face->_index    = idx;
+       assert(alias_type(flat) == alias_type(idx), "flat type must work too");
+     }
    }
  
    return alias_type(idx);
  }
  

@@ -1811,10 +1874,416 @@
        C->clear_major_progress(); // ensure that major progress is now clear
      }
    }
  }
  
+ // Record an inline type node in _inline_type_nodes so that it can be
+ // processed later by process_inline_types() (scalarized in safepoint
+ // debug info and, if requested, removed from the graph).
+ void Compile::add_inline_type(Node* n) {
+   assert(n->is_InlineTypeBase(), "unexpected node");
+   _inline_type_nodes.push(n);
+ }
+ 
+ // Remove a (typically dead) inline type node from _inline_type_nodes.
+ // The contains() check makes this a no-op for nodes that were never
+ // registered, so it is safe to call unconditionally on dead nodes.
+ void Compile::remove_inline_type(Node* n) {
+   assert(n->is_InlineTypeBase(), "unexpected node");
+   if (_inline_type_nodes.contains(n)) {
+     _inline_type_nodes.remove(n);
+   }
+ }
+ 
+ // Does the return value keep otherwise useless inline type allocations alive?
+ // Walks the def chain feeding the Return value: through InlineTypePtr oop
+ // inputs, Phi inputs and CheckCastPP casts. Bails out (returns false) as soon
+ // as any node on the chain has more than one use; otherwise returns true iff
+ // at least one allocation was found, i.e. the allocation(s) are only kept
+ // alive by the return value itself.
+ static bool return_val_keeps_allocations_alive(Node* ret_val) {
+   ResourceMark rm;
+   Unique_Node_List wq;
+   wq.push(ret_val);
+   bool some_allocations = false;
+   for (uint i = 0; i < wq.size(); i++) {
+     Node* n = wq.at(i);
+     assert(!n->is_InlineType(), "chain of inline type nodes");
+     if (n->outcnt() > 1) {
+       // Some other use for the allocation
+       return false;
+     } else if (n->is_InlineTypePtr()) {
+       // Follow the oop input of the InlineTypePtr
+       wq.push(n->in(1));
+     } else if (n->is_Phi()) {
+       // Every Phi input must satisfy the single-use condition as well
+       for (uint j = 1; j < n->req(); j++) {
+         wq.push(n->in(j));
+       }
+     } else if (n->is_CheckCastPP() &&
+                n->in(1)->is_Proj() &&
+                n->in(1)->in(0)->is_Allocate()) {
+       // Found an allocation that is only reachable through this chain
+       some_allocations = true;
+     } else if (n->is_CheckCastPP()) {
+       wq.push(n->in(1));
+     }
+   }
+   return some_allocations;
+ }
+ 
+ // Post-parsing processing of the inline type nodes collected in
+ // _inline_type_nodes: make sure the return value does not keep an otherwise
+ // unused allocation alive, scalarize inline types in safepoint debug info
+ // and, if 'remove' is true, remove the inline type nodes from the graph.
+ void Compile::process_inline_types(PhaseIterGVN &igvn, bool remove) {
+   // Make sure that the return value does not keep an otherwise unused allocation alive
+   if (tf()->returns_inline_type_as_fields()) {
+     Node* ret = NULL;
+     for (uint i = 1; i < root()->req(); i++) {
+       Node* in = root()->in(i);
+       if (in->Opcode() == Op_Return) {
+         assert(ret == NULL, "only one return");
+         ret = in;
+       }
+     }
+     if (ret != NULL) {
+       Node* ret_val = ret->in(TypeFunc::Parms);
+       if (igvn.type(ret_val)->isa_oopptr() &&
+           return_val_keeps_allocations_alive(ret_val)) {
+         // Replace the returned oop by a tagged klass pointer so the
+         // allocation chain becomes dead and can be removed below.
+         igvn.replace_input_of(ret, TypeFunc::Parms, InlineTypeNode::tagged_klass(igvn.type(ret_val)->inline_klass(), igvn));
+         assert(ret_val->outcnt() == 0, "should be dead now");
+         igvn.remove_dead_node(ret_val);
+       }
+     }
+   }
+   if (_inline_type_nodes.length() == 0) {
+     return;
+   }
+   // Scalarize inline types in safepoint debug info.
+   // Delay this until all inlining is over to avoid getting inconsistent debug info.
+   set_scalarize_in_safepoints(true);
+   for (int i = _inline_type_nodes.length()-1; i >= 0; i--) {
+     _inline_type_nodes.at(i)->as_InlineTypeBase()->make_scalar_in_safepoints(&igvn);
+   }
+   if (remove) {
+     // Remove inline type nodes
+     while (_inline_type_nodes.length() > 0) {
+       InlineTypeBaseNode* vt = _inline_type_nodes.pop()->as_InlineTypeBase();
+       if (vt->outcnt() == 0) {
+         igvn.remove_dead_node(vt);
+       } else if (vt->is_InlineTypePtr()) {
+         // Pointer flavor: remaining users can use the buffer oop directly
+         igvn.replace_node(vt, vt->get_oop());
+       } else {
+         // Check if any users are blackholes. If so, rewrite them to use either the
+         // allocated buffer, or individual components, instead of the inline type node
+         // that goes away.
+         for (DUIterator i = vt->outs(); vt->has_out(i); i++) {
+           if (vt->out(i)->is_Blackhole()) {
+             BlackholeNode* bh = vt->out(i)->as_Blackhole();
+ 
+             // Unlink the old input
+             int idx = bh->find_edge(vt);
+             assert(idx != -1, "The edge should be there");
+             bh->del_req(idx);
+             --i;
+ 
+             if (vt->is_allocated(&igvn)) {
+               // Already has the allocated instance, blackhole that
+               bh->add_req(vt->get_oop());
+             } else {
+               // Not allocated yet, blackhole the components
+               for (uint c = 0; c < vt->field_count(); c++) {
+                 bh->add_req(vt->field_value(c));
+               }
+             }
+ 
+             // Node modified, record for IGVN
+             igvn.record_for_igvn(bh);
+           }
+         }
+ 
+ #ifdef ASSERT
+         // Any remaining users must themselves be inline type nodes
+         for (DUIterator_Fast imax, i = vt->fast_outs(imax); i < imax; i++) {
+           assert(vt->fast_out(i)->is_InlineTypeBase(), "Unexpected inline type user");
+         }
+ #endif
+         igvn.replace_node(vt, igvn.C->top());
+       }
+     }
+   }
+   // Clean up the graph after the rewrites above
+   igvn.optimize();
+ }
+ 
+ void Compile::adjust_flattened_array_access_aliases(PhaseIterGVN& igvn) {
+   if (!_has_flattened_accesses) {
+     return;
+   }
+   // Initially, all flattened array accesses share the same slice to
+   // keep dependencies with Object[] array accesses (that could be
+   // to a flattened array) correct. We're done with parsing so we
+   // now know all flattened array accesses in this compile
+   // unit. Let's move flattened array accesses to their own slice,
+   // one per element field. This should help memory access
+   // optimizations.
+   ResourceMark rm;
+   Unique_Node_List wq;
+   wq.push(root());
+ 
+   Node_List mergememnodes;
+   Node_List memnodes;
+ 
+   // Alias index currently shared by all flattened memory accesses
+   int index = get_alias_index(TypeAryPtr::INLINES);
+ 
+   // Find MergeMem nodes and flattened array accesses
+   for (uint i = 0; i < wq.size(); i++) {
+     Node* n = wq.at(i);
+     if (n->is_Mem()) {
+       const TypePtr* adr_type = NULL;
+       if (n->Opcode() == Op_StoreCM) {
+         adr_type = get_adr_type(get_alias_index(n->in(MemNode::OopStore)->adr_type()));
+       } else {
+         adr_type = get_adr_type(get_alias_index(n->adr_type()));
+       }
+       if (adr_type == TypeAryPtr::INLINES) {
+         memnodes.push(n);
+       }
+     } else if (n->is_MergeMem()) {
+       MergeMemNode* mm = n->as_MergeMem();
+       if (mm->memory_at(index) != mm->base_memory()) {
+         mergememnodes.push(n);
+       }
+     }
+     for (uint j = 0; j < n->req(); j++) {
+       Node* m = n->in(j);
+       if (m != NULL) {
+         wq.push(m);
+       }
+     }
+   }
+ 
+   if (memnodes.size() > 0) {
+     _flattened_accesses_share_alias = false;
+ 
+     // We are going to change the slice for the flattened array
+     // accesses so we need to clear the cache entries that refer to
+     // them.
+     for (uint i = 0; i < AliasCacheSize; i++) {
+       AliasCacheEntry* ace = &_alias_cache[i];
+       if (ace->_adr_type != NULL &&
+           ace->_adr_type->isa_aryptr() &&
+           ace->_adr_type->is_aryptr()->is_flat()) {
+         ace->_adr_type = NULL;
+         ace->_index = (i != 0) ? 0 : AliasIdxTop; // Make sure the NULL adr_type resolves to AliasIdxTop
+       }
+     }
+ 
+     // Find what aliases we are going to add
+     int start_alias = num_alias_types()-1;
+     int stop_alias = 0;
+ 
+     for (uint i = 0; i < memnodes.size(); i++) {
+       Node* m = memnodes.at(i);
+       const TypePtr* adr_type = NULL;
+       if (m->Opcode() == Op_StoreCM) {
+         adr_type = m->in(MemNode::OopStore)->adr_type();
+         if (adr_type != TypeAryPtr::INLINES) {
+           // store was optimized out and we lost track of the adr_type
+           Node* clone = new StoreCMNode(m->in(MemNode::Control), m->in(MemNode::Memory), m->in(MemNode::Address),
+                                         m->adr_type(), m->in(MemNode::ValueIn), m->in(MemNode::OopStore),
+                                         get_alias_index(adr_type));
+           igvn.register_new_node_with_optimizer(clone);
+           igvn.replace_node(m, clone);
+         }
+       } else {
+         adr_type = m->adr_type();
+ #ifdef ASSERT
+         m->as_Mem()->set_adr_type(adr_type);
+ #endif
+       }
+       int idx = get_alias_index(adr_type);
+       start_alias = MIN2(start_alias, idx);
+       stop_alias = MAX2(stop_alias, idx);
+     }
+ 
+     assert(stop_alias >= start_alias, "should have expanded aliases");
+ 
+     Node_Stack stack(0);
+ #ifdef ASSERT
+     VectorSet seen(Thread::current()->resource_area());
+ #endif
+     // Now let's fix the memory graph so each flattened array access
+     // is moved to the right slice. Start from the MergeMem nodes.
+     uint last = unique();
+     for (uint i = 0; i < mergememnodes.size(); i++) {
+       MergeMemNode* current = mergememnodes.at(i)->as_MergeMem();
+       Node* n = current->memory_at(index);
+       MergeMemNode* mm = NULL;
+       do {
+         // Follow memory edges through memory accesses, phis and
+         // narrow membars and push nodes on the stack. Once we hit
+         // bottom memory, we pop element off the stack one at a
+         // time, in reverse order, and move them to the right slice
+         // by changing their memory edges.
+         if ((n->is_Phi() && n->adr_type() != TypePtr::BOTTOM) || n->is_Mem() || n->adr_type() == TypeAryPtr::INLINES) {
+           assert(!seen.test_set(n->_idx), "");
+           // Uses (a load for instance) will need to be moved to the
+           // right slice as well and will get a new memory state
+           // that we don't know yet. The use could also be the
+           // backedge of a loop. We put a place holder node between
+           // the memory node and its uses. We replace that place
+           // holder with the correct memory state once we know it,
+           // i.e. when nodes are popped off the stack. Using the
+           // place holder make the logic work in the presence of
+           // loops.
+           if (n->outcnt() > 1) {
+             Node* place_holder = NULL;
+             assert(!n->has_out_with(Op_Node), "");
+             for (DUIterator k = n->outs(); n->has_out(k); k++) {
+               Node* u = n->out(k);
+               if (u != current && u->_idx < last) {
+                 bool success = false;
+                 for (uint l = 0; l < u->req(); l++) {
+                   if (!stack.is_empty() && u == stack.node() && l == stack.index()) {
+                     continue;
+                   }
+                   Node* in = u->in(l);
+                   if (in == n) {
+                     if (place_holder == NULL) {
+                       place_holder = new Node(1);
+                       place_holder->init_req(0, n);
+                     }
+                     igvn.replace_input_of(u, l, place_holder);
+                     success = true;
+                   }
+                 }
+                 if (success) {
+                   --k;
+                 }
+               }
+             }
+           }
+           if (n->is_Phi()) {
+             stack.push(n, 1);
+             n = n->in(1);
+           } else if (n->is_Mem()) {
+             stack.push(n, n->req());
+             n = n->in(MemNode::Memory);
+           } else {
+             assert(n->is_Proj() && n->in(0)->Opcode() == Op_MemBarCPUOrder, "");
+             stack.push(n, n->req());
+             n = n->in(0)->in(TypeFunc::Memory);
+           }
+         } else {
+           assert(n->adr_type() == TypePtr::BOTTOM || (n->Opcode() == Op_Node && n->_idx >= last) || (n->is_Proj() && n->in(0)->is_Initialize()), "");
+           // Build a new MergeMem node to carry the new memory state
+           // as we build it. IGVN should fold extraneous MergeMem
+           // nodes.
+           mm = MergeMemNode::make(n);
+           igvn.register_new_node_with_optimizer(mm);
+           while (stack.size() > 0) {
+             Node* m = stack.node();
+             uint idx = stack.index();
+             if (m->is_Mem()) {
+               // Move memory node to its new slice
+               const TypePtr* adr_type = m->adr_type();
+               int alias = get_alias_index(adr_type);
+               Node* prev = mm->memory_at(alias);
+               igvn.replace_input_of(m, MemNode::Memory, prev);
+               mm->set_memory_at(alias, m);
+             } else if (m->is_Phi()) {
+               // We need as many new phis as there are new aliases
+               igvn.replace_input_of(m, idx, mm);
+               if (idx == m->req()-1) {
+                 Node* r = m->in(0);
+                 for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
+                   const Type* adr_type = get_adr_type(j);
+                   if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat() || j == (uint)index) {
+                     continue;
+                   }
+                   Node* phi = new PhiNode(r, Type::MEMORY, get_adr_type(j));
+                   igvn.register_new_node_with_optimizer(phi);
+                   for (uint k = 1; k < m->req(); k++) {
+                     phi->init_req(k, m->in(k)->as_MergeMem()->memory_at(j));
+                   }
+                   mm->set_memory_at(j, phi);
+                 }
+                 Node* base_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
+                 igvn.register_new_node_with_optimizer(base_phi);
+                 for (uint k = 1; k < m->req(); k++) {
+                   base_phi->init_req(k, m->in(k)->as_MergeMem()->base_memory());
+                 }
+                 mm->set_base_memory(base_phi);
+               }
+             } else {
+               // This is a MemBarCPUOrder node from
+               // Parse::array_load()/Parse::array_store(), in the
+               // branch that handles flattened arrays hidden under
+               // an Object[] array. We also need one new membar per
+               // new alias to keep the unknown access that the
+               // membars protect properly ordered with accesses to
+               // known flattened array.
+               assert(m->is_Proj(), "projection expected");
+               Node* ctrl = m->in(0)->in(TypeFunc::Control);
+               igvn.replace_input_of(m->in(0), TypeFunc::Control, top());
+               for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
+                 const Type* adr_type = get_adr_type(j);
+                 if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat() || j == (uint)index) {
+                   continue;
+                 }
+                 MemBarNode* mb = new MemBarCPUOrderNode(this, j, NULL);
+                 igvn.register_new_node_with_optimizer(mb);
+                 Node* mem = mm->memory_at(j);
+                 mb->init_req(TypeFunc::Control, ctrl);
+                 mb->init_req(TypeFunc::Memory, mem);
+                 ctrl = new ProjNode(mb, TypeFunc::Control);
+                 igvn.register_new_node_with_optimizer(ctrl);
+                 mem = new ProjNode(mb, TypeFunc::Memory);
+                 igvn.register_new_node_with_optimizer(mem);
+                 mm->set_memory_at(j, mem);
+               }
+               igvn.replace_node(m->in(0)->as_Multi()->proj_out(TypeFunc::Control), ctrl);
+             }
+             if (idx < m->req()-1) {
+               idx += 1;
+               stack.set_index(idx);
+               n = m->in(idx);
+               break;
+             }
+             // Take care of place holder nodes
+             if (m->has_out_with(Op_Node)) {
+               Node* place_holder = m->find_out_with(Op_Node);
+               if (place_holder != NULL) {
+                 Node* mm_clone = mm->clone();
+                 igvn.register_new_node_with_optimizer(mm_clone);
+                 Node* hook = new Node(1);
+                 hook->init_req(0, mm);
+                 igvn.replace_node(place_holder, mm_clone);
+                 hook->destruct(&igvn);
+               }
+               assert(!m->has_out_with(Op_Node), "place holder should be gone now");
+             }
+             stack.pop();
+           }
+         }
+       } while(stack.size() > 0);
+       // Fix the memory state at the MergeMem we started from
+       igvn.rehash_node_delayed(current);
+       for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
+         const Type* adr_type = get_adr_type(j);
+         if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat()) {
+           continue;
+         }
+         current->set_memory_at(j, mm);
+       }
+       current->set_memory_at(index, current->base_memory());
+     }
+     igvn.optimize();
+   }
+   print_method(PHASE_SPLIT_INLINES_ARRAY, 2);
+ #ifdef ASSERT
+   if (!_flattened_accesses_share_alias) {
+     wq.clear();
+     wq.push(root());
+     for (uint i = 0; i < wq.size(); i++) {
+       Node* n = wq.at(i);
+       assert(n->adr_type() != TypeAryPtr::INLINES, "should have been removed from the graph");
+       for (uint j = 0; j < n->req(); j++) {
+         Node* m = n->in(j);
+         if (m != NULL) {
+           wq.push(m);
+         }
+       }
+     }
+   }
+ #endif
+ }
+ 
+ 
  // StringOpts and late inlining of string methods
  void Compile::inline_string_calls(bool parse_time) {
    {
      // remove useless nodes to make the usage analysis simpler
      ResourceMark rm;

@@ -2000,11 +2469,14 @@
    // "inlining_incrementally() == false" is used to signal that no inlining is allowed
    // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
    // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == NULL"
    // as if "inlining_incrementally() == true" were set.
    assert(inlining_incrementally() == false, "not allowed");
-   assert(_modified_nodes == NULL, "not allowed");
+ #ifdef ASSERT
+   Unique_Node_List* modified_nodes = _modified_nodes;
+   _modified_nodes = NULL;
+ #endif
    assert(_late_inlines.length() > 0, "sanity");
  
    while (_late_inlines.length() > 0) {
      for_igvn()->clear();
      initial_gvn()->replace_with(&igvn);

@@ -2014,10 +2486,11 @@
      }
      if (failing())  return;
  
      inline_incrementally_cleanup(igvn);
    }
+   DEBUG_ONLY( _modified_nodes = modified_nodes; )
  }
  
  bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
    if (_loop_opts_cnt > 0) {
      debug_only( int cnt = 0; );

@@ -2156,10 +2629,15 @@
  
    // Now that all inlining is over and no PhaseRemoveUseless will run, cut edge from root to loop
    // safepoints
    remove_root_to_sfpts_edges(igvn);
  
+   // Process inline type nodes now that all inlining is over
+   process_inline_types(igvn);
+ 
+   adjust_flattened_array_access_aliases(igvn);
+ 
    // Perform escape analysis
    if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
      if (has_loops()) {
        // Cleanup graph (remove dead nodes).
        TracePhase tp("idealLoop", &timers[_t_idealLoop]);

@@ -2259,20 +2737,32 @@
  
  #ifdef ASSERT
    bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
  #endif
  
+   assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
+ 
+   if (_late_inlines.length() > 0) {
+     // More opportunities to optimize virtual and MH calls.
+     // Though it's maybe too late to perform inlining, strength-reducing them to direct calls is still an option.
+     process_late_inline_calls_no_inline(igvn);
+   }
+ 
    {
      TracePhase tp("macroExpand", &timers[_t_macroExpand]);
      PhaseMacroExpand  mex(igvn);
      if (mex.expand_macro_nodes()) {
        assert(failing(), "must bail out w/ explicit message");
        return;
      }
      print_method(PHASE_MACRO_EXPANSION, 2);
    }
  
+   // Process inline type nodes again and remove them. From here
+   // on we don't need to keep track of field values anymore.
+   process_inline_types(igvn, /* remove= */ true);
+ 
    {
      TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
      if (bs->expand_barriers(this, igvn)) {
        assert(failing(), "must bail out w/ explicit message");
        return;

@@ -2286,18 +2776,11 @@
    }
  
    DEBUG_ONLY( _modified_nodes = NULL; )
  
    assert(igvn._worklist.size() == 0, "not empty");
- 
-   assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
- 
-   if (_late_inlines.length() > 0) {
-     // More opportunities to optimize virtual and MH calls.
-     // Though it's maybe too late to perform inlining, strength-reducing them to direct calls is still an option.
-     process_late_inline_calls_no_inline(igvn);
-   }
+   assert(_late_inlines.length() == 0, "missed optimization opportunity");
   } // (End scope of igvn; run destructor if necessary for asserts.)
  
   check_no_dead_use();
  
   process_print_inlining();

@@ -2867,10 +3350,11 @@
        mem = prev->in(MemNode::Memory);
      }
    }
  }
  
+ 
  //------------------------------final_graph_reshaping_impl----------------------
  // Implement items 1-5 from final_graph_reshaping below.
  void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
  
    if ( n->outcnt() == 0 ) return; // dead node

@@ -3021,11 +3505,26 @@
    case Op_StoreCM:
      {
        // Convert OopStore dependence into precedence edge
        Node* prec = n->in(MemNode::OopStore);
        n->del_req(MemNode::OopStore);
-       n->add_prec(prec);
+       if (prec->is_MergeMem()) {
+         MergeMemNode* mm = prec->as_MergeMem();
+         Node* base = mm->base_memory();
+         for (int i = AliasIdxRaw + 1; i < num_alias_types(); i++) {
+           const Type* adr_type = get_adr_type(i);
+           if (adr_type->isa_aryptr() && adr_type->is_aryptr()->is_flat()) {
+             Node* m = mm->memory_at(i);
+             n->add_prec(m);
+           }
+         }
+         if (mm->outcnt() == 0) {
+           mm->disconnect_inputs(this);
+         }
+       } else {
+         n->add_prec(prec);
+       }
        eliminate_redundant_card_marks(n);
      }
  
      // fall through
  

@@ -3593,10 +4092,18 @@
        Node* cmp = new CmpLNode(andl, n->in(2));
        n->subsume_by(cmp, this);
      }
      break;
    }
+ #ifdef ASSERT
+   case Op_InlineTypePtr:
+   case Op_InlineType: {
+     n->dump(-1);
+     assert(false, "inline type node was not removed");
+     break;
+   }
+ #endif
    default:
      assert(!n->is_Call(), "");
      assert(!n->is_Mem(), "");
      assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
      break;

@@ -3940,20 +4447,20 @@
    if (holder->is_being_initialized()) {
      if (accessing_method->holder() == holder) {
        // Access inside a class. The barrier can be elided when access happens in <clinit>,
        // <init>, or a static method. In all those cases, there was an initialization
        // barrier on the holder klass passed.
-       if (accessing_method->is_static_initializer() ||
-           accessing_method->is_object_initializer() ||
+       if (accessing_method->is_class_initializer() ||
+           accessing_method->is_object_constructor() ||
            accessing_method->is_static()) {
          return false;
        }
      } else if (accessing_method->holder()->is_subclass_of(holder)) {
        // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
        // In case of <init> or a static method, the barrier is on the subclass is not enough:
        // child class can become fully initialized while its parent class is still being initialized.
-       if (accessing_method->is_static_initializer()) {
+       if (accessing_method->is_class_initializer()) {
          return false;
        }
      }
      ciMethod* root = method(); // the root method of compilation
      if (root != accessing_method) {

@@ -4070,11 +4577,11 @@
  // (0) superklass is java.lang.Object (can occur in reflective code)
  // (1) subklass is already limited to a subtype of superklass => always ok
  // (2) subklass does not overlap with superklass => always fail
  // (3) superklass has NO subtypes and we can check with a simple compare.
  int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
-   if (StressReflectiveCode) {
+   if (StressReflectiveCode || superk == NULL || subk == NULL) {
      return SSC_full_test;       // Let caller generate the general case.
    }
  
    if (superk == env()->Object_klass()) {
      return SSC_always_true;     // (0) this test cannot fail

@@ -4098,10 +4605,17 @@
          !superk->is_subtype_of(subk)) {
        return SSC_always_false;  // (2) true path dead; no dynamic test needed
      }
    }
  
+   // Do not fold the subtype check to an array klass pointer comparison for [V? arrays.
+   // [QMyValue is a subtype of [LMyValue but the klass for [QMyValue is not equal to
+   // the klass for [LMyValue. Perform a full test.
+   if (superk->is_obj_array_klass() && !superk->as_array_klass()->is_elem_null_free() &&
+       superk->as_array_klass()->element_klass()->is_inlinetype()) {
+     return SSC_full_test;
+   }
    // If casting to an instance klass, it must have no subtypes
    if (superk->is_interface()) {
      // Cannot trust interfaces yet.
      // %%% S.B. superk->nof_implementors() == 1
    } else if (superelem->is_instance_klass()) {

@@ -4629,10 +5143,31 @@
      igvn.check_no_speculative_types();
  #endif
    }
  }
  
+ // Choose the cheapest correct implementation of an acmp (reference equality,
+ // JVMS if_acmpeq/if_acmpne) under Valhalla, based on the static types of the
+ // two operands:
+ //  - returns a legacy pointer compare (CmpP) when substitutability semantics
+ //    cannot apply (Valhalla disabled, a non-instance operand, a null constant
+ //    operand, or an operand that can never be an inline type);
+ //  - returns a null-check compare when one operand is statically known to be
+ //    an inline type (two inline-type oops are never pointer-identical here,
+ //    so the comparison can only succeed if both operands are NULL);
+ //  - returns NULL to tell the caller to emit the full "new acmp" sequence.
+ // phase: current GVN phase, used to type the operands and register new nodes.
+ // a, b:  the two oop operands of the comparison.
+ Node* Compile::optimize_acmp(PhaseGVN* phase, Node* a, Node* b) {
+   const TypeInstPtr* ta = phase->type(a)->isa_instptr();
+   const TypeInstPtr* tb = phase->type(b)->isa_instptr();
+   // NOTE(review): is_zero_type() here presumably identifies a statically-null
+   // operand, for which the legacy compare is already exact — confirm.
+   if (!EnableValhalla || ta == NULL || tb == NULL ||
+       ta->is_zero_type() || tb->is_zero_type() ||
+       !ta->can_be_inline_type() || !tb->can_be_inline_type()) {
+     // Use old acmp if one operand is null or not an inline type
+     return new CmpPNode(a, b);
+   } else if (ta->is_inlinetypeptr() || tb->is_inlinetypeptr()) {
+     // We know that one operand is an inline type. Therefore,
+     // new acmp will only return true if both operands are NULL.
+     // Check if both operands are null by or'ing the oops.
+     // CastP2X converts each oop to a machine word; OrX of the two words is
+     // zero iff both oops are NULL, which CmpX tests against MakeConX(0).
+     a = phase->transform(new CastP2XNode(NULL, a));
+     b = phase->transform(new CastP2XNode(NULL, b));
+     a = phase->transform(new OrXNode(a, b));
+     return new CmpXNode(a, phase->MakeConX(0));
+   }
+   // Use new acmp
+   return NULL;
+ }
+ 
  // Auxiliary methods to support randomized stressing/fuzzing.
  
  int Compile::random() {
    _stress_seed = os::next_random(_stress_seed);
    return static_cast<int>(_stress_seed);
< prev index next >