
src/hotspot/share/opto/compile.cpp

@@ -65,10 +65,11 @@
 #include "opto/phaseX.hpp"
 #include "opto/rootnode.hpp"
 #include "opto/runtime.hpp"
 #include "opto/stringopts.hpp"
 #include "opto/type.hpp"
+#include "opto/valuetypenode.hpp"
 #include "opto/vectornode.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubRoutines.hpp"

@@ -415,10 +416,14 @@
     Node* opaq = opaque4_node(i);
     if (!useful.member(opaq)) {
       remove_opaque4_node(opaq);
     }
   }
+  // Remove useless value type nodes
+  if (_value_type_nodes != NULL) {
+    _value_type_nodes->remove_useless_nodes(useful.member_set());
+  }
   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
   bs->eliminate_useless_gc_barriers(useful, this);
   // clean up the late inline lists
   remove_useless_late_inlines(&_string_late_inlines, useful);
   remove_useless_late_inlines(&_boxing_late_inlines, useful);

@@ -463,11 +468,10 @@
   assert(compile == Compile::current(), "sanity");
 
   compile->set_type_dict(NULL);
   compile->set_clone_map(new Dict(cmpkey, hashkey, _compile->comp_arena()));
   compile->clone_map().set_clone_idx(0);
-  compile->set_type_hwm(NULL);
   compile->set_type_last_size(0);
   compile->set_last_tf(NULL, NULL);
   compile->set_indexSet_arena(NULL);
   compile->set_indexSet_free_block_list(NULL);
   compile->init_type_arena();

@@ -540,10 +544,16 @@
     }
 
     ResourceMark rm;
     _scratch_const_size = const_size;
     int size = C2Compiler::initial_code_buffer_size(const_size);
+#ifdef ASSERT
+    if (C->has_scalarized_args()) {
+      // Oop verification for loading object fields from scalarized value types in the new entry point requires lots of space
+      size += 5120;
+    }
+#endif
     blob = BufferBlob::create("Compile::scratch_buffer", size);
     // Record the buffer blob for next time.
     set_scratch_buffer_blob(blob);
     // Have we run out of code space?
     if (scratch_buffer_blob() == NULL) {

@@ -604,18 +614,25 @@
   if (is_branch) {
     MacroAssembler masm(&buf);
     masm.bind(fakeL);
     n->as_MachBranch()->save_label(&saveL, &save_bnum);
     n->as_MachBranch()->label_set(&fakeL, 0);
+  } else if (n->is_MachProlog()) {
+    saveL = ((MachPrologNode*)n)->_verified_entry;
+    ((MachPrologNode*)n)->_verified_entry = &fakeL;
   }
   n->emit(buf, this->regalloc());
 
   // Emitting into the scratch buffer should not fail
   assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());
 
-  if (is_branch) // Restore label.
+  // Restore label.
+  if (is_branch) {
     n->as_MachBranch()->label_set(saveL, save_bnum);
+  } else if (n->is_MachProlog()) {
+    ((MachPrologNode*)n)->_verified_entry = saveL;
+  }
 
   // End scratch_emit_size section.
   set_in_scratch_emit_size(false);
 
   return buf.insts_size();

@@ -644,10 +661,12 @@
                   _stub_name(NULL),
                   _stub_entry_point(NULL),
                   _max_node_limit(MaxNodeLimit),
                   _orig_pc_slot(0),
                   _orig_pc_slot_offset_in_bytes(0),
+                  _sp_inc_slot(0),
+                  _sp_inc_slot_offset_in_bytes(0),
                   _inlining_progress(false),
                   _inlining_incrementally(false),
                   _do_cleanup(false),
                   _has_reserved_stack_access(target->has_reserved_stack_access()),
 #ifndef PRODUCT

@@ -783,11 +802,11 @@
       init_start(s);
       cg = CallGenerator::for_osr(method(), entry_bci());
     } else {
       // Normal case.
       init_tf(TypeFunc::make(method()));
-      StartNode* s = new StartNode(root(), tf()->domain());
+      StartNode* s = new StartNode(root(), tf()->domain_cc());
       initial_gvn()->set_type_bottom(s);
       init_start(s);
       if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
         // With java.lang.ref.reference.get() we must go through the
         // intrinsic - even when get() is the root

@@ -908,12 +927,19 @@
   }
 
   // Now that we know the size of all the monitors we can add a fixed slot
   // for the original deopt pc.
 
-  _orig_pc_slot =  fixed_slots();
+  _orig_pc_slot = fixed_slots();
   int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);
+
+  if (needs_stack_repair()) {
+    // One extra slot for the special stack increment value
+    _sp_inc_slot = next_slot;
+    next_slot += 2;
+  }
+
   set_fixed_slots(next_slot);
 
   // Compute when to use implicit null checks. Used by matching trap based
   // nodes and NullCheck optimization.
   set_allowed_deopt_reasons();
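
For context, a worked example of the slot arithmetic above, assuming an LP64 build (sizeof(address) == 8 and VMRegImpl::stack_slot_size == 4; these values are not asserted by the patch itself):

    // _orig_pc_slot = fixed_slots();      // deopt pc needs 8 / 4 = 2 stack slots
    // next_slot     = _orig_pc_slot + 2;
    // if (needs_stack_repair()) {
    //   _sp_inc_slot = next_slot;         // stack increment value: 2 more slots
    //   next_slot   += 2;
    // }
    // set_fixed_slots(next_slot);         // 4 extra 32-bit slots reserved in total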

@@ -935,10 +961,19 @@
     if (is_osr_compilation()) {
       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
     } else {
       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
+      if (_code_offsets.value(CodeOffsets::Verified_Value_Entry) == -1) {
+        _code_offsets.set_value(CodeOffsets::Verified_Value_Entry, _first_block_size);
+      }
+      if (_code_offsets.value(CodeOffsets::Verified_Value_Entry_RO) == -1) {
+        _code_offsets.set_value(CodeOffsets::Verified_Value_Entry_RO, _first_block_size);
+      }
+      if (_code_offsets.value(CodeOffsets::Entry) == -1) {
+        _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
+      }
       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
     }
 
     env()->register_method(_method, _entry_bci,
                            &_code_offsets,

@@ -980,10 +1015,12 @@
     _stub_name(stub_name),
     _stub_entry_point(NULL),
     _max_node_limit(MaxNodeLimit),
     _orig_pc_slot(0),
     _orig_pc_slot_offset_in_bytes(0),
+    _sp_inc_slot(0),
+    _sp_inc_slot_offset_in_bytes(0),
     _inlining_progress(false),
     _inlining_incrementally(false),
     _has_reserved_stack_access(false),
 #ifndef PRODUCT
     _trace_opto_output(directive->TraceOptoOutputOption),

@@ -1129,10 +1166,13 @@
   Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
   set_decompile_count(0);
 
   set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
   _loop_opts_cnt = LoopOptsCount;
+  _has_flattened_accesses = false;
+  _flattened_accesses_share_alias = true;
+
   set_do_inlining(Inline);
   set_max_inline_size(MaxInlineSize);
   set_freq_inline_size(FreqInlineSize);
   set_do_scheduling(OptoScheduling);
   set_do_count_invocations(false);

@@ -1212,10 +1252,11 @@
   _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   _range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   _opaque4_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
+  _value_type_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
   register_library_intrinsics();
 }
 
 //---------------------------init_start----------------------------------------
 // Install the StartNode on this compile object.

@@ -1436,11 +1477,12 @@
   bool is_known_inst = tj->isa_oopptr() != NULL &&
                        tj->is_oopptr()->is_known_instance();
 
   // Process weird unsafe references.
   if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
-    assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");
+    bool default_value_load = EnableValhalla && tj->is_instptr()->klass() == ciEnv::current()->Class_klass();
+    assert(InlineUnsafeOps || default_value_load, "indeterminate pointers come only from unsafe ops");
     assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
     tj = TypeOopPtr::BOTTOM;
     ptr = tj->ptr();
     offset = tj->offset();
   }

@@ -1449,24 +1491,35 @@
   const TypeAryPtr *ta = tj->isa_aryptr();
   if (ta && ta->is_stable()) {
     // Erase stability property for alias analysis.
     tj = ta = ta->cast_to_stable(false);
   }
+  if (ta && ta->is_not_flat()) {
+    // Erase not flat property for alias analysis.
+    tj = ta = ta->cast_to_not_flat(false);
+  }
+  if (ta && ta->is_not_null_free()) {
+    // Erase not null free property for alias analysis.
+    tj = ta = ta->cast_to_not_null_free(false);
+  }
+
   if( ta && is_known_inst ) {
     if ( offset != Type::OffsetBot &&
          offset > arrayOopDesc::length_offset_in_bytes() ) {
       offset = Type::OffsetBot; // Flatten constant access into array body only
-      tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
+      tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, Type::Offset(offset), ta->field_offset(), ta->instance_id());
     }
   } else if( ta && _AliasLevel >= 2 ) {
     // For arrays indexed by constant indices, we flatten the alias
     // space to include all of the array body.  Only the header, klass
     // and array length can be accessed un-aliased.
+    // For flattened value type arrays, each field has its own slice, so
+    // we must include the field offset.
     if( offset != Type::OffsetBot ) {
       if( ta->const_oop() ) { // MethodData* or Method*
         offset = Type::OffsetBot;   // Flatten constant access into array body
-        tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
+        tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
       } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
         // range is OK as-is.
         tj = ta = TypeAryPtr::RANGE;
       } else if( offset == oopDesc::klass_offset_in_bytes() ) {
         tj = TypeInstPtr::KLASS; // all klass loads look alike

@@ -1478,39 +1531,44 @@
         ptr = TypePtr::BotPTR;
       } else if (BarrierSet::barrier_set()->barrier_set_c2()->flatten_gc_alias_type(tj)) {
         ta = tj->isa_aryptr();
       } else {                  // Random constant offset into array body
         offset = Type::OffsetBot;   // Flatten constant access into array body
-        tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
+        tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
       }
     }
     // Arrays of fixed size alias with arrays of unknown size.
     if (ta->size() != TypeInt::POS) {
       const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
-      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
+      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,Type::Offset(offset), ta->field_offset());
     }
     // Arrays of known objects become arrays of unknown objects.
     if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
       const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
-      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
+      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
     }
     if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
       const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
-      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
+      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
+    }
+    // Initially all flattened array accesses share a single slice
+    if (ta->elem()->isa_valuetype() && ta->elem() != TypeValueType::BOTTOM && _flattened_accesses_share_alias) {
+      const TypeAry *tary = TypeAry::make(TypeValueType::BOTTOM, ta->size());
+      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), Type::Offset(Type::OffsetBot));
     }
     // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
     // cannot be distinguished by bytecode alone.
     if (ta->elem() == TypeInt::BOOL) {
       const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
       ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
-      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
+      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,Type::Offset(offset), ta->field_offset());
     }
     // During the 2nd round of IterGVN, NotNull castings are removed.
     // Make sure the Bottom and NotNull variants alias the same.
     // Also, make sure exact and non-exact variants alias the same.
     if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
-      tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
+      tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
     }
   }
 
   // Oop pointers need some flattening
   const TypeInstPtr *to = tj->isa_instptr();

@@ -1520,29 +1578,29 @@
       if (to->klass() != ciEnv::current()->Class_klass() ||
           offset < k->size_helper() * wordSize) {
         // No constant oop pointers (such as Strings); they alias with
         // unknown strings.
         assert(!is_known_inst, "not scalarizable allocation");
-        tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
+        tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
       }
     } else if( is_known_inst ) {
       tj = to; // Keep NotNull and klass_is_exact for instance type
     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
       // During the 2nd round of IterGVN, NotNull castings are removed.
       // Make sure the Bottom and NotNull variants alias the same.
       // Also, make sure exact and non-exact variants alias the same.
-      tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
+      tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
     }
     if (to->speculative() != NULL) {
-      tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
+      tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),Type::Offset(to->offset()), to->instance_id());
     }
     // Canonicalize the holder of this field
     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
       // First handle header references such as a LoadKlassNode, even if the
       // object's klass is unloaded at compile time (4965979).
       if (!is_known_inst) { // Do it only for non-instance types
-        tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
+        tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, Type::Offset(offset));
       }
     } else if (BarrierSet::barrier_set()->barrier_set_c2()->flatten_gc_alias_type(tj)) {
       to = tj->is_instptr();
     } else if (offset < 0 || offset >= k->size_helper() * wordSize) {
       // Static fields are in the space above the normal instance

@@ -1554,13 +1612,13 @@
       }
     } else {
       ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
       if (!k->equals(canonical_holder) || tj->offset() != offset) {
         if( is_known_inst ) {
-          tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
+          tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, Type::Offset(offset), to->instance_id());
         } else {
-          tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
+          tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, Type::Offset(offset));
         }
       }
     }
   }
 

@@ -1573,19 +1631,19 @@
     // use NotNull as the PTR.
     if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
 
       tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
                                    TypeKlassPtr::OBJECT->klass(),
-                                   offset);
+                                   Type::Offset(offset));
     }
 
     ciKlass* klass = tk->klass();
-    if( klass->is_obj_array_klass() ) {
+    if (klass != NULL && klass->is_obj_array_klass()) {
       ciKlass* k = TypeAryPtr::OOPS->klass();
       if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
         k = TypeInstPtr::BOTTOM->klass();
-      tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
+      tj = tk = TypeKlassPtr::make(TypePtr::NotNull, k, Type::Offset(offset));
     }
 
     // Check for precise loads from the primary supertype array and force them
     // to the supertype cache alias index.  Check for generic array loads from
     // the primary supertype array and also force them to the supertype cache

@@ -1597,11 +1655,11 @@
     if (offset == Type::OffsetBot ||
         (offset >= primary_supers_offset &&
          offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
         offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
       offset = in_bytes(Klass::secondary_super_cache_offset());
-      tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
+      tj = tk = TypeKlassPtr::make(TypePtr::NotNull, tk->klass(), Type::Offset(offset));
     }
   }
 
   // Flatten all Raw pointers together.
   if (tj->base() == Type::RawPtr)

@@ -1736,17 +1794,20 @@
   for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
 }
 
 
 //--------------------------------find_alias_type------------------------------
-Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
+Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field, bool uncached) {
   if (_AliasLevel == 0)
     return alias_type(AliasIdxBot);
 
-  AliasCacheEntry* ace = probe_alias_cache(adr_type);
-  if (ace->_adr_type == adr_type) {
-    return alias_type(ace->_index);
+  AliasCacheEntry* ace = NULL;
+  if (!uncached) {
+    ace = probe_alias_cache(adr_type);
+    if (ace->_adr_type == adr_type) {
+      return alias_type(ace->_index);
+    }
   }
 
   // Handle special cases.
   if (adr_type == NULL)             return alias_type(AliasIdxTop);
   if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);

@@ -1792,18 +1853,28 @@
     if (flat->isa_instptr()) {
       if (flat->offset() == java_lang_Class::klass_offset_in_bytes()
           && flat->is_instptr()->klass() == env()->Class_klass())
         alias_type(idx)->set_rewritable(false);
     }
+    ciField* field = NULL;
     if (flat->isa_aryptr()) {
 #ifdef ASSERT
       const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
       // (T_BYTE has the weakest alignment and size restrictions...)
       assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
 #endif
+      const Type* elemtype = flat->is_aryptr()->elem();
       if (flat->offset() == TypePtr::OffsetBot) {
-        alias_type(idx)->set_element(flat->is_aryptr()->elem());
+        alias_type(idx)->set_element(elemtype);
+      }
+      int field_offset = flat->is_aryptr()->field_offset().get();
+      if (elemtype->isa_valuetype() &&
+          elemtype->value_klass() != NULL &&
+          field_offset != Type::OffsetBot) {
+        ciValueKlass* vk = elemtype->value_klass();
+        field_offset += vk->first_field_offset();
+        field = vk->get_field_by_offset(field_offset, false);
       }
     }
     if (flat->isa_klassptr()) {
       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
         alias_type(idx)->set_rewritable(false);

@@ -1811,50 +1882,64 @@
         alias_type(idx)->set_rewritable(false);
       if (flat->offset() == in_bytes(Klass::access_flags_offset()))
         alias_type(idx)->set_rewritable(false);
       if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
         alias_type(idx)->set_rewritable(false);
+      if (flat->offset() == in_bytes(Klass::layout_helper_offset()))
+        alias_type(idx)->set_rewritable(false);
     }
     // %%% (We would like to finalize JavaThread::threadObj_offset(),
     // but the base pointer type is not distinctive enough to identify
     // references into JavaThread.)
 
     // Check for final fields.
     const TypeInstPtr* tinst = flat->isa_instptr();
     if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
-      ciField* field;
       if (tinst->const_oop() != NULL &&
           tinst->klass() == ciEnv::current()->Class_klass() &&
           tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
         // static field
         ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
         field = k->get_field_by_offset(tinst->offset(), true);
+      } else if (tinst->klass()->is_valuetype()) {
+        // Value type field
+        ciValueKlass* vk = tinst->value_klass();
+        field = vk->get_field_by_offset(tinst->offset(), false);
       } else {
-        ciInstanceKlass *k = tinst->klass()->as_instance_klass();
+        ciInstanceKlass* k = tinst->klass()->as_instance_klass();
         field = k->get_field_by_offset(tinst->offset(), false);
       }
-      assert(field == NULL ||
-             original_field == NULL ||
-             (field->holder() == original_field->holder() &&
-              field->offset() == original_field->offset() &&
-              field->is_static() == original_field->is_static()), "wrong field?");
-      // Set field() and is_rewritable() attributes.
-      if (field != NULL)  alias_type(idx)->set_field(field);
+    }
+    assert(field == NULL ||
+           original_field == NULL ||
+           (field->holder() == original_field->holder() &&
+            field->offset() == original_field->offset() &&
+            field->is_static() == original_field->is_static()), "wrong field?");
+    // Set field() and is_rewritable() attributes.
+    if (field != NULL) {
+      alias_type(idx)->set_field(field);
+      if (flat->isa_aryptr()) {
+        // Fields of flattened value type arrays are rewritable although they are declared final
+        assert(flat->is_aryptr()->elem()->isa_valuetype(), "must be a flattened value array");
+        alias_type(idx)->set_rewritable(true);
+      }
     }
   }
 
   // Fill the cache for next time.
-  ace->_adr_type = adr_type;
-  ace->_index    = idx;
-  assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");
-
-  // Might as well try to fill the cache for the flattened version, too.
-  AliasCacheEntry* face = probe_alias_cache(flat);
-  if (face->_adr_type == NULL) {
-    face->_adr_type = flat;
-    face->_index    = idx;
-    assert(alias_type(flat) == alias_type(idx), "flat type must work too");
+  if (!uncached) {
+    ace->_adr_type = adr_type;
+    ace->_index    = idx;
+    assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");
+
+    // Might as well try to fill the cache for the flattened version, too.
+    AliasCacheEntry* face = probe_alias_cache(flat);
+    if (face->_adr_type == NULL) {
+      face->_adr_type = flat;
+      face->_index    = idx;
+      assert(alias_type(flat) == alias_type(idx), "flat type must work too");
+    }
   }
 
   return alias_type(idx);
 }
 

@@ -2011,10 +2096,351 @@
     igvn.replace_node(opaq, opaq->in(2));
   }
   assert(opaque4_count() == 0, "should be empty");
 }
 
+void Compile::add_value_type(Node* n) {
+  assert(n->is_ValueTypeBase(), "unexpected node");
+  if (_value_type_nodes != NULL) {
+    _value_type_nodes->push(n);
+  }
+}
+
+void Compile::remove_value_type(Node* n) {
+  assert(n->is_ValueTypeBase(), "unexpected node");
+  if (_value_type_nodes != NULL) {
+    _value_type_nodes->remove(n);
+  }
+}
+
+// Does the return value keep otherwise useless value type allocations
+// alive?
+static bool return_val_keeps_allocations_alive(Node* ret_val) {
+  ResourceMark rm;
+  Unique_Node_List wq;
+  wq.push(ret_val);
+  bool some_allocations = false;
+  for (uint i = 0; i < wq.size(); i++) {
+    Node* n = wq.at(i);
+    assert(!n->is_ValueTypeBase(), "chain of value type nodes");
+    if (n->outcnt() > 1) {
+      // Some other use for the allocation
+      return false;
+    } else if (n->is_Phi()) {
+      for (uint j = 1; j < n->req(); j++) {
+        wq.push(n->in(j));
+      }
+    } else if (n->is_CheckCastPP() &&
+               n->in(1)->is_Proj() &&
+               n->in(1)->in(0)->is_Allocate()) {
+      some_allocations = true;
+    }
+  }
+  return some_allocations;
+}
+
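
For context, an illustrative IR shape (not taken from the patch) that makes the helper above answer true:

    //   Return
    //     +-- Phi
    //           +-- CheckCastPP <- Proj <- Allocate
    //           +-- ConP NULL
    // The walk visits the return value and the inputs of any Phi it meets. A
    // visited node with more than one use means something other than the return
    // keeps the allocation alive, so the answer is false; a CheckCastPP of an
    // Allocate projection records that at least one allocation is reachable
    // only through the return value.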
+void Compile::process_value_types(PhaseIterGVN &igvn) {
+  // Make value types scalar in safepoints
+  while (_value_type_nodes->size() != 0) {
+    ValueTypeBaseNode* vt = _value_type_nodes->pop()->as_ValueTypeBase();
+    vt->make_scalar_in_safepoints(&igvn);
+    if (vt->is_ValueTypePtr()) {
+      igvn.replace_node(vt, vt->get_oop());
+    } else if (vt->outcnt() == 0) {
+      igvn.remove_dead_node(vt);
+    }
+  }
+  _value_type_nodes = NULL;
+  if (tf()->returns_value_type_as_fields()) {
+    Node* ret = NULL;
+    for (uint i = 1; i < root()->req(); i++){
+      Node* in = root()->in(i);
+      if (in->Opcode() == Op_Return) {
+        assert(ret == NULL, "only one return");
+        ret = in;
+      }
+    }
+    if (ret != NULL) {
+      Node* ret_val = ret->in(TypeFunc::Parms);
+      if (igvn.type(ret_val)->isa_oopptr() &&
+          return_val_keeps_allocations_alive(ret_val)) {
+        igvn.replace_input_of(ret, TypeFunc::Parms, ValueTypeNode::tagged_klass(igvn.type(ret_val)->value_klass(), igvn));
+        assert(ret_val->outcnt() == 0, "should be dead now");
+        igvn.remove_dead_node(ret_val);
+      }
+    }
+  }
+  igvn.optimize();
+}
+
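
A hedged sketch of the return rewrite above: when the method returns a value type as fields and the returned oop only keeps its own allocation alive, the Return's oop input is replaced by a tagged klass constant, presumably so the caller can tell that no heap instance was produced while the fields are still returned as scalars:

    // Before:  Return(..., CheckCastPP(Proj(Allocate V)))   // oop kept alive only by the return
    // After:   Return(..., ValueTypeNode::tagged_klass(V))  // allocation becomes dead and is removed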
+void Compile::adjust_flattened_array_access_aliases(PhaseIterGVN& igvn) {
+  if (!_has_flattened_accesses) {
+    return;
+  }
+  // Initially, all flattened array accesses share the same slice to
+  // keep dependencies with Object[] array accesses (that could be
+  // to a flattened array) correct. We're done with parsing so we
+  // now know all flattened array accesses in this compile
+  // unit. Let's move flattened array accesses to their own slice,
+  // one per element field. This should help memory access
+  // optimizations.
+  ResourceMark rm;
+  Unique_Node_List wq;
+  wq.push(root());
+
+  Node_List mergememnodes;
+  Node_List memnodes;
+
+  // Alias index currently shared by all flattened memory accesses
+  int index = get_alias_index(TypeAryPtr::VALUES);
+
+  // Find MergeMem nodes and flattened array accesses
+  for (uint i = 0; i < wq.size(); i++) {
+    Node* n = wq.at(i);
+    if (n->is_Mem()) {
+      const TypePtr* adr_type = NULL;
+      if (n->Opcode() == Op_StoreCM) {
+        adr_type = get_adr_type(get_alias_index(n->in(MemNode::OopStore)->adr_type()));
+      } else {
+        adr_type = get_adr_type(get_alias_index(n->adr_type()));
+      }
+      if (adr_type == TypeAryPtr::VALUES) {
+        memnodes.push(n);
+      }
+    } else if (n->is_MergeMem()) {
+      MergeMemNode* mm = n->as_MergeMem();
+      if (mm->memory_at(index) != mm->base_memory()) {
+        mergememnodes.push(n);
+      }
+    }
+    for (uint j = 0; j < n->req(); j++) {
+      Node* m = n->in(j);
+      if (m != NULL) {
+        wq.push(m);
+      }
+    }
+  }
+
+  if (memnodes.size() > 0) {
+    _flattened_accesses_share_alias = false;
+
+    // We are going to change the slice for the flattened array
+    // accesses so we need to clear the cache entries that refer to
+    // them.
+    for (uint i = 0; i < AliasCacheSize; i++) {
+      AliasCacheEntry* ace = &_alias_cache[i];
+      if (ace->_adr_type != NULL &&
+          ace->_adr_type->isa_aryptr() &&
+          ace->_adr_type->is_aryptr()->elem()->isa_valuetype()) {
+        ace->_adr_type = NULL;
+        ace->_index = 0;
+      }
+    }
+
+    // Find what aliases we are going to add
+    int start_alias = num_alias_types()-1;
+    int stop_alias = 0;
+
+    for (uint i = 0; i < memnodes.size(); i++) {
+      Node* m = memnodes.at(i);
+      const TypePtr* adr_type = NULL;
+      if (m->Opcode() == Op_StoreCM) {
+        adr_type = m->in(MemNode::OopStore)->adr_type();
+        Node* clone = new StoreCMNode(m->in(MemNode::Control), m->in(MemNode::Memory), m->in(MemNode::Address),
+                                      m->adr_type(), m->in(MemNode::ValueIn), m->in(MemNode::OopStore),
+                                      get_alias_index(adr_type));
+        igvn.register_new_node_with_optimizer(clone);
+        igvn.replace_node(m, clone);
+      } else {
+        adr_type = m->adr_type();
+#ifdef ASSERT
+        m->as_Mem()->set_adr_type(adr_type);
+#endif
+      }
+      int idx = get_alias_index(adr_type);
+      start_alias = MIN2(start_alias, idx);
+      stop_alias = MAX2(stop_alias, idx);
+    }
+
+    assert(stop_alias >= start_alias, "should have expanded aliases");
+
+    Node_Stack stack(0);
+#ifdef ASSERT
+    VectorSet seen(Thread::current()->resource_area());
+#endif
+    // Now let's fix the memory graph so each flattened array access
+    // is moved to the right slice. Start from the MergeMem nodes.
+    uint last = unique();
+    for (uint i = 0; i < mergememnodes.size(); i++) {
+      MergeMemNode* current = mergememnodes.at(i)->as_MergeMem();
+      Node* n = current->memory_at(index);
+      MergeMemNode* mm = NULL;
+      do {
+        // Follow memory edges through memory accesses, phis and
+        // narrow membars and push nodes on the stack. Once we hit
+        // bottom memory, we pop elements off the stack one at a
+        // time, in reverse order, and move them to the right slice
+        // by changing their memory edges.
+        if ((n->is_Phi() && n->adr_type() != TypePtr::BOTTOM) || n->is_Mem() || n->adr_type() == TypeAryPtr::VALUES) {
+          assert(!seen.test_set(n->_idx), "");
+          // Uses (a load for instance) will need to be moved to the
+          // right slice as well and will get a new memory state
+          // that we don't know yet. The use could also be the
+          // backedge of a loop. We put a place holder node between
+          // the memory node and its uses. We replace that place
+          // holder with the correct memory state once we know it,
+          // i.e. when nodes are popped off the stack. Using the
+          // place holder makes the logic work in the presence of
+          // loops.
+          if (n->outcnt() > 1) {
+            Node* place_holder = NULL;
+            assert(!n->has_out_with(Op_Node), "");
+            for (DUIterator k = n->outs(); n->has_out(k); k++) {
+              Node* u = n->out(k);
+              if (u != current && u->_idx < last) {
+                bool success = false;
+                for (uint l = 0; l < u->req(); l++) {
+                  if (!stack.is_empty() && u == stack.node() && l == stack.index()) {
+                    continue;
+                  }
+                  Node* in = u->in(l);
+                  if (in == n) {
+                    if (place_holder == NULL) {
+                      place_holder = new Node(1);
+                      place_holder->init_req(0, n);
+                    }
+                    igvn.replace_input_of(u, l, place_holder);
+                    success = true;
+                  }
+                }
+                if (success) {
+                  --k;
+                }
+              }
+            }
+          }
+          if (n->is_Phi()) {
+            stack.push(n, 1);
+            n = n->in(1);
+          } else if (n->is_Mem()) {
+            stack.push(n, n->req());
+            n = n->in(MemNode::Memory);
+          } else {
+            assert(n->is_Proj() && n->in(0)->Opcode() == Op_MemBarCPUOrder, "");
+            stack.push(n, n->req());
+            n = n->in(0)->in(TypeFunc::Memory);
+          }
+        } else {
+          assert(n->adr_type() == TypePtr::BOTTOM || (n->Opcode() == Op_Node && n->_idx >= last) || (n->is_Proj() && n->in(0)->is_Initialize()), "");
+          // Build a new MergeMem node to carry the new memory state
+          // as we build it. IGVN should fold extraneous MergeMem
+          // nodes.
+          mm = MergeMemNode::make(n);
+          igvn.register_new_node_with_optimizer(mm);
+          while (stack.size() > 0) {
+            Node* m = stack.node();
+            uint idx = stack.index();
+            if (m->is_Mem()) {
+              // Move memory node to its new slice
+              const TypePtr* adr_type = m->adr_type();
+              int alias = get_alias_index(adr_type);
+              Node* prev = mm->memory_at(alias);
+              igvn.replace_input_of(m, MemNode::Memory, prev);
+              mm->set_memory_at(alias, m);
+            } else if (m->is_Phi()) {
+              // We need as many new phis as there are new aliases
+              igvn.replace_input_of(m, idx, mm);
+              if (idx == m->req()-1) {
+                Node* r = m->in(0);
+                for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
+                  const Type* adr_type = get_adr_type(j);
+                  if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->elem()->isa_valuetype()) {
+                    continue;
+                  }
+                  Node* phi = new PhiNode(r, Type::MEMORY, get_adr_type(j));
+                  igvn.register_new_node_with_optimizer(phi);
+                  for (uint k = 1; k < m->req(); k++) {
+                    phi->init_req(k, m->in(k)->as_MergeMem()->memory_at(j));
+                  }
+                  mm->set_memory_at(j, phi);
+                }
+                Node* base_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
+                igvn.register_new_node_with_optimizer(base_phi);
+                for (uint k = 1; k < m->req(); k++) {
+                  base_phi->init_req(k, m->in(k)->as_MergeMem()->base_memory());
+                }
+                mm->set_base_memory(base_phi);
+              }
+            } else {
+              // This is a MemBarCPUOrder node from
+              // Parse::array_load()/Parse::array_store(), in the
+              // branch that handles flattened arrays hidden under
+              // an Object[] array. We also need one new membar per
+              // new alias to keep the unknown access that the
+              // membars protect properly ordered with accesses to
+              // known flattened arrays.
+              assert(m->is_Proj(), "projection expected");
+              Node* ctrl = m->in(0)->in(TypeFunc::Control);
+              igvn.replace_input_of(m->in(0), TypeFunc::Control, top());
+              for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
+                const Type* adr_type = get_adr_type(j);
+                if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->elem()->isa_valuetype()) {
+                  continue;
+                }
+                MemBarNode* mb = new MemBarCPUOrderNode(this, j, NULL);
+                igvn.register_new_node_with_optimizer(mb);
+                Node* mem = mm->memory_at(j);
+                mb->init_req(TypeFunc::Control, ctrl);
+                mb->init_req(TypeFunc::Memory, mem);
+                ctrl = new ProjNode(mb, TypeFunc::Control);
+                igvn.register_new_node_with_optimizer(ctrl);
+                mem = new ProjNode(mb, TypeFunc::Memory);
+                igvn.register_new_node_with_optimizer(mem);
+                mm->set_memory_at(j, mem);
+              }
+              igvn.replace_node(m->in(0)->as_Multi()->proj_out(TypeFunc::Control), ctrl);
+            }
+            if (idx < m->req()-1) {
+              idx += 1;
+              stack.set_index(idx);
+              n = m->in(idx);
+              break;
+            }
+            // Take care of place holder nodes
+            if (m->has_out_with(Op_Node)) {
+              Node* place_holder = m->find_out_with(Op_Node);
+              if (place_holder != NULL) {
+                Node* mm_clone = mm->clone();
+                igvn.register_new_node_with_optimizer(mm_clone);
+                Node* hook = new Node(1);
+                hook->init_req(0, mm);
+                igvn.replace_node(place_holder, mm_clone);
+                hook->destruct();
+              }
+              assert(!m->has_out_with(Op_Node), "place holder should be gone now");
+            }
+            stack.pop();
+          }
+        }
+      } while(stack.size() > 0);
+      // Fix the memory state at the MergeMem we started from
+      igvn.rehash_node_delayed(current);
+      for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
+        const Type* adr_type = get_adr_type(j);
+        if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->elem()->isa_valuetype()) {
+          continue;
+        }
+        current->set_memory_at(j, mm);
+      }
+      current->set_memory_at(index, current->base_memory());
+    }
+    igvn.optimize();
+  }
+  print_method(PHASE_SPLIT_VALUES_ARRAY, 2);
+}
+
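
For context, a rough before/after picture of the re-slicing performed above, using a hypothetical flattened array of a value class Point { int x; int y; } (names assumed):

    // During parsing (shared slice):
    //   StoreI p[i].x, StoreI p[i].y, LoadI p[j].x  --> all on alias(TypeAryPtr::VALUES)
    //
    // After adjust_flattened_array_access_aliases():
    //   StoreI p[i].x, LoadI p[j].x  --> alias for Point[] at the field offset of x
    //   StoreI p[i].y                --> alias for Point[] at the field offset of y
    //
    // MergeMem nodes and memory Phis get one extra input per new alias, and the
    // MemBarCPUOrder membars from Parse::array_load()/array_store() are duplicated
    // per new slice so unknown-array accesses stay ordered with the flattened ones.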
+
 // StringOpts and late inlining of string methods
 void Compile::inline_string_calls(bool parse_time) {
   {
     // remove useless nodes to make the usage analysis simpler
     ResourceMark rm;

@@ -2285,10 +2711,17 @@
     set_for_igvn(&new_worklist);
     igvn = PhaseIterGVN(initial_gvn());
     igvn.optimize();
   }
 
+  if (_value_type_nodes->size() > 0) {
+    // Do this once all inlining is over to avoid getting inconsistent debug info
+    process_value_types(igvn);
+  }
+
+  adjust_flattened_array_access_aliases(igvn);
+
   // Perform escape analysis
   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
     if (has_loops()) {
       // Cleanup graph (remove dead nodes).
       TracePhase tp("idealLoop", &timers[_t_idealLoop]);

@@ -2448,11 +2881,10 @@
  }
 
  print_method(PHASE_OPTIMIZE_FINISHED, 2);
 }
 
-
 //------------------------------Code_Gen---------------------------------------
 // Given a graph, generate code for it
 void Compile::Code_Gen() {
   if (failing()) {
     return;

@@ -2777,10 +3209,11 @@
       mem = prev->in(MemNode::Memory);
     }
   }
 }
 
+
 //------------------------------final_graph_reshaping_impl----------------------
 // Implement items 1-5 from final_graph_reshaping below.
 void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
 
   if ( n->outcnt() == 0 ) return; // dead node

@@ -2999,10 +3432,49 @@
       // Check to see if address types have grounded out somehow.
       const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
       assert( !tp || oop_offset_is_sane(tp), "" );
     }
 #endif
+    if (EnableValhalla && (nop == Op_LoadKlass || nop == Op_LoadNKlass)) {
+      const TypeKlassPtr* tk = n->bottom_type()->make_ptr()->is_klassptr();
+      assert(!tk->klass_is_exact(), "should have been folded");
+      if (tk->klass()->can_be_value_array_klass()) {
+        // Array load klass needs to filter out property bits (but not
+        // GetNullFreePropertyNode which needs to extract the null free bits)
+        uint last = unique();
+        Node* pointer = NULL;
+        if (nop == Op_LoadKlass) {
+          Node* cast = new CastP2XNode(NULL, n);
+          Node* masked = new LShiftXNode(cast, new ConINode(TypeInt::make(oopDesc::storage_props_nof_bits)));
+          masked = new RShiftXNode(masked, new ConINode(TypeInt::make(oopDesc::storage_props_nof_bits)));
+          pointer = new CastX2PNode(masked);
+          pointer = new CheckCastPPNode(NULL, pointer, n->bottom_type());
+        } else {
+          Node* cast = new CastN2INode(n);
+          Node* masked = new AndINode(cast, new ConINode(TypeInt::make(oopDesc::compressed_klass_mask())));
+          pointer = new CastI2NNode(masked, n->bottom_type());
+        }
+        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+          Node* u = n->fast_out(i);
+          if (u->_idx < last && u->Opcode() != Op_GetNullFreeProperty) {
+            // If the user is a comparison with a klass that can't be a value type
+            // array klass, we don't need to clear the storage property bits.
+            Node* cmp = (u->is_DecodeNKlass() && u->outcnt() == 1) ? u->unique_out() : u;
+            if (cmp->is_Cmp()) {
+              const TypeKlassPtr* kp1 = cmp->in(1)->bottom_type()->make_ptr()->isa_klassptr();
+              const TypeKlassPtr* kp2 = cmp->in(2)->bottom_type()->make_ptr()->isa_klassptr();
+              if ((kp1 != NULL && !kp1->klass()->can_be_value_array_klass()) ||
+                  (kp2 != NULL && !kp2->klass()->can_be_value_array_klass())) {
+                continue;
+              }
+            }
+            int nb = u->replace_edge(n, pointer);
+            --i, imax -= nb;
+          }
+        }
+      }
+    }
     break;
   }
 
   case Op_AddP: {               // Assert sane base pointers
     Node *addp = n->in(AddPNode::Address);

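For context, a sketch of the masking idiom used in the LoadKlass case above (illustrative only; N stands for oopDesc::storage_props_nof_bits and its value is not asserted here). The storage property bits sit in the most significant bits of the uncompressed klass pointer, so shifting left and then arithmetically right by N drops them, assuming the remaining klass address leaves the new sign bit clear:

    //   intptr_t raw    = ...;              // klass pointer, property bits in the top N bits
    //   intptr_t masked = (raw << N) >> N;  // property bits cleared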
@@ -3513,10 +3985,33 @@
       Node* cmp = new CmpLNode(andl, n->in(2));
       n->subsume_by(cmp, this);
     }
     break;
   }
+#ifdef ASSERT
+  case Op_ValueTypePtr:
+  case Op_ValueType: {
+    n->dump(-1);
+    assert(false, "value type node was not removed");
+    break;
+  }
+#endif
+  case Op_GetNullFreeProperty: {
+    // Extract the null free bits
+    uint last = unique();
+    Node* null_free = NULL;
+    if (n->in(1)->Opcode() == Op_LoadKlass) {
+      Node* cast = new CastP2XNode(NULL, n->in(1));
+      null_free = new AndLNode(cast, new ConLNode(TypeLong::make(((jlong)1)<<(oopDesc::wide_storage_props_shift + ArrayStorageProperties::null_free_bit))));
+    } else {
+      assert(n->in(1)->Opcode() == Op_LoadNKlass, "not a compressed klass?");
+      Node* cast = new CastN2INode(n->in(1));
+      null_free = new AndINode(cast, new ConINode(TypeInt::make(1<<(oopDesc::narrow_storage_props_shift + ArrayStorageProperties::null_free_bit))));
+    }
+    n->replace_by(null_free);
+    break;
+  }
   default:
     assert(!n->is_Call(), "");
     assert(!n->is_Mem(), "");
     assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
     break;

@@ -3859,20 +4354,20 @@
   if (holder->is_being_initialized()) {
     if (accessing_method->holder() == holder) {
       // Access inside a class. The barrier can be elided when access happens in <clinit>,
       // <init>, or a static method. In all those cases, there was an initialization
       // barrier on the holder klass passed.
-      if (accessing_method->is_static_initializer() ||
-          accessing_method->is_object_initializer() ||
+      if (accessing_method->is_class_initializer() ||
+          accessing_method->is_object_constructor() ||
           accessing_method->is_static()) {
         return false;
       }
     } else if (accessing_method->holder()->is_subclass_of(holder)) {
       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
       // In case of <init> or a static method, the barrier is on the subclass is not enough:
       // child class can become fully initialized while its parent class is still being initialized.
-      if (accessing_method->is_static_initializer()) {
+      if (accessing_method->is_class_initializer()) {
         return false;
       }
     }
     ciMethod* root = method(); // the root method of compilation
     if (root != accessing_method) {

@@ -4206,21 +4701,23 @@
 // (0) superklass is java.lang.Object (can occur in reflective code)
 // (1) subklass is already limited to a subtype of superklass => always ok
 // (2) subklass does not overlap with superklass => always fail
 // (3) superklass has NO subtypes and we can check with a simple compare.
 int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
-  if (StressReflectiveCode) {
+  if (StressReflectiveCode || superk == NULL || subk == NULL) {
     return SSC_full_test;       // Let caller generate the general case.
   }
 
   if (superk == env()->Object_klass()) {
     return SSC_always_true;     // (0) this test cannot fail
   }
 
   ciType* superelem = superk;
-  if (superelem->is_array_klass())
+  if (superelem->is_array_klass()) {
+    ciArrayKlass* ak = superelem->as_array_klass();
     superelem = superelem->as_array_klass()->base_element_type();
+  }
 
   if (!subk->is_interface()) {  // cannot trust static interface types yet
     if (subk->is_subtype_of(superk)) {
       return SSC_always_true;   // (1) false path dead; no dynamic test needed
     }

@@ -4228,10 +4725,15 @@
         !superk->is_subtype_of(subk)) {
       return SSC_always_false;
     }
   }
 
+  // Do not fold the subtype check to an array klass pointer comparison for [V? arrays.
+  // [V is a subtype of [V? but the klass for [V is not equal to the klass for [V?. Perform a full test.
+  if (superk->is_obj_array_klass() && !superk->as_array_klass()->storage_properties().is_null_free() && superk->as_array_klass()->element_klass()->is_valuetype()) {
+    return SSC_full_test;
+  }
   // If casting to an instance klass, it must have no subtypes
   if (superk->is_interface()) {
     // Cannot trust interfaces yet.
     // %%% S.B. superk->nof_implementors() == 1
   } else if (superelem->is_instance_klass()) {

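For context, a small illustration of the case handled above (hypothetical value class, names assumed):

    // Given a value class V:
    //   V[]   null-free element storage, array klass K1
    //   V?[]  nullable element storage,  array klass K2 (a distinct klass)
    // V[] is a subtype of V?[], yet K1 != K2, so folding the check into a klass
    // pointer comparison against K2 would wrongly fail for a V[] operand; hence
    // SSC_full_test is returned.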
@@ -4655,10 +5157,31 @@
     igvn.check_no_speculative_types();
 #endif
   }
 }
 
+Node* Compile::optimize_acmp(PhaseGVN* phase, Node* a, Node* b) {
+  const TypeInstPtr* ta = phase->type(a)->isa_instptr();
+  const TypeInstPtr* tb = phase->type(b)->isa_instptr();
+  if (!EnableValhalla || ta == NULL || tb == NULL ||
+      ta->is_zero_type() || tb->is_zero_type() ||
+      !ta->can_be_value_type() || !tb->can_be_value_type()) {
+    // Use old acmp if one operand is null or not a value type
+    return new CmpPNode(a, b);
+  } else if (ta->is_valuetypeptr() || tb->is_valuetypeptr()) {
+    // We know that one operand is a value type. Therefore,
+    // new acmp will only return true if both operands are NULL.
+    // Check if both operands are null by or'ing the oops.
+    a = phase->transform(new CastP2XNode(NULL, a));
+    b = phase->transform(new CastP2XNode(NULL, b));
+    a = phase->transform(new OrXNode(a, b));
+    return new CmpXNode(a, phase->MakeConX(0));
+  }
+  // Use new acmp
+  return NULL;
+}
+
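
A rough usage sketch for the helper above (hypothetical caller; the standard GVN transform idiom is assumed rather than taken from this patch):

    // a, b: the two acmp operands; phase: the current PhaseGVN
    Node* cmp = Compile::optimize_acmp(phase, a, b);
    if (cmp != NULL) {
      cmp = phase->transform(cmp);  // either the old CmpP or the both-operands-NULL CmpX
    } else {
      // both operands may be value types: the caller expands the new acmp sequence
    }

When one operand is statically a value type, the returned node is CmpX(OrX(CastP2X(a), CastP2X(b)), 0), which can only compare equal when both oops are NULL.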
 // Auxiliary method to support randomized stressing/fuzzing.
 //
 // This method can be called the arbitrary number of times, with current count
 // as the argument. The logic allows selecting a single candidate from the
 // running list of candidates as follows: