< prev index next >

src/hotspot/share/opto/library_call.cpp

Print this page
*** 22,10 ***
--- 22,11 ---
   *
   */
  
  #include "precompiled.hpp"
  #include "asm/macroAssembler.hpp"
+ #include "ci/ciFlatArrayKlass.hpp"
  #include "ci/ciUtilities.inline.hpp"
  #include "classfile/vmIntrinsics.hpp"
  #include "compiler/compileBroker.hpp"
  #include "compiler/compileLog.hpp"
  #include "gc/shared/barrierSet.hpp"

*** 318,29 ***
--- 319,33 ---
    case vmIntrinsics::_compressStringC:
    case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
    case vmIntrinsics::_inflateStringC:
    case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
  
+   case vmIntrinsics::_makePrivateBuffer:        return inline_unsafe_make_private_buffer();
+   case vmIntrinsics::_finishPrivateBuffer:      return inline_unsafe_finish_private_buffer();
    case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
    case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
    case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
    case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
    case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
    case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
    case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
    case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
    case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
+   case vmIntrinsics::_getValue:                 return inline_unsafe_access(!is_store, T_INLINE_TYPE,Relaxed, false);
  
    case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
    case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
    case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
    case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
    case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
    case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
    case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
    case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
    case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
+   case vmIntrinsics::_putValue:                 return inline_unsafe_access( is_store, T_INLINE_TYPE,Relaxed, false);
  
    case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
    case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
    case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
    case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);

*** 505,10 ***
--- 510,13 ---
    case vmIntrinsics::_isPrimitive:
    case vmIntrinsics::_isHidden:
    case vmIntrinsics::_getSuperclass:
    case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
  
+   case vmIntrinsics::_asPrimaryType:
+   case vmIntrinsics::_asValueType:              return inline_primitive_Class_conversion(intrinsic_id());
+ 
    case vmIntrinsics::_floatToRawIntBits:
    case vmIntrinsics::_floatToIntBits:
    case vmIntrinsics::_intBitsToFloat:
    case vmIntrinsics::_doubleToRawLongBits:
    case vmIntrinsics::_doubleToLongBits:

*** 2161,32 ***
--- 2169,38 ---
  //----------------------------inline_unsafe_access----------------------------
  
  const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
    // Attempt to infer a sharper value type from the offset and base type.
    ciKlass* sharpened_klass = NULL;
+   bool null_free = false;
  
    // See if it is an instance field, with an object type.
    if (alias_type->field() != NULL) {
      if (alias_type->field()->type()->is_klass()) {
        sharpened_klass = alias_type->field()->type()->as_klass();
+       null_free = alias_type->field()->is_null_free();
      }
    }
  
    // See if it is a narrow oop array.
    if (adr_type->isa_aryptr()) {
      if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
        const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
+       null_free = adr_type->is_aryptr()->is_null_free();
        if (elem_type != NULL) {
          sharpened_klass = elem_type->klass();
        }
      }
    }
  
    // The sharpened class might be unloaded if there is no class loader
    // constraint in place.
    if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
      const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
+     if (null_free) {
+       tjp = tjp->join_speculative(TypePtr::NOTNULL)->is_oopptr();
+     }
  
  #ifndef PRODUCT
      if (C->print_intrinsics() || C->print_inlining()) {
        tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
        tty->print("  sharpened value: ");  tjp->dump();      tty->cr();

*** 2238,22 ***
      ciSignature* sig = callee()->signature();
  #ifdef ASSERT
      if (!is_store) {
        // Object getReference(Object base, int/long offset), etc.
        BasicType rtype = sig->return_type()->basic_type();
!       assert(rtype == type, "getter must return the expected value");
!       assert(sig->count() == 2, "oop getter has 2 arguments");
        assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
        assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
      } else {
        // void putReference(Object base, int/long offset, Object x), etc.
        assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
!       assert(sig->count() == 3, "oop putter has 3 arguments");
        assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
        assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
        BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
!       assert(vtype == type, "putter must accept the expected value");
      }
  #endif // ASSERT
   }
  #endif //PRODUCT
  
--- 2252,22 ---
      ciSignature* sig = callee()->signature();
  #ifdef ASSERT
      if (!is_store) {
        // Object getReference(Object base, int/long offset), etc.
        BasicType rtype = sig->return_type()->basic_type();
!       assert(rtype == type || (rtype == T_OBJECT && type == T_INLINE_TYPE), "getter must return the expected value");
!       assert(sig->count() == 2 || (type == T_INLINE_TYPE && sig->count() == 3), "oop getter has 2 or 3 arguments");
        assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
        assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
      } else {
        // void putReference(Object base, int/long offset, Object x), etc.
        assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
!       assert(sig->count() == 3 || (type == T_INLINE_TYPE && sig->count() == 4), "oop putter has 3 or 4 arguments");
        assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
        assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
        BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
!       assert(vtype == type || (type == T_INLINE_TYPE && vtype == T_OBJECT), "putter must accept the expected value");
      }
  #endif // ASSERT
   }
  #endif //PRODUCT
  

*** 2271,21 ***
    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
    // to be plain byte offsets, which are also the same as those accepted
    // by oopDesc::field_addr.
    assert(Unsafe_field_offset_to_byte_offset(11) == 11,
           "fieldOffset must be byte-scaled");
    // 32-bit machines ignore the high half!
    offset = ConvL2X(offset);
  
    // Save state and restore on bailout
    uint old_sp = sp();
    SafePointNode* old_map = clone_map();
  
    Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
  
    if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
!     if (type != T_OBJECT) {
        decorators |= IN_NATIVE; // off-heap primitive access
      } else {
        set_map(old_map);
        set_sp(old_sp);
        return false; // off-heap oop accesses are not supported
--- 2285,72 ---
    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
    // to be plain byte offsets, which are also the same as those accepted
    // by oopDesc::field_addr.
    assert(Unsafe_field_offset_to_byte_offset(11) == 11,
           "fieldOffset must be byte-scaled");
+ 
+   ciInlineKlass* inline_klass = NULL;
+   if (type == T_INLINE_TYPE) {
+     const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
+     if (cls == NULL || cls->const_oop() == NULL) {
+       return false;
+     }
+     ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
+     if (!mirror_type->is_inlinetype()) {
+       return false;
+     }
+     inline_klass = mirror_type->as_inline_klass();
+   }
+ 
+   if (base->is_InlineTypeBase()) {
+     InlineTypeBaseNode* vt = base->as_InlineTypeBase();
+     if (is_store) {
+       if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->isa_inlinetype() || !_gvn.type(vt)->is_inlinetype()->larval()) {
+         return false;
+       }
+       base = vt->get_oop();
+     } else {
+       if (offset->is_Con()) {
+         long off = find_long_con(offset, 0);
+         ciInlineKlass* vk = vt->type()->inline_klass();
+         if ((long)(int)off != off || !vk->contains_field_offset(off)) {
+           return false;
+         }
+ 
+         ciField* field = vk->get_non_flattened_field_by_offset(off);
+         if (field != NULL) {
+           BasicType bt = field->layout_type();
+           if (bt == T_ARRAY || bt == T_NARROWOOP || (bt == T_INLINE_TYPE && !field->is_flattened())) {
+             bt = T_OBJECT;
+           }
+           if (bt == type && (bt != T_INLINE_TYPE || field->type() == inline_klass)) {
+             set_result(vt->field_value_by_offset(off, false));
+             return true;
+           }
+         }
+       }
+       if (vt->is_InlineType()) {
+         // Re-execute the unsafe access if allocation triggers deoptimization.
+         PreserveReexecuteState preexecs(this);
+         jvms()->set_should_reexecute(true);
+         vt = vt->buffer(this);
+       }
+       base = vt->get_oop();
+     }
+   }
+ 
    // 32-bit machines ignore the high half!
    offset = ConvL2X(offset);
  
    // Save state and restore on bailout
    uint old_sp = sp();
    SafePointNode* old_map = clone_map();
  
    Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
  
    if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
!     if (type != T_OBJECT && (inline_klass == NULL || !inline_klass->has_object_fields())) {
        decorators |= IN_NATIVE; // off-heap primitive access
      } else {
        set_map(old_map);
        set_sp(old_sp);
        return false; // off-heap oop accesses are not supported

*** 2299,11 ***
  
    if (!can_access_non_heap) {
      decorators |= IN_HEAP;
    }
  
!   Node* val = is_store ? argument(4) : NULL;
  
    const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
    if (adr_type == TypePtr::NULL_PTR) {
      set_map(old_map);
      set_sp(old_sp);
--- 2364,11 ---
  
    if (!can_access_non_heap) {
      decorators |= IN_HEAP;
    }
  
!   Node* val = is_store ? argument(4 + (type == T_INLINE_TYPE ? 1 : 0)) : NULL;
  
    const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
    if (adr_type == TypePtr::NULL_PTR) {
      set_map(old_map);
      set_sp(old_sp);

*** 2320,11 ***
      set_sp(old_sp);
      return false; // not supported
    }
  
    bool mismatched = false;
!   BasicType bt = alias_type->basic_type();
    if (bt != T_ILLEGAL) {
      assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
      if (bt == T_BYTE && adr_type->isa_aryptr()) {
        // Alias type doesn't differentiate between byte[] and boolean[]).
        // Use address type to get the element type.
--- 2385,35 ---
      set_sp(old_sp);
      return false; // not supported
    }
  
    bool mismatched = false;
!   BasicType bt = T_ILLEGAL;
+   ciField* field = NULL;
+   if (adr_type->isa_instptr()) {
+     const TypeInstPtr* instptr = adr_type->is_instptr();
+     ciInstanceKlass* k = instptr->klass()->as_instance_klass();
+     int off = instptr->offset();
+     if (instptr->const_oop() != NULL &&
+         instptr->klass() == ciEnv::current()->Class_klass() &&
+         instptr->offset() >= (instptr->klass()->as_instance_klass()->size_helper() * wordSize)) {
+       k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
+       field = k->get_field_by_offset(off, true);
+     } else {
+       field = k->get_non_flattened_field_by_offset(off);
+     }
+     if (field != NULL) {
+       bt = field->layout_type();
+     }
+     assert(bt == alias_type->basic_type() || bt == T_INLINE_TYPE, "should match");
+     if (field != NULL && bt == T_INLINE_TYPE && !field->is_flattened()) {
+       bt = T_OBJECT;
+     }
+   } else {
+     bt = alias_type->basic_type();
+   }
+ 
    if (bt != T_ILLEGAL) {
      assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
      if (bt == T_BYTE && adr_type->isa_aryptr()) {
        // Alias type doesn't differentiate between byte[] and boolean[]).
        // Use address type to get the element type.

*** 2343,12 ***
      mismatched = (bt != type);
    } else if (alias_type->adr_type()->isa_oopptr()) {
      mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
    }
  
    old_map->destruct(&_gvn);
!   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
  
    if (mismatched) {
      decorators |= C2_MISMATCHED;
    }
  
--- 2432,37 ---
      mismatched = (bt != type);
    } else if (alias_type->adr_type()->isa_oopptr()) {
      mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
    }
  
+   if (type == T_INLINE_TYPE) {
+     if (adr_type->isa_instptr()) {
+       if (field == NULL || field->type() != inline_klass) {
+         mismatched = true;
+       }
+     } else if (adr_type->isa_aryptr()) {
+       const Type* elem = adr_type->is_aryptr()->elem();
+       if (!elem->isa_inlinetype()) {
+         mismatched = true;
+       } else if (elem->inline_klass() != inline_klass) {
+         mismatched = true;
+       }
+     } else {
+       mismatched = true;
+     }
+     if (is_store) {
+       const Type* val_t = _gvn.type(val);
+       if (!val_t->isa_inlinetype() || val_t->inline_klass() != inline_klass) {
+         set_map(old_map);
+         set_sp(old_sp);
+         return false;
+       }
+     }
+   }
+ 
    old_map->destruct(&_gvn);
!   assert(!mismatched || type == T_INLINE_TYPE || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
  
    if (mismatched) {
      decorators |= C2_MISMATCHED;
    }
  

*** 2356,14 ***
    const Type *value_type = Type::get_const_basic_type(type);
  
    // Figure out the memory ordering.
    decorators |= mo_decorator_for_access_kind(kind);
  
!   if (!is_store && type == T_OBJECT) {
!     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
!     if (tjp != NULL) {
!       value_type = tjp;
      }
    }
  
    receiver = null_check(receiver);
    if (stopped()) {
--- 2470,18 ---
    const Type *value_type = Type::get_const_basic_type(type);
  
    // Figure out the memory ordering.
    decorators |= mo_decorator_for_access_kind(kind);
  
!   if (!is_store) {
!     if (type == T_OBJECT) {
!       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
!       if (tjp != NULL) {
+         value_type = tjp;
+       }
+     } else if (type == T_INLINE_TYPE) {
+       value_type = NULL;
      }
    }
  
    receiver = null_check(receiver);
    if (stopped()) {

*** 2375,18 ***
    // from intended ones in this API.
  
    if (!is_store) {
      Node* p = NULL;
      // Try to constant fold a load from a constant field
!     ciField* field = alias_type->field();
!     if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
        // final or stable field
        p = make_constant_from_field(field, heap_base_oop);
      }
  
      if (p == NULL) { // Could not constant fold the load
!       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
        // Normalize the value returned by getBoolean in the following cases
        if (type == T_BOOLEAN &&
            (mismatched ||
             heap_base_oop == top() ||                  // - heap_base_oop is NULL or
             (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
--- 2493,33 ---
    // from intended ones in this API.
  
    if (!is_store) {
      Node* p = NULL;
      // Try to constant fold a load from a constant field
! 
!     if (heap_base_oop != top() && field != NULL && field->is_constant() && !field->is_flattened() && !mismatched) {
        // final or stable field
        p = make_constant_from_field(field, heap_base_oop);
      }
  
      if (p == NULL) { // Could not constant fold the load
!       if (type == T_INLINE_TYPE) {
+         if (adr_type->isa_instptr() && !mismatched) {
+           ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
+           int offset = adr_type->is_instptr()->offset();
+           p = InlineTypeNode::make_from_flattened(this, inline_klass, base, base, holder, offset, decorators);
+         } else {
+           p = InlineTypeNode::make_from_flattened(this, inline_klass, base, adr, NULL, 0, decorators);
+         }
+       } else {
+         p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
+         const TypeOopPtr* ptr = value_type->make_oopptr();
+         if (ptr != NULL && ptr->is_inlinetypeptr()) {
+           // Load a non-flattened inline type from memory
+           p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass(), !ptr->maybe_null());
+         }
+       }
        // Normalize the value returned by getBoolean in the following cases
        if (type == T_BOOLEAN &&
            (mismatched ||
             heap_base_oop == top() ||                  // - heap_base_oop is NULL or
             (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL

*** 2420,13 ***
      if (bt == T_ADDRESS) {
        // Repackage the long as a pointer.
        val = ConvL2X(val);
        val = gvn().transform(new CastX2PNode(val));
      }
!     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
    }
  
    return true;
  }
  
  //----------------------------inline_unsafe_load_store----------------------------
  // This method serves a couple of different customers (depending on LoadStoreKind):
--- 2553,65 ---
      if (bt == T_ADDRESS) {
        // Repackage the long as a pointer.
        val = ConvL2X(val);
        val = gvn().transform(new CastX2PNode(val));
      }
!     if (type == T_INLINE_TYPE) {
+       if (adr_type->isa_instptr() && !mismatched) {
+         ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
+         int offset = adr_type->is_instptr()->offset();
+         val->as_InlineTypeBase()->store_flattened(this, base, base, holder, offset, decorators);
+       } else {
+         val->as_InlineTypeBase()->store_flattened(this, base, adr, NULL, 0, decorators);
+       }
+     } else {
+       access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
+     }
+   }
+ 
+   if (argument(1)->is_InlineType() && is_store) {
+     Node* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(base)->inline_klass());
+     value = value->as_InlineType()->make_larval(this, false);
+     replace_in_map(argument(1), value);
+   }
+ 
+   return true;
+ }
+ 
+ bool LibraryCallKit::inline_unsafe_make_private_buffer() {
+   Node* receiver = argument(0);
+   Node* value = argument(1);
+   if (!value->is_InlineType()) {
+     return false;
    }
  
+   receiver = null_check(receiver);
+   if (stopped()) {
+     return true;
+   }
+ 
+   set_result(value->as_InlineType()->make_larval(this, true));
+   return true;
+ }
+ 
+ bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
+   Node* receiver = argument(0);
+   Node* buffer = argument(1);
+   if (!buffer->is_InlineType()) {
+     return false;
+   }
+   InlineTypeNode* vt = buffer->as_InlineType();
+   if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_inlinetype()->larval()) {
+     return false;
+   }
+ 
+   receiver = null_check(receiver);
+   if (stopped()) {
+     return true;
+   }
+ 
+   set_result(vt->finish_larval(this));
    return true;
  }
  
  //----------------------------inline_unsafe_load_store----------------------------
  // This method serves a couple of different customers (depending on LoadStoreKind):

*** 2631,10 ***
--- 2816,23 ---
    int alias_idx = C->get_alias_index(adr_type);
  
    if (is_reference_type(type)) {
      decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
  
+     if (oldval != NULL && oldval->is_InlineType()) {
+       // Re-execute the unsafe access if allocation triggers deoptimization.
+       PreserveReexecuteState preexecs(this);
+       jvms()->set_should_reexecute(true);
+       oldval = oldval->as_InlineType()->buffer(this)->get_oop();
+     }
+     if (newval != NULL && newval->is_InlineType()) {
+       // Re-execute the unsafe access if allocation triggers deoptimization.
+       PreserveReexecuteState preexecs(this);
+       jvms()->set_should_reexecute(true);
+       newval = newval->as_InlineType()->buffer(this)->get_oop();
+     }
+ 
      // Transformation of a value which could be NULL pointer (CastPP #NULL)
      // could be delayed during Parse (for example, in adjust_map_after_if()).
      // Execute transformation here to avoid barrier generation in such case.
      if (_gvn.type(newval) == TypePtr::NULL_PTR)
        newval = _gvn.makecon(TypePtr::NULL_PTR);

*** 2789,12 ***
      Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
      Node* bits = intcon(InstanceKlass::fully_initialized);
      test = _gvn.transform(new SubINode(inst, bits));
      // The 'test' is non-zero if we need to take a slow path.
    }
! 
!   Node* obj = new_instance(kls, test);
    set_result(obj);
    return true;
  }
  
  //------------------------inline_native_time_funcs--------------
--- 2987,17 ---
      Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
      Node* bits = intcon(InstanceKlass::fully_initialized);
      test = _gvn.transform(new SubINode(inst, bits));
      // The 'test' is non-zero if we need to take a slow path.
    }
!   Node* obj = NULL;
!   ciKlass* klass = _gvn.type(kls)->is_klassptr()->klass();
+   if (klass->is_inlinetype()) {
+     obj = InlineTypeNode::make_default(_gvn, klass->as_inline_klass());
+   } else {
+     obj = new_instance(kls, test);
+   }
    set_result(obj);
    return true;
  }
  
  //------------------------inline_native_time_funcs--------------

*** 2938,19 ***
    Node* junk = NULL;
    set_result(generate_current_thread(junk));
    return true;
  }
  
- //---------------------------load_mirror_from_klass----------------------------
- // Given a klass oop, load its java mirror (a java.lang.Class oop).
- Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
-   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
-   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
-   // mirror = ((OopHandle)mirror)->resolve();
-   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
- }
- 
  //-----------------------load_klass_from_mirror_common-------------------------
  // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
  // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
  // and branch to the given path on the region.
  // If never_see_null, take an uncommon trap on null, so we can optimistically
--- 3141,10 ---

*** 2989,10 ***
--- 3183,11 ---
    Node* mbit = _gvn.transform(new AndINode(mods, mask));
    Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
    Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
    return generate_fair_guard(bol, region);
  }
+ 
  Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
    return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
  }
  Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
    return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);

*** 3182,10 ***
--- 3377,39 ---
    C->set_has_split_ifs(true); // Has chance for split-if optimization
    set_result(region, phi);
    return true;
  }
  
+ //-------------------------inline_primitive_Class_conversion-------------------
+ // public Class<T> java.lang.Class.asPrimaryType();
+ // public Class<T> java.lang.Class.asValueType()
+ bool LibraryCallKit::inline_primitive_Class_conversion(vmIntrinsics::ID id) {
+   Node* mirror = argument(0); // Receiver Class
+   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
+   if (mirror_con == NULL) {
+     return false;
+   }
+ 
+   bool is_val_mirror = true;
+   ciType* tm = mirror_con->java_mirror_type(&is_val_mirror);
+   if (tm != NULL) {
+     Node* result = mirror;
+     if (id == vmIntrinsics::_asPrimaryType && is_val_mirror) {
+       result = _gvn.makecon(TypeInstPtr::make(tm->as_inline_klass()->ref_mirror()));
+     } else if (id == vmIntrinsics::_asValueType) {
+       if (!tm->is_inlinetype()) {
+         return false; // Throw UnsupportedOperationException
+       } else if (!is_val_mirror) {
+         result = _gvn.makecon(TypeInstPtr::make(tm->as_inline_klass()->val_mirror()));
+       }
+     }
+     set_result(result);
+     return true;
+   }
+   return false;
+ }
+ 
  //-------------------------inline_Class_cast-------------------
  bool LibraryCallKit::inline_Class_cast() {
    Node* mirror = argument(0); // Class
    Node* obj    = argument(1);
    const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();

*** 3193,24 ***
      return false;  // dead path (mirror->is_top()).
    }
    if (obj == NULL || obj->is_top()) {
      return false;  // dead path
    }
!   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
  
    // First, see if Class.cast() can be folded statically.
    // java_mirror_type() returns non-null for compile-time Class constants.
!   ciType* tm = mirror_con->java_mirror_type();
!   if (tm != NULL && tm->is_klass() &&
!       tp != NULL && tp->klass() != NULL) {
!     if (!tp->klass()->is_loaded()) {
        // Don't use intrinsic when class is not loaded.
        return false;
      } else {
!       int static_res = C->static_subtype_check(tm->as_klass(), tp->klass());
        if (static_res == Compile::SSC_always_true) {
          // isInstance() is true - fold the code.
          set_result(obj);
          return true;
        } else if (static_res == Compile::SSC_always_false) {
          // Don't use intrinsic, have to throw ClassCastException.
          // If the reference is null, the non-intrinsic bytecode will
--- 3417,35 ---
      return false;  // dead path (mirror->is_top()).
    }
    if (obj == NULL || obj->is_top()) {
      return false;  // dead path
    }
!   ciKlass* obj_klass = NULL;
+   const Type* obj_t = _gvn.type(obj);
+   if (obj->is_InlineType()) {
+     obj_klass = obj_t->inline_klass();
+   } else if (obj_t->isa_oopptr()) {
+     obj_klass = obj_t->is_oopptr()->klass();
+   }
  
    // First, see if Class.cast() can be folded statically.
    // java_mirror_type() returns non-null for compile-time Class constants.
!   bool requires_null_check = false;
!   ciType* tm = mirror_con->java_mirror_type(&requires_null_check);
!   // Check for null if casting to QMyValue
!   requires_null_check &= !obj->is_InlineType();
+   if (tm != NULL && tm->is_klass() && obj_klass != NULL) {
+     if (!obj_klass->is_loaded()) {
        // Don't use intrinsic when class is not loaded.
        return false;
      } else {
!       int static_res = C->static_subtype_check(tm->as_klass(), obj_klass);
        if (static_res == Compile::SSC_always_true) {
          // isInstance() is true - fold the code.
+         if (requires_null_check) {
+           obj = null_check(obj);
+         }
          set_result(obj);
          return true;
        } else if (static_res == Compile::SSC_always_false) {
          // Don't use intrinsic, have to throw ClassCastException.
          // If the reference is null, the non-intrinsic bytecode will

*** 3227,35 ***
  
    // Generate dynamic checks.
    // Class.cast() is java implementation of _checkcast bytecode.
    // Do checkcast (Parse::do_checkcast()) optimizations here.
  
    mirror = null_check(mirror);
    // If mirror is dead, only null-path is taken.
    if (stopped()) {
      return true;
    }
  
    // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
!   enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
    RegionNode* region = new RegionNode(PATH_LIMIT);
    record_for_igvn(region);
  
    // Now load the mirror's klass metaobject, and null-check it.
    // If kls is null, we have a primitive mirror and
    // nothing is an instance of a primitive type.
    Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
  
    Node* res = top();
    if (!stopped()) {
      Node* bad_type_ctrl = top();
      // Do checkcast optimizations.
      res = gen_checkcast(obj, kls, &bad_type_ctrl);
      region->init_req(_bad_type_path, bad_type_ctrl);
    }
    if (region->in(_prim_path) != top() ||
!       region->in(_bad_type_path) != top()) {
      // Let Interpreter throw ClassCastException.
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(region));
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_maybe_recompile);
--- 3462,60 ---
  
    // Generate dynamic checks.
    // Class.cast() is java implementation of _checkcast bytecode.
    // Do checkcast (Parse::do_checkcast()) optimizations here.
  
+   if (requires_null_check) {
+     obj = null_check(obj);
+   }
    mirror = null_check(mirror);
    // If mirror is dead, only null-path is taken.
    if (stopped()) {
      return true;
    }
  
    // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
!   enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
    RegionNode* region = new RegionNode(PATH_LIMIT);
    record_for_igvn(region);
  
    // Now load the mirror's klass metaobject, and null-check it.
    // If kls is null, we have a primitive mirror and
    // nothing is an instance of a primitive type.
    Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
  
    Node* res = top();
    if (!stopped()) {
+     if (EnableValhalla && !obj->is_InlineType() && !requires_null_check) {
+       // Check if we are casting to QMyValue
+       Node* ctrl_val_mirror = generate_fair_guard(is_val_mirror(mirror), NULL);
+       if (ctrl_val_mirror != NULL) {
+         RegionNode* r = new RegionNode(3);
+         record_for_igvn(r);
+         r->init_req(1, control());
+ 
+         // Casting to QMyValue, check for null
+         set_control(ctrl_val_mirror);
+         { // PreserveJVMState because null check replaces obj in map
+           PreserveJVMState pjvms(this);
+           Node* null_ctr = top();
+           null_check_oop(obj, &null_ctr);
+           region->init_req(_npe_path, null_ctr);
+           r->init_req(2, control());
+         }
+         set_control(_gvn.transform(r));
+       }
+     }
+ 
      Node* bad_type_ctrl = top();
      // Do checkcast optimizations.
      res = gen_checkcast(obj, kls, &bad_type_ctrl);
      region->init_req(_bad_type_path, bad_type_ctrl);
    }
    if (region->in(_prim_path) != top() ||
!       region->in(_bad_type_path) != top() ||
+       region->in(_npe_path) != top()) {
      // Let Interpreter throw ClassCastException.
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(region));
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_maybe_recompile);

*** 3288,12 ***
--- 3548,14 ---
      _both_ref_path,             // {N,N} & subtype check loses => false
      PATH_LIMIT
    };
  
    RegionNode* region = new RegionNode(PATH_LIMIT);
+   RegionNode* prim_region = new RegionNode(2);
    Node*       phi    = new PhiNode(region, TypeInt::BOOL);
    record_for_igvn(region);
+   record_for_igvn(prim_region);
  
    const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
    const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
    int class_klass_offset = java_lang_Class::klass_offset();
  

*** 3314,34 ***
    bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
    for (which_arg = 0; which_arg <= 1; which_arg++) {
      Node* kls = klasses[which_arg];
      Node* null_ctl = top();
      kls = null_check_oop(kls, &null_ctl, never_see_null);
!     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
!     region->init_req(prim_path, null_ctl);
      if (stopped())  break;
      klasses[which_arg] = kls;
    }
  
    if (!stopped()) {
      // now we have two reference types, in klasses[0..1]
      Node* subk   = klasses[1];  // the argument to isAssignableFrom
      Node* superk = klasses[0];  // the receiver
      region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
      // now we have a successful reference subtype check
      region->set_req(_ref_subtype_path, control());
    }
  
    // If both operands are primitive (both klasses null), then
    // we must return true when they are identical primitives.
    // It is convenient to test this after the first null klass check.
!   set_control(region->in(_prim_0_path)); // go back to first null check
    if (!stopped()) {
      // Since superc is primitive, make a guard for the superc==subc case.
      Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
      Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
!     generate_guard(bol_eq, region, PROB_FAIR);
      if (region->req() == PATH_LIMIT+1) {
        // A guard was added.  If the added guard is taken, superc==subc.
        region->swap_edges(PATH_LIMIT, _prim_same_path);
        region->del_req(PATH_LIMIT);
      }
--- 3576,41 ---
    bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
    for (which_arg = 0; which_arg <= 1; which_arg++) {
      Node* kls = klasses[which_arg];
      Node* null_ctl = top();
      kls = null_check_oop(kls, &null_ctl, never_see_null);
!     if (which_arg == 0) {
!       prim_region->init_req(1, null_ctl);
+     } else {
+       region->init_req(_prim_1_path, null_ctl);
+     }
      if (stopped())  break;
      klasses[which_arg] = kls;
    }
  
    if (!stopped()) {
      // now we have two reference types, in klasses[0..1]
      Node* subk   = klasses[1];  // the argument to isAssignableFrom
      Node* superk = klasses[0];  // the receiver
      region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
+     // If superc is an inline mirror, we also need to check if superc == subc because LMyValue
+     // is not a subtype of QMyValue but due to subk == superk the subtype check will pass.
+     generate_fair_guard(is_val_mirror(args[0]), prim_region);
      // now we have a successful reference subtype check
      region->set_req(_ref_subtype_path, control());
    }
  
    // If both operands are primitive (both klasses null), then
    // we must return true when they are identical primitives.
    // It is convenient to test this after the first null klass check.
!   // This path is also used if superc is a value mirror.
+   set_control(_gvn.transform(prim_region));
    if (!stopped()) {
      // Since superc is primitive, make a guard for the superc==subc case.
      Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
      Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
!     generate_fair_guard(bol_eq, region);
      if (region->req() == PATH_LIMIT+1) {
        // A guard was added.  If the added guard is taken, superc==subc.
        region->swap_edges(PATH_LIMIT, _prim_same_path);
        region->del_req(PATH_LIMIT);
      }

*** 3368,59 ***
    set_result(_gvn.transform(phi));
    return true;
  }
  
  //---------------------generate_array_guard_common------------------------
! Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
-                                                   bool obj_array, bool not_array) {
  
    if (stopped()) {
      return NULL;
    }
  
-   // If obj_array/non_array==false/false:
-   // Branch around if the given klass is in fact an array (either obj or prim).
-   // If obj_array/non_array==false/true:
-   // Branch around if the given klass is not an array klass of any kind.
-   // If obj_array/non_array==true/true:
-   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
-   // If obj_array/non_array==true/false:
-   // Branch around if the kls is an oop array (Object[] or subtype)
-   //
    // Like generate_guard, adds a new path onto the region.
    jint  layout_con = 0;
    Node* layout_val = get_layout_helper(kls, layout_con);
    if (layout_val == NULL) {
!     bool query = (obj_array
!                   ? Klass::layout_helper_is_objArray(layout_con)
!                   : Klass::layout_helper_is_array(layout_con));
!     if (query == not_array) {
        return NULL;                       // never a branch
      } else {                             // always a branch
        Node* always_branch = control();
        if (region != NULL)
          region->add_req(always_branch);
        set_control(top());
        return always_branch;
      }
    }
    // Now test the correct condition.
!   jint  nval = (obj_array
-                 ? (jint)(Klass::_lh_array_tag_type_value
-                    <<    Klass::_lh_array_tag_shift)
-                 : Klass::_lh_neutral_value);
    Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
-   BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
-   // invert the test if we are looking for a non-array
-   if (not_array)  btest = BoolTest(btest).negate();
    Node* bol = _gvn.transform(new BoolNode(cmp, btest));
    return generate_fair_guard(bol, region);
  }
  
  
  //-----------------------inline_native_newArray--------------------------
! // private static native Object java.lang.reflect.newArray(Class<?> componentType, int length);
  // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
  bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
    Node* mirror;
    Node* count_val;
    if (uninitialized) {
--- 3637,80 ---
    set_result(_gvn.transform(phi));
    return true;
  }
  
  //---------------------generate_array_guard_common------------------------
! Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {
  
    if (stopped()) {
      return NULL;
    }
  
    // Like generate_guard, adds a new path onto the region.
    jint  layout_con = 0;
    Node* layout_val = get_layout_helper(kls, layout_con);
    if (layout_val == NULL) {
!     bool query = 0;
!     switch(kind) {
!       case ObjectArray:    query = Klass::layout_helper_is_objArray(layout_con); break;
!       case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
+       case TypeArray:      query = Klass::layout_helper_is_typeArray(layout_con); break;
+       case FlatArray:      query = Klass::layout_helper_is_flatArray(layout_con); break;
+       case NonFlatArray:   query = !Klass::layout_helper_is_flatArray(layout_con); break;
+       case AnyArray:       query = Klass::layout_helper_is_array(layout_con); break;
+       case NonArray:       query = !Klass::layout_helper_is_array(layout_con); break;
+       default:
+         ShouldNotReachHere();
+     }
+     if (!query) {
        return NULL;                       // never a branch
      } else {                             // always a branch
        Node* always_branch = control();
        if (region != NULL)
          region->add_req(always_branch);
        set_control(top());
        return always_branch;
      }
    }
+   unsigned int value = 0;
+   BoolTest::mask btest = BoolTest::illegal;
+   switch(kind) {
+     case ObjectArray:
+     case NonObjectArray: {
+       value = Klass::_lh_array_tag_obj_value;
+       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
+       btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
+       break;
+     }
+     case TypeArray: {
+       value = Klass::_lh_array_tag_type_value;
+       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
+       btest = BoolTest::eq;
+       break;
+     }
+     case FlatArray:
+     case NonFlatArray: {
+       value = 0;
+       layout_val = _gvn.transform(new AndINode(layout_val, intcon(Klass::_lh_array_tag_vt_value_bit_inplace)));
+       btest = (kind == FlatArray) ? BoolTest::ne : BoolTest::eq;
+       break;
+     }
+     case AnyArray:    value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
+     case NonArray:    value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
+     default:
+       ShouldNotReachHere();
+   }
    // Now test the correct condition.
!   jint nval = (jint)value;
    Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
    Node* bol = _gvn.transform(new BoolNode(cmp, btest));
    return generate_fair_guard(bol, region);
  }
  
  
  //-----------------------inline_native_newArray--------------------------
! // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
  // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
  bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
    Node* mirror;
    Node* count_val;
    if (uninitialized) {

*** 3561,20 ***
      RegionNode* bailout = new RegionNode(1);
      record_for_igvn(bailout);
  
      // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
      // Bail out if that is so.
!     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
      if (not_objArray != NULL) {
        // Improve the klass node's type from the new optimistic assumption:
        ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
!       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
        Node* cast = new CastPPNode(klass_node, akls);
        cast->init_req(0, control());
        klass_node = _gvn.transform(cast);
      }
  
      // Bail out if either start or end is negative.
      generate_negative_guard(start, bailout, &start);
      generate_negative_guard(end,   bailout, &end);
  
      Node* length = end;
--- 3851,54 ---
      RegionNode* bailout = new RegionNode(1);
      record_for_igvn(bailout);
  
      // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
      // Bail out if that is so.
!     // An inline type array may have an object field that would require a
+     // write barrier. Conservatively, go to the slow path.
+     // TODO 8251971: Optimize for the case when flat src/dst are later found
+     // to not contain oops (i.e., move this check to the macro expansion phase).
+     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+     const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
+     ciKlass* klass = _gvn.type(klass_node)->is_klassptr()->klass();
+     bool exclude_flat = UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
+                         // Can src array be flat and contain oops?
+                         (orig_t == NULL || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
+                         // Can dest array be flat and contain oops?
+                         klass->can_be_inline_array_klass() && (!klass->is_flat_array_klass() || klass->as_flat_array_klass()->element_klass()->as_inline_klass()->contains_oops());
+     Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
      if (not_objArray != NULL) {
        // Improve the klass node's type from the new optimistic assumption:
        ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
!       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
        Node* cast = new CastPPNode(klass_node, akls);
        cast->init_req(0, control());
        klass_node = _gvn.transform(cast);
      }
  
+     Node* original_kls = load_object_klass(original);
+     // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
+     // loads/stores but it is legal only if we're sure the
+     // Arrays.copyOf would succeed. So we need all input arguments
+     // to the copyOf to be validated, including that the copy to the
+     // new array won't trigger an ArrayStoreException. That subtype
+     // check can be optimized if we know something on the type of
+     // the input array from type speculation.
+     if (_gvn.type(klass_node)->singleton() && !stopped()) {
+       ciKlass* subk   = _gvn.type(original_kls)->is_klassptr()->klass();
+       ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
+ 
+       int test = C->static_subtype_check(superk, subk);
+       if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
+         const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
+         if (t_original->speculative_type() != NULL) {
+           original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
+           original_kls = load_object_klass(original);
+         }
+       }
+     }
+ 
      // Bail out if either start or end is negative.
      generate_negative_guard(start, bailout, &start);
      generate_negative_guard(end,   bailout, &end);
  
      Node* length = end;

*** 3586,10 ***
--- 3910,42 ---
      // Without this the new_array would throw
      // NegativeArraySizeException but IllegalArgumentException is what
      // should be thrown
      generate_negative_guard(length, bailout, &length);
  
+     // Handle inline type arrays
+     bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
+     if (!stopped()) {
+       orig_t = _gvn.type(original)->isa_aryptr();
+       if (orig_t != NULL && orig_t->is_flat()) {
+         // Src is flat, check that dest is flat as well
+         if (exclude_flat) {
+           // Dest can't be flat, bail out
+           bailout->add_req(control());
+           set_control(top());
+         } else {
+           generate_non_flatArray_guard(klass_node, bailout);
+         }
+       } else if (UseFlatArray && (orig_t == NULL || !orig_t->is_not_flat()) &&
+                  // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
+                  ((!klass->is_flat_array_klass() && klass->can_be_inline_array_klass()) || !can_validate)) {
+         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
+         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
+         generate_flatArray_guard(original_kls, bailout);
+         if (orig_t != NULL) {
+           orig_t = orig_t->cast_to_not_flat();
+           original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
+         }
+       }
+       if (!can_validate) {
+         // No validation. The subtype check emitted at macro expansion time will not go to the slow
+         // path but call checkcast_arraycopy which cannot handle flat/null-free inline type arrays.
+         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
+         generate_fair_guard(null_free_array_test(klass_node), bailout);
+       }
+     }
+ 
      if (bailout->req() > 1) {
        PreserveJVMState pjvms(this);
        set_control(_gvn.transform(bailout));
        uncommon_trap(Deoptimization::Reason_intrinsic,
                      Deoptimization::Action_maybe_recompile);

*** 3605,34 ***
        // We know the copy is disjoint but we might not know if the
        // oop stores need checking.
        // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
        // This will fail a store-check if x contains any non-nulls.
  
-       // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
-       // loads/stores but it is legal only if we're sure the
-       // Arrays.copyOf would succeed. So we need all input arguments
-       // to the copyOf to be validated, including that the copy to the
-       // new array won't trigger an ArrayStoreException. That subtype
-       // check can be optimized if we know something on the type of
-       // the input array from type speculation.
-       if (_gvn.type(klass_node)->singleton()) {
-         ciKlass* subk   = _gvn.type(load_object_klass(original))->is_klassptr()->klass();
-         ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
- 
-         int test = C->static_subtype_check(superk, subk);
-         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
-           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
-           if (t_original->speculative_type() != NULL) {
-             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
-           }
-         }
-       }
- 
        bool validated = false;
        // Reason_class_check rather than Reason_intrinsic because we
        // want to intrinsify even if this traps.
!       if (!too_many_traps(Deoptimization::Reason_class_check)) {
          Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
  
          if (not_subtype_ctrl != top()) {
            PreserveJVMState pjvms(this);
            set_control(not_subtype_ctrl);
--- 3961,14 ---
        // We know the copy is disjoint but we might not know if the
        // oop stores need checking.
        // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
        // This will fail a store-check if x contains any non-nulls.
  
        bool validated = false;
        // Reason_class_check rather than Reason_intrinsic because we
        // want to intrinsify even if this traps.
!       if (can_validate) {
          Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
  
          if (not_subtype_ctrl != top()) {
            PreserveJVMState pjvms(this);
            set_control(not_subtype_ctrl);

*** 3645,11 ***
  
        if (!stopped()) {
          newcopy = new_array(klass_node, length, 0);  // no arguments to push
  
          ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
!                                                 load_object_klass(original), klass_node);
          if (!is_copyOfRange) {
            ac->set_copyof(validated);
          } else {
            ac->set_copyofrange(validated);
          }
--- 3981,11 ---
  
        if (!stopped()) {
          newcopy = new_array(klass_node, length, 0);  // no arguments to push
  
          ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
!                                                 original_kls, klass_node);
          if (!is_copyOfRange) {
            ac->set_copyof(validated);
          } else {
            ac->set_copyofrange(validated);
          }

*** 3767,21 ***
  
    RegionNode* result_reg = new RegionNode(PATH_LIMIT);
    PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
    PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
    PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
!   Node* obj = NULL;
    if (!is_static) {
      // Check for hashing null object
      obj = null_check_receiver();
      if (stopped())  return true;        // unconditionally null
      result_reg->init_req(_null_path, top());
      result_val->init_req(_null_path, top());
    } else {
      // Do a null check, and return zero if null.
      // System.identityHashCode(null) == 0
-     obj = argument(0);
      Node* null_ctl = top();
      obj = null_check_oop(obj, &null_ctl);
      result_reg->init_req(_null_path, null_ctl);
      result_val->init_req(_null_path, _gvn.intcon(0));
    }
--- 4103,25 ---
  
    RegionNode* result_reg = new RegionNode(PATH_LIMIT);
    PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
    PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
    PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
!   Node* obj = argument(0);
+ 
+   if (obj->is_InlineType() || gvn().type(obj)->is_inlinetypeptr()) {
+     return false;
+   }
+ 
    if (!is_static) {
      // Check for hashing null object
      obj = null_check_receiver();
      if (stopped())  return true;        // unconditionally null
      result_reg->init_req(_null_path, top());
      result_val->init_req(_null_path, top());
    } else {
      // Do a null check, and return zero if null.
      // System.identityHashCode(null) == 0
      Node* null_ctl = top();
      obj = null_check_oop(obj, &null_ctl);
      result_reg->init_req(_null_path, null_ctl);
      result_val->init_req(_null_path, _gvn.intcon(0));
    }

*** 3817,11 ***
    // the null check after castPP removal.
    Node* no_ctrl = NULL;
    Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  
    // Test the header to see if it is unlocked.
!   Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);
    Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
    Node *unlocked_val   = _gvn.MakeConX(markWord::unlocked_value);
    Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
    Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
  
--- 4157,12 ---
    // the null check after castPP removal.
    Node* no_ctrl = NULL;
    Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  
    // Test the header to see if it is unlocked.
!   // This also serves as a guard against inline types
+   Node *lock_mask      = _gvn.MakeConX(markWord::inline_type_mask_in_place);
    Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
    Node *unlocked_val   = _gvn.MakeConX(markWord::unlocked_value);
    Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
    Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
  

*** 3883,11 ***
  //---------------------------inline_native_getClass----------------------------
  // public final native Class<?> java.lang.Object.getClass();
  //
  // Build special case code for calls to getClass on an object.
  bool LibraryCallKit::inline_native_getClass() {
!   Node* obj = null_check_receiver();
    if (stopped())  return true;
    set_result(load_mirror_from_klass(load_object_klass(obj)));
    return true;
  }
  
--- 4224,20 ---
  //---------------------------inline_native_getClass----------------------------
  // public final native Class<?> java.lang.Object.getClass();
  //
  // Build special case code for calls to getClass on an object.
  bool LibraryCallKit::inline_native_getClass() {
!   Node* obj = argument(0);
+   if (obj->is_InlineTypeBase()) {
+     const Type* t = _gvn.type(obj);
+     if (t->maybe_null()) {
+       null_check(obj);
+     }
+     set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
+     return true;
+   }
+   obj = null_check_receiver();
    if (stopped())  return true;
    set_result(load_mirror_from_klass(load_object_klass(obj)));
    return true;
  }
  

*** 4221,21 ***
    // Set the reexecute bit for the interpreter to reexecute
    // the bytecode that invokes Object.clone if deoptimization happens.
    { PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);
  
!     Node* obj = null_check_receiver();
      if (stopped())  return true;
  
      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
  
      // If we are going to clone an instance, we need its exact type to
      // know the number and types of fields to convert the clone to
      // loads/stores. Maybe a speculative type can help us.
      if (!obj_type->klass_is_exact() &&
          obj_type->speculative_type() != NULL &&
!         obj_type->speculative_type()->is_instance_klass()) {
        ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
        if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
            !spec_ik->has_injected_fields()) {
          ciKlass* k = obj_type->klass();
          if (!k->is_instance_klass() ||
--- 4571,27 ---
    // Set the reexecute bit for the interpreter to reexecute
    // the bytecode that invokes Object.clone if deoptimization happens.
    { PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);
  
!     Node* obj = argument(0);
+     if (obj->is_InlineType()) {
+       return false;
+     }
+ 
+     obj = null_check_receiver();
      if (stopped())  return true;
  
      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
  
      // If we are going to clone an instance, we need its exact type to
      // know the number and types of fields to convert the clone to
      // loads/stores. Maybe a speculative type can help us.
      if (!obj_type->klass_is_exact() &&
          obj_type->speculative_type() != NULL &&
!         obj_type->speculative_type()->is_instance_klass() &&
+         !obj_type->speculative_type()->is_inlinetype()) {
        ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
        if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
            !spec_ik->has_injected_fields()) {
          ciKlass* k = obj_type->klass();
          if (!k->is_instance_klass() ||

*** 4263,64 ***
      PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
      PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
      record_for_igvn(result_reg);
  
      Node* obj_klass = load_object_klass(obj);
      Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
      if (array_ctl != NULL) {
        // It's an array.
        PreserveJVMState pjvms(this);
        set_control(array_ctl);
-       Node* obj_length = load_array_length(obj);
-       Node* obj_size  = NULL;
-       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
  
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
!       if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
!         // If it is an oop array, it requires very special treatment,
!         // because gc barriers are required when accessing the array.
!         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
!         if (is_obja != NULL) {
!           PreserveJVMState pjvms2(this);
!           set_control(is_obja);
-           // Generate a direct call to the right arraycopy function(s).
-           // Clones are always tightly coupled.
-           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
-           ac->set_clone_oop_array();
-           Node* n = _gvn.transform(ac);
-           assert(n == ac, "cannot disappear");
-           ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
- 
-           result_reg->init_req(_objArray_path, control());
-           result_val->init_req(_objArray_path, alloc_obj);
-           result_i_o ->set_req(_objArray_path, i_o());
-           result_mem ->set_req(_objArray_path, reset_memory());
-         }
        }
-       // Otherwise, there are no barriers to worry about.
-       // (We can dispense with card marks if we know the allocation
-       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
-       //  causes the non-eden paths to take compensating steps to
-       //  simulate a fresh allocation, so that no further
-       //  card marks are required in compiled code to initialize
-       //  the object.)
  
        if (!stopped()) {
!         copy_to_clone(obj, alloc_obj, obj_size, true);
! 
!         // Present the results of the copy.
!         result_reg->init_req(_array_path, control());
!         result_val->init_req(_array_path, alloc_obj);
!         result_i_o ->set_req(_array_path, i_o());
!         result_mem ->set_req(_array_path, reset_memory());
        }
      }
  
-     // We only go to the instance fast case code if we pass a number of guards.
-     // The paths which do not pass are accumulated in the slow_region.
-     RegionNode* slow_region = new RegionNode(1);
-     record_for_igvn(slow_region);
      if (!stopped()) {
        // It's an instance (we did array above).  Make the slow-path tests.
        // If this is a virtual call, we generate a funny guard.  We grab
        // the vtable entry corresponding to clone() from the target object.
        // If the target method which we are calling happens to be the
--- 4619,78 ---
      PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
      PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
      record_for_igvn(result_reg);
  
      Node* obj_klass = load_object_klass(obj);
+     // We only go to the fast case code if we pass a number of guards.
+     // The paths which do not pass are accumulated in the slow_region.
+     RegionNode* slow_region = new RegionNode(1);
+     record_for_igvn(slow_region);
+ 
      Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
      if (array_ctl != NULL) {
        // It's an array.
        PreserveJVMState pjvms(this);
        set_control(array_ctl);
  
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
!       const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
!       if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
!           obj_type->klass()->can_be_inline_array_klass() &&
!           (ary_ptr == NULL || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
!         // Flattened inline type array may have object field that would require a
!         // write barrier. Conservatively, go to slow path.
!         generate_flatArray_guard(obj_klass, slow_region);
        }
  
        if (!stopped()) {
!         Node* obj_length = load_array_length(obj);
!         Node* obj_size  = NULL;
!         Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
! 
!         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
!         if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
!           // If it is an oop array, it requires very special treatment,
+           // because gc barriers are required when accessing the array.
+           Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
+           if (is_obja != NULL) {
+             PreserveJVMState pjvms2(this);
+             set_control(is_obja);
+             // Generate a direct call to the right arraycopy function(s).
+             // Clones are always tightly coupled.
+             ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
+             ac->set_clone_oop_array();
+             Node* n = _gvn.transform(ac);
+             assert(n == ac, "cannot disappear");
+             ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
+ 
+             result_reg->init_req(_objArray_path, control());
+             result_val->init_req(_objArray_path, alloc_obj);
+             result_i_o ->set_req(_objArray_path, i_o());
+             result_mem ->set_req(_objArray_path, reset_memory());
+           }
+         }
+         // Otherwise, there are no barriers to worry about.
+         // (We can dispense with card marks if we know the allocation
+         //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
+         //  causes the non-eden paths to take compensating steps to
+         //  simulate a fresh allocation, so that no further
+         //  card marks are required in compiled code to initialize
+         //  the object.)
+ 
+         if (!stopped()) {
+           copy_to_clone(obj, alloc_obj, obj_size, true);
+ 
+           // Present the results of the copy.
+           result_reg->init_req(_array_path, control());
+           result_val->init_req(_array_path, alloc_obj);
+           result_i_o ->set_req(_array_path, i_o());
+           result_mem ->set_req(_array_path, reset_memory());
+         }
        }
      }
  
      if (!stopped()) {
        // It's an instance (we did array above).  Make the slow-path tests.
        // If this is a virtual call, we generate a funny guard.  We grab
        // the vtable entry corresponding to clone() from the target object.
        // If the target method which we are calling happens to be the

*** 4477,15 ***
      map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
      set_jvms(saved_jvms);
      _reexecute_sp = saved_reexecute_sp;
  
      // Remove the allocation from above the guards
!     CallProjections callprojs;
-     alloc->extract_projections(&callprojs, true);
      InitializeNode* init = alloc->initialization();
      Node* alloc_mem = alloc->in(TypeFunc::Memory);
!     C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
      C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
  
      // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
      // the allocation (i.e. is only valid if the allocation succeeds):
      // 1) replace CastIINode with AllocateArrayNode's length here
--- 4847,14 ---
      map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
      set_jvms(saved_jvms);
      _reexecute_sp = saved_reexecute_sp;
  
      // Remove the allocation from above the guards
!     CallProjections* callprojs = alloc->extract_projections(true);
      InitializeNode* init = alloc->initialization();
      Node* alloc_mem = alloc->in(TypeFunc::Memory);
!     C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
      C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
  
      // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
      // the allocation (i.e. is only valid if the allocation succeeds):
      // 1) replace CastIINode with AllocateArrayNode's length here

*** 4523,11 ***
      alloc->set_req(TypeFunc::I_O, i_o());
      Node *mem = reset_memory();
      set_all_memory(mem);
      alloc->set_req(TypeFunc::Memory, mem);
      set_control(init->proj_out_or_null(TypeFunc::Control));
!     set_i_o(callprojs.fallthrough_ioproj);
  
      // Update memory as done in GraphKit::set_output_for_allocation()
      const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
      const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
      if (ary_type->isa_aryptr() && length_type != NULL) {
--- 4892,11 ---
      alloc->set_req(TypeFunc::I_O, i_o());
      Node *mem = reset_memory();
      set_all_memory(mem);
      alloc->set_req(TypeFunc::Memory, mem);
      set_control(init->proj_out_or_null(TypeFunc::Control));
!     set_i_o(callprojs->fallthrough_ioproj);
  
      // Update memory as done in GraphKit::set_output_for_allocation()
      const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
      const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
      if (ary_type->isa_aryptr() && length_type != NULL) {

*** 4699,13 ***
--- 5068,17 ---
        }
        if (could_have_src && could_have_dest) {
          // If we can have both exact types, emit the missing guards
          if (could_have_src && !src_spec) {
            src = maybe_cast_profiled_obj(src, src_k, true);
+           src_type = _gvn.type(src);
+           top_src = src_type->isa_aryptr();
          }
          if (could_have_dest && !dest_spec) {
            dest = maybe_cast_profiled_obj(dest, dest_k, true);
+           dest_type = _gvn.type(dest);
+           top_dest = dest_type->isa_aryptr();
          }
        }
      }
    }
  

*** 4717,12 ***
    }
  
    bool negative_length_guard_generated = false;
  
    if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
!       can_emit_guards &&
-       !src->is_top() && !dest->is_top()) {
      // validate arguments: enables transformation of the ArrayCopyNode
      validated = true;
  
      RegionNode* slow_region = new RegionNode(1);
      record_for_igvn(slow_region);
--- 5090,11 ---
    }
  
    bool negative_length_guard_generated = false;
  
    if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
!       can_emit_guards && !src->is_top() && !dest->is_top()) {
      // validate arguments: enables transformation of the ArrayCopyNode
      validated = true;
  
      RegionNode* slow_region = new RegionNode(1);
      record_for_igvn(slow_region);

*** 4761,30 ***
  
      // (9) each element of an oop array must be assignable
      Node* dest_klass = load_object_klass(dest);
      if (src != dest) {
        Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
  
!       if (not_subtype_ctrl != top()) {
!         PreserveJVMState pjvms(this);
!         set_control(not_subtype_ctrl);
!         uncommon_trap(Deoptimization::Reason_intrinsic,
!                       Deoptimization::Action_make_not_entrant);
!         assert(stopped(), "Should be stopped");
        }
      }
      {
        PreserveJVMState pjvms(this);
        set_control(_gvn.transform(slow_region));
        uncommon_trap(Deoptimization::Reason_intrinsic,
                      Deoptimization::Action_make_not_entrant);
        assert(stopped(), "Should be stopped");
      }
- 
-     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
-     const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
-     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
    }
  
    arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
  
    if (stopped()) {
--- 5133,51 ---
  
      // (9) each element of an oop array must be assignable
      Node* dest_klass = load_object_klass(dest);
      if (src != dest) {
        Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
+       slow_region->add_req(not_subtype_ctrl);
+     }
  
!     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
!     const Type* toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
!     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
!     src_type = _gvn.type(src);
!     top_src  = src_type->isa_aryptr();
! 
+     // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
+     if (!stopped() && UseFlatArray) {
+       // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
+       assert(top_dest == NULL || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
+       if (top_src != NULL && top_src->is_flat()) {
+         // Src is flat, check that dest is flat as well
+         if (top_dest != NULL && !top_dest->is_flat()) {
+           generate_non_flatArray_guard(dest_klass, slow_region);
+           // Since dest is flat and src <: dest, dest must have the same type as src.
+           top_dest = TypeOopPtr::make_from_klass(top_src->klass())->isa_aryptr();
+           assert(top_dest->is_flat(), "dest must be flat");
+           dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
+         }
+       } else if (top_src == NULL || !top_src->is_not_flat()) {
+         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
+         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
+         assert(top_dest == NULL || !top_dest->is_flat(), "dest array must not be flat");
+         generate_flatArray_guard(load_object_klass(src), slow_region);
+         if (top_src != NULL) {
+           top_src = top_src->cast_to_not_flat();
+           src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
+         }
        }
      }
+ 
      {
        PreserveJVMState pjvms(this);
        set_control(_gvn.transform(slow_region));
        uncommon_trap(Deoptimization::Reason_intrinsic,
                      Deoptimization::Action_make_not_entrant);
        assert(stopped(), "Should be stopped");
      }
    }
  
    arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
  
    if (stopped()) {
< prev index next >